Rewrote backend in Rust

2022-10-10 23:07:40 +02:00
parent fccb823801
commit 89b6513905
75 changed files with 2508 additions and 7490 deletions

backend/src/config.rs (new file)

@@ -0,0 +1,35 @@
use lazy_static::lazy_static;
lazy_static! {
pub static ref CONFIG: Config = Config::read();
}
pub struct Config {
pub gitlab_id: String,
pub gitlab_secret: String,
pub gitlab_url: String,
pub gitlab_api_url: String,
pub gitlab_redirect_url: String,
pub smtp_server: String,
pub smtp_port: u16,
pub smtp_user: String,
pub smtp_password: String
}
impl Config {
fn read() -> Self {
let config = std::fs::read_to_string("config.json").expect("Failed to read config.json");
let config = json::parse(config.as_str()).expect("Failed to parse config.json");
Self {
gitlab_id: config["gitlab_id"].as_str().expect("Config is missing 'gitlab_id'").to_string(),
gitlab_secret: config["gitlab_secret"].as_str().expect("Config is missing 'gitlab_secret'").to_string(),
gitlab_url: config["gitlab_url"].as_str().expect("Config is missing 'gitlab_url'").to_string(),
gitlab_api_url: config["gitlab_api_url"].as_str().expect("Config is missing 'gitlab_api_url'").to_string(),
gitlab_redirect_url: config["gitlab_redirect_url"].as_str().expect("Config is missing 'gitlab_redirect_url'").to_string(),
smtp_server: config["smtp_server"].as_str().expect("Config is missing 'smtp_server'").to_string(),
smtp_port: config["smtp_port"].as_u16().expect("Config is missing 'smtp_port'"),
smtp_user: config["smtp_user"].as_str().expect("Config is missing 'smtp_user'").to_string(),
smtp_password: config["smtp_password"].as_str().expect("Config is missing 'smtp_password'").to_string()
}
}
}
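
For reference, Config::read above only checks that each key exists and has the right type; it expects a config.json in the working directory shaped roughly like the following (all values are placeholders, not taken from the commit). Every field is read with as_str() except smtp_port, which must fit in a u16.
{
    "gitlab_id": "<GitLab application id>",
    "gitlab_secret": "<GitLab application secret>",
    "gitlab_url": "https://gitlab.example.com",
    "gitlab_api_url": "https://gitlab.example.com",
    "gitlab_redirect_url": "https://files.example.com",
    "smtp_server": "smtp.example.com",
    "smtp_port": 587,
    "smtp_user": "fileserver@example.com",
    "smtp_password": "<SMTP password>"
}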


@@ -1,110 +0,0 @@
#pragma clang diagnostic push
#pragma ide diagnostic ignored "performance-unnecessary-value-param"
#pragma ide diagnostic ignored "readability-convert-member-functions-to-static"
#include "controllers.h"
#include "dto/dto.h"
namespace api {
void admin::users(req_type, cbk_type cbk) {
db::MapperUser user_mapper(drogon::app().getDbClient());
std::vector<dto::Responses::GetUsersEntry> entries;
auto users = user_mapper.findAll();
for (const db::User& user : users)
entries.emplace_back(
user.getValueOfId(),
user.getValueOfGitlab() != 0,
db::User_getEnumTfaType(user) != db::tfaTypes::NONE,
user.getValueOfName(),
db::User_getEnumRole(user)
);
cbk(dto::Responses::get_admin_users_res(entries));
}
void admin::set_role(req_type req, cbk_type cbk) {
Json::Value& json = *req->jsonObject();
try {
uint64_t user_id = dto::json_get<uint64_t>(json, "user").value();
db::UserRole role = (db::UserRole)dto::json_get<int>(json, "role").value();
db::MapperUser user_mapper(drogon::app().getDbClient());
auto user = user_mapper.findByPrimaryKey(user_id);
user.setRole(role);
user_mapper.update(user);
cbk(dto::Responses::get_success_res());
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void admin::logout(req_type req, cbk_type cbk) {
Json::Value& json = *req->jsonObject();
try {
uint64_t user_id = dto::json_get<uint64_t>(json, "user").value();
db::MapperUser user_mapper(drogon::app().getDbClient());
auto user = user_mapper.findByPrimaryKey(user_id);
auth::revoke_all(user);
cbk(dto::Responses::get_success_res());
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void admin::delete_user(req_type req, cbk_type cbk) {
Json::Value& json = *req->jsonObject();
msd::channel<std::string> chan;
try {
uint64_t user_id = dto::json_get<uint64_t>(json, "user").value();
db::MapperUser user_mapper(drogon::app().getDbClient());
auto user = user_mapper.findByPrimaryKey(user_id);
auth::revoke_all(user);
std::unique_lock lock(*fs::get_user_mutex(user.getValueOfId()));
fs::delete_node(fs::get_node(user.getValueOfRootId()).value(), chan, true);
user_mapper.deleteOne(user);
cbk(dto::Responses::get_success_res());
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void admin::disable_2fa(req_type req, cbk_type cbk) {
Json::Value& json = *req->jsonObject();
try {
uint64_t user_id = dto::json_get<uint64_t>(json, "user").value();
db::MapperUser user_mapper(drogon::app().getDbClient());
auto user = user_mapper.findByPrimaryKey(user_id);
user.setTfaType(db::tfaTypes::NONE);
user_mapper.update(user);
cbk(dto::Responses::get_success_res());
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void admin::is_admin(req_type, cbk_type cbk) {
cbk(dto::Responses::get_success_res());
}
void admin::get_token(req_type, cbk_type cbk, uint64_t user) {
db::MapperUser user_mapper(drogon::app().getDbClient());
try {
const auto &db_user = user_mapper.findByPrimaryKey(user);
const std::string &token = auth::get_token(db_user);
cbk(dto::Responses::get_login_res(token));
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Bad user"));
}
}
}
#pragma clang diagnostic pop


@@ -1,99 +0,0 @@
#pragma clang diagnostic push
#pragma ide diagnostic ignored "readability-make-member-function-const"
#pragma ide diagnostic ignored "readability-convert-member-functions-to-static"
#include <botan/base32.h>
#include <botan/base64.h>
#include <qrcodegen.hpp>
#include <opencv2/opencv.hpp>
#include "controllers/controllers.h"
#include "db/db.h"
#include "dto/dto.h"
std::string create_totp_qrcode(const db::User& user, const std::string& b32_secret) {
constexpr int qrcode_pixel_size = 4;
std::stringstream code_ss;
code_ss << "otpauth://totp/MFileserver:"
<< user.getValueOfName()
<< "?secret="
<< b32_secret
<< "&issuer=MFileserver";
auto code = qrcodegen::QrCode::encodeText(code_ss.str().c_str(), qrcodegen::QrCode::Ecc::MEDIUM);
const int mod_count = code.getSize();
const int row_size = qrcode_pixel_size * mod_count;
cv::Mat image(mod_count, mod_count, CV_8UC1), scaled_image;
std::vector<uint8_t> image_encoded;
for (int y = 0; y < mod_count; y++) for (int x = 0; x < mod_count; x++)
image.at<uint8_t>(x, y) = code.getModule(x, y) ? 0 : 0xff;
cv::resize(image, scaled_image, cv::Size(), qrcode_pixel_size, qrcode_pixel_size, cv::INTER_NEAREST);
cv::imencode(".png", scaled_image, image_encoded);
return "data:image/png;base64," + Botan::base64_encode(image_encoded);
}
namespace api {
void auth::tfa_setup(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
Json::Value &json = *req->jsonObject();
try {
bool mail = dto::json_get<bool>(json, "mail").value();
auto secret_uchar = rng->random_vec(32);
std::vector<char> secret(secret_uchar.data(), secret_uchar.data()+32);
user.setTfaSecret(secret);
db::MapperUser user_mapper(drogon::app().getDbClient());
user_mapper.update(user);
if (mail) {
send_mail(user);
cbk(dto::Responses::get_success_res());
} else {
std::string b32_secret = Botan::base32_encode(secret_uchar);
b32_secret.erase(std::remove(b32_secret.begin(), b32_secret.end(), '='), b32_secret.end());
std::string code = create_totp_qrcode(user, b32_secret);
cbk(dto::Responses::get_tfa_setup_res(b32_secret, code));
}
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void auth::tfa_complete(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
Json::Value &json = *req->jsonObject();
try {
bool mail = dto::json_get<bool>(json, "mail").value();
uint32_t code = std::stoi(dto::json_get<std::string>(json, "code").value());
user.setTfaType(mail ? db::tfaTypes::EMAIL : db::tfaTypes::TOTP);
if (!verify2fa(user, code))
return cbk(dto::Responses::get_unauth_res("Incorrect 2fa"));
db::MapperUser user_mapper(drogon::app().getDbClient());
user_mapper.update(user);
revoke_all(user);
cbk(dto::Responses::get_success_res());
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void auth::tfa_disable(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
db::MapperUser user_mapper(drogon::app().getDbClient());
user.setTfaType(db::tfaTypes::NONE);
user_mapper.update(user);
revoke_all(user);
cbk(dto::Responses::get_success_res());
}
}
#pragma clang diagnostic pop


@@ -1,135 +0,0 @@
#pragma clang diagnostic push
#pragma ide diagnostic ignored "readability-make-member-function-const"
#pragma ide diagnostic ignored "readability-convert-member-functions-to-static"
#include <botan/argon2.h>
#include <botan/totp.h>
#include <jwt-cpp/traits/kazuho-picojson/traits.h>
#include <jwt-cpp/jwt.h>
#include "controllers/controllers.h"
#include "db/db.h"
#include "dto/dto.h"
namespace api {
void auth::login(req_type req, cbk_type cbk) {
Json::Value &json = *req->jsonObject();
try {
std::string username = dto::json_get<std::string>(json, "username").value();
std::string password = dto::json_get<std::string>(json, "password").value();
std::optional<std::string> otp = dto::json_get<std::string>(json, "otp");
auto db = drogon::app().getDbClient();
db::MapperUser user_mapper(db);
auto db_users = user_mapper.findBy(
db::Criteria(db::User::Cols::_name, db::CompareOps::EQ, username) &&
db::Criteria(db::User::Cols::_gitlab, db::CompareOps::EQ, 0)
);
if (db_users.empty()) {
cbk(dto::Responses::get_unauth_res("Invalid username or password"));
return;
}
db::User &db_user = db_users.at(0);
if (!Botan::argon2_check_pwhash(password.c_str(), password.size(), db_user.getValueOfPassword())) {
cbk(dto::Responses::get_unauth_res("Invalid username or password"));
return;
}
if (db::User_getEnumRole(db_user) == db::UserRole::DISABLED) {
cbk(dto::Responses::get_unauth_res("Account is disabled"));
return;
}
const auto tfa = db::User_getEnumTfaType(db_user);
if (tfa != db::tfaTypes::NONE) {
if (!otp.has_value()) {
if (tfa == db::tfaTypes::EMAIL) send_mail(db_user);
return cbk(dto::Responses::get_success_res());
}
if (!verify2fa(db_user, std::stoi(otp.value())))
return cbk(dto::Responses::get_unauth_res("Incorrect 2fa"));
}
cbk(dto::Responses::get_login_res(get_token(db_user)));
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void auth::signup(req_type req, cbk_type cbk) {
Json::Value &json = *req->jsonObject();
try {
std::string username = dto::json_get<std::string>(json, "username").value();
std::string password = dto::json_get<std::string>(json, "password").value();
db::MapperUser user_mapper(drogon::app().getDbClient());
auto existing_users = user_mapper.count(
db::Criteria(db::User::Cols::_name, db::CompareOps::EQ, username) &&
db::Criteria(db::User::Cols::_gitlab, db::CompareOps::EQ, 0)
);
if (existing_users != 0) {
cbk(dto::Responses::get_badreq_res("Username is already taken"));
return;
}
//std::string hash = Botan::argon2_generate_pwhash(password.c_str(), password.size(), *rng, 1, 256*1024, 2);
std::string hash = Botan::argon2_generate_pwhash(password.c_str(), password.size(), *rng, 1, 16*1024, 1);
db::User new_user;
new_user.setName(username);
new_user.setPassword(hash);
new_user.setGitlab(0);
new_user.setRole(db::UserRole::DISABLED);
new_user.setRootId(0);
new_user.setTfaType(db::tfaTypes::NONE);
user_mapper.insert(new_user);
generate_root(new_user);
cbk(dto::Responses::get_success_res());
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void auth::refresh(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
db::Token token = dto::get_token(req);
db::MapperToken token_mapper(drogon::app().getDbClient());
token_mapper.deleteOne(token);
cbk(dto::Responses::get_login_res( get_token(user)));
}
void auth::logout_all(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
revoke_all(user);
cbk(dto::Responses::get_success_res());
}
void auth::change_password(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
Json::Value &json = *req->jsonObject();
try {
std::string old_pw = dto::json_get<std::string>(json, "oldPassword").value();
std::string new_pw = dto::json_get<std::string>(json, "newPassword").value();
auto db = drogon::app().getDbClient();
db::MapperUser user_mapper(db);
if (!Botan::argon2_check_pwhash(old_pw.c_str(), old_pw.size(), user.getValueOfPassword()))
return cbk(dto::Responses::get_unauth_res("Old password is wrong"));
std::string hash = Botan::argon2_generate_pwhash(new_pw.c_str(), new_pw.size(), *rng, 1, 256*1024, 2);
user.setPassword(hash);
user_mapper.update(user);
revoke_all(user);
cbk(dto::Responses::get_success_res());
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
}
#pragma clang diagnostic pop


@@ -1,110 +0,0 @@
#pragma clang diagnostic push
#pragma ide diagnostic ignored "readability-make-member-function-const"
#pragma ide diagnostic ignored "readability-convert-member-functions-to-static"
#include <chrono>
#include <filesystem>
#include <fstream>
#include <botan/uuid.h>
#include <botan/totp.h>
#if defined(BOTAN_HAS_SYSTEM_RNG)
#include <botan/system_rng.h>
#else
#include <botan/auto_rng.h>
#endif
#include <jwt-cpp/traits/kazuho-picojson/traits.h>
#include <jwt-cpp/jwt.h>
#include <SMTPMail.h>
#include "controllers/controllers.h"
#include "db/db.h"
namespace api {
#if defined(BOTAN_HAS_SYSTEM_RNG)
std::unique_ptr<Botan::RNG> auth::rng = std::make_unique<Botan::System_RNG>();
#else
std::unique_ptr<Botan::RNG> auth::rng = std::make_unique<Botan::AutoSeeded_RNG>();
#endif
bool auth::verify2fa(const db::User& user, uint32_t totp) {
size_t allowed_skew = db::User_getEnumTfaType(user) == db::tfaTypes::TOTP ? 0 : 10;
const auto& totp_secret = (const std::vector<uint8_t>&) user.getValueOfTfaSecret();
return Botan::TOTP(Botan::OctetString(totp_secret)).verify_totp(totp, std::chrono::system_clock::now(), allowed_skew);
}
void auth::send_mail(const db::User& user) {
std::time_t t = std::time(nullptr);
const auto& totp_secret = (const std::vector<uint8_t>&) user.getValueOfTfaSecret();
char totp[16];
std::snprintf(totp, 16, "%06d", Botan::TOTP(Botan::OctetString(totp_secret)).generate_totp(t));
auto config = drogon::app().getCustomConfig();
drogon::app().getPlugin<SMTPMail>()->sendEmail(
config["smtp_server"].asString(),
(uint16_t)config["smtp_port"].asUInt64(),
"fileserver@mattv.de",
user.getValueOfName(),
"MFileserver - Email 2fa code",
"Your code is: " + std::string(totp) +"\r\nIt is valid for 5 Minutes",
config["smtp_user"].asString(),
config["smtp_password"].asString(),
false
);
}
std::string auth::get_token(const db::User& user) {
auto db = drogon::app().getDbClient();
db::MapperToken token_mapper(db);
const auto iat = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch());
const auto exp = iat + std::chrono::hours{24};
db::Token new_token;
new_token.setOwnerId(user.getValueOfId());
new_token.setExp(exp.count());
token_mapper.insert(new_token);
return jwt::create<jwt::traits::kazuho_picojson>()
.set_type("JWT")
.set_payload_claim("sub", picojson::value((int64_t)user.getValueOfId()))
.set_payload_claim("jti", picojson::value((int64_t)new_token.getValueOfId()))
.set_issued_at(std::chrono::system_clock::from_time_t(iat.count()))
.set_expires_at(std::chrono::system_clock::from_time_t(exp.count()))
.sign(jwt::algorithm::hs256{get_jwt_secret()});
}
void auth::generate_root(db::User& user) {
db::MapperUser user_mapper(drogon::app().getDbClient());
auto node = fs::create_node("", user, false, std::nullopt, true);
user.setRootId(std::get<db::INode>(node).getValueOfId());
user_mapper.update(user);
}
void auth::revoke_all(const db::User& user) {
db::MapperToken token_mapper(drogon::app().getDbClient());
token_mapper.deleteBy(db::Criteria(db::Token::Cols::_owner_id, db::CompareOps::EQ, user.getValueOfId()));
}
std::string auth::get_jwt_secret() {
static std::string token;
if (token.empty()) {
if (!std::filesystem::exists("jwt.secret")) {
auto new_token = rng->random_vec(128);
std::ofstream file("jwt.secret", std::ofstream::binary);
file.write((const char*)new_token.data(), (std::streamsize)new_token.size());
}
std::ifstream file("jwt.secret", std::ifstream::binary);
token = {std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()};
}
return token;
}
}
#pragma clang diagnostic pop


@@ -1,135 +0,0 @@
#pragma clang diagnostic push
#pragma ide diagnostic ignored "performance-unnecessary-value-param"
#pragma ide diagnostic ignored "readability-make-member-function-const"
#pragma ide diagnostic ignored "readability-convert-member-functions-to-static"
#include "controllers/controllers.h"
#include "dto/dto.h"
namespace config {
std::string get_id() {
static std::string val = drogon::app().getCustomConfig()["gitlab_id"].asString();
return val;
}
std::string get_secret() {
static std::string val = drogon::app().getCustomConfig()["gitlab_secret"].asString();
return val;
}
std::string get_url() {
static std::string val = drogon::app().getCustomConfig()["gitlab_url"].asString();
return val;
}
std::string get_api_url() {
static std::string val = drogon::app().getCustomConfig()["gitlab_api_url"].asString();
return val;
}
std::string get_redirect_url() {
static std::string val = drogon::app().getCustomConfig()["gitlab_redirect_url"].asString();
return val;
}
}
std::string get_redirect_uri() {
std::stringstream ss;
ss << config::get_redirect_url()
<< "/api/auth/gitlab_callback";
return drogon::utils::urlEncode(ss.str());
}
const drogon::HttpClientPtr& get_gitlab_client() {
static drogon::HttpClientPtr client = drogon::HttpClient::newHttpClient(config::get_api_url(), drogon::app().getLoop(), false, false);
return client;
}
namespace api {
std::optional<auth::gitlab_tokens> auth::get_gitlab_tokens(const std::string& code_or_token, bool token) {
std::stringstream ss;
ss << "/oauth/token"
<< "?redirect_uri=" << get_redirect_uri()
<< "&client_id=" << config::get_id()
<< "&client_secret=" << config::get_secret()
<< (token ? "&refresh_token=" : "&code=") << code_or_token
<< "&grant_type=" << (token ? "refresh_token" : "authorization_code");
auto gitlab_req = drogon::HttpRequest::newHttpRequest();
gitlab_req->setPathEncode(false);
gitlab_req->setPath(ss.str());
gitlab_req->setMethod(drogon::HttpMethod::Post);
auto res_tuple = get_gitlab_client()->sendRequest(gitlab_req);
auto res = res_tuple.second;
if ((res->statusCode() != drogon::HttpStatusCode::k200OK) && (res->statusCode() != drogon::HttpStatusCode::k201Created))
return std::nullopt;
auto json = *res->jsonObject();
return std::make_optional<gitlab_tokens>(
json["access_token"].as<std::string>(),
json["refresh_token"].as<std::string>()
);
}
std::optional<auth::gitlab_user> auth::get_gitlab_user(const std::string& at) {
auto gitlab_req = drogon::HttpRequest::newHttpRequest();
gitlab_req->setPath("/api/v4/user");
gitlab_req->addHeader("Authorization", "Bearer " + at);
gitlab_req->setMethod(drogon::HttpMethod::Get);
auto res_tuple = get_gitlab_client()->sendRequest(gitlab_req);
auto res = res_tuple.second;
if (res->statusCode() != drogon::HttpStatusCode::k200OK)
return std::nullopt;
auto json = *res->jsonObject();
return std::make_optional<gitlab_user>(
json["username"].as<std::string>(),
json.get("is_admin", false).as<bool>()
);
}
void auth::gitlab(req_type, cbk_type cbk) {
std::stringstream ss;
ss << config::get_url() << "/oauth/authorize"
<< "?redirect_uri=" << get_redirect_uri()
<< "&client_id=" << config::get_id()
<< "&scope=read_user&response_type=code";
cbk(drogon::HttpResponse::newRedirectionResponse(ss.str()));
}
std::string disabled_resp = "<!DOCTYPE html><html><h2>Your account is disabled, please contact an admin.&nbsp;<a href=\"/login\">Go to login page</a></h2></html>";
void auth::gitlab_callback(req_type, cbk_type cbk, std::string code) {
auto tokens = get_gitlab_tokens(code, false);
if (!tokens.has_value())
return cbk(dto::Responses::get_unauth_res("Invalid code"));
auto info = get_gitlab_user(tokens->at);
if (!info.has_value())
return cbk(dto::Responses::get_unauth_res("Invalid code"));
db::MapperUser user_mapper(drogon::app().getDbClient());
auto db_users = user_mapper.findBy(
db::Criteria(db::User::Cols::_name, db::CompareOps::EQ, info->name) &&
db::Criteria(db::User::Cols::_gitlab, db::CompareOps::EQ, 1)
);
if (db_users.empty()) {
db::User new_user;
new_user.setName(info->name);
new_user.setPassword("");
new_user.setGitlab(1);
new_user.setRole(info->is_admin ? db::UserRole::ADMIN : db::UserRole::DISABLED);
new_user.setRootId(0);
new_user.setTfaType(db::tfaTypes::NONE);
user_mapper.insert(new_user);
generate_root(new_user);
db_users.push_back(new_user);
}
db::User& db_user = db_users.at(0);
db_user.setGitlabAt(tokens->at);
db_user.setGitlabRt(tokens->rt);
user_mapper.update(db_user);
if (db::User_getEnumRole(db_user) == db::UserRole::DISABLED)
return cbk(drogon::HttpResponse::newFileResponse((unsigned char*)disabled_resp.data(), disabled_resp.size(), "", drogon::ContentType::CT_TEXT_HTML));
const std::string& token = get_token(db_user);
cbk(drogon::HttpResponse::newRedirectionResponse("/set_token?token="+token));
}
}
#pragma clang diagnostic pop


@@ -1,161 +0,0 @@
#ifndef BACKEND_CONTROLLERS_H
#define BACKEND_CONTROLLERS_H
#include <variant>
#include <unordered_map>
#include <shared_mutex>
#include <drogon/drogon.h>
#include <botan/rng.h>
#include <msd/channel.hpp>
#include <trantor/net/EventLoopThread.h>
#include <kubazip/zip/zip.h>
#include "db/db.h"
using req_type = const drogon::HttpRequestPtr&;
using cbk_type = std::function<void(const drogon::HttpResponsePtr &)>&&;
namespace api {
class admin : public drogon::HttpController<admin> {
public:
METHOD_LIST_BEGIN
METHOD_ADD(admin::users, "/users", drogon::Get, "Login", "Admin");
METHOD_ADD(admin::set_role, "/set_role", drogon::Post, "Login", "Admin");
METHOD_ADD(admin::logout, "/logout", drogon::Post, "Login", "Admin");
METHOD_ADD(admin::delete_user, "/delete", drogon::Post, "Login", "Admin");
METHOD_ADD(admin::disable_2fa, "/disable_2fa", drogon::Post, "Login", "Admin");
METHOD_ADD(admin::is_admin, "/is_admin", drogon::Get, "Login", "Admin");
METHOD_ADD(admin::get_token, "/get_token/{}", drogon::Get, "Login", "Admin");
METHOD_LIST_END
void users(req_type, cbk_type);
void set_role(req_type, cbk_type);
void logout(req_type, cbk_type);
void delete_user(req_type, cbk_type);
void disable_2fa(req_type, cbk_type);
void is_admin(req_type, cbk_type);
void get_token(req_type, cbk_type, uint64_t user);
};
class auth : public drogon::HttpController<auth> {
public:
METHOD_LIST_BEGIN
METHOD_ADD(auth::gitlab, "/gitlab", drogon::Get);
METHOD_ADD(auth::gitlab_callback, "/gitlab_callback?code={}", drogon::Get);
METHOD_ADD(auth::signup, "/signup", drogon::Post);
METHOD_ADD(auth::login, "/login", drogon::Post);
METHOD_ADD(auth::refresh, "/refresh", drogon::Post, "Login");
METHOD_ADD(auth::tfa_setup, "/2fa/setup", drogon::Post, "Login");
METHOD_ADD(auth::tfa_complete, "/2fa/complete", drogon::Post, "Login");
METHOD_ADD(auth::tfa_disable, "/2fa/disable", drogon::Post, "Login");
METHOD_ADD(auth::change_password, "/change_password", drogon::Post, "Login");
METHOD_ADD(auth::logout_all, "/logout_all", drogon::Post, "Login");
METHOD_LIST_END
struct gitlab_tokens {
gitlab_tokens(std::string at, std::string rt) : at(std::move(at)), rt(std::move(rt)) {}
std::string at, rt;
};
struct gitlab_user {
gitlab_user(std::string name, bool isAdmin) : name(std::move(name)), is_admin(isAdmin) {}
std::string name;
bool is_admin;
};
static std::unique_ptr<Botan::RNG> rng;
static std::optional<gitlab_tokens> get_gitlab_tokens(const std::string&, bool token);
static std::optional<gitlab_user> get_gitlab_user(const std::string&);
static bool verify2fa(const db::User&, uint32_t totp);
static void send_mail(const db::User&);
static std::string get_token(const db::User&);
static void generate_root(db::User&);
static void revoke_all(const db::User&);
static std::string get_jwt_secret();
void gitlab(req_type, cbk_type);
void gitlab_callback(req_type, cbk_type, std::string code);
void signup(req_type, cbk_type);
void login(req_type, cbk_type);
void refresh(req_type, cbk_type);
void tfa_setup(req_type, cbk_type);
void tfa_complete(req_type, cbk_type);
void tfa_disable(req_type, cbk_type);
void change_password(req_type, cbk_type);
void logout_all(req_type, cbk_type);
};
class fs : public drogon::HttpController<fs> {
public:
METHOD_LIST_BEGIN
METHOD_ADD(fs::root, "/root", drogon::Get, "Login");
METHOD_ADD(fs::node, "/node/{}", drogon::Get, "Login");
METHOD_ADD(fs::path, "/path/{}", drogon::Get, "Login");
METHOD_ADD(fs::create_node_req<false>, "/createFolder", drogon::Post, "Login");
METHOD_ADD(fs::create_node_req<true>, "/createFile", drogon::Post, "Login");
METHOD_ADD(fs::delete_node_req, "/delete/{}", drogon::Post, "Login");
METHOD_ADD(fs::upload, "/upload/{}", drogon::Post, "Login");
METHOD_ADD(fs::create_zip, "/create_zip", drogon::Post, "Login");
METHOD_ADD(fs::download, "/download", drogon::Post, "Login");
METHOD_ADD(fs::download_multi, "/download_multi", drogon::Post, "Login");
METHOD_ADD(fs::download_preview, "/download_preview/{}", drogon::Get, "Login");
METHOD_ADD(fs::get_type, "/get_type/{}", drogon::Get, "Login");
METHOD_LIST_END
enum class create_node_error {
INVALID_NAME,
INVALID_PARENT,
FILE_PARENT
};
struct mutex_stream {
std::stringstream ss;
std::mutex mutex;
bool done = false;
};
static std::optional<db::INode> get_node(uint64_t node);
static std::optional<db::INode> get_node_and_validate(const db::User& user, uint64_t node);
static std::vector<db::INode> get_children(const db::INode& parent);
static std::variant<db::INode, fs::create_node_error, std::tuple<bool, uint64_t>>
create_node(std::string name, const db::User& owner, bool file, const std::optional<uint64_t> &parent, bool force = false);
static void delete_node(db::INode node, msd::channel<std::string>& chan, bool allow_root = false);
static std::shared_ptr<std::shared_mutex> get_user_mutex(uint64_t user_id);
void root(req_type, cbk_type);
void node(req_type, cbk_type, uint64_t node);
void path(req_type, cbk_type, uint64_t node);
template<bool file> void create_node_req(req_type req, cbk_type cbk);
void delete_node_req(req_type, cbk_type, uint64_t node);
void upload(req_type, cbk_type, uint64_t node);
void create_zip(req_type, cbk_type);
void download(req_type, cbk_type);
void download_multi(req_type, cbk_type);
void download_preview(req_type, cbk_type, uint64_t node);
void get_type(req_type, cbk_type, uint64_t node);
private:
static trantor::EventLoop* get_zip_loop();
static trantor::EventLoop* get_delete_loop();
static void generate_path(db::INode node, std::string& str);
static Json::Value generate_path(db::INode node);
static uint64_t calc_total_size(const db::INode& base);
static void add_to_zip(struct zip_t* zip, const std::string& key, const db::INode& node, const std::string& path);
static uint64_t next_temp_id;
static std::unordered_map<std::string, std::string> zip_to_temp_map;
static std::unordered_map<std::string, std::tuple<std::string, uint64_t, uint64_t>> in_progress_zips;
};
class user : public drogon::HttpController<user> {
public:
METHOD_LIST_BEGIN
METHOD_ADD(user::info, "/info", drogon::Get, "Login");
METHOD_ADD(user::delete_user, "/delete", drogon::Post, "Login");
METHOD_LIST_END
void info(req_type, cbk_type);
void delete_user(req_type, cbk_type);
};
}
#endif //BACKEND_CONTROLLERS_H


@@ -1,257 +0,0 @@
#include <filesystem>
#include <fstream>
#include "controllers/controllers.h"
#include "dto/dto.h"
char windows_invalid_chars[] = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F<>:\"/\\|";
namespace api {
uint64_t fs::next_temp_id = 0;
std::unordered_map<std::string, std::string> fs::zip_to_temp_map;
std::unordered_map<std::string, std::tuple<std::string, uint64_t, uint64_t>> fs::in_progress_zips;
std::optional<db::INode> fs::get_node(uint64_t node) {
db::MapperInode inode_mapper(drogon::app().getDbClient());
try {
return inode_mapper.findByPrimaryKey(node);
} catch (const std::exception&) {
return std::nullopt;
}
}
std::optional<db::INode> fs::get_node_and_validate(const db::User &user, uint64_t node) {
auto inode = get_node(node);
if (!inode.has_value()) return std::nullopt;
if (inode->getValueOfOwnerId() != user.getValueOfId()) return std::nullopt;
return inode;
}
std::vector<db::INode> fs::get_children(const db::INode& parent) {
db::MapperInode inode_mapper(drogon::app().getDbClient());
return inode_mapper.findBy(db::Criteria(db::INode::Cols::_parent_id, db::CompareOps::EQ, parent.getValueOfId()));
}
std::variant<db::INode, fs::create_node_error, std::tuple<bool, uint64_t>>
fs::create_node(std::string name, const db::User& owner, bool file, const std::optional<uint64_t> &parent, bool force) {
// Stolen from https://github.com/boostorg/filesystem/blob/develop/src/portability.cpp
if (!force)
if (name.empty() || name[0] == ' ' || name.find_first_of(windows_invalid_chars, 0, sizeof(windows_invalid_chars)) != std::string::npos || *(name.end() - 1) == ' ' || *(name.end() - 1) == '.' || name == "." || name == "..")
return {create_node_error::INVALID_NAME};
db::INode node;
node.setIsFile(file ? 1 : 0);
node.setName(name);
node.setOwnerId(owner.getValueOfId());
node.setHasPreview(0);
if (parent.has_value()) {
auto parent_node = get_node_and_validate(owner, *parent);
if (!parent_node.has_value())
return {create_node_error::INVALID_PARENT};
if (parent_node->getValueOfIsFile() != 0)
return {create_node_error::FILE_PARENT};
auto children = get_children(*parent_node);
for (const auto& child : children)
if (child.getValueOfName() == name)
return {std::make_tuple(
child.getValueOfIsFile() != 0,
child.getValueOfId()
)};
node.setParentId(*parent);
}
db::MapperInode inode_mapper(drogon::app().getDbClient());
inode_mapper.insert(node);
return {node};
}
void fs::delete_node(db::INode node, msd::channel<std::string>& chan, bool allow_root) {
if (node.getValueOfParentId() == 0 && (!allow_root)) return;
db::MapperInode inode_mapper(drogon::app().getDbClient());
const auto delete_file = [&chan, &inode_mapper](const db::INode& node) {
std::string entry = "Deleting ";
generate_path(node, entry);
entry >> chan;
std::filesystem::path p("./files");
p /= std::to_string(node.getValueOfId());
std::filesystem::remove(p);
if (node.getValueOfHasPreview() != 0)
std::filesystem::remove(p.string() + "_preview.png");
inode_mapper.deleteOne(node);
std::string(" Done\n") >> chan;
};
std::stack<db::INode> queue, files, folders;
if (node.getValueOfIsFile() == 0) queue.push(node);
else files.push(node);
while (!queue.empty()) {
while (!files.empty()) {
delete_file(files.top());
files.pop();
}
std::string entry = "Deleting ";
generate_path(queue.top(), entry);
entry += "\n";
entry >> chan;
auto children = get_children(queue.top());
folders.push(queue.top());
queue.pop();
for (const auto& child : children) {
if (child.getValueOfIsFile() == 0) queue.push(child);
else files.push(child);
}
}
while (!files.empty()) {
delete_file(files.top());
files.pop();
}
while (!folders.empty()) {
inode_mapper.deleteOne(folders.top());
folders.pop();
}
}
std::shared_ptr<std::shared_mutex> fs::get_user_mutex(uint64_t user_id) {
static std::unordered_map<uint64_t, std::shared_ptr<std::shared_mutex>> mutexes;
static std::mutex mutexes_mutex;
std::lock_guard guard(mutexes_mutex);
return (*mutexes.try_emplace(user_id, std::make_shared<std::shared_mutex>()).first).second;
}
trantor::EventLoop* fs::get_zip_loop() {
static bool init_done = false;
static trantor::EventLoopThread loop("ZipEventLoop");
if (!init_done) {
init_done = true;
loop.run();
loop.getLoop()->runEvery(30*60, []{
for (const auto& entry : std::filesystem::directory_iterator("./temp")) {
if (!entry.is_regular_file()) continue;
const std::string file_name = "./temp/" + entry.path().filename().string();
const auto& progress_pos = std::find_if(in_progress_zips.begin(), in_progress_zips.end(),
[&file_name](const std::pair<std::string, std::tuple<std::string, uint64_t, uint64_t>>& entry) {
return std::get<0>(entry.second) == file_name;
}
);
if (progress_pos != in_progress_zips.end()) return;
const auto& zip_map_pos = std::find_if(zip_to_temp_map.begin(), zip_to_temp_map.end(),
[&file_name](const std::pair<std::string, std::string>& entry){
return entry.second == file_name;
}
);
if (zip_map_pos != zip_to_temp_map.end()) return;
std::filesystem::remove(entry.path());
}
});
}
return loop.getLoop();
}
trantor::EventLoop* fs::get_delete_loop() {
static bool init_done = false;
static trantor::EventLoopThread loop("DeleteEventLoop");
if (!init_done) {
init_done = true;
loop.run();
}
return loop.getLoop();
}
void fs::generate_path(db::INode node, std::string& str) {
db::MapperInode inode_mapper(drogon::app().getDbClient());
std::stack<db::INode> path;
path.push(node);
while (node.getParentId() != nullptr) {
node = inode_mapper.findByPrimaryKey(node.getValueOfParentId());
path.push(node);
}
while (!path.empty()) {
const db::INode& seg = path.top();
str += seg.getValueOfName();
if (seg.getValueOfIsFile() == 0) str += "/";
path.pop();
}
}
Json::Value fs::generate_path(db::INode node) {
Json::Value segments = Json::Value(Json::ValueType::arrayValue);
db::MapperInode inode_mapper(drogon::app().getDbClient());
std::stack<db::INode> path;
path.push(node);
while (node.getParentId() != nullptr) {
node = inode_mapper.findByPrimaryKey(node.getValueOfParentId());
path.push(node);
}
while (!path.empty()) {
const db::INode& seg = path.top();
if (seg.getParentId() == nullptr) {
Json::Value json_seg;
json_seg["path"] = "/";
json_seg["node"] = seg.getValueOfId();
segments.append(json_seg);
} else {
Json::Value json_seg;
json_seg["path"] = seg.getValueOfName();
json_seg["node"] = seg.getValueOfId();
segments.append(json_seg);
if (seg.getValueOfIsFile() == 0) {
json_seg.removeMember("node");
json_seg["path"] = "/";
segments.append(json_seg);
}
}
path.pop();
}
Json::Value resp;
resp["segments"] = segments;
return resp;
}
uint64_t fs::calc_total_size(const db::INode& base) {
uint64_t size = 0;
std::stack<db::INode> queue;
queue.push(base);
while (!queue.empty()) {
const db::INode& node = queue.top();
if (node.getValueOfIsFile() == 0) {
auto children = api::fs::get_children(node);
queue.pop();
for (const auto& child : children) {
if (child.getValueOfIsFile() == 0) queue.push(child);
else if (child.getSize()) size += child.getValueOfSize();
}
} else {
size += node.getValueOfSize();
queue.pop();
}
}
return size;
}
void fs::add_to_zip(struct zip_t* zip, const std::string& key, const db::INode& node, const std::string& path) {
if (node.getValueOfIsFile() == 0) {
std::string new_path = path + node.getValueOfName() + "/";
zip_entry_opencasesensitive(zip, new_path.c_str());
zip_entry_close(zip);
auto children = api::fs::get_children(node);
for (const auto& child : children)
add_to_zip(zip, key, child, new_path);
} else {
zip_entry_opencasesensitive(zip, (path + node.getValueOfName()).c_str());
std::ifstream file("./files/" + std::to_string(node.getValueOfId()), std::ifstream::binary);
std::vector<char> buffer(64*1024);
while (!file.eof()) {
file.read(buffer.data(), (std::streamsize)buffer.size());
auto read = file.gcount();
zip_entry_write(zip, buffer.data(), read);
std::get<1>(in_progress_zips[key]) += read;
}
zip_entry_close(zip);
}
}
}


@@ -1,349 +0,0 @@
#include <filesystem>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <botan/base64.h>
#include "controllers/controllers.h"
#include "dto/dto.h"
// https://developer.mozilla.org/en-US/docs/Web/Media/Formats/Image_types#common_image_file_types
const std::unordered_map<std::string, std::string> mime_type_map = {
{ ".apng" , "image/apng" },
{ ".avif" , "image/avif" },
{ ".bmp" , "image/bmp" },
{ ".gif" , "image/gif" },
{ ".jpg" , "image/jpeg" },
{ ".jpeg" , "image/jpeg" },
{ ".jfif" , "image/jpeg" },
{ ".pjpeg", "image/jpeg" },
{ ".pjp" , "image/jpeg" },
{ ".png" , "image/png" },
{ ".svg" , "image/svg" },
{ ".webp" , "image/webp" },
{ ".aac" , "audio/aac" },
{ ".flac" , "audio/flac" },
{ ".mp3" , "audio/mp3" },
{ ".m4a" , "audio/mp4" },
{ ".oga" , "audio/ogg" },
{ ".ogg" , "audio/ogg" },
{ ".wav" , "audio/wav" },
{ ".3gp" , "video/3gpp" },
{ ".mpg" , "video/mpeg" },
{ ".mpeg" , "video/mpeg" },
{ ".mp4" , "video/mp4" },
{ ".m4v" , "video/mp4" },
{ ".m4p" , "video/mp4" },
{ ".ogv" , "video/ogg" },
{ ".mov" , "video/quicktime" },
{ ".webm" , "video/webm" },
{ ".mkv" , "video/x-matroska" },
{ ".mk3d" , "video/x-matroska" },
{ ".mks" , "video/x-matroska" },
{ ".pdf" , "application/pdf" }
};
template<typename InputIt>
std::string join_string(InputIt first, InputIt last, const std::string& separator = ",") {
std::ostringstream result;
if (first != last) {
result << *first;
while (++first != last) {
result << separator << *first;
}
}
return result.str();
}
namespace api {
void fs::root(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
cbk(dto::Responses::get_root_res(user.getValueOfRootId()));
}
void fs::node(req_type req, cbk_type cbk, uint64_t node) {
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto inode = get_node_and_validate(user, node);
if (!inode.has_value())
return cbk(dto::Responses::get_badreq_res("Unknown node"));
auto dto_node = dto::Responses::GetNodeEntry(*inode);
std::vector<dto::Responses::GetNodeEntry> children;
if (!dto_node.is_file) for (const db::INode& child : get_children(*inode)) children.emplace_back(child);
cbk(dto::Responses::get_node_res(dto_node, children));
}
void fs::path(req_type req, cbk_type cbk, uint64_t node) {
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto inode = get_node_and_validate(user, node);
if (!inode.has_value())
cbk(dto::Responses::get_badreq_res("Unknown node"));
else {
auto path = generate_path(*inode);
cbk(dto::Responses::get_success_res(path));
}
}
template<bool file>
void fs::create_node_req(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
Json::Value& json = *req->jsonObject();
try {
uint64_t parent = dto::json_get<uint64_t>(json, "parent").value();
std::string name = dto::json_get<std::string>(json, "name").value();
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto new_node = create_node(name, user, file, std::make_optional(parent));
if (std::holds_alternative<db::INode>(new_node))
cbk(dto::Responses::get_new_node_res(std::get<db::INode>(new_node).getValueOfId()));
else if (std::holds_alternative<create_node_error>(new_node))
switch (std::get<create_node_error>(new_node)) {
case create_node_error::INVALID_NAME: return cbk(dto::Responses::get_badreq_res("Invalid name"));
case create_node_error::INVALID_PARENT: return cbk(dto::Responses::get_badreq_res("Invalid parent"));
case create_node_error::FILE_PARENT: return cbk(dto::Responses::get_badreq_res("Parent is file"));
}
else {
auto tuple = std::get<std::tuple<bool, uint64_t>>(new_node);
cbk(dto::Responses::get_node_exists_res(std::get<1>(tuple), std::get<0>(tuple)));
}
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void fs::delete_node_req(req_type req, cbk_type cbk, uint64_t node) {
db::User user = dto::get_user(req);
std::unique_lock lock(*get_user_mutex(user.getValueOfId()));
auto inode = get_node_and_validate(user, node);
if (!inode.has_value())
cbk(dto::Responses::get_badreq_res("Unknown node"));
else if (inode->getValueOfParentId() == 0)
cbk(dto::Responses::get_badreq_res("Can't delete root"));
else {
auto chan = std::make_shared<msd::channel<std::string>>();
std::string("Waiting in queue...\n") >> (*chan);
get_delete_loop()->queueInLoop([chan, inode=*inode, user=user.getValueOfId()]{
std::unique_lock lock(*get_user_mutex(user));
delete_node(inode, *chan);
chan->close();
});
cbk(drogon::HttpResponse::newStreamResponse([chan](char* buf, std::size_t size) -> std::size_t{
if (buf == nullptr) return 0;
if (chan->closed() && chan->empty()) return 0;
std::string buffer;
buffer << *chan;
if (buffer.empty()) return 0;
std::size_t read = std::min(size, buffer.size());
std::memcpy(buf, buffer.data(), read); // NOLINT(bugprone-not-null-terminated-result)
return read;
}));
}
}
void fs::upload(req_type req, cbk_type cbk, uint64_t node) {
constexpr int image_height = 256;
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto inode = get_node_and_validate(user, node);
if (!inode.has_value())
return cbk(dto::Responses::get_badreq_res("Unknown node"));
if (inode->getValueOfIsFile() == 0)
return cbk(dto::Responses::get_badreq_res("Can't upload to a directory"));
drogon::MultiPartParser mpp;
if (mpp.parse(req) != 0)
return cbk(dto::Responses::get_badreq_res("Failed to parse files"));
if (mpp.getFiles().size() != 1)
return cbk(dto::Responses::get_badreq_res("Exactly 1 file needed"));
const drogon::HttpFile& file = mpp.getFiles().at(0);
std::filesystem::path p("./files");
p /= std::to_string(inode->getValueOfId());
file.saveAs(p.string());
try {
if (file.fileLength() > 100 * 1024 * 1024) throw std::exception();
std::filesystem::path filename(inode->getValueOfName());
std::string ext = filename.extension().string();
std::transform(ext.begin(), ext.end(), ext.begin(), tolower);
const std::string& mime = mime_type_map.at(ext);
if (!mime.starts_with("image")) throw std::exception();
cv::_InputArray image_arr(file.fileData(), (int) file.fileLength());
cv::Mat image = cv::imdecode(image_arr, cv::IMREAD_COLOR);
if (!image.empty()) {
float h_ration = ((float) image_height) / ((float) image.rows);
cv::Mat preview;
cv::resize(image, preview, cv::Size((int) (((float) image.cols) * h_ration), image_height), 0, 0, cv::INTER_AREA);
cv::imwrite(p.string() + "_preview.png", preview);
inode->setHasPreview(1);
}
} catch (const std::exception&) {}
inode->setSize(file.fileLength());
db::MapperInode inode_mapper(drogon::app().getDbClient());
inode_mapper.update(*inode);
cbk(dto::Responses::get_success_res());
}
void fs::create_zip(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
Json::Value& json = *req->jsonObject();
try {
if (!json.isMember("nodes")) throw std::exception();
Json::Value node_arr = json["nodes"];
if (!node_arr.isArray()) throw std::exception();
std::vector<uint64_t> node_ids;
for (const auto& node : node_arr)
node_ids.push_back(node.asUInt64());
std::vector<db::INode> nodes;
std::transform(node_ids.begin(), node_ids.end(), std::back_inserter(nodes), [&user](uint64_t node) {
return api::fs::get_node_and_validate(user, node).value();
});
std::string key = join_string(node_ids.begin(), node_ids.end());
if (zip_to_temp_map.contains(key)) return cbk(dto::Responses::get_create_zip_done_res());
if (in_progress_zips.contains(key)) {
auto progress = in_progress_zips.at(key);
return cbk(dto::Responses::get_create_zip_done_res(std::get<1>(progress), std::get<2>(progress)));
}
std::string file_name = "./temp/fs_" + std::to_string(next_temp_id++) + ".zip";
in_progress_zips.emplace(key, std::make_tuple(file_name, 0, 1));
get_zip_loop()->queueInLoop([key = std::move(key), nodes = std::move(nodes), file_name = std::move(file_name), user=user.getValueOfId()]{
{
std::shared_lock lock(*get_user_mutex(user));
uint64_t size = 0;
for (const auto& node : nodes) size += calc_total_size(node);
std::get<2>(in_progress_zips.at(key)) = size;
struct zip_t* zip = zip_open(file_name.c_str(), ZIP_DEFAULT_COMPRESSION_LEVEL, 'w');
for (const db::INode& node : nodes)
add_to_zip(zip, key, node, "");
zip_close(zip);
}
zip_to_temp_map.emplace(key, file_name);
in_progress_zips.erase(key);
});
return cbk(dto::Responses::get_create_zip_done_res(0, 1));
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Validation error"));
}
}
void fs::download(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto node_id = req->getOptionalParameter<uint64_t>("id");
if (!node_id.has_value()) {
cbk(dto::Responses::get_badreq_res("Invalid node"));
return;
}
auto inode = get_node_and_validate(user, *node_id);
if (!inode.has_value()) {
cbk(dto::Responses::get_badreq_res("Invalid node"));
return;
}
if (inode->getValueOfIsFile() != 0) {
std::filesystem::path p("./files");
p /= std::to_string(inode->getValueOfId());
cbk(drogon::HttpResponse::newFileResponse(
p.string(),
inode->getValueOfName()
));
} else {
try {
std::string key = std::to_string(inode->getValueOfId());
std::string file = zip_to_temp_map.at(key);
zip_to_temp_map.erase(key);
cbk(drogon::HttpResponse::newFileResponse(
file,
inode->getValueOfName() + ".zip"
));
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Invalid node"));
}
}
}
void fs::download_multi(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto node_ids_str = req->getOptionalParameter<std::string>("id");
if (!node_ids_str.has_value())
return cbk(dto::Responses::get_badreq_res("No nodes"));
std::stringstream node_ids_ss(*node_ids_str);
std::string temp;
try {
while (std::getline(node_ids_ss, temp, ','))
if (!get_node_and_validate(user, std::stoull(temp)).has_value()) throw std::exception();
std::string file = zip_to_temp_map.at(*node_ids_str);
zip_to_temp_map.erase(*node_ids_str);
cbk(drogon::HttpResponse::newFileResponse(
file,
"files.zip"
));
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Invalid nodes"));
}
}
void fs::download_preview(req_type req, cbk_type cbk, uint64_t node) {
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto inode = get_node_and_validate(user, node);
if (!inode.has_value())
return cbk(dto::Responses::get_badreq_res("Unknown node"));
if (inode->getValueOfHasPreview() == 0)
return cbk(dto::Responses::get_badreq_res("No preview"));
std::filesystem::path p("./files");
p /= std::to_string(inode->getValueOfId()) + "_preview.png";
std::ifstream file(p, std::ios::in | std::ios::binary);
std::vector<uint8_t> image((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
cbk(dto::Responses::get_download_base64_res("data:image/png;base64," + Botan::base64_encode(image)));
}
void fs::get_type(req_type req, cbk_type cbk, uint64_t node){
db::User user = dto::get_user(req);
std::shared_lock lock(*get_user_mutex(user.getValueOfId()));
auto inode = get_node_and_validate(user, node);
if (!inode.has_value())
return cbk(dto::Responses::get_badreq_res("Unknown node"));
std::filesystem::path name(inode->getValueOfName());
std::string ext = name.extension().string();
std::transform(ext.begin(), ext.end(), ext.begin(), tolower);
try {
cbk(dto::Responses::get_type_res(mime_type_map.at(ext)));
} catch (const std::exception&) {
cbk(dto::Responses::get_badreq_res("Invalid file type"));
}
}
}


@@ -1,33 +0,0 @@
#pragma clang diagnostic push
#pragma ide diagnostic ignored "performance-unnecessary-value-param"
#pragma ide diagnostic ignored "readability-convert-member-functions-to-static"
#include "controllers.h"
#include "dto/dto.h"
namespace api {
void user::info(req_type req, cbk_type cbk) {
db::User user = dto::get_user(req);
cbk(dto::Responses::get_user_info_res(
user.getValueOfName(),
user.getValueOfGitlab() != 0,
db::User_getEnumTfaType(user) != db::tfaTypes::NONE)
);
}
void user::delete_user(req_type req, cbk_type cbk) {
db::MapperUser user_mapper(drogon::app().getDbClient());
msd::channel<std::string> chan;
db::User user = dto::get_user(req);
auth::revoke_all(user);
std::unique_lock lock(*fs::get_user_mutex(user.getValueOfId()));
fs::delete_node((fs::get_node(user.getValueOfRootId())).value(), chan, true);
user_mapper.deleteOne(user);
cbk(dto::Responses::get_success_res());
}
}
#pragma clang diagnostic pop


@@ -0,0 +1,157 @@
use diesel::prelude::*;
use crate::db::manager::DB_MANAGER;
pub struct DBConnection {
db: super::RawDBConnection
}
impl From<super::RawDBConnection> for DBConnection {
fn from(conn: super::RawDBConnection) -> Self {
Self {
db: conn
}
}
}
impl DBConnection {
// Users
pub fn create_user_password(
&mut self,
name: String,
password: String
) -> super::User {
let mut new_user: super::User = diesel::insert_into(crate::schema::user::table)
.values(super::user::NewUser {
name,
password,
gitlab: false,
role: super::UserRole::Disabled,
root_id: 0,
tfa_type: super::TfaTypes::None,
tfa_secret: None,
gitlab_at: None,
gitlab_rt: None
})
.get_result(&mut self.db)
.expect("Failed to insert new user");
let root_node = crate::routes::fs::create_node("".to_owned(), &new_user, false, None, true, self).expect("Couldn't create root node");
new_user.root_id = root_node.id;
self.save_user(&new_user);
new_user
}
pub fn create_user_gitlab(
&mut self,
name: String,
role: super::UserRole,
gitlab_at: String,
gitlab_rt: String
) -> super::User {
let mut new_user: super::User = diesel::insert_into(crate::schema::user::table)
.values(super::user::NewUser {
name,
password: "".to_owned(),
gitlab: true,
role,
root_id: 0,
tfa_type: super::TfaTypes::None,
tfa_secret: None,
gitlab_at: Some(gitlab_at),
gitlab_rt: Some(gitlab_rt)
})
.get_result(&mut self.db)
.expect("Failed to insert new user");
let root_node = crate::routes::fs::create_node("".to_owned(), &new_user, false, None, true, self).expect("Couldn't create root node");
new_user.root_id = root_node.id;
self.save_user(&new_user);
new_user
}
pub fn get_user(&mut self, _id: i32) -> Option<super::User> {
use crate::schema::user::dsl::*;
user.find(_id).first(&mut self.db).ok()
}
pub fn find_user(&mut self, _name: &str, _gitlab: bool) -> Option<super::User> {
use crate::schema::user::dsl::*;
user.filter(name.eq(_name)).filter(gitlab.eq(_gitlab)).first(&mut self.db).ok()
}
pub fn get_users(&mut self) -> Vec<super::User> {
crate::schema::user::table.load(&mut self.db).expect("Could not load users")
}
pub fn save_user(&mut self, user: &super::User) {
diesel::update(user)
.set(user.clone())
.execute(&mut self.db)
.expect("Failed to save user");
}
pub fn delete_user(&mut self, user: &super::User) {
diesel::delete(user).execute(&mut self.db).expect("Failed to delete user");
}
// Tokens
pub fn create_token(&mut self, _owner: i32, _exp: i64) -> super::Token {
diesel::insert_into(crate::schema::tokens::table)
.values(&super::token::NewToken {
owner_id: _owner,
exp: _exp
})
.get_result(&mut self.db)
.expect("Failed to save new token to database")
}
pub fn get_token(&mut self, _id: i32) -> Option<super::Token> {
use crate::schema::tokens::dsl::*;
tokens.find(_id).first(&mut self.db).ok()
}
pub fn delete_token(&mut self, _id: i32) {
use crate::schema::tokens::dsl::*;
diesel::delete(tokens.find(_id))
.execute(&mut self.db)
.expect("Failed to delete token");
}
pub fn delete_all_tokens(&mut self, _owner: i32) {
use crate::schema::tokens::dsl::*;
diesel::delete(tokens.filter(owner_id.eq(_owner)))
.execute(&mut self.db)
.expect("Failed to delete token");
}
pub fn cleanup_tokens(&mut self) {
use crate::schema::tokens::dsl::*;
let current_time = chrono::Utc::now().timestamp();
diesel::delete(tokens.filter(exp.le(current_time))).execute(&mut self.db).expect("Failed to cleanup tokens");
}
// Nodes
pub async fn get_lock(user: i32) -> std::sync::Arc<tokio::sync::RwLock<()>> {
DB_MANAGER.get_lock(user).await
}
pub fn create_node(&mut self, file: bool, name: String, parent: Option<i32>, owner: i32) -> super::Inode {
DB_MANAGER.create_node(&mut self.db, file, name, parent, owner)
}
pub fn get_node(&mut self, id: i32) -> Option<super::Inode> {
DB_MANAGER.get_node(&mut self.db, id)
}
pub fn get_children(&mut self, id: i32) -> Vec<super::Inode> {
DB_MANAGER.get_children(&mut self.db, id)
}
pub fn save_node(&mut self, node: &super::Inode) {
DB_MANAGER.save_node(&mut self.db, node);
}
pub fn delete_node(&mut self, node: &super::Inode) {
DB_MANAGER.delete_node(&mut self.db, node);
}
}
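
A minimal usage sketch for the DBConnection wrapper above (presumably backend/src/db/connection.rs). This is not code from the commit; it only calls helpers defined in this file plus build_pool from db/mod.rs, and it assumes User exposes id and root_id fields matching the NewUser insert struct.
// Sketch: grab a pooled connection and exercise the user/token helpers.
fn example_usage() {
    let pool = crate::db::build_pool(); // r2d2 pool over sqlite.db, see db/mod.rs
    let mut db = crate::db::DBConnection::from(pool.get().expect("no free connection"));

    // The password is stored as passed in; any hashing is expected to happen in the route layer.
    let user = db.create_user_password("alice".to_owned(), "<hashed password>".to_owned());
    assert_eq!(db.get_user(user.id).unwrap().root_id, user.root_id);

    // Issue a token that expires in 24 hours, then revoke every token the user owns.
    let exp = chrono::Utc::now().timestamp() + 24 * 60 * 60;
    let token = db.create_token(user.id, exp);
    assert!(db.get_token(token.id).is_some());
    db.delete_all_tokens(user.id);
}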


@@ -1,11 +0,0 @@
#include "db.h"
namespace db {
UserRole User_getEnumRole(const User& user) noexcept {
return (UserRole)user.getValueOfRole();
}
tfaTypes User_getEnumTfaType(const User& user) noexcept {
return (tfaTypes)user.getValueOfTfaType();
}
}


@@ -1,41 +0,0 @@
#ifndef BACKEND_DB_H
#define BACKEND_DB_H
#include <utility>
#include <drogon/utils/coroutine.h>
#include <drogon/drogon.h>
#include "Inode.h"
#include "Tokens.h"
#include "User.h"
namespace db {
enum UserRole : int {
ADMIN = 2,
USER = 1,
DISABLED = 0
};
enum tfaTypes : int {
NONE = 0,
EMAIL = 1,
TOTP = 2
};
using INode = drogon_model::sqlite3::Inode;
using Token = drogon_model::sqlite3::Tokens;
using User = drogon_model::sqlite3::User;
using MapperInode = drogon::orm::Mapper<INode>;
using MapperToken = drogon::orm::Mapper<Token>;
using MapperUser = drogon::orm::Mapper<User>;
using Criteria = drogon::orm::Criteria;
using CompareOps = drogon::orm::CompareOperator;
UserRole User_getEnumRole(const User&) noexcept;
tfaTypes User_getEnumTfaType(const User&) noexcept;
}
#endif //BACKEND_DB_H

backend/src/db/inode.rs (new file)

@@ -0,0 +1,26 @@
use diesel::prelude::*;
#[derive(Queryable, Identifiable, Eq, PartialEq, Debug, Associations, AsChangeset, Clone)]
#[diesel(belongs_to(crate::db::User, foreign_key = owner_id))]
#[diesel(table_name = crate::schema::inode)]
#[diesel(treat_none_as_null = true)]
pub struct Inode {
pub id: i32,
pub is_file: bool,
pub name: String,
pub parent_id: Option<i32>,
pub owner_id: i32,
pub size: Option<i64>,
pub has_preview: bool
}
#[derive(Insertable, Debug)]
#[diesel(table_name = crate::schema::inode)]
pub struct NewInode {
pub is_file: bool,
pub name: String,
pub parent_id: Option<i32>,
pub owner_id: i32,
pub size: Option<i64>,
pub has_preview: bool
}

backend/src/db/manager.rs (new file)

@@ -0,0 +1,109 @@
use std::collections::HashMap;
use std::sync::Arc;
use lazy_static::lazy_static;
use stretto::Cache;
use tokio::sync::{Mutex, RwLock};
use diesel::prelude::*;
use crate::db::Inode;
lazy_static! {
pub(super) static ref DB_MANAGER: DBManager = DBManager::new();
}
pub(super) struct DBManager {
locks: Mutex<HashMap<i32, Arc<RwLock<()>>>>,
node_cache: Cache<i32, Inode>,
children_cache: Cache<i32, Vec<Inode>>
}
impl DBManager {
fn new() -> Self {
Self {
locks: Mutex::new(HashMap::new()),
node_cache: Cache::new(10000, 1000).expect("Failed to create node cache"),
children_cache: Cache::new(1000, 100).expect("Failed to create child cache")
}
}
pub fn create_node(&self, db: &mut super::RawDBConnection, file: bool, _name: String, parent: Option<i32>, owner: i32) -> Inode {
use crate::schema::inode::dsl::*;
let node: Inode = diesel::insert_into(inode)
.values(crate::db::inode::NewInode {
is_file: file,
name: _name,
parent_id: parent,
owner_id: owner,
size: None,
has_preview: false
})
.get_result(db)
.expect("Failed to insert new inode");
self.node_cache.insert(node.id, node.clone(), 1);
if let Some(parent) = parent {
self.children_cache.remove(&parent);
}
node
}
pub fn get_node(&self, db: &mut super::RawDBConnection, node_id: i32) -> Option<Inode> {
use crate::schema::inode::dsl::*;
let node = self.node_cache.get(&node_id);
match node {
Some(v) => Some(v.value().clone()),
None => {
let v: Inode = inode.find(node_id).first(db).ok()?;
self.node_cache.insert(node_id, v.clone(), 1);
Some(v)
}
}
}
pub fn get_children(&self, db: &mut super::RawDBConnection, node_id: i32) -> Vec<Inode> {
use crate::schema::inode::dsl::*;
let children = self.children_cache.get(&node_id);
match children {
Some(v) => v.value().clone(),
None => {
let v = inode.filter(parent_id.eq(node_id)).load(db).expect("Failed to get children of node");
self.children_cache.insert(node_id, v.clone(), 1);
v
}
}
}
pub fn save_node(&self, db: &mut super::RawDBConnection, node: &Inode) {
self.node_cache.insert(node.id, node.clone(), 1);
diesel::update(node)
.set(node.clone())
.execute(db)
.expect("Failed to save node");
}
pub fn delete_node(&self, db: &mut super::RawDBConnection, node: &Inode) {
if node.is_file {
let file_name = format!("./files/{}", node.id);
let file = std::path::Path::new(&file_name);
let preview_name = format!("./files/{}_preview.jpg", node.id);
let preview = std::path::Path::new(&preview_name);
if file.exists() {
std::fs::remove_file(file).expect("Failed to delete file");
}
if preview.exists() {
std::fs::remove_file(preview).expect("Failed to delete preview");
}
}
diesel::delete(node).execute(db).expect("Failed to delete node");
self.node_cache.remove(&node.id);
self.children_cache.remove(&node.id);
if let Some(p) = node.parent_id { self.children_cache.remove(&p); }
}
pub async fn get_lock(&self, user: i32) -> Arc<RwLock<()>> {
self.locks.lock().await
.entry(user)
.or_insert_with(|| Arc::new(RwLock::new(())))
.clone()
}
}
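
The per-user RwLock returned by get_lock takes over the job of the per-user std::shared_mutex in the removed C++ fs controller. The following is a sketch (assumed, not part of the commit) of how a caller would combine that lock with the node helpers to delete a subtree, holding the write side while rows and files are removed.
// Sketch: exclusive per-user lock around a depth-first subtree delete.
async fn delete_subtree(db: &mut crate::db::DBConnection, user_id: i32, node_id: i32) {
    let lock = crate::db::DBConnection::get_lock(user_id).await; // Arc<tokio::sync::RwLock<()>>
    let _guard = lock.write().await; // hold exclusive access while mutating

    // Collect the subtree in pre-order, then delete in reverse so children go before
    // their parents, mirroring the removed C++ delete_node.
    let mut stack = vec![node_id];
    let mut order = Vec::new();
    while let Some(id) = stack.pop() {
        order.push(id);
        for child in db.get_children(id) {
            stack.push(child.id);
        }
    }
    for id in order.into_iter().rev() {
        if let Some(node) = db.get_node(id) {
            db.delete_node(&node); // for files this also removes ./files/{id} and its preview
        }
    }
}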

backend/src/db/mod.rs (new file)

@@ -0,0 +1,54 @@
mod inode;
mod token;
mod user;
pub mod manager;
mod connection;
use diesel::connection::SimpleConnection;
use diesel::sqlite::SqliteConnection;
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use warp::Filter;
pub use inode::Inode;
pub use token::Token;
pub use user::{User, TfaTypes, UserRole};
use crate::routes::AppError;
type RawDBConnection = PooledConnection<ConnectionManager<SqliteConnection>>;
pub type DBPool = Pool<ConnectionManager<SqliteConnection>>;
pub use connection::DBConnection;
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
#[derive(Debug)]
pub struct ConnectionOptions {}
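// Applied to every pooled connection: WAL journaling, relaxed fsync (NORMAL) and a 15 s busy timeout for concurrent writers.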
impl diesel::r2d2::CustomizeConnection<SqliteConnection, diesel::r2d2::Error> for ConnectionOptions {
fn on_acquire(&self, conn: &mut SqliteConnection) -> Result<(), diesel::r2d2::Error> {
conn.batch_execute("PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL; PRAGMA busy_timeout = 15000;")
.map_err(diesel::r2d2::Error::QueryError)
}
}
pub fn build_pool() -> Pool<ConnectionManager<SqliteConnection>> {
Pool::builder()
.connection_customizer(Box::new(ConnectionOptions {}))
.build(ConnectionManager::<SqliteConnection>::new("sqlite.db"))
.expect("Failed to open db")
}
pub fn run_migrations(db: &mut RawDBConnection) {
db.run_pending_migrations(MIGRATIONS).expect("Failed to run migrations");
}
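// Warp filter that hands each request a pooled connection, rejecting with an internal error when none is available.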
pub fn with_db(pool: DBPool) -> impl Filter<Extract=(DBConnection, ), Error=warp::reject::Rejection> + Clone {
warp::any()
.map(move || pool.clone())
.and_then(|pool: DBPool| async move {
match pool.get() {
Ok(v) => Ok(DBConnection::from(v)),
Err(_) => AppError::InternalError("Failed to get a database connection").err()
}
})
}

18
backend/src/db/token.rs Normal file
View File

@@ -0,0 +1,18 @@
use diesel::prelude::*;
#[derive(Queryable, Identifiable, Eq, PartialEq, Debug, Associations, AsChangeset)]
#[diesel(belongs_to(crate::db::User, foreign_key = owner_id))]
#[diesel(table_name = crate::schema::tokens)]
#[diesel(treat_none_as_null = true)]
pub struct Token {
pub id: i32,
pub owner_id: i32,
pub exp: i64
}
#[derive(Insertable, Debug)]
#[diesel(table_name = crate::schema::tokens)]
pub struct NewToken {
pub owner_id: i32,
pub exp: i64
}

113
backend/src/db/user.rs Normal file
View File

@@ -0,0 +1,113 @@
use diesel::backend::RawValue;
use diesel::deserialize::{FromSql, FromSqlRow};
use diesel::prelude::*;
use diesel::serialize::{IsNull, Output, ToSql};
use diesel::sql_types::SmallInt;
use diesel::sqlite::Sqlite;
use serde_repr::{Deserialize_repr, Serialize_repr};
#[repr(i16)]
#[derive(Debug, Copy, Clone, Eq, PartialEq, Deserialize_repr, Serialize_repr, FromSqlRow)]
pub enum UserRole {
Disabled = 0,
User = 1,
Admin = 2
}
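// UserRole (and TfaTypes below) are stored as SMALLINT columns; unrecognized values fall back to the safe default on read.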
impl FromSql<SmallInt, Sqlite> for UserRole {
fn from_sql(bytes: RawValue<'_, Sqlite>) -> diesel::deserialize::Result<Self> {
match i16::from_sql(bytes)? {
1 => Ok(UserRole::User),
2 => Ok(UserRole::Admin),
_ => Ok(UserRole::Disabled)
}
}
}
impl ToSql<SmallInt, Sqlite> for UserRole {
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Sqlite>) -> diesel::serialize::Result {
let val: i16 = (*self).into();
out.set_value(val as i32);
Ok(IsNull::No)
}
}
impl From<UserRole> for i16 {
fn from(v: UserRole) -> Self {
match v {
UserRole::Disabled => 0,
UserRole::User => 1,
UserRole::Admin => 2
}
}
}
#[repr(i16)]
#[derive(Debug, Copy, Clone, Eq, PartialEq, Deserialize_repr, Serialize_repr, FromSqlRow)]
pub enum TfaTypes {
None = 0,
Email = 1,
Totp = 2
}
impl FromSql<SmallInt, Sqlite> for TfaTypes {
fn from_sql(bytes: RawValue<'_, Sqlite>) -> diesel::deserialize::Result<Self> {
match i16::from_sql(bytes)? {
1 => Ok(TfaTypes::Email),
2 => Ok(TfaTypes::Totp),
_ => Ok(TfaTypes::None)
}
}
}
impl ToSql<SmallInt, Sqlite> for TfaTypes {
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Sqlite>) -> diesel::serialize::Result {
let val: i16 = (*self).into();
out.set_value(val as i32);
Ok(IsNull::No)
}
}
impl From<TfaTypes> for i16 {
fn from(v: TfaTypes) -> Self {
match v {
TfaTypes::None => 0,
TfaTypes::Email => 1,
TfaTypes::Totp => 2
}
}
}
#[derive(Queryable, Identifiable, Eq, PartialEq, Debug, AsChangeset, Clone)]
#[diesel(table_name = crate::schema::user)]
#[diesel(treat_none_as_null = true)]
pub struct User {
pub id: i32,
pub gitlab: bool,
pub name: String,
pub password: String,
#[diesel(serialize_as = i16)]
pub role: UserRole,
pub root_id: i32,
#[diesel(serialize_as = i16)]
pub tfa_type: TfaTypes,
pub tfa_secret: Option<Vec<u8>>,
pub gitlab_at: Option<String>,
pub gitlab_rt: Option<String>
}
#[derive(Insertable, Debug)]
#[diesel(table_name = crate::schema::user)]
pub struct NewUser {
pub gitlab: bool,
pub name: String,
pub password: String,
#[diesel(serialize_as = i16)]
pub role: UserRole,
pub root_id: i32,
#[diesel(serialize_as = i16)]
pub tfa_type: TfaTypes,
pub tfa_secret: Option<Vec<u8>>,
pub gitlab_at: Option<String>,
pub gitlab_rt: Option<String>
}

194
backend/src/dto.rs Normal file
View File

@@ -0,0 +1,194 @@
#[allow(non_snake_case)]
pub mod responses {
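// Field names are camelCase on purpose: they mirror the JSON keys the previous C++ backend exposed to the frontend.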
use serde::{self, Deserialize, Serialize};
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Error {
pub statusCode: i32,
pub message: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Success {
pub statusCode: i32
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Login {
pub statusCode: i32,
pub jwt: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct TfaSetup {
pub statusCode: i32,
pub secret: String,
pub qrCode: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct UserInfo {
pub statusCode: i32,
pub name: String,
pub gitlab: bool,
pub tfaEnabled: bool
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct AdminUsersEntry {
pub id: i32,
pub gitlab: bool,
pub name: String,
pub role: crate::db::UserRole,
pub tfaEnabled: bool
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct AdminUsers {
pub statusCode: i32,
pub users: Vec<AdminUsersEntry>
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Root {
pub statusCode: i32,
pub rootId: i32
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GetNodeEntry {
pub id: i32,
pub name: String,
pub isFile: bool,
pub preview: bool,
pub parent: Option<i32>,
pub size: Option<i64>
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GetNode {
pub statusCode: i32,
pub id: i32,
pub name: String,
pub isFile: bool,
pub preview: bool,
pub parent: Option<i32>,
pub size: Option<i64>,
pub children: Option<Vec<GetNodeEntry>>
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct NewNode {
pub statusCode: i32,
pub id: i32
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct NodeExists {
pub statusCode: i32,
pub id: i32,
pub exists: bool,
pub isFile: bool
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct DownloadBase64 {
pub statusCode: i32,
pub data: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Type {
pub statusCode: i32,
#[serde(rename = "type")]
pub _type: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct CreateZipDone {
pub statusCode: i32,
pub done: bool,
pub progress: Option<u64>,
pub total: Option<u64>
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GetPathSegment {
pub path: String,
pub node: Option<i32>
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GetPath {
pub segments: Vec<GetPathSegment>
}
}
#[allow(non_snake_case)]
pub mod requests {
use serde::{self, Deserialize, Serialize};
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Admin {
pub user: i32
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct AdminSetRole {
pub user: i32,
pub role: crate::db::UserRole
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct SignUp {
pub username: String,
pub password: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Login {
pub username: String,
pub password: String,
pub otp: Option<String>
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct TfaSetup {
pub mail: bool
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct TfaComplete {
pub mail: bool,
pub code: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ChangePassword {
pub oldPassword: String,
pub newPassword: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct CreateNode {
pub parent: i32,
pub name: String
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct CreateZip {
pub nodes: Vec<i32>
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Download {
pub jwtToken: String,
pub id: i32
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct DownloadMulti {
pub jwtToken: String,
pub id: String
}
}

View File

@@ -1,69 +0,0 @@
#ifndef BACKEND_DTO_H
#define BACKEND_DTO_H
#include <drogon/HttpResponse.h>
#include "db/db.h"
namespace dto {
template<typename T>
std::optional<T> json_get(const Json::Value& j, const std::string& key) {
return j.isMember(key)
? std::make_optional(j[key].as<T>())
: std::nullopt;
}
inline db::User get_user(const drogon::HttpRequestPtr& req) {
return req->attributes()->get<db::User>("user");
}
inline db::Token get_token(const drogon::HttpRequestPtr& req) {
return req->attributes()->get<db::Token>("token");
}
namespace Responses {
struct GetUsersEntry {
GetUsersEntry(uint64_t id, bool gitlab, bool tfa, std::string name, db::UserRole role)
: id(id), gitlab(gitlab), tfa(tfa), name(std::move(name)), role(role) {}
uint64_t id;
bool gitlab, tfa;
std::string name;
db::UserRole role;
};
struct GetNodeEntry {
explicit GetNodeEntry(const db::INode& node) : id(node.getValueOfId()), name(node.getValueOfName()), is_file(node.getValueOfIsFile() != 0), has_preview(node.getValueOfHasPreview() != 0), parent(node.getParentId()) {
if (node.getValueOfIsFile() != 0) size = node.getValueOfSize();
}
uint64_t id, size;
std::string name;
bool is_file, has_preview;
std::shared_ptr<uint64_t> parent;
};
drogon::HttpResponsePtr get_error_res(drogon::HttpStatusCode, const std::string &msg);
drogon::HttpResponsePtr get_success_res();
drogon::HttpResponsePtr get_success_res(Json::Value &);
inline drogon::HttpResponsePtr get_badreq_res(const std::string &msg) { return get_error_res(drogon::HttpStatusCode::k400BadRequest, msg); }
inline drogon::HttpResponsePtr get_unauth_res(const std::string &msg) { return get_error_res(drogon::HttpStatusCode::k401Unauthorized, msg); }
inline drogon::HttpResponsePtr get_forbdn_res(const std::string &msg) { return get_error_res(drogon::HttpStatusCode::k403Forbidden, msg); }
drogon::HttpResponsePtr get_login_res(const std::string &jwt);
drogon::HttpResponsePtr get_tfa_setup_res(const std::string& secret, const std::string& qrcode);
drogon::HttpResponsePtr get_user_info_res(const std::string& name, bool gitlab, bool tfa);
drogon::HttpResponsePtr get_admin_users_res(const std::vector<GetUsersEntry>& users);
drogon::HttpResponsePtr get_root_res(uint64_t root);
drogon::HttpResponsePtr get_node_res(const GetNodeEntry& node, const std::vector<GetNodeEntry>& children);
drogon::HttpResponsePtr get_new_node_res(uint64_t id);
drogon::HttpResponsePtr get_node_exists_res(uint64_t id, bool file);
drogon::HttpResponsePtr get_download_base64_res(const std::string& data);
drogon::HttpResponsePtr get_type_res(const std::string& type);
drogon::HttpResponsePtr get_create_zip_done_res();
drogon::HttpResponsePtr get_create_zip_done_res(uint64_t progress, uint64_t total);
}
}
#endif //BACKEND_DTO_H

View File

@@ -1,126 +0,0 @@
#include "dto.h"
namespace dto::Responses {
drogon::HttpResponsePtr get_error_res(drogon::HttpStatusCode code, const std::string& msg) {
Json::Value json;
json["statusCode"] = static_cast<int>(code);
json["message"] = msg;
auto res = drogon::HttpResponse::newHttpJsonResponse(json);
res->setStatusCode(code);
return res;
}
drogon::HttpResponsePtr get_success_res() {
Json::Value json;
return get_success_res(json);
}
drogon::HttpResponsePtr get_success_res(Json::Value& json) {
json["statusCode"] = 200;
auto res = drogon::HttpResponse::newHttpJsonResponse(json);
res->setStatusCode(drogon::HttpStatusCode::k200OK);
return res;
}
drogon::HttpResponsePtr get_login_res(const std::string &jwt) {
Json::Value json;
json["jwt"] = jwt;
return get_success_res(json);
}
drogon::HttpResponsePtr get_tfa_setup_res(const std::string& secret, const std::string& qrcode) {
Json::Value json;
json["secret"] = secret;
json["qrCode"] = qrcode;
return get_success_res(json);
}
drogon::HttpResponsePtr get_user_info_res(const std::string &name, bool gitlab, bool tfa) {
Json::Value json;
json["name"] = name;
json["gitlab"] = gitlab;
json["tfaEnabled"] = tfa;
return get_success_res(json);
}
drogon::HttpResponsePtr get_admin_users_res(const std::vector<GetUsersEntry>& users) {
Json::Value json;
for (const GetUsersEntry& user : users) {
Json::Value entry;
entry["id"] = user.id;
entry["gitlab"] = user.gitlab;
entry["name"] = user.name;
entry["role"] = user.role;
entry["tfaEnabled"] = user.tfa;
json["users"].append(entry);
}
return get_success_res(json);
}
drogon::HttpResponsePtr get_root_res(uint64_t root) {
Json::Value json;
json["rootId"] = root;
return get_success_res(json);
}
Json::Value parse_node(const GetNodeEntry& node) {
Json::Value json;
json["id"] = node.id;
json["name"] = node.name;
json["isFile"] = node.is_file;
json["preview"] = node.has_preview;
json["parent"] = (node.parent != nullptr) ? *node.parent : Json::Value::nullSingleton();
if (node.is_file) json["size"] = node.size;
return json;
}
drogon::HttpResponsePtr get_node_res(const GetNodeEntry& node, const std::vector<GetNodeEntry>& children) {
Json::Value json = parse_node(node);
if (!node.is_file) {
json["children"] = Json::Value(Json::arrayValue);
for (const GetNodeEntry& child : children)
json["children"].append(parse_node(child));
}
return get_success_res(json);
}
drogon::HttpResponsePtr get_new_node_res(uint64_t id) {
Json::Value json;
json["id"] = id;
return get_success_res(json);
}
drogon::HttpResponsePtr get_node_exists_res(uint64_t id, bool file) {
Json::Value json;
json["id"] = id;
json["exists"] = true;
json["isFile"] = file;
return get_success_res(json);
}
drogon::HttpResponsePtr get_download_base64_res(const std::string &data) {
Json::Value json;
json["data"] = data;
return get_success_res(json);
}
drogon::HttpResponsePtr get_type_res(const std::string &type) {
Json::Value json;
json["type"] = type;
return get_success_res(json);
}
drogon::HttpResponsePtr get_create_zip_done_res() {
Json::Value json;
json["done"] = true;
return get_success_res(json);
}
drogon::HttpResponsePtr get_create_zip_done_res(uint64_t progress, uint64_t total) {
Json::Value json;
json["done"] = false;
json["progress"] = progress;
json["total"] = total;
return get_success_res(json);
}
}

View File

@@ -1,82 +0,0 @@
#include "filters.h"
#include <drogon/utils/coroutine.h>
#include <jwt-cpp/traits/kazuho-picojson/traits.h>
#include <jwt-cpp/jwt.h>
#include "db/db.h"
#include "dto/dto.h"
#include "controllers/controllers.h"
void cleanup_tokens(db::MapperToken& mapper) {
const uint64_t now = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
mapper.deleteBy(
db::Criteria(db::Token::Cols::_exp, db::CompareOps::LE, now)
);
}
void Login::doFilter(const drogon::HttpRequestPtr& req, drogon::FilterCallback&& cb, drogon::FilterChainCallback&& ccb) {
std::string token_str;
if (req->path() == "/api/fs/download" || req->path() == "/api/fs/download_multi") {
token_str = req->getParameter("jwtToken");
} else {
std::string auth_header = req->getHeader("Authorization");
if (auth_header.empty() || (!auth_header.starts_with("Bearer ")))
return cb(dto::Responses::get_unauth_res("Unauthorized"));
token_str = auth_header.substr(7);
}
try {
auto token = jwt::decode<jwt::traits::kazuho_picojson>(token_str);
jwt::verify<jwt::traits::kazuho_picojson>()
.allow_algorithm(jwt::algorithm::hs256{api::auth::get_jwt_secret()})
.verify(token);
uint64_t token_id = token.get_payload_claim("jti").as_int();
uint64_t user_id = token.get_payload_claim("sub").as_int();
auto db = drogon::app().getDbClient();
db::MapperUser user_mapper(db);
db::MapperToken token_mapper(db);
cleanup_tokens(token_mapper);
db::Token db_token = token_mapper.findByPrimaryKey(token_id);
db::User db_user = user_mapper.findByPrimaryKey(db_token.getValueOfOwnerId());
if (db_user.getValueOfId() != user_id) throw std::exception();
if (db::User_getEnumRole(db_user) == db::UserRole::DISABLED) throw std::exception();
if (db_user.getValueOfGitlab() != 0) {
auto info = api::auth::get_gitlab_user(db_user.getValueOfGitlabAt());
if (!info.has_value()) {
auto tokens = api::auth::get_gitlab_tokens(db_user.getValueOfGitlabRt(), true);
info = api::auth::get_gitlab_user(tokens->at);
if (!tokens.has_value() || !info.has_value()) {
api::auth::revoke_all(db_user);
throw std::exception();
}
db_user.setGitlabAt(tokens->at);
db_user.setGitlabRt(tokens->rt);
user_mapper.update(db_user);
}
if (info->name != db_user.getValueOfName()) {
api::auth::revoke_all(db_user);
throw std::exception();
}
}
req->attributes()->insert("token", db_token);
req->attributes()->insert("user", db_user);
ccb();
} catch (const std::exception&) {
cb(dto::Responses::get_unauth_res("Unauthorized"));
}
}
void Admin::doFilter(const drogon::HttpRequestPtr& req, drogon::FilterCallback&& cb, drogon::FilterChainCallback&& ccb) {
db::User user = dto::get_user(req);
if (db::User_getEnumRole(user) != db::UserRole::ADMIN)
cb(dto::Responses::get_forbdn_res("Forbidden"));
else
ccb();
}

View File

@@ -1,14 +0,0 @@
#ifndef BACKEND_FILTERS_H
#define BACKEND_FILTERS_H
#include <drogon/HttpFilter.h>
struct Login : public drogon::HttpFilter<Login> {
void doFilter(const drogon::HttpRequestPtr&, drogon::FilterCallback&&, drogon::FilterChainCallback&&) override;
};
struct Admin : public drogon::HttpFilter<Admin> {
void doFilter(const drogon::HttpRequestPtr&, drogon::FilterCallback&&, drogon::FilterChainCallback&&) override;
};
#endif //BACKEND_FILTERS_H

View File

@@ -1,197 +0,0 @@
#include <filesystem>
#include <fstream>
#include <drogon/drogon.h>
#include "dto/dto.h"
bool dev_mode = false;
void cleanup() {
std::cout << "Stopping..." << std::endl;
drogon::app().quit();
std::cout << "Cleanup up uploads...";
std::filesystem::remove_all("uploads");
std::cout << " [Done]" << std::endl;
std::cout << "Removing temp folder..." << std::flush;
std::filesystem::remove_all("temp");
std::cout << " [Done]" << std::endl;
std::cout << "Goodbye!" << std::endl;
}
std::string get_index_content() {
std::ifstream file("./static/index.html");
return {std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()};
}
void default_handler(const drogon::HttpRequestPtr& req, std::function<void(const drogon::HttpResponsePtr&)>&& cbk) {
static std::string index_html = get_index_content();
if (req->path().starts_with("/api")) {
std::cout << "Unknown api request: " << req->getMethodString() << " " << req->path() << std::endl;
cbk(drogon::HttpResponse::newNotFoundResponse());
} else {
if (dev_mode) cbk(drogon::HttpResponse::newFileResponse("./static/index.html"));
else cbk(drogon::HttpResponse::newFileResponse((unsigned char*)index_html.data(), index_html.size(), "", drogon::CT_TEXT_HTML));
}
}
int main(int argc, char* argv[]) {
std::vector<std::string> args(argv+1, argv+argc);
if (std::find(args.begin(), args.end(), "--dev") != args.end()) dev_mode = true;
if (dev_mode) std::cout << "Starting in development mode" << std::endl;
std::cout << "Setting up..." << std::endl;
if (!std::filesystem::exists("files")) {
std::cout << "Creating files..." << std::flush;
std::filesystem::create_directory("files");
std::cout << " [Done]" << std::endl;
}
if (!std::filesystem::exists("logs")) {
std::cout << "Creating logs..." << std::flush;
std::filesystem::create_directory("logs");
std::cout << " [Done]" << std::endl;
}
if (std::filesystem::exists("temp")) {
std::cout << "Removing existing temp folder..." << std::flush;
std::filesystem::remove_all("temp");
std::cout << " [Done]" << std::endl;
}
std::cout << "Creating temp folder..." << std::flush;
std::filesystem::create_directory("temp");
std::cout << " [Done]" << std::endl;
auto* loop = drogon::app().getLoop();
loop->queueInLoop([]{
std::cout << "Starting..." << std::endl;
std::cout << "Creating db tables..." << std::flush;
auto db = drogon::app().getDbClient();
db->execSqlSync("CREATE TABLE IF NOT EXISTS 'tokens' (\n"
" 'id' INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n"
" 'owner_id' INTEGER NOT NULL,\n"
" 'exp' INTEGER NOT NULL\n"
")");
db->execSqlSync("CREATE TABLE IF NOT EXISTS 'user' (\n"
" 'id' INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n"
" 'gitlab' INTEGER NOT NULL,\n"
" 'name' TEXT NOT NULL,\n"
" 'password' TEXT NOT NULL,\n"
" 'role' INTEGER NOT NULL,\n"
" 'root_id' INTEGER NOT NULL,\n"
" 'tfa_type' INTEGER NOT NULL,\n"
" 'tfa_secret' BLOB,\n"
" 'gitlab_at' TEXT,\n"
" 'gitlab_rt' TEXT\n"
")");
db->execSqlSync("CREATE TABLE IF NOT EXISTS 'inode' (\n"
" 'id' INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n"
" 'is_file' INTEGER NOT NULL,\n"
" 'name' TEXT,\n"
" 'parent_id' INTEGER,\n"
" 'owner_id' INTEGER NOT NULL,\n"
" 'size' INTEGER,\n"
" 'has_preview' INTEGER NOT NULL\n"
")");
std::cout << " [Done]" << std::endl;
std::cout << "Started!" << std::endl;
std::cout << "Registered paths: " << std::endl;
auto handlers = drogon::app().getHandlersInfo();
for (const auto& handler : handlers) {
std::cout << " ";
if (std::get<1>(handler) == drogon::HttpMethod::Post) std::cout << "POST ";
else std::cout << "GET ";
std::string func = std::get<2>(handler).substr(16);
func.resize(30, ' ');
std::cout << '[' << func << "] ";
std::cout << std::get<0>(handler) << std::endl;
}
std::cout << "Listening on:" << std::endl;
auto listeners = drogon::app().getListeners();
for (const auto& listener : listeners) {
std::cout << " " << listener.toIpPort() << std::endl;
}
});
Json::Value access_logger;
access_logger["name"] = "drogon::plugin::AccessLogger";
Json::Value smtp_mail;
smtp_mail["name"] = "SMTPMail";
Json::Value config;
config["plugins"].append(access_logger);
config["plugins"].append(smtp_mail);
if (!std::filesystem::exists("config.json")) {
std::cerr << "config.json missing" << std::endl;
return 1;
}
std::ifstream config_file("config.json");
config_file >> config["custom_config"];
if (!config["custom_config"].isObject()) {
std::cerr << "config.json must be an object" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("gitlab_id")) {
std::cerr << "config.json missing gitlab_id" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("gitlab_secret")) {
std::cerr << "config.json missing gitlab_secret" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("gitlab_url")) {
std::cerr << "config.json missing gitlab_url" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("gitlab_api_url")) {
std::cerr << "config.json missing gitlab_api_url" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("gitlab_redirect_url")) {
std::cerr << "config.json missing gitlab_redirect_url" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("smtp_server")) {
std::cerr << "config.json missing smtp_server" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("smtp_port")) {
std::cerr << "config.json missing smtp_port" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("smtp_user")) {
std::cerr << "config.json missing smtp_user" << std::endl;
return 1;
}
if (!config["custom_config"].isMember("smtp_password")) {
std::cerr << "config.json missing smtp_password" << std::endl;
return 1;
}
drogon::app()
.setClientMaxBodySize(std::numeric_limits<size_t>::max())
.loadConfigJson(config)
.createDbClient("sqlite3", "", 0, "", "", "", 1, "sqlite.db")
.setDefaultHandler(default_handler)
.setDocumentRoot("./static")
.setBrStatic(true)
.setStaticFilesCacheTime(dev_mode ? -1 : 0)
.setLogPath("./logs")
.setLogLevel(trantor::Logger::LogLevel::kDebug)
.setIntSignalHandler(cleanup)
.setTermSignalHandler(cleanup)
.addListener("0.0.0.0", 2345)
.setThreadNum(8);
std::cout << "Setup done!" << std::endl;
drogon::app().run();
}

34
backend/src/main.rs Normal file
View File

@@ -0,0 +1,34 @@
mod db;
mod schema;
mod dto;
mod routes;
mod config;
#[tokio::main]
async fn main() {
console_subscriber::init();
pretty_env_logger::formatted_builder().filter_level(log::LevelFilter::Info).init();
let _ = config::CONFIG;
let pool: db::DBPool = db::build_pool();
db::run_migrations(&mut pool.get().unwrap());
if !std::path::Path::new("files").exists() {
std::fs::create_dir("files").expect("Failed to create files directory");
}
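// Serve on 0.0.0.0:2345 and shut the server down gracefully once Ctrl-C is received.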
let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
let (_addr, server) = warp::serve(routes::build_routes(pool.clone())).bind_with_graceful_shutdown(([0, 0, 0, 0], 2345), async {
shutdown_rx.await.ok();
});
tokio::task::spawn(server);
tokio::signal::ctrl_c().await.expect("Failed to wait for ctrl-c");
println!("Quitting");
shutdown_tx.send(()).expect("Failed to shutdown server");
}

129
backend/src/routes/admin.rs Normal file
View File

@@ -0,0 +1,129 @@
use warp::{Filter, Reply};
use crate::db::{DBConnection, DBPool, with_db};
use crate::dto;
use crate::routes::{AppError, get_reply};
use crate::routes::filters::{admin, UserInfo};
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl Reply, Error = warp::Rejection> + Clone {
let users = warp::path!("admin" / "users")
.and(warp::get())
.and(admin(db.clone()))
.and(with_db(db.clone()))
.and_then(users);
let set_role = warp::path!("admin" / "set_role")
.and(warp::post())
.and(warp::body::json())
.and(admin(db.clone()))
.and(with_db(db.clone()))
.and_then(set_role);
let logout = warp::path!("admin" / "logout")
.and(warp::post())
.and(warp::body::json())
.and(admin(db.clone()))
.and(with_db(db.clone()))
.and_then(logout);
let delete_user = warp::path!("admin" / "delete")
.and(warp::post())
.and(warp::body::json())
.and(admin(db.clone()))
.and(with_db(db.clone()))
.and_then(delete_user);
let disable_2fa = warp::path!("admin" / "disable_2fa")
.and(warp::post())
.and(warp::body::json())
.and(admin(db.clone()))
.and(with_db(db.clone()))
.and_then(disable_2fa);
let is_admin = warp::path!("admin" / "is_admin")
.and(warp::get())
.and(admin(db.clone()))
.and_then(|_| async { get_reply(&dto::responses::Success {
statusCode: 200
}) });
let get_token = warp::path!("admin" / "get_token" / i32)
.and(warp::get())
.and(admin(db.clone()))
.and(with_db(db))
.and_then(get_token);
users.or(set_role).or(logout).or(delete_user).or(disable_2fa).or(is_admin).or(get_token)
}
async fn users(_: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let users = db.get_users();
let mut res = dto::responses::AdminUsers {
statusCode: 200,
users: Vec::new()
};
for user in users {
res.users.push(dto::responses::AdminUsersEntry {
id: user.id,
gitlab: user.gitlab,
name: user.name,
role: user.role,
tfaEnabled: user.tfa_type != crate::db::TfaTypes::None
});
}
get_reply(&res)
}
async fn set_role(data: dto::requests::AdminSetRole, _: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let mut user = db.get_user(data.user)
.ok_or(AppError::Forbidden("Invalid user"))?;
user.role = data.role;
db.save_user(&user);
get_reply(&dto::responses::Success {
statusCode: 200
})
}
async fn logout(data: dto::requests::Admin, _: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
db.delete_all_tokens(data.user);
get_reply(&dto::responses::Success {
statusCode: 200
})
}
async fn delete_user(data: dto::requests::Admin, _: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let user = db.get_user(data.user)
.ok_or(AppError::Forbidden("Invalid user"))?;
db.delete_all_tokens(data.user);
let root_node = super::fs::get_node_and_validate(&user, user.root_id, &mut db).expect("Failed to get root node for deleting");
super::fs::delete_node_root(&root_node, &mut db);
db.delete_user(&user);
get_reply(&dto::responses::Success {
statusCode: 200
})
}
async fn disable_2fa(data: dto::requests::Admin, _: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let mut user = db.get_user(data.user)
.ok_or(AppError::Forbidden("Invalid user"))?;
user.tfa_type = crate::db::TfaTypes::None;
db.save_user(&user);
get_reply(&dto::responses::Success {
statusCode: 200
})
}
async fn get_token(user: i32, _: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let user = db.get_user(user)
.ok_or(AppError::Forbidden("Invalid user"))?;
get_reply(&dto::responses::Login {
statusCode: 200,
jwt: super::auth::get_token(&user, &mut db)
})
}

View File

@@ -0,0 +1,115 @@
use warp::Filter;
use crate::db::{DBConnection, DBPool, with_db};
use crate::db::{TfaTypes, UserRole};
use crate::dto;
use crate::dto::requests::ChangePassword;
use crate::routes::{AppError, get_reply};
use crate::routes::filters::{authenticated, UserInfo};
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
let login = warp::path!("auth" / "login")
.and(warp::post())
.and(warp::body::json())
.and(with_db(db.clone()))
.and_then(login);
let signup = warp::path!("auth" / "signup")
.and(warp::post())
.and(warp::body::json())
.and(with_db(db.clone()))
.and_then(signup);
let refresh = warp::path!("auth" / "refresh")
.and(warp::post())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(refresh);
let logout_all = warp::path!("auth" / "logout_all")
.and(warp::post())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(logout_all);
let change_password = warp::path!("auth" / "change_password")
.and(warp::post())
.and(warp::body::json())
.and(authenticated(db.clone()))
.and(with_db(db))
.and_then(change_password);
login.or(signup).or(refresh).or(logout_all).or(change_password)
}
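// Password login: verifies the Argon2 hash, rejects disabled accounts and enforces 2FA; when a code is required but missing, a mail code is sent (for e-mail 2FA) and a bare Success response is returned instead of a JWT.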
async fn login(data: dto::requests::Login, mut db: DBConnection)
-> Result<impl warp::Reply, warp::Rejection> {
let user = db.find_user(&data.username, false)
.ok_or(AppError::Unauthorized("Invalid username or password"))?;
if !argon2::verify_encoded(user.password.as_str(), data.password.as_bytes()).unwrap_or(false) {
return AppError::Unauthorized("Invalid username or password").err();
}
if user.role == UserRole::Disabled {
return AppError::Unauthorized("Account is disabled").err();
}
if user.tfa_type != TfaTypes::None {
if let Some(otp) = data.otp {
if !super::tfa::verify2fa(&user, otp) {
return AppError::Unauthorized("Incorrect 2fa").err();
}
} else {
if user.tfa_type == TfaTypes::Email { super::tfa::send_2fa_mail(&user); }
return get_reply(&dto::responses::Success {
statusCode: 200
});
}
}
get_reply(&dto::responses::Login {
statusCode: 200,
jwt: super::get_token(&user, &mut db)
})
}
async fn signup(data: dto::requests::SignUp, mut db: DBConnection)
-> Result<impl warp::Reply, warp::Rejection> {
if db.find_user(&data.username, false).is_some() {
return AppError::BadRequest("Username is already taken").err();
}
db.create_user_password(data.username, super::hash_password(&data.password));
get_reply(&dto::responses::Success {
statusCode: 200
})
}
async fn refresh(info: UserInfo, mut db: DBConnection) -> Result<impl warp::Reply, warp::Rejection> {
db.delete_token(info.1.id);
get_reply(&dto::responses::Login {
statusCode: 200,
jwt: super::get_token(&info.0, &mut db)
})
}
async fn logout_all(info: UserInfo, mut db: DBConnection) -> Result<impl warp::Reply, warp::Rejection> {
db.delete_all_tokens(info.0.id);
get_reply(&dto::responses::Success {
statusCode: 200
})
}
async fn change_password(data: ChangePassword, mut info: UserInfo, mut db: DBConnection) -> Result<impl warp::Reply, warp::Rejection> {
if !argon2::verify_encoded(info.0.password.as_str(), data.oldPassword.as_bytes()).unwrap_or(false) {
return AppError::Unauthorized("Old password is wrong").err();
}
info.0.password = super::hash_password(&data.newPassword);
db.save_user(&info.0);
db.delete_all_tokens(info.0.id);
get_reply(&dto::responses::Success {
statusCode: 200
})
}

View File

@@ -0,0 +1,108 @@
use cached::proc_macro::cached;
use lazy_static::lazy_static;
use warp::{Filter, Reply};
use crate::config::CONFIG;
use crate::db::{DBConnection, DBPool, with_db};
use crate::routes::AppError;
#[derive(serde::Deserialize, Clone, Debug)]
pub struct GitlabTokens {
pub access_token: String,
pub refresh_token: String
}
#[derive(serde::Deserialize, Clone, Debug)]
pub struct GitlabUser {
pub username: String,
pub is_admin: bool
}
#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)]
pub struct GitlabCallbackQuery {
pub code: String
}
lazy_static! {
static ref REDIRECT_URL: String = CONFIG.gitlab_redirect_url.clone() + "/api/auth/gitlab_callback";
static ref TOKEN_URL: String = format!("{}/oauth/token", CONFIG.gitlab_api_url.clone());
static ref USER_URL: String = format!("{}/api/v4/user", CONFIG.gitlab_api_url.clone());
static ref AUTHORIZE_URL: String = format!("{}/oauth/authorize", CONFIG.gitlab_url.clone());
}
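// Exchanges either an OAuth authorization code or a refresh token (when `token` is true) for a new GitLab access/refresh token pair.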
pub fn get_gitlab_token(code_or_token: String, token: bool) -> Option<GitlabTokens> {
let mut req = ureq::post(&TOKEN_URL)
.query("redirect_uri", &REDIRECT_URL)
.query("client_id", &CONFIG.gitlab_id)
.query("client_secret", &CONFIG.gitlab_secret);
if token {
req = req
.query("refresh_token", &code_or_token)
.query("grant_type", "refresh_token");
} else {
req = req
.query("code", &code_or_token)
.query("grant_type", "authorization_code");
}
req.call().ok()?.into_json().ok()
}
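// Looks up the GitLab user behind an access token; results are memoized for 5 minutes to keep per-request auth checks cheap.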
#[cached(time=300, time_refresh=false, option=true)]
pub fn get_gitlab_user(token: String) -> Option<GitlabUser> {
ureq::get(&USER_URL)
.set("Authorization", &format!("Bearer {}", token))
.call()
.ok()?
.into_json().ok()
}
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl Reply, Error = warp::Rejection> + Clone {
let gitlab = warp::path!("auth" / "gitlab")
.and(warp::get())
.and_then(gitlab);
let gitlab_callback = warp::path!("auth" / "gitlab_callback")
.and(warp::get())
.and(warp::query::query::<GitlabCallbackQuery>())
.and(with_db(db))
.and_then(gitlab_callback);
gitlab.or(gitlab_callback)
}
async fn gitlab() -> Result<impl Reply, warp::Rejection> {
let uri = format!("{}?redirect_uri={}&client_id={}&scope=read_user&response_type=code", AUTHORIZE_URL.as_str(), REDIRECT_URL.as_str(), CONFIG.gitlab_id);
Ok(warp::redirect::found(uri.parse::<warp::http::Uri>().expect("Failed to parse gitlab auth uri")))
}
async fn gitlab_callback(code: GitlabCallbackQuery, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
use crate::db::UserRole;
let tokens = get_gitlab_token(code.code, false).ok_or(AppError::Unauthorized("Invalid code"))?;
let gitlab_user = get_gitlab_user(tokens.access_token.clone()).ok_or(AppError::Unauthorized("Invalid code"))?;
let user = db.find_user(&gitlab_user.username, true);
let user = match user {
Some(mut v) => {
v.gitlab_at = Some(tokens.access_token);
v.gitlab_rt = Some(tokens.refresh_token);
db.save_user(&v);
v
},
None => {
db.create_user_gitlab(
gitlab_user.username,
if gitlab_user.is_admin { UserRole::Admin } else { UserRole::Disabled },
tokens.access_token,
tokens.refresh_token
)
}
};
if user.role == UserRole::Disabled {
Ok(warp::reply::html("<!DOCTYPE html><html><h2>Your account is disabled, please contact an admin.<br/><a href=\"/login\">Go to login page</a></h2></html>").into_response())
} else {
let uri = format!("/set_token?token={}", super::get_token(&user, &mut db));
Ok(warp::redirect::found(uri.parse::<warp::http::Uri>().expect("Failed to parse set_token uri")).into_response())
}
}

View File

@@ -0,0 +1,75 @@
mod basic;
mod tfa;
pub mod gitlab;
use std::ops::Add;
use lazy_static::lazy_static;
use ring::rand;
use ring::rand::SecureRandom;
use warp::Filter;
use crate::db::DBPool;
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
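// Touch the lazy RNG once with a throwaway buffer so initialization failures surface at startup rather than on the first request.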
SEC_RANDOM.fill(&mut [0; 1]).expect("Failed to init secure random");
basic::build_routes(db.clone())
.or(tfa::build_routes(db.clone()))
.or(gitlab::build_routes(db))
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct JWTClaims {
pub exp: i64,
pub iat: i64,
pub jti: i32,
pub sub: i32
}
pub static JWT_ALGORITHM: jsonwebtoken::Algorithm = jsonwebtoken::Algorithm::HS512;
lazy_static! {
pub static ref SEC_RANDOM: rand::SystemRandom = rand::SystemRandom::new();
pub static ref JWT_SECRET: Vec<u8> = get_jwt_secret();
pub static ref JWT_DECODE_KEY: jsonwebtoken::DecodingKey = jsonwebtoken::DecodingKey::from_secret(JWT_SECRET.as_slice());
pub static ref JWT_ENCODE_KEY: jsonwebtoken::EncodingKey = jsonwebtoken::EncodingKey::from_secret(JWT_SECRET.as_slice());
}
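// Loads the HMAC secret from jwt.secret, generating and persisting a random 128-byte secret on first run.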
fn get_jwt_secret() -> Vec<u8> {
let secret = std::fs::read("jwt.secret");
if let Ok(secret) = secret {
secret
} else {
let mut secret: [u8; 128] = [0; 128];
SEC_RANDOM.fill(&mut secret).expect("Failed to generate jwt secret");
std::fs::write("jwt.secret", secret).expect("Failed to write jwt secret");
Vec::from(secret)
}
}
pub fn get_token(user: &crate::db::User, db: &mut crate::db::DBConnection) -> String {
let iat = chrono::Utc::now();
let exp = iat.add(chrono::Duration::hours(24)).timestamp();
let iat = iat.timestamp();
let token = db.create_token(user.id, exp);
let claims = JWTClaims {
exp,
iat,
jti: token.id,
sub: user.id
};
jsonwebtoken::encode(&jsonwebtoken::Header::new(JWT_ALGORITHM), &claims, &JWT_ENCODE_KEY)
.expect("Failed to create JWT token")
}
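// Hashes passwords with Argon2id, a 64 MiB memory cost and a fresh random 16-byte salt.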
pub fn hash_password(password: &String) -> String {
let mut salt = [0_u8; 16];
SEC_RANDOM.fill(&mut salt).expect("Failed to generate salt");
let config = argon2::Config {
mem_cost: 64 * 1024,
variant: argon2::Variant::Argon2id,
..Default::default()
};
argon2::hash_encoded(password.as_bytes(), &salt, &config).expect("Failed to hash password")
}

View File

@@ -0,0 +1,136 @@
use lazy_static::lazy_static;
use lettre::Transport;
use ring::rand::SecureRandom;
use warp::Filter;
use crate::config::CONFIG;
use crate::db::{DBConnection, DBPool, with_db, TfaTypes};
use crate::dto;
use crate::routes::{AppError, get_reply};
use crate::routes::filters::{authenticated, UserInfo};
fn build_mail_sender() -> lettre::SmtpTransport {
lettre::SmtpTransport::builder_dangerous(CONFIG.smtp_server.clone())
.port(CONFIG.smtp_port)
.tls(
lettre::transport::smtp::client::Tls::Required(
lettre::transport::smtp::client::TlsParameters::new(
CONFIG.smtp_server.clone()
).unwrap()
)
)
.credentials(lettre::transport::smtp::authentication::Credentials::new(CONFIG.smtp_user.clone(), CONFIG.smtp_password.clone()))
.build()
}
lazy_static! {
static ref MAIL_SENDER: lettre::SmtpTransport = build_mail_sender();
}
fn get_totp(user: &crate::db::User) -> totp_rs::TOTP {
totp_rs::TOTP::from_rfc6238(
totp_rs::Rfc6238::new(
6,
user.tfa_secret.clone().unwrap(),
Some("MFileserver".to_owned()),
user.name.clone()
).unwrap()
).unwrap()
}
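// Verifies a 2FA code: e-mail codes are accepted for up to 10 time steps (about 5 minutes) to cover delivery delay, TOTP codes only for the current step.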
pub fn verify2fa(user: &crate::db::User, code: String) -> bool {
let allowed_skew = if user.tfa_type == TfaTypes::Totp {0} else {10};
let totp = get_totp(user);
let time = std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs();
let base_step = time / totp.step - allowed_skew;
for i in 0..allowed_skew + 1 {
let step = (base_step + i) * totp.step;
if totp.generate(step).eq(&code) {
return true;
}
}
false
}
pub fn send_2fa_mail(user: &crate::db::User) {
let totp = get_totp(user);
let code = totp.generate_current().unwrap();
let mail = lettre::Message::builder()
.from("fileserver@mattv.de".parse().unwrap())
.to(user.name.parse().unwrap())
.subject("MFileserver - Email 2fa code")
.body(format!("Your code is: {}\r\nIt is valid for 5 minutes", code))
.unwrap();
MAIL_SENDER.send(&mail).expect("Failed to send mail");
}
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
let tfa_setup = warp::path!("auth" / "2fa" / "setup")
.and(warp::post())
.and(warp::body::json())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(tfa_setup);
let tfa_complete = warp::path!("auth" / "2fa" / "complete")
.and(warp::post())
.and(warp::body::json())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(tfa_complete);
let tfa_disable = warp::path!("auth" / "2fa" / "disable")
.and(warp::post())
.and(authenticated(db.clone()))
.and(with_db(db))
.and_then(tfa_disable);
tfa_setup.or(tfa_complete).or(tfa_disable)
}
async fn tfa_setup(data: dto::requests::TfaSetup, mut info: UserInfo, mut db: DBConnection)
-> Result<impl warp::Reply, warp::Rejection> {
let mut secret: [u8; 32] = [0; 32];
super::SEC_RANDOM.fill(&mut secret).expect("Failed to generate secret");
let secret = Vec::from(secret);
info.0.tfa_secret = Some(secret);
db.save_user(&info.0);
if data.mail {
send_2fa_mail(&info.0);
get_reply(&dto::responses::Success {
statusCode: 200
})
} else {
let totp = get_totp(&info.0);
get_reply(&dto::responses::TfaSetup {
statusCode: 200,
secret: totp.get_secret_base32(),
qrCode: "data:image/png;base64,".to_owned() + &totp.get_qr().expect("Failed to generate qr code")
})
}
}
async fn tfa_complete(data: dto::requests::TfaComplete, mut info: UserInfo, mut db: DBConnection)
-> Result<impl warp::Reply, warp::Rejection> {
info.0.tfa_type = if data.mail { TfaTypes::Email } else { TfaTypes::Totp };
if verify2fa(&info.0, data.code) {
db.save_user(&info.0);
db.delete_all_tokens(info.0.id);
get_reply(&dto::responses::Success {
statusCode: 200
})
} else {
AppError::BadRequest("Incorrect 2fa code").err()
}
}
async fn tfa_disable(mut info: UserInfo, mut db: DBConnection)
-> Result<impl warp::Reply, warp::Rejection> {
info.0.tfa_secret = None;
info.0.tfa_type = TfaTypes::None;
db.save_user(&info.0);
db.delete_all_tokens(info.0.id);
get_reply(&dto::responses::Success {
statusCode: 200
})
}

View File

@@ -0,0 +1,90 @@
use warp::Filter;
use warp::http::{HeaderMap, HeaderValue};
use crate::db::UserRole;
use crate::db::{DBConnection, DBPool, with_db};
use crate::routes::AppError;
use crate::routes::auth;
pub type UserInfo = (crate::db::User, crate::db::Token);
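// Filters that resolve the Authorization header into (User, Token); `admin` additionally requires the Admin role.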
pub fn authenticated(db: DBPool) -> impl Filter<Extract=(UserInfo,), Error=warp::reject::Rejection> + Clone {
warp::header::headers_cloned()
.map(move |_headers: HeaderMap<HeaderValue>| _headers)
.and(with_db(db))
.and_then(authorize)
}
pub fn admin(db: DBPool) -> impl Filter<Extract=(UserInfo, ), Error=warp::reject::Rejection> + Clone {
warp::header::headers_cloned()
.map(move |_headers: HeaderMap<HeaderValue>| _headers)
.and(with_db(db))
.and_then(|_headers, db| async {
let info = authorize(_headers, db).await?;
if info.0.role == UserRole::Admin {
Ok(info)
} else {
AppError::Forbidden("Forbidden").err()
}
})
}
async fn authorize(_headers: HeaderMap<HeaderValue>, mut db: DBConnection) -> Result<UserInfo, warp::reject::Rejection> {
authorize_jwt(extract_jwt(&_headers).map_err(|e| e.reject())?, &mut db).await
}
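// Validates the JWT, checks that the referenced token and user still exist and are allowed to log in, and re-validates GitLab-backed accounts against the GitLab API (refreshing tokens when the access token has expired).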
pub async fn authorize_jwt(jwt: String, db: &mut DBConnection) -> Result<UserInfo, warp::reject::Rejection> {
let decoded = jsonwebtoken::decode::<auth::JWTClaims>(
&jwt,
&crate::routes::auth::JWT_DECODE_KEY,
&jsonwebtoken::Validation::new(auth::JWT_ALGORITHM)
).map_err(|_| AppError::Forbidden("Invalid token"))?;
db.cleanup_tokens();
let mut user = db.get_user(decoded.claims.sub)
.ok_or(AppError::Forbidden("Invalid token"))?;
let token = db.get_token(decoded.claims.jti)
.ok_or(AppError::Forbidden("Invalid token"))?;
if user.id != token.owner_id {
return AppError::Forbidden("Invalid token").err();
}
if user.role == UserRole::Disabled {
return AppError::Forbidden("Account disabled").err();
}
if user.gitlab {
let info = auth::gitlab::get_gitlab_user(user.gitlab_at.clone().unwrap());
let info = match info {
Some(v) => Some(v),
None => {
let tokens = auth::gitlab::get_gitlab_token(user.gitlab_rt.clone().unwrap(), true);
if let Some(tokens) = tokens {
user.gitlab_at = Some(tokens.access_token.clone());
user.gitlab_rt = Some(tokens.refresh_token);
db.save_user(&user);
auth::gitlab::get_gitlab_user(tokens.access_token)
} else { None }
}
};
if info.is_none() || info.unwrap().username != user.name {
// user.id == token.owner_id was verified above, so a single call revokes every session of this user
db.delete_all_tokens(user.id);
return AppError::Forbidden("Invalid gitlab user").err();
}
}
Ok((user, token))
}
fn extract_jwt(_headers: &HeaderMap<HeaderValue>) -> Result<String, AppError> {
let header = match _headers.get(warp::http::header::AUTHORIZATION) {
Some(v) => v,
None => return Err(AppError::Unauthorized("Missing token"))
};
let header = header.to_str().map_err(|_| AppError::Unauthorized("Missing token"))?;
if !header.starts_with("Bearer ") {
Err(AppError::Unauthorized("Missing token"))
} else {
Ok(header.trim_start_matches("Bearer ").to_owned())
}
}

View File

@@ -0,0 +1,199 @@
use std::collections::VecDeque;
use std::iter::Iterator;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering};
use lazy_static::lazy_static;
use warp::Filter;
use futures::TryFutureExt;
use futures::TryStreamExt;
use crate::db::DBPool;
mod routes;
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
{
if !std::path::Path::new("temp").is_dir() {
std::fs::create_dir("temp").expect("Failed to create temp dir");
}
std::fs::read_dir("temp")
.expect("Failed to iter temp dir")
.for_each(|dir| {
std::fs::remove_file(dir.expect("Failed to retrieve temp dir entry").path()).expect("Failed to delete file in temp dir");
});
DELETE_RT.spawn(async {});
ZIP_RT.spawn(async {});
}
routes::build_routes(db)
}
pub static WINDOWS_INVALID_CHARS: &str = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F<>:\"/\\|";
pub struct ZipProgressEntry {
temp_id: u64,
done: AtomicBool,
progress: AtomicU64,
total: AtomicU64,
delete_after: AtomicI64
}
#[derive(Debug)]
pub enum CreateNodeResult {
InvalidName,
InvalidParent,
Exists(bool, i32)
}
lazy_static! {
static ref DELETE_RT: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(1).enable_time().build().expect("Failed to create delete runtime");
static ref ZIP_RT: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(3).enable_time().build().expect("Failed to create zip runtime");
pub static ref ZIP_TO_PROGRESS: tokio::sync::RwLock<std::collections::HashMap<std::collections::BTreeSet<i32>, Arc<ZipProgressEntry>>> = tokio::sync::RwLock::new(std::collections::HashMap::new());
}
static NEXT_TEMP_ID: AtomicU64 = AtomicU64::new(0);
async fn cleanup_temp_zips() {
let mut existing = ZIP_TO_PROGRESS.write().await;
existing.retain(|_, v| {
if Arc::strong_count(v) == 1 && v.done.load(Ordering::Relaxed) && v.delete_after.load(Ordering::Relaxed) <= chrono::Utc::now().timestamp() {
std::fs::remove_file(std::path::Path::new(&format!("./temp/{}", v.temp_id))).expect("Failed to delete temp file");
false
} else {
true
}
});
}
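// Walks the inode tree starting at `root`; parents always appear before their descendants in the returned queue, so iterating it in reverse deletes leaves first.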
fn get_nodes_recursive(root: crate::db::Inode, db: &mut crate::db::DBConnection) -> VecDeque<crate::db::Inode> {
let mut nodes = VecDeque::from(vec![root.clone()]);
if root.is_file { return nodes; }
let mut nodes_to_check = VecDeque::from(vec![root]);
while !nodes_to_check.is_empty() {
let node = nodes_to_check.pop_front().unwrap();
db.get_children(node.id).iter().for_each(|node| {
nodes.push_back(node.clone());
if !node.is_file { nodes_to_check.push_front(node.clone()); }
});
}
nodes
}
fn get_node_path(node: crate::db::Inode, db: &mut crate::db::DBConnection) -> VecDeque<crate::db::Inode> {
let mut path = VecDeque::from(vec![node.clone()]);
let mut node = node;
while let Some(parent) = node.parent_id {
node = db.get_node(parent).expect("Failed to get node parent");
path.push_front(node.clone());
}
path
}
fn get_total_size(node: crate::db::Inode, db: &mut crate::db::DBConnection) -> u64 {
let nodes = get_nodes_recursive(node, db);
nodes.iter().fold(0_u64, |acc, node| acc + node.size.unwrap_or(0) as u64)
}
pub fn get_node_and_validate(user: &crate::db::User, node: i32, db: &mut crate::db::DBConnection) -> Option<crate::db::Inode> {
let node = db.get_node(node)?;
if node.owner_id != user.id {
None
} else {
Some(node)
}
}
pub fn create_node(name: String, owner: &crate::db::User, file: bool, parent: Option<i32>, force: bool, db: &mut crate::db::DBConnection)
-> Result<crate::db::Inode, CreateNodeResult> {
if !force && (name.is_empty()
|| name.starts_with(' ') || name.ends_with(' ') || name.ends_with('.')
|| name == "." || name == ".."
|| name.contains(|c| WINDOWS_INVALID_CHARS.contains(c))) {
return Err(CreateNodeResult::InvalidName);
}
if let Some(parent) = parent {
let parent = match get_node_and_validate(owner, parent, db) {
None => { return Err(CreateNodeResult::InvalidParent); }
Some(v) => v
};
if parent.is_file { return Err(CreateNodeResult::InvalidParent); }
let children = db.get_children(parent.id);
for child in children {
if child.name == name {
return Err(CreateNodeResult::Exists(child.is_file, child.id));
}
}
}
Ok(db.create_node(file, name, parent, owner.id))
}
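// Deletes a whole subtree bottom-up so children are removed before their parents.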
pub fn delete_node_root(node: &crate::db::Inode, db: &mut crate::db::DBConnection) {
get_nodes_recursive(node.clone(), db).iter().rev().for_each(|node| {
db.delete_node(node);
});
}
pub async fn delete_node(node: &crate::db::Inode, sender: &mut warp::hyper::body::Sender, db: &mut crate::db::DBConnection) {
if node.parent_id.is_none() { return; }
for node in get_nodes_recursive(node.clone(), db).iter().rev() {
sender.send_data(warp::hyper::body::Bytes::from(format!("Deleting {}...", generate_path(node, db)))).await.unwrap();
db.delete_node(node);
sender.send_data(warp::hyper::body::Bytes::from(" Done \n")).await.unwrap();
}
}
pub fn generate_path(node: &crate::db::Inode, db: &mut crate::db::DBConnection) -> String {
let mut path = String::new();
get_node_path(node.clone(), db).iter().for_each(|node| {
if node.parent_id.is_none() {
path += "/";
} else {
path += &node.name;
if !node.is_file {
path += "/";
}
}
});
path
}
pub fn generate_path_dto(node: &crate::db::Inode, db: &mut crate::db::DBConnection) -> crate::dto::responses::GetPath {
let mut get_path = crate::dto::responses::GetPath {
segments: Vec::new()
};
get_node_path(node.clone(), db).iter().for_each(|node| {
if node.parent_id.is_none() {
get_path.segments.push(crate::dto::responses::GetPathSegment {
path: "/".to_owned(),
node: Some(node.id)
});
} else {
get_path.segments.push(crate::dto::responses::GetPathSegment {
path: node.name.clone(),
node: Some(node.id)
});
if !node.is_file {
get_path.segments.push(crate::dto::responses::GetPathSegment {
path: "/".to_owned(),
node: None
});
}
}
});
get_path
}
pub fn get_file_stream_body(path: String) -> warp::hyper::Body {
warp::hyper::Body::wrap_stream(
tokio::fs::File::open(path)
.map_ok(|file|
tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(bytes::BytesMut::freeze)
)
.try_flatten_stream()
)
}

View File

@@ -0,0 +1,444 @@
use std::collections::{BTreeSet, HashMap};
use std::io::{Read, Write};
use std::sync::atomic::Ordering;
use futures::{Stream, StreamExt};
use headers::HeaderMapExt;
use warp::{Filter, Reply};
use crate::db::{DBConnection, DBPool, with_db};
use crate::dto;
use crate::routes::{AppError, get_reply};
use crate::routes::filters::{authenticated, UserInfo};
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl Reply, Error = warp::Rejection> + Clone {
let root = warp::path!("fs" / "root")
.and(warp::get())
.and(authenticated(db.clone()))
.and_then(root);
let node = warp::path!("fs" / "node" / i32)
.and(warp::get())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(node)
.with(warp::compression::brotli());
let path = warp::path!("fs" / "path" / i32)
.and(warp::get())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(path);
let create_folder = warp::path!("fs" / "create_folder")
.and(warp::post())
.and(warp::body::json())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(|data, info, db| create_node(data, info, db, false));
let create_file = warp::path!("fs" / "create_file")
.and(warp::post())
.and(warp::body::json())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(|data, info, db| create_node(data, info, db, true));
let delete_node = warp::path!("fs" / "delete" / i32)
.and(warp::post())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(delete_node);
let upload = warp::path!("fs" / "upload" / i32)
.and(warp::post())
.and(warp::body::stream())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(upload);
let create_zip = warp::path!("fs" / "create_zip")
.and(warp::post())
.and(warp::body::json())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(create_zip);
let download = warp::path!("fs" / "download")
.and(warp::post())
.and(warp::body::form())
.and(with_db(db.clone()))
.and_then(download);
let download_multi = warp::path!("fs" / "download_multi")
.and(warp::post())
.and(warp::body::form())
.and(with_db(db.clone()))
.and_then(download_multi);
let download_preview = warp::path!("fs" / "download_preview" / i32)
.and(warp::get())
.and(authenticated(db.clone()))
.and(with_db(db.clone()))
.and_then(download_preview);
let get_type = warp::path!("fs" / "get_type" / i32)
.and(warp::get())
.and(authenticated(db.clone()))
.and(with_db(db))
.and_then(get_type);
root.or(node).or(path).or(create_folder).or(create_file).or(delete_node).or(upload).or(create_zip).or(download).or(download_multi).or(download_preview).or(get_type)
}
async fn root(info: UserInfo) -> Result<impl Reply, warp::Rejection> {
get_reply(&dto::responses::Root {
statusCode: 200,
rootId: info.0.root_id
})
}
async fn node(node: i32, info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let guard_lock = DBConnection::get_lock(info.0.id).await;
let _guard = guard_lock.read().await;
let node = super::get_node_and_validate(&info.0, node, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?;
get_reply(&dto::responses::GetNode {
statusCode: 200,
id: node.id,
name: node.name,
isFile: node.is_file,
preview: node.has_preview,
parent: node.parent_id,
size: node.size,
children: (!node.is_file).then(|| {
db.get_children(node.id).iter().map(|child| dto::responses::GetNodeEntry {
id: child.id,
name: child.name.clone(),
isFile: child.is_file,
preview: child.has_preview,
parent: child.parent_id,
size: child.size
}).collect()
})
})
}
async fn path(node: i32, info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let guard_lock = DBConnection::get_lock(info.0.id).await;
let _guard = guard_lock.read().await;
let node = super::get_node_and_validate(&info.0, node, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?;
get_reply(&super::generate_path_dto(&node, &mut db))
}
async fn create_node(data: dto::requests::CreateNode, info: UserInfo, mut db: DBConnection, file: bool) -> Result<impl Reply, warp::Rejection> {
let guard_lock = DBConnection::get_lock(info.0.id).await;
let _guard = guard_lock.read().await;
let node = super::create_node(data.name, &info.0, file, Some(data.parent), false, &mut db);
match node {
Ok(v) => get_reply(&dto::responses::NewNode {
statusCode: 200,
id: v.id
}),
Err(v) => {
match v {
super::CreateNodeResult::InvalidName => AppError::BadRequest("Invalid name").err(),
super::CreateNodeResult::InvalidParent => AppError::BadRequest("Invalid parent").err(),
super::CreateNodeResult::Exists(file, id) => get_reply(&dto::responses::NodeExists {
statusCode: 200,
id,
exists: true,
isFile: file
})
}
}
}
}
async fn delete_node(node: i32, info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let guard_lock = DBConnection::get_lock(info.0.id).await;
let inner_guard_lock = guard_lock.clone();
let _guard = guard_lock.read().await;
let node = super::get_node_and_validate(&info.0, node, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?;
if node.parent_id.is_none() {
return AppError::BadRequest("Can't delete root").err();
}
let (mut sender, body) = warp::hyper::Body::channel();
sender.send_data(warp::hyper::body::Bytes::from("Waiting in queue\n")).await.unwrap();
super::DELETE_RT.spawn(async move {
let guard_lock = inner_guard_lock.clone();
let _guard = guard_lock.write().await;
super::delete_node(&node, &mut sender, &mut db).await;
});
let mut resp = warp::reply::Response::new(body);
*resp.status_mut() = warp::http::StatusCode::OK;
resp.headers_mut().typed_insert(
headers::ContentType::text_utf8()
);
Ok(resp)
}
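// Streams the request body straight to ./files/<id>, then tries to render a 300x300 JPEG preview for images up to 20 MiB.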
async fn upload<S, B>(node: i32, stream: S, info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection>
where
S: Stream<Item = Result<B, warp::Error>>,
S: StreamExt,
B: warp::Buf
{
let guard_lock = DBConnection::get_lock(info.0.id).await;
let _guard = guard_lock.read().await;
let mut node = super::get_node_and_validate(&info.0, node, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?;
if !node.is_file {
return AppError::BadRequest("Can't upload to a directory").err();
}
let mut file_size = 0_i64;
let file_name = format!("./files/{}", node.id);
{
let mut file = std::fs::File::create(file_name.clone()).unwrap();
stream.for_each(|f| {
let mut buffer = f.unwrap();
file_size += buffer.remaining() as i64;
while buffer.remaining() != 0 {
let chunk = buffer.chunk();
buffer.advance(file.write(chunk).expect("Failed to write file"));
}
futures::future::ready(())
}).await;
}
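// Best-effort preview: skip files larger than 20 MiB, guess the image format from the node name and store a 300x300 thumbnail next to the file.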
let generate_preview = || -> Option<()> {
if file_size > 20 * 1024 * 1024 { return None; }
let mime = mime_guess::from_path(std::path::Path::new(&node.name)).first()?.to_string();
let img = image::load(
std::io::BufReader::new(std::fs::File::open(file_name.clone()).unwrap()),
image::ImageFormat::from_mime_type(mime)?
).ok()?;
let img = img.resize(300, 300, image::imageops::FilterType::Triangle);
img.save(std::path::Path::new(&(file_name + "_preview.jpg"))).expect("Failed to save preview image");
Some(())
};
node.has_preview = generate_preview().is_some();
node.size = Some(file_size);
db.save_node(&node);
get_reply(&dto::responses::Success {
statusCode: 200
})
}
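// Starts (or reports on) a background zip job for the requested nodes. Jobs live in ZIP_TO_PROGRESS, keyed by the set of node ids, so a repeated request returns the progress of the running job instead of starting a new one.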
async fn create_zip(data: dto::requests::CreateZip, info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let guard_lock = DBConnection::get_lock(info.0.id).await;
let inner_guard_lock = guard_lock.clone();
let _guard = guard_lock.read().await;
let mut nodes: Vec<crate::db::Inode> = Vec::new();
for node in data.nodes.clone() {
nodes.push(
super::get_node_and_validate(&info.0, node, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?
);
}
let zip_nodes = BTreeSet::from_iter(data.nodes.iter().copied());
{
let guard = super::ZIP_TO_PROGRESS.read().await;
if let Some(entry) = guard.get(&zip_nodes) {
return get_reply(&dto::responses::CreateZipDone {
statusCode: 200,
done: entry.done.load(Ordering::Relaxed),
progress: Some(entry.progress.load(Ordering::Relaxed)),
total: Some(entry.total.load(Ordering::Relaxed))
})
}
}
let entry = {
let mut guard = super::ZIP_TO_PROGRESS.write().await;
guard.insert(zip_nodes.clone(), std::sync::Arc::from(super::ZipProgressEntry {
temp_id: super::NEXT_TEMP_ID.fetch_add(1, Ordering::Relaxed),
done: std::sync::atomic::AtomicBool::new(false),
progress: std::sync::atomic::AtomicU64::new(0),
total: std::sync::atomic::AtomicU64::new(1),
delete_after: std::sync::atomic::AtomicI64::new(0)
}));
guard.get(&zip_nodes).unwrap().clone()
};
super::ZIP_RT.spawn(async move {
type NodeMap = HashMap<i32, crate::db::Inode>;
super::cleanup_temp_zips().await;
let _guard = inner_guard_lock.read().await;
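// Rebuilds a node's path inside the archive by walking up through the directory map.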
fn get_path(node: &crate::db::Inode, dirs: &NodeMap) -> String {
let mut path = node.name.clone();
let mut _node = dirs.get(&node.parent_id.unwrap_or(-1));
while let Some(node) = _node {
path.insert_str(0, &(node.name.clone() + "/"));
_node = dirs.get(&node.parent_id.unwrap_or(-1));
}
path
}
nodes.iter().for_each(|node| {
entry.total.fetch_add(super::get_total_size(node.clone(), &mut db), Ordering::Relaxed);
});
entry.total.fetch_sub(1, Ordering::Relaxed);
{
let mut buf = vec![0_u8; 1024 * 1024 * 4];
let file = std::fs::File::create(format!("./temp/{}", entry.temp_id)).expect("Failed to create temp file");
let mut zip = zip::ZipWriter::new(file);
let zip_options = zip::write::FileOptions::default().large_file(true);
let (files, dirs): (NodeMap, NodeMap) =
nodes.iter()
.flat_map(|node| super::get_nodes_recursive(node.clone(), &mut db))
.map(|node| (node.id, node))
.partition(|v| v.1.is_file);
dirs.values().for_each(|dir| {
zip.add_directory(get_path(dir, &dirs), zip_options).expect("Failed to add dir to zip");
});
files.values().for_each(|node| {
zip.start_file(get_path(node, &dirs), zip_options).expect("Failed to start zip file");
let mut file = std::fs::File::open(format!("./files/{}", node.id)).expect("Failed to open file for zip");
loop {
let count = file.read(&mut buf).expect("Failed to read file for zip");
if count == 0 { break; }
zip.write_all(&buf[..count]).expect("Failed to write zip");
entry.progress.fetch_add(count as u64, Ordering::Relaxed);
}
});
zip.finish().expect("Failed to finish zip");
}
entry.done.store(true, Ordering::Relaxed);
entry.delete_after.store(chrono::Utc::now().timestamp() + 10 * 60, Ordering::Relaxed);
});
get_reply(&dto::responses::CreateZipDone {
statusCode: 200,
done: false,
progress: Some(0),
total: Some(1)
})
}
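// Authorizes via the jwtToken carried in the request, then either streams a file directly or serves the finished zip previously created for a directory.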
async fn download(data: dto::requests::Download, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let info = crate::routes::filters::authorize_jwt(data.jwtToken, &mut db).await?;
let guard_lock = DBConnection::get_lock(info.0.id).await;
let _guard = guard_lock.read().await;
let node: crate::db::Inode = super::get_node_and_validate(&info.0, data.id, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?;
if node.is_file {
let mut resp = warp::reply::Response::new(super::get_file_stream_body(
format!("./files/{}", node.id)
));
*resp.status_mut() = warp::http::StatusCode::OK;
resp.headers_mut().typed_insert(
headers::ContentLength(node.size.unwrap() as u64)
);
resp.headers_mut().typed_insert(
headers::ContentType::from(
mime_guess::from_path(std::path::Path::new(&node.name)).first_or_octet_stream()
)
);
resp.headers_mut().insert(
"Content-Disposition",
("attachment; filename=".to_owned() + &node.name).parse().unwrap()
);
Ok(resp)
} else {
let nodes_key = BTreeSet::from([node.id]);
let guard = super::ZIP_TO_PROGRESS.read().await;
let entry = guard.get(&nodes_key)
.ok_or(AppError::BadRequest("Unknown node"))?;
if !entry.done.load(Ordering::Relaxed) {
AppError::BadRequest("Unknown node").err()
} else {
let file = format!("./temp/{}", entry.temp_id);
let mut resp = warp::reply::Response::new(super::get_file_stream_body(file.clone()));
*resp.status_mut() = warp::http::StatusCode::OK;
resp.headers_mut().typed_insert(
headers::ContentLength(std::fs::metadata(std::path::Path::new(&file)).unwrap().len())
);
resp.headers_mut().typed_insert(
headers::ContentType::from(
mime_guess::from_ext("zip").first().unwrap()
)
);
resp.headers_mut().insert(
"Content-Disposition",
("attachment; filename=".to_owned() + &node.name + ".zip").parse().unwrap()
);
Ok(resp)
}
}
}
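// Serves the finished zip for a comma-separated list of node ids; the zip must already have been created via create_zip.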
async fn download_multi(data: dto::requests::DownloadMulti, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let info = crate::routes::filters::authorize_jwt(data.jwtToken, &mut db).await?;
let guard_lock = DBConnection::get_lock(info.0.id).await;
let _guard = guard_lock.read().await;
let mut nodes: Vec<crate::db::Inode> = Vec::new();
for node in data.id.split(',').map(|v| v.parse::<i32>()
.map_err(|_| AppError::BadRequest("Failed to parse").reject())
) {
nodes.push(
super::get_node_and_validate(&info.0, node?, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?
);
}
let nodes_key = BTreeSet::from_iter(nodes.iter().map(|node| node.id));
let guard = super::ZIP_TO_PROGRESS.read().await;
let entry = guard.get(&nodes_key)
.ok_or(AppError::BadRequest("Unknown zip"))?;
if !entry.done.load(Ordering::Relaxed) {
AppError::BadRequest("Unfinished zip").err()
} else {
let file = format!("./temp/{}", entry.temp_id);
let mut resp = warp::reply::Response::new(super::get_file_stream_body(file.clone()));
*resp.status_mut() = warp::http::StatusCode::OK;
resp.headers_mut().typed_insert(
headers::ContentLength(std::fs::metadata(std::path::Path::new(&file)).unwrap().len())
);
resp.headers_mut().typed_insert(
headers::ContentType::from(
mime_guess::from_ext("zip").first().unwrap()
)
);
resp.headers_mut().insert(
"Content-Disposition",
"attachment; filename=files.zip".parse().unwrap()
);
Ok(resp)
}
}
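// Returns the stored preview image as a base64 data URI.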
async fn download_preview(node: i32, info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let guard_lock = DBConnection::get_lock(info.0.id).await;
let _guard = guard_lock.read().await;
let node: crate::db::Inode = super::get_node_and_validate(&info.0, node, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?;
if node.has_preview {
let file = format!("./files/{}_preview.jpg", node.id);
get_reply(&dto::responses::DownloadBase64 {
statusCode: 200,
data: "data:image/png;base64,".to_owned() + &base64::encode(std::fs::read(std::path::Path::new(&file)).unwrap())
})
} else {
AppError::BadRequest("No preview").err()
}
}
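// Returns the MIME type guessed from the node's file name.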
async fn get_type(node: i32, info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
let node: crate::db::Inode = super::get_node_and_validate(&info.0, node, &mut db)
.ok_or(AppError::BadRequest("Unknown node"))?;
get_reply(&dto::responses::Type {
statusCode: 200,
_type: mime_guess::from_path(std::path::Path::new(&node.name)).first_or_octet_stream().to_string()
})
}

114
backend/src/routes/mod.rs Normal file
View File

@@ -0,0 +1,114 @@
mod filters;
mod auth;
mod admin;
mod user;
pub mod fs;
use warp::{Filter, Reply};
use crate::db::DBPool;
use crate::dto;
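// Mounts all API routes under /api, with the static directory and index.html as fallbacks.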
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl Reply, Error = warp::Rejection> + Clone {
warp::path::path("api")
.and(
auth::build_routes(db.clone())
.or(admin::build_routes(db.clone()))
.or(user::build_routes(db.clone()))
.or(fs::build_routes(db))
.recover(error_handler)
)
.or(warp::fs::dir("./static/"))
.or(warp::fs::file("./static/index.html"))
}
pub fn get_reply<T>(data: &T) -> Result<warp::reply::Response, warp::Rejection> where T: serde::Serialize {
Ok(warp::reply::with_status(warp::reply::json(data), warp::http::StatusCode::OK).into_response())
}
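// Application errors carried through warp rejections and mapped to HTTP responses by error_handler.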
#[derive(thiserror::Error, Debug, Clone)]
pub enum AppError {
#[error("unauthorized")]
Unauthorized(&'static str),
#[error("forbidden")]
Forbidden(&'static str),
#[error("bad request")]
BadRequest(&'static str),
#[error("internal error")]
InternalError(&'static str)
}
impl warp::reject::Reject for AppError {}
impl AppError {
pub fn reject(&self) -> warp::reject::Rejection {
warp::reject::custom(self.clone())
}
pub fn err<T>(&self) -> Result<T, warp::reject::Rejection> {
Err(self.reject())
}
}
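// Maps rejections (AppError, body deserialization, invalid query, wrong method) to JSON error responses; anything unhandled panics.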
pub async fn error_handler(err: warp::reject::Rejection) -> Result<impl Reply, std::convert::Infallible> {
if err.is_not_found() {
return Ok(warp::reply::with_status(
warp::reply::json(&dto::responses::Error {
statusCode: 404,
message: "bruh".to_owned()
}),
warp::http::StatusCode::NOT_FOUND
));
}
if let Some(e) = err.find::<AppError>() {
return Ok(warp::reply::with_status(
warp::reply::json(&dto::responses::Error {
statusCode: match e {
AppError::BadRequest(_) => 400,
AppError::Unauthorized(_) => 401,
AppError::Forbidden(_) => 403,
AppError::InternalError(_) => 500
},
message: match e {
AppError::BadRequest(v) => v.to_string(),
AppError::Unauthorized(v) => v.to_string(),
AppError::Forbidden(v) => v.to_string(),
AppError::InternalError(v) => v.to_string()
},
}),
match e {
AppError::BadRequest(_) => warp::http::StatusCode::BAD_REQUEST,
AppError::Unauthorized(_) => warp::http::StatusCode::UNAUTHORIZED,
AppError::Forbidden(_) => warp::http::StatusCode::FORBIDDEN,
AppError::InternalError(_) => warp::http::StatusCode::INTERNAL_SERVER_ERROR
}
));
}
if let Some(e) = err.find::<warp::body::BodyDeserializeError>() {
return Ok(warp::reply::with_status(
warp::reply::json(&dto::responses::Error {
statusCode: 400,
message: e.to_string(),
}),
warp::http::StatusCode::BAD_REQUEST
))
}
if let Some(e) = err.find::<warp::reject::InvalidQuery>() {
return Ok(warp::reply::with_status(
warp::reply::json(&dto::responses::Error {
statusCode: 400,
message: e.to_string(),
}),
warp::http::StatusCode::BAD_REQUEST
))
}
if let Some(e) = err.find::<warp::reject::MethodNotAllowed>() {
return Ok(warp::reply::with_status(
warp::reply::json(&dto::responses::Error {
statusCode: 405,
message: e.to_string(),
}),
warp::http::StatusCode::METHOD_NOT_ALLOWED
))
}
panic!("Unhandled rejection: {:?}", err)
}

42
backend/src/routes/user.rs Normal file
View File

@@ -0,0 +1,42 @@
use warp::{Filter, Reply};
use crate::db::{DBConnection, DBPool, with_db};
use crate::dto;
use crate::routes::get_reply;
use crate::routes::filters::{authenticated, UserInfo};
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl Reply, Error = warp::Rejection> + Clone {
let info = warp::path!("user" / "info")
.and(warp::get())
.and(authenticated(db.clone()))
.and_then(info);
let delete_user = warp::path!("user" / "delete")
.and(warp::post())
.and(authenticated(db.clone()))
.and(with_db(db))
.and_then(delete_user);
info.or(delete_user)
}
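// Returns the authenticated user's basic account info.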
async fn info(info: UserInfo) -> Result<impl Reply, warp::Rejection> {
get_reply(&dto::responses::UserInfo {
statusCode: 200,
name: info.0.name,
gitlab: info.0.gitlab,
tfaEnabled: info.0.tfa_type != crate::db::TfaTypes::None
})
}
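// Deletes the authenticated user: revokes all tokens, removes the file tree starting at the root node, then deletes the user row.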
async fn delete_user(info: UserInfo, mut db: DBConnection) -> Result<impl Reply, warp::Rejection> {
db.delete_all_tokens(info.0.id);
let root_node = super::fs::get_node_and_validate(&info.0, info.0.root_id, &mut db).expect("Failed to get root node for deleting");
super::fs::delete_node_root(&root_node, &mut db);
db.delete_user(&info.0);
get_reply(&dto::responses::Success {
statusCode: 200
})
}

42
backend/src/schema.rs Normal file
View File

@@ -0,0 +1,42 @@
// @generated automatically by Diesel CLI.
diesel::table! {
inode (id) {
id -> Integer,
is_file -> Bool,
name -> Text,
parent_id -> Nullable<Integer>,
owner_id -> Integer,
size -> Nullable<BigInt>,
has_preview -> Bool,
}
}
diesel::table! {
tokens (id) {
id -> Integer,
owner_id -> Integer,
exp -> BigInt,
}
}
diesel::table! {
user (id) {
id -> Integer,
gitlab -> Bool,
name -> Text,
password -> Text,
role -> SmallInt,
root_id -> Integer,
tfa_type -> SmallInt,
tfa_secret -> Nullable<Binary>,
gitlab_at -> Nullable<Text>,
gitlab_rt -> Nullable<Text>,
}
}
diesel::allow_tables_to_appear_in_same_query!(
inode,
tokens,
user,
);