mirror of
https://github.com/Drop-OSS/drop-app.git
synced 2025-11-13 16:22:43 +10:00
commit0f48f3fb44Author: quexeky <git@quexeky.dev> Date: Sun Oct 12 19:35:04 2025 +1100 chore: Run cargo clippy && cargo fmt Signed-off-by: quexeky <git@quexeky.dev> commit974666efe2Author: quexeky <git@quexeky.dev> Date: Sun Oct 12 19:17:40 2025 +1100 refactor: Finish refactor Signed-off-by: quexeky <git@quexeky.dev> commit9e1bf9852fAuthor: quexeky <git@quexeky.dev> Date: Sun Oct 12 18:33:43 2025 +1100 refactor: Builds, but some logic still left to move back Signed-off-by: quexeky <git@quexeky.dev> commit5d22b883d5Author: quexeky <git@quexeky.dev> Date: Sun Oct 12 17:04:27 2025 +1100 refactor: Improvements to src-tauri Signed-off-by: quexeky <git@quexeky.dev> commit62a2561539Author: quexeky <git@quexeky.dev> Date: Sat Oct 11 09:51:04 2025 +1100 fix: Remote tauri dependency from process Signed-off-by: quexeky <git@quexeky.dev> commit59f040bc8bAuthor: quexeky <git@quexeky.dev> Date: Thu Oct 9 07:46:17 2025 +1100 chore: Major refactoring Still needs a massive go-over because there shouldn't be anything referencing tauri in any of the workspaces except the original one. Process manager has been refactored as an example Signed-off-by: quexeky <git@quexeky.dev> Signed-off-by: quexeky <git@quexeky.dev>
141 lines
4.2 KiB
Rust
141 lines
4.2 KiB
Rust
use std::{
|
|
fs::File,
|
|
io::{self, Write},
|
|
path::{Path, PathBuf},
|
|
time::SystemTime,
|
|
};
|
|
|
|
use bitcode::{Decode, DecodeOwned, Encode};
|
|
use database::{Database, borrow_db_checked};
|
|
use http::{Response, header::CONTENT_TYPE, response::Builder as ResponseBuilder};
|
|
|
|
use crate::error::{CacheError, RemoteAccessError};
|
|
|
|
#[macro_export]
/// Dispatch between an online and an offline implementation of an async call.
///
/// * `$var`   — a handle whose `.lock().status` yields the current
///   `::client::app_status::AppStatus` (presumably the shared app state —
///   verify against callers).
/// * `$func1` — the async function used when the app is online.
/// * `$func2` — the async fallback used when offline.
/// * `$arg…`  — arguments forwarded unchanged to whichever function runs.
///
/// The offline path is taken when either the user forced offline mode in the
/// settings DB or the live status is `Offline`. Expands to an `async move`
/// block, so the caller must `.await` the result.
macro_rules! offline {
    ($var:expr, $func1:expr, $func2:expr, $( $arg:expr ),* ) => {
        async move {
            // Forced-offline setting wins over the live connection status.
            if ::database::borrow_db_checked().settings.force_offline
                || $var.lock().status == ::client::app_status::AppStatus::Offline {
                $func2( $( $arg ), *).await
            } else {
                $func1( $( $arg ), *).await
            }
        }
    }
}
|
|
|
|
/// Current wall-clock time as whole seconds since the UNIX epoch.
///
/// # Panics
/// Panics if the system clock reports a time before the epoch.
fn get_sys_time_in_secs() -> u64 {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or_else(|_| panic!("SystemTime before UNIX EPOCH!"))
}
|
|
|
|
fn get_cache_path(base: &Path, key: &str) -> PathBuf {
|
|
let key_hash = hex::encode(md5::compute(key.as_bytes()).0);
|
|
base.join(key_hash)
|
|
}
|
|
|
|
fn write_sync(base: &Path, key: &str, data: Vec<u8>) -> io::Result<()> {
|
|
let cache_path = get_cache_path(base, key);
|
|
let mut file = File::create(cache_path)?;
|
|
file.write_all(&data)?;
|
|
Ok(())
|
|
}
|
|
|
|
fn read_sync(base: &Path, key: &str) -> io::Result<Vec<u8>> {
|
|
let cache_path = get_cache_path(base, key);
|
|
let file = std::fs::read(cache_path)?;
|
|
Ok(file)
|
|
}
|
|
|
|
fn delete_sync(base: &Path, key: &str) -> io::Result<()> {
|
|
let cache_path = get_cache_path(base, key);
|
|
std::fs::remove_file(cache_path)?;
|
|
Ok(())
|
|
}
|
|
|
|
pub fn cache_object<D: Encode>(key: &str, data: &D) -> Result<(), RemoteAccessError> {
|
|
cache_object_db(key, data, &borrow_db_checked())
|
|
}
|
|
pub fn cache_object_db<D: Encode>(
|
|
key: &str,
|
|
data: &D,
|
|
database: &Database,
|
|
) -> Result<(), RemoteAccessError> {
|
|
let bytes = bitcode::encode(data);
|
|
write_sync(&database.cache_dir, key, bytes).map_err(RemoteAccessError::Cache)
|
|
}
|
|
pub fn get_cached_object<D: Encode + DecodeOwned>(key: &str) -> Result<D, RemoteAccessError> {
|
|
get_cached_object_db::<D>(key, &borrow_db_checked())
|
|
}
|
|
pub fn get_cached_object_db<D: DecodeOwned>(
|
|
key: &str,
|
|
db: &Database,
|
|
) -> Result<D, RemoteAccessError> {
|
|
let bytes = read_sync(&db.cache_dir, key).map_err(RemoteAccessError::Cache)?;
|
|
let data =
|
|
bitcode::decode::<D>(&bytes).map_err(|e| RemoteAccessError::Cache(io::Error::other(e)))?;
|
|
Ok(data)
|
|
}
|
|
pub fn clear_cached_object(key: &str) -> Result<(), RemoteAccessError> {
|
|
clear_cached_object_db(key, &borrow_db_checked())
|
|
}
|
|
/// Delete the cache file for `key` from `db`'s cache directory.
///
/// # Errors
/// Returns [`RemoteAccessError::Cache`] if the file cannot be removed
/// (including when no entry exists for `key`).
pub fn clear_cached_object_db(key: &str, db: &Database) -> Result<(), RemoteAccessError> {
    match delete_sync(&db.cache_dir, key) {
        Ok(()) => Ok(()),
        Err(e) => Err(RemoteAccessError::Cache(e)),
    }
}
|
|
|
|
#[derive(Encode, Decode)]
/// A cached HTTP response body plus the metadata needed to replay it.
///
/// Serialized with `bitcode` and stored on disk by `cache_object` /
/// retrieved by `get_cached_object`.
pub struct ObjectCache {
    /// Value of the original response's `Content-Type` header.
    content_type: String,
    /// Raw response body bytes.
    body: Vec<u8>,
    /// Expiry instant as seconds since the UNIX epoch (see `has_expired`).
    expiry: u64,
}
|
|
|
|
impl ObjectCache {
|
|
pub fn has_expired(&self) -> bool {
|
|
let current = get_sys_time_in_secs();
|
|
self.expiry < current
|
|
}
|
|
}
|
|
|
|
impl TryFrom<Response<Vec<u8>>> for ObjectCache {
|
|
type Error = CacheError;
|
|
|
|
fn try_from(value: Response<Vec<u8>>) -> Result<Self, Self::Error> {
|
|
Ok(ObjectCache {
|
|
content_type: value
|
|
.headers()
|
|
.get(CONTENT_TYPE)
|
|
.ok_or(CacheError::HeaderNotFound(CONTENT_TYPE))?
|
|
.to_str()
|
|
.map_err(CacheError::ParseError)?
|
|
.to_owned(),
|
|
body: value.body().clone(),
|
|
expiry: get_sys_time_in_secs() + 60 * 60 * 24,
|
|
})
|
|
}
|
|
}
|
|
impl TryFrom<ObjectCache> for Response<Vec<u8>> {
|
|
type Error = CacheError;
|
|
fn try_from(value: ObjectCache) -> Result<Self, Self::Error> {
|
|
let resp_builder = ResponseBuilder::new().header(CONTENT_TYPE, value.content_type);
|
|
resp_builder
|
|
.body(value.body)
|
|
.map_err(CacheError::ConstructionError)
|
|
}
|
|
}
|
|
impl TryFrom<&ObjectCache> for Response<Vec<u8>> {
|
|
type Error = CacheError;
|
|
|
|
fn try_from(value: &ObjectCache) -> Result<Self, Self::Error> {
|
|
let resp_builder = ResponseBuilder::new().header(CONTENT_TYPE, value.content_type.clone());
|
|
resp_builder
|
|
.body(value.body.clone())
|
|
.map_err(CacheError::ConstructionError)
|
|
}
|
|
}
|