mirror of https://github.com/Drop-OSS/drop-app.git
synced 2025-11-13 00:02:41 +10:00

Collections & download stability, UI (#130)

* feat: different local path in dev #73
* feat: better error output for downloads
* feat: collections in library view
* feat: improve download manager reliability
* feat: new download UI, more stable downloads
* fix: clippy
* fix: only show admin link if user is admin
* feat: check for libs before building
src-tauri/Cargo.lock (generated)
@@ -1289,6 +1289,7 @@ dependencies = [
"atomic-instant-full",
"bitcode",
"boxcar",
"bytes",
"cacache 13.1.0",
"chrono",
"deranged",
@@ -1296,6 +1297,7 @@ dependencies = [
"droplet-rs",
"dynfmt",
"filetime",
"futures-core",
"futures-lite",
"gethostname",
"hex 0.4.3",
@@ -1339,6 +1341,7 @@ dependencies = [
"tempfile",
"throttle_my_fn",
"tokio",
"tokio-util",
"umu-wrapper-lib",
"url",
"urlencoding",
@@ -6083,9 +6086,9 @@ dependencies = [

[[package]]
name = "tokio-util"
version = "0.7.15"
version = "0.7.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df"
checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5"
dependencies = [
"bytes",
"futures-core",

@@ -73,6 +73,9 @@ futures-lite = "2.6.0"
page_size = "0.6.0"
sysinfo = "0.36.1"
humansize = "2.1.3"
tokio-util = { version = "0.7.16", features = ["io"] }
futures-core = "0.3.31"
bytes = "1.10.1"
# tailscale = { path = "./tailscale" }

[dependencies.dynfmt]
@@ -106,7 +109,15 @@ features = ["other_errors"] # You can also use "yaml_enc" or "bin_enc"
[dependencies.reqwest]
version = "0.12.22"
default-features = false
features = ["json", "http2", "blocking", "rustls-tls", "native-tls-alpn", "rustls-tls-native-roots"]
features = [
"json",
"http2",
"blocking",
"rustls-tls",
"native-tls-alpn",
"rustls-tls-native-roots",
"stream",
]

[dependencies.serde]
version = "1"
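The new reqwest "stream" feature and the "io" feature on tokio-util (both added in the Cargo.toml hunks above) point at streaming response bodies instead of buffering them whole. A minimal sketch of that pattern, not code from this commit, assuming the futures-util crate purely for `StreamExt::next`:

    use std::io::Write;

    use futures_util::StreamExt; // assumed helper crate, only needed for `.next()`

    // Write a response body to disk chunk by chunk instead of buffering it in memory.
    async fn stream_to_file(url: &str, path: &std::path::Path) -> Result<(), Box<dyn std::error::Error>> {
        let response = reqwest::get(url).await?.error_for_status()?;
        let mut body = response.bytes_stream(); // only available with the "stream" feature
        let mut file = std::fs::File::create(path)?;
        while let Some(chunk) = body.next().await {
            file.write_all(&chunk?)?; // each chunk is a `bytes::Bytes` buffer
        }
        Ok(())
    }

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Hypothetical URL purely for illustration.
        stream_to_file("https://example.com/file.bin", std::path::Path::new("file.bin")).await
    }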
@@ -16,8 +16,13 @@ use crate::DB;

use super::models::data::Database;

#[cfg(not(debug_assertions))]
static DATA_ROOT_PREFIX: &'static str = "drop";
#[cfg(debug_assertions)]
static DATA_ROOT_PREFIX: &str = "drop-debug";

pub static DATA_ROOT_DIR: LazyLock<Arc<PathBuf>> =
LazyLock::new(|| Arc::new(dirs::data_dir().unwrap().join("drop")));
LazyLock::new(|| Arc::new(dirs::data_dir().unwrap().join(DATA_ROOT_PREFIX)));

// Custom JSON serializer to support everything we need
#[derive(Debug, Default, Clone)]
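The `DATA_ROOT_PREFIX` split gives debug builds their own data directory (`drop-debug`) instead of sharing the production `drop` root (the "different local path in dev" item from the commit message). The same `cfg(debug_assertions)` trick in isolation, with hypothetical names:

    use std::path::PathBuf;

    // Release builds store data under "example-app"...
    #[cfg(not(debug_assertions))]
    const APP_DIR_NAME: &str = "example-app";
    // ...while debug builds get a separate sandbox directory.
    #[cfg(debug_assertions)]
    const APP_DIR_NAME: &str = "example-app-debug";

    fn data_root() -> PathBuf {
        // `dirs::data_dir()` resolves the platform data directory (e.g. ~/.local/share).
        dirs::data_dir()
            .expect("no platform data directory")
            .join(APP_DIR_NAME)
    }

    fn main() {
        println!("data root: {}", data_root().display());
    }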
@@ -1,9 +1,6 @@

pub mod data {
use std::path::PathBuf;
use std::{hash::Hash, path::PathBuf};

use native_model::native_model;
use serde::{Deserialize, Serialize};

@@ -17,6 +14,9 @@ pub mod data {

pub type GameDownloadStatus = v2::GameDownloadStatus;
pub type ApplicationTransientStatus = v1::ApplicationTransientStatus;
/**
* Need to be universally accessible by the ID, and the version is just a couple sprinkles on top
*/
pub type DownloadableMetadata = v1::DownloadableMetadata;
pub type DownloadType = v1::DownloadType;
pub type DatabaseApplications = v2::DatabaseApplications;
@@ -24,7 +24,19 @@ pub mod data {

use std::collections::HashMap;

pub mod v1 {
impl PartialEq for DownloadableMetadata {
fn eq(&self, other: &Self) -> bool {
self.id == other.id && self.download_type == other.download_type
}
}
impl Hash for DownloadableMetadata {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.id.hash(state);
self.download_type.hash(state);
}
}

mod v1 {
use crate::process::process_manager::Platform;
use serde_with::serde_as;
use std::{collections::HashMap, path::PathBuf};
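The hand-written `PartialEq`/`Hash` impls make two `DownloadableMetadata` values identical whenever `id` and `download_type` match, regardless of `version`, which is what the doc comment above means by the version being "a couple sprinkles on top": queue and registry lookups key on the ID alone. A self-contained sketch of the same idea with simplified stand-in types:

    use std::collections::HashMap;
    use std::hash::{Hash, Hasher};

    #[derive(Debug, Clone, Eq, PartialEq, Hash)]
    enum DownloadType {
        Game,
        Tool,
    }

    #[derive(Debug, Clone, Eq)]
    struct DownloadableMetadata {
        id: String,
        version: Option<String>, // deliberately ignored by Eq and Hash
        download_type: DownloadType,
    }

    // Identity is (id, download_type); Eq and Hash must agree on that.
    impl PartialEq for DownloadableMetadata {
        fn eq(&self, other: &Self) -> bool {
            self.id == other.id && self.download_type == other.download_type
        }
    }

    impl Hash for DownloadableMetadata {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.id.hash(state);
            self.download_type.hash(state);
        }
    }

    fn main() {
        let mut registry: HashMap<DownloadableMetadata, &str> = HashMap::new();
        let queued = DownloadableMetadata {
            id: "game-123".into(),
            version: Some("1.0.0".into()),
            download_type: DownloadType::Game,
        };
        registry.insert(queued, "agent");

        // A version-less key still finds the entry, because version never enters the hash.
        let lookup = DownloadableMetadata {
            id: "game-123".into(),
            version: None,
            download_type: DownloadType::Game,
        };
        assert!(registry.contains_key(&lookup));
    }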
@@ -114,6 +126,7 @@ pub mod data {
// Stuff that shouldn't be synced to disk
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum ApplicationTransientStatus {
Queued { version_name: String },
Downloading { version_name: String },
Uninstalling {},
Updating { version_name: String },
@@ -142,7 +155,7 @@ pub mod data {
}

#[native_model(id = 7, version = 1, with = native_model::rmp_serde_1_3::RmpSerde)]
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Clone)]
#[derive(Debug, Eq, PartialOrd, Ord, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DownloadableMetadata {
pub id: String,
@@ -172,7 +185,7 @@ pub mod data {
}
}

pub mod v2 {
mod v2 {
use std::{collections::HashMap, path::PathBuf};

use serde_with::serde_as;
@@ -5,10 +5,10 @@ use log::warn;
use crate::{
database::{
db::borrow_db_mut_checked,
models::data::v1::{DownloadType, DownloadableMetadata},
models::data::{DownloadType, DownloadableMetadata},
},
games::{
downloads::drop_data::{v1::DropData, DROP_DATA_PATH},
downloads::drop_data::{DropData, DROP_DATA_PATH},
library::set_partially_installed_db,
},
};
@@ -12,6 +12,7 @@ use tauri::{AppHandle, Emitter};

use crate::{
database::models::data::DownloadableMetadata,
download_manager::download_manager_frontend::DownloadStatus,
error::application_download_error::ApplicationDownloadError,
games::library::{QueueUpdateEvent, QueueUpdateEventQueueData, StatsUpdateEvent},
};
@@ -75,7 +76,6 @@ pub struct DownloadManagerBuilder {
status: Arc<Mutex<DownloadManagerStatus>>,
app_handle: AppHandle,

current_download_agent: Option<DownloadAgent>, // Should be the only download agent in the map with the "Go" flag
current_download_thread: Mutex<Option<JoinHandle<()>>>,
active_control_flag: Option<DownloadThreadControl>,
}
@@ -95,7 +95,6 @@ impl DownloadManagerBuilder {
progress: active_progress.clone(),
app_handle,

current_download_agent: None,
current_download_thread: Mutex::new(None),
active_control_flag: None,
};
@@ -121,7 +120,6 @@ impl DownloadManagerBuilder {
fn cleanup_current_download(&mut self) {
self.active_control_flag = None;
*self.progress.lock().unwrap() = None;
self.current_download_agent = None;

let mut download_thread_lock = self.current_download_thread.lock().unwrap();

@@ -197,7 +195,7 @@ impl DownloadManagerBuilder {
return;
}

download_agent.on_initialised(&self.app_handle);
download_agent.on_queued(&self.app_handle);
self.download_queue.append(meta.clone());
self.download_agent_registry.insert(meta, download_agent);
@@ -216,19 +214,13 @@ impl DownloadManagerBuilder {
return;
}

if self.current_download_agent.is_some()
&& self.download_queue.read().front().unwrap()
== &self.current_download_agent.as_ref().unwrap().metadata()
{
return;
}

debug!("current download queue: {:?}", self.download_queue.read());

// Should always be Some if the above two statements keep going
let agent_data = self.download_queue.read().front().unwrap().clone();

info!("starting download for {agent_data:?}");
let agent_data = if let Some(agent_data) = self.download_queue.read().front() {
agent_data.clone()
} else {
return;
};

let download_agent = self
.download_agent_registry
@@ -236,8 +228,22 @@ impl DownloadManagerBuilder {
.unwrap()
.clone();

let status = download_agent.status();

// This download is already going
if status != DownloadStatus::Queued {
return;
}

// Ensure all others are marked as queued
for agent in self.download_agent_registry.values() {
if agent.metadata() != agent_data && agent.status() != DownloadStatus::Queued {
agent.on_queued(&self.app_handle);
}
}

info!("starting download for {agent_data:?}");
self.active_control_flag = Some(download_agent.control_flag());
self.current_download_agent = Some(download_agent.clone());

let sender = self.sender.clone();
@@ -310,8 +316,8 @@ impl DownloadManagerBuilder {
}
fn manage_completed_signal(&mut self, meta: DownloadableMetadata) {
debug!("got signal Completed");
if let Some(interface) = &self.current_download_agent
&& interface.metadata() == meta
if let Some(interface) = self.download_queue.read().front()
&& interface == &meta
{
self.remove_and_cleanup_front_download(&meta);
}
@@ -321,11 +327,13 @@ impl DownloadManagerBuilder {
}
fn manage_error_signal(&mut self, error: ApplicationDownloadError) {
debug!("got signal Error");
if let Some(current_agent) = self.current_download_agent.clone() {
if let Some(metadata) = self.download_queue.read().front()
&& let Some(current_agent) = self.download_agent_registry.get(metadata)
{
current_agent.on_error(&self.app_handle, &error);

self.stop_and_wait_current_download();
self.remove_and_cleanup_front_download(&current_agent.metadata());
self.remove_and_cleanup_front_download(metadata);
}
self.push_ui_queue_update();
self.set_status(DownloadManagerStatus::Error);
@@ -333,32 +341,23 @@ impl DownloadManagerBuilder {
fn manage_cancel_signal(&mut self, meta: &DownloadableMetadata) {
debug!("got signal Cancel");

if let Some(current_download) = &self.current_download_agent {
if &current_download.metadata() == meta {
self.set_status(DownloadManagerStatus::Paused);
current_download.on_cancelled(&self.app_handle);
self.stop_and_wait_current_download();
// If the current download is the one we're tryna cancel
if let Some(current_metadata) = self.download_queue.read().front()
&& current_metadata == meta
&& let Some(current_download) = self.download_agent_registry.get(current_metadata)
{
self.set_status(DownloadManagerStatus::Paused);
current_download.on_cancelled(&self.app_handle);
self.stop_and_wait_current_download();

self.download_queue.pop_front();
self.download_queue.pop_front();

self.cleanup_current_download();
debug!("current download queue: {:?}", self.download_queue.read());
}
// TODO: Collapse these two into a single if statement somehow
else if let Some(download_agent) = self.download_agent_registry.get(meta) {
let index = self.download_queue.get_by_meta(meta);
if let Some(index) = index {
download_agent.on_cancelled(&self.app_handle);
let _ = self.download_queue.edit().remove(index).unwrap();
let removed = self.download_agent_registry.remove(meta);
debug!(
"removed {:?} from queue {:?}",
removed.map(|x| x.metadata()),
self.download_queue.read()
);
}
}
} else if let Some(download_agent) = self.download_agent_registry.get(meta) {
self.cleanup_current_download();
self.download_agent_registry.remove(meta);
debug!("current download queue: {:?}", self.download_queue.read());
}
// else just cancel it
else if let Some(download_agent) = self.download_agent_registry.get(meta) {
let index = self.download_queue.get_by_meta(meta);
if let Some(index) = index {
download_agent.on_cancelled(&self.app_handle);
@@ -371,6 +370,7 @@ impl DownloadManagerBuilder {
);
}
}
self.sender.send(DownloadManagerSignal::Go).unwrap();
self.push_ui_queue_update();
}
fn push_ui_stats_update(&self, kbs: usize, time: usize) {
@@ -62,7 +62,7 @@ impl Serialize for DownloadManagerStatus {
}
}

#[derive(Serialize, Clone, Debug)]
#[derive(Serialize, Clone, Debug, PartialEq)]
pub enum DownloadStatus {
Queued,
Downloading,
@@ -12,6 +12,12 @@ use super::{
util::{download_thread_control_flag::DownloadThreadControl, progress_object::ProgressObject},
};

/**
* Downloadables are responsible for managing their specific object's download state
* e.g, the GameDownloadAgent is responsible for pushing game updates
*
* But the download manager manages the queue state
*/
pub trait Downloadable: Send + Sync {
fn download(&self, app_handle: &AppHandle) -> Result<bool, ApplicationDownloadError>;
fn validate(&self, app_handle: &AppHandle) -> Result<bool, ApplicationDownloadError>;
@@ -20,7 +26,7 @@ pub trait Downloadable: Send + Sync {
fn control_flag(&self) -> DownloadThreadControl;
fn status(&self) -> DownloadStatus;
fn metadata(&self) -> DownloadableMetadata;
fn on_initialised(&self, app_handle: &AppHandle);
fn on_queued(&self, app_handle: &AppHandle);
fn on_error(&self, app_handle: &AppHandle, error: &ApplicationDownloadError);
fn on_complete(&self, app_handle: &AppHandle);
fn on_cancelled(&self, app_handle: &AppHandle);
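The doc comment spells out the split of responsibilities: a `Downloadable` owns its own status and lifecycle hooks, while the download manager only owns queue order and which agent currently runs. A stripped-down sketch of an implementor under that contract (simplified trait and a hypothetical `NoopDownload`, not the real `GameDownloadAgent`):

    use std::sync::Mutex;

    #[derive(Clone, Debug, PartialEq)]
    enum DownloadStatus {
        Queued,
        Downloading,
        Complete,
    }

    // Cut-down version of the trait: the object reports and updates its own status,
    // and the queue never reaches into it beyond these hooks.
    trait Downloadable: Send + Sync {
        fn status(&self) -> DownloadStatus;
        fn on_queued(&self);
        fn download(&self) -> bool;
    }

    struct NoopDownload {
        status: Mutex<DownloadStatus>,
    }

    impl Downloadable for NoopDownload {
        fn status(&self) -> DownloadStatus {
            self.status.lock().unwrap().clone()
        }

        // Called by the manager whenever this item goes (back) into the queue.
        fn on_queued(&self) {
            *self.status.lock().unwrap() = DownloadStatus::Queued;
        }

        // The downloadable flips its own state; the manager only decides when to call this.
        fn download(&self) -> bool {
            *self.status.lock().unwrap() = DownloadStatus::Downloading;
            // ... do the actual work here ...
            *self.status.lock().unwrap() = DownloadStatus::Complete;
            true
        }
    }

    fn main() {
        let item = NoopDownload { status: Mutex::new(DownloadStatus::Queued) };
        item.on_queued();
        assert!(item.download());
        assert_eq!(item.status(), DownloadStatus::Complete);
    }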
@@ -23,7 +23,7 @@ pub struct ProgressObject {
//last_update: Arc<RwLock<Instant>>,
last_update_time: Arc<AtomicInstant>,
bytes_last_update: Arc<AtomicUsize>,
rolling: RollingProgressWindow<1>,
rolling: RollingProgressWindow<1000>,
}

#[derive(Clone)]
@@ -120,7 +120,7 @@ pub fn calculate_update(progress: &ProgressObject) {
let last_update_time = progress
.last_update_time
.swap(Instant::now(), Ordering::SeqCst);
let time_since_last_update = Instant::now().duration_since(last_update_time).as_millis();
let time_since_last_update = Instant::now().duration_since(last_update_time).as_millis_f64();

let current_bytes_downloaded = progress.sum();
let max = progress.get_max();
@@ -128,17 +128,17 @@ pub fn calculate_update(progress: &ProgressObject) {
.bytes_last_update
.swap(current_bytes_downloaded, Ordering::Acquire);

let bytes_since_last_update = current_bytes_downloaded.saturating_sub(bytes_at_last_update);
let bytes_since_last_update = current_bytes_downloaded.saturating_sub(bytes_at_last_update) as f64;

let kilobytes_per_second = bytes_since_last_update / (time_since_last_update as usize).max(1);
let kilobytes_per_second = bytes_since_last_update / time_since_last_update;

let bytes_remaining = max.saturating_sub(current_bytes_downloaded); // bytes

progress.update_window(kilobytes_per_second);
progress.update_window(kilobytes_per_second as usize);
push_update(progress, bytes_remaining);
}

#[throttle(1, Duration::from_millis(500))]
#[throttle(1, Duration::from_millis(250))]
pub fn push_update(progress: &ProgressObject, bytes_remaining: usize) {
let average_speed = progress.rolling.get_average();
let time_remaining = (bytes_remaining / 1000) / average_speed.max(1);
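Moving the byte delta and elapsed time to `f64` removes the integer division that rounded short intervals to zero (hence the old `.max(1)` guard), and since bytes per millisecond is numerically the same as kilobytes per second, no extra scaling is needed. A standalone version of that calculation (using `as_secs_f64()` rather than the newer `as_millis_f64()`):

    use std::time::Duration;

    // bytes / milliseconds == kilobytes / second (both differ by a factor of 1000),
    // so the result can be fed straight into a KB/s rolling window.
    fn kilobytes_per_second(bytes_since_last_update: usize, elapsed: Duration) -> f64 {
        let millis = elapsed.as_secs_f64() * 1000.0; // or `as_millis_f64()` on newer toolchains
        if millis <= 0.0 {
            return 0.0;
        }
        bytes_since_last_update as f64 / millis
    }

    fn main() {
        // 5 MB in 2 seconds -> 2500 KB/s.
        let rate = kilobytes_per_second(5_000_000, Duration::from_secs(2));
        assert!((rate - 2500.0).abs() < f64::EPSILON);
    }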
@@ -1,6 +1,6 @@
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
atomic::{AtomicUsize, Ordering},
};

#[derive(Clone)]
@@ -22,17 +22,22 @@ impl<const S: usize> RollingProgressWindow<S> {
}
pub fn get_average(&self) -> usize {
let current = self.current.load(Ordering::SeqCst);
self.window
let valid = self
.window
.iter()
.enumerate()
.filter(|(i, _)| i < &current)
.map(|(_, x)| x.load(Ordering::Acquire))
.sum::<usize>()
/ S
.collect::<Vec<usize>>();
let amount = valid.len();
let sum = valid.into_iter().sum::<usize>();

sum / amount
}
pub fn reset(&self) {
self.window
.iter()
.for_each(|x| x.store(0, Ordering::Release));
self.current.store(0, Ordering::Release);
}
}
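The reworked `get_average` averages only the slots that have been written (`i < current`); the old code always divided by the full window size `S`, under-reporting speed until the window filled. The same idea as a self-contained sketch; unlike the diff, it also guards the empty-window case explicitly:

    use std::sync::atomic::{AtomicUsize, Ordering};

    struct RollingWindow<const S: usize> {
        window: [AtomicUsize; S],
        current: AtomicUsize, // number of samples written so far (capped at S by the writer)
    }

    impl<const S: usize> RollingWindow<S> {
        fn get_average(&self) -> usize {
            let filled = self.current.load(Ordering::SeqCst).min(S);
            if filled == 0 {
                return 0; // nothing recorded yet
            }
            let sum: usize = self.window[..filled]
                .iter()
                .map(|slot| slot.load(Ordering::Acquire))
                .sum();
            sum / filled // divide by samples present, not by the window capacity
        }
    }

    fn main() {
        let window: RollingWindow<4> = RollingWindow {
            window: std::array::from_fn(|_| AtomicUsize::new(0)),
            current: AtomicUsize::new(2),
        };
        window.window[0].store(100, Ordering::Release);
        window.window[1].store(300, Ordering::Release);
        // Two samples recorded: average is 200, not (100 + 300) / 4 = 100.
        assert_eq!(window.get_average(), 200);
    }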
@@ -1,10 +1,11 @@
use bitcode::{Decode, Encode};
use serde::{Deserialize, Serialize};

use crate::games::library::Game;

pub type Collections = Vec<Collection>;

#[derive(Serialize, Deserialize, Debug, Clone, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, Encode, Decode)]
#[serde(rename_all = "camelCase")]
pub struct Collection {
id: String,
@@ -14,7 +15,7 @@ pub struct Collection {
entries: Vec<CollectionObject>,
}

#[derive(Serialize, Deserialize, Debug, Clone, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, Encode, Decode)]
#[serde(rename_all = "camelCase")]
pub struct CollectionObject {
collection_id: String,
@@ -4,6 +4,7 @@ use crate::{
error::remote_access_error::RemoteAccessError,
remote::{
auth::generate_authorization_header,
cache::{cache_object, get_cached_object},
requests::{generate_url, make_authenticated_get},
utils::DROP_CLIENT_ASYNC,
},
@@ -12,11 +13,23 @@ use crate::{
use super::collection::{Collection, Collections};

#[tauri::command]
pub async fn fetch_collections() -> Result<Collections, RemoteAccessError> {
pub async fn fetch_collections(
hard_refresh: Option<bool>,
) -> Result<Collections, RemoteAccessError> {
let do_hard_refresh = hard_refresh.unwrap_or(false);
if !do_hard_refresh && let Ok(cached_response) = get_cached_object::<Collections>("collections")
{
return Ok(cached_response);
}

let response =
make_authenticated_get(generate_url(&["/api/v1/client/collection"], &[])?).await?;

Ok(response.json().await?)
let collections: Collections = response.json().await?;

cache_object("collections", &collections)?;

Ok(collections)
}

#[tauri::command]
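`fetch_collections` now follows the library flow: serve the cached copy unless the caller passes `hard_refresh`, and re-cache whatever the server returns. The shape of that logic reduced to a synchronous, in-memory sketch (the real code persists through `cache_object`/`get_cached_object`):

    use std::collections::HashMap;

    struct Cache {
        store: HashMap<String, Vec<String>>,
    }

    impl Cache {
        // Return the cached value unless a hard refresh is requested; otherwise fetch,
        // overwrite the cache, and return the fresh value.
        fn fetch_collections(
            &mut self,
            hard_refresh: Option<bool>,
            fetch_remote: impl Fn() -> Vec<String>,
        ) -> Vec<String> {
            let do_hard_refresh = hard_refresh.unwrap_or(false);
            if !do_hard_refresh {
                if let Some(cached) = self.store.get("collections") {
                    return cached.clone();
                }
            }
            let fresh = fetch_remote();
            self.store.insert("collections".to_string(), fresh.clone());
            fresh
        }
    }

    fn main() {
        let mut cache = Cache { store: HashMap::new() };
        // First call hits the "server", second call is served from the cache.
        let a = cache.fetch_collections(None, || vec!["favourites".to_string()]);
        let b = cache.fetch_collections(None, || unreachable!("should not refetch"));
        assert_eq!(a, b);
        // `Some(true)` forces a refetch even though a cached copy exists.
        let c = cache.fetch_collections(Some(true), || vec!["favourites".to_string(), "backlog".to_string()]);
        assert_eq!(c.len(), 2);
    }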
@@ -90,7 +103,8 @@ pub async fn delete_game_in_collection(
.delete(url)
.header("Authorization", generate_authorization_header())
.json(&json!({"id": game_id}))
.send().await?;
.send()
.await?;

Ok(())
}

@@ -27,12 +27,14 @@ use super::{
#[tauri::command]
pub async fn fetch_library(
state: tauri::State<'_, Mutex<AppState<'_>>>,
hard_refresh: Option<bool>,
) -> Result<Vec<Game>, RemoteAccessError> {
offline!(
state,
fetch_library_logic,
fetch_library_logic_offline,
state
state,
hard_refresh
).await
}
@@ -23,7 +23,7 @@ use crate::remote::utils::{DROP_CLIENT_ASYNC, DROP_CLIENT_SYNC};
use log::{debug, error, info, warn};
use rayon::ThreadPoolBuilder;
use std::collections::{HashMap, HashSet};
use std::fs::{create_dir_all, OpenOptions};
use std::fs::{OpenOptions, create_dir_all};
use std::path::{Path, PathBuf};
use std::sync::mpsc::Sender;
use std::sync::{Arc, Mutex};
@@ -39,6 +39,7 @@ use super::drop_data::DropData;
static RETRY_COUNT: usize = 3;

const TARGET_BUCKET_SIZE: usize = 63 * 1000 * 1000;
const MAX_FILES_PER_BUCKET: usize = (1024 / 4) - 1;

pub struct GameDownloadAgent {
pub id: String,
@@ -83,6 +84,8 @@ impl GameDownloadAgent {
let stored_manifest =
DropData::generate(id.clone(), version.clone(), data_base_dir_path.clone());

let context_lock = stored_manifest.contexts.lock().unwrap().clone();

let result = Self {
id,
version,
@@ -105,7 +108,14 @@ impl GameDownloadAgent {
.as_ref()
.unwrap()
.values()
.map(|e| e.lengths.iter().sum::<usize>())
.map(|e| {
e.lengths
.iter()
.enumerate()
.filter(|(i, _)| *context_lock.get(&e.checksums[*i]).unwrap_or(&false))
.map(|(_, v)| v)
.sum::<usize>()
})
.sum::<usize>() as u64;

let available_space = get_disk_available(data_base_dir_path)? as u64;
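The free-space check now consults the stored `contexts` map per chunk instead of charging for the whole manifest, so resuming a partially finished download requires less headroom. A simplified version of that sum; the exact meaning of the boolean flag (here read as "still to download") is an assumption of the sketch:

    use std::collections::HashMap;

    struct FileEntry {
        lengths: Vec<usize>,    // byte length of each chunk
        checksums: Vec<String>, // one key per chunk, used to look the chunk up in `contexts`
    }

    // Sum only the chunks whose flag in `contexts` is set; the flag is assumed
    // to mean "this chunk still has to be downloaded".
    fn bytes_still_needed(files: &[FileEntry], contexts: &HashMap<String, bool>) -> u64 {
        files
            .iter()
            .map(|entry| {
                entry
                    .lengths
                    .iter()
                    .enumerate()
                    .filter(|(i, _)| contexts.get(&entry.checksums[*i]).copied().unwrap_or(false))
                    .map(|(_, len)| len)
                    .sum::<usize>()
            })
            .sum::<usize>() as u64
    }

    fn main() {
        let files = vec![FileEntry {
            lengths: vec![10, 20, 30],
            checksums: vec!["a".into(), "b".into(), "c".into()],
        }];
        // Only chunk "b" is flagged, so only its 20 bytes count toward the requirement.
        let contexts = HashMap::from([("b".to_string(), true)]);
        assert_eq!(bytes_still_needed(&files, &contexts), 20);
    }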
@@ -298,7 +308,8 @@ impl GameDownloadAgent {
drops: vec![],
});

if *current_bucket_size + length >= TARGET_BUCKET_SIZE
if (*current_bucket_size + length >= TARGET_BUCKET_SIZE
|| current_bucket.drops.len() >= MAX_FILES_PER_BUCKET)
&& !current_bucket.drops.is_empty()
{
// Move current bucket into list and make a new one
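A bucket is now closed when either cap is reached: the existing ~63 MB size target or the new `MAX_FILES_PER_BUCKET` of `(1024 / 4) - 1 = 255` drops, so a flood of tiny files can no longer pile into one oversized request. A standalone sketch of that greedy rule (not the commit's exact data structures):

    const TARGET_BUCKET_SIZE: usize = 63 * 1000 * 1000; // ~63 MB per bucket
    const MAX_FILES_PER_BUCKET: usize = (1024 / 4) - 1; // 255 files per bucket

    // Greedily pack file sizes into buckets, closing a bucket when it is big enough
    // or already holds the maximum number of files.
    fn bucket_files(file_sizes: &[usize]) -> Vec<Vec<usize>> {
        let mut buckets: Vec<Vec<usize>> = Vec::new();
        let mut current: Vec<usize> = Vec::new();
        let mut current_size = 0;

        for &size in file_sizes {
            if (current_size + size >= TARGET_BUCKET_SIZE || current.len() >= MAX_FILES_PER_BUCKET)
                && !current.is_empty()
            {
                buckets.push(std::mem::take(&mut current));
                current_size = 0;
            }
            current.push(size);
            current_size += size;
        }
        if !current.is_empty() {
            buckets.push(current);
        }
        buckets
    }

    fn main() {
        // 1000 one-byte files never reach the size target, but still split into 255-file buckets.
        let sizes = vec![1usize; 1000];
        let buckets = bucket_files(&sizes);
        assert!(buckets.iter().all(|b| b.len() <= MAX_FILES_PER_BUCKET));
    }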
@@ -367,7 +378,8 @@ impl GameDownloadAgent {
.iter()
.map(|e| &e.version)
.collect::<HashSet<_>>()
.into_iter().cloned()
.into_iter()
.cloned()
.collect::<Vec<String>>();

info!("downloading across these versions: {versions:?}");
@@ -460,6 +472,7 @@ impl GameDownloadAgent {
ApplicationDownloadError::Communication(_)
| ApplicationDownloadError::Checksum
| ApplicationDownloadError::Lock
| ApplicationDownloadError::IoError(_)
);

if i == RETRY_COUNT - 1 || !retry {
@@ -626,8 +639,17 @@ impl Downloadable for GameDownloadAgent {
}
}

fn on_initialised(&self, _app_handle: &tauri::AppHandle) {
fn on_queued(&self, app_handle: &tauri::AppHandle) {
*self.status.lock().unwrap() = DownloadStatus::Queued;
let mut db_lock = borrow_db_mut_checked();
let status = ApplicationTransientStatus::Queued {
version_name: self.version.clone(),
};
db_lock
.applications
.transient_statuses
.insert(self.metadata(), status.clone());
push_game_update(app_handle, &self.id, None, (None, Some(status)));
}

fn on_error(&self, app_handle: &tauri::AppHandle, error: &ApplicationDownloadError) {
@@ -636,7 +658,7 @@ impl Downloadable for GameDownloadAgent {
.emit("download_error", error.to_string())
.unwrap();

error!("error while managing download: {error}");
error!("error while managing download: {error:?}");

let mut handle = borrow_db_mut_checked();
handle
@@ -662,15 +684,8 @@ impl Downloadable for GameDownloadAgent {
}

fn on_cancelled(&self, app_handle: &tauri::AppHandle) {
info!("cancelled {}", self.id);
self.cancel(app_handle);
/*
on_game_incomplete(
&self.metadata(),
self.dropdata.base_path.to_string_lossy().to_string(),
app_handle,
)
.unwrap();
*/
}

fn status(&self) -> DownloadStatus {
@@ -9,7 +9,7 @@ use crate::games::downloads::manifest::{ChunkBody, DownloadBucket, DownloadConte
use crate::remote::auth::generate_authorization_header;
use crate::remote::requests::generate_url;
use crate::remote::utils::DROP_CLIENT_SYNC;
use log::{info, warn};
use log::{debug, info, warn};
use md5::{Context, Digest};
use reqwest::blocking::Response;

@@ -18,6 +18,7 @@ use std::io::Read;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
use std::time::Instant;
use std::{
fs::{File, OpenOptions},
io::{self, BufWriter, Seek, SeekFrom, Write},
@@ -25,6 +26,7 @@ use std::{
};

static MAX_PACKET_LENGTH: usize = 4096 * 4;
static BUMP_SIZE: usize = 4096 * 16;

pub struct DropWriter<W: Write> {
hasher: Context,
@@ -79,6 +81,8 @@ pub struct DropDownloadPipeline<'a, R: Read, W: Write> {
pub drops: Vec<DownloadDrop>,
pub destination: Vec<DropWriter<W>>,
pub control_flag: &'a DownloadThreadControl,
#[allow(dead_code)]
progress: ProgressHandle,
}

impl<'a> DropDownloadPipeline<'a, Response, File> {
@@ -96,6 +100,7 @@ impl<'a> DropDownloadPipeline<'a, Response, File> {
.try_collect()?,
drops,
control_flag,
progress,
})
}

@@ -111,13 +116,24 @@ impl<'a> DropDownloadPipeline<'a, Response, File> {
if drop.start != 0 {
destination.seek(SeekFrom::Start(drop.start.try_into().unwrap()))?;
}
let mut last_bump = 0;
loop {
let size = MAX_PACKET_LENGTH.min(remaining);
self.source.read_exact(&mut copy_buffer[0..size])?;
let size = self.source.read(&mut copy_buffer[0..size]).inspect_err(|_| {
info!("got error from {}", drop.filename);
})?;
remaining -= size;
last_bump += size;

destination.write_all(&copy_buffer[0..size])?;

if last_bump > BUMP_SIZE {
last_bump -= BUMP_SIZE;
if self.control_flag.get() == DownloadThreadControlFlag::Stop {
return Ok(false);
}
}

if remaining == 0 {
break;
};
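The copy loop no longer uses `read_exact`, which errors on a short read; it accepts however many bytes the socket returns, writes that much, and checks the control flag roughly every `BUMP_SIZE` bytes so a pause or cancel can interrupt mid-chunk. The same loop shape in isolation over plain `Read`/`Write`, with the control flag reduced to a closure and an explicit end-of-stream guard added:

    use std::io::{self, Read, Write};

    const MAX_PACKET_LENGTH: usize = 4096 * 4;
    const BUMP_SIZE: usize = 4096 * 16;

    // Copy exactly `remaining` bytes, tolerating short reads and polling `should_stop`
    // roughly every BUMP_SIZE bytes. Returns Ok(false) if the copy was interrupted.
    fn copy_chunk<R: Read, W: Write>(
        source: &mut R,
        destination: &mut W,
        mut remaining: usize,
        should_stop: impl Fn() -> bool,
    ) -> io::Result<bool> {
        let mut buffer = [0u8; MAX_PACKET_LENGTH];
        let mut last_bump = 0;

        while remaining > 0 {
            let want = MAX_PACKET_LENGTH.min(remaining);
            let got = source.read(&mut buffer[0..want])?; // may be shorter than `want`
            if got == 0 {
                return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "source ended early"));
            }
            destination.write_all(&buffer[0..got])?;
            remaining -= got;
            last_bump += got;

            if last_bump > BUMP_SIZE {
                last_bump -= BUMP_SIZE;
                if should_stop() {
                    return Ok(false); // interrupted by the control flag
                }
            }
        }
        Ok(true)
    }

    fn main() -> io::Result<()> {
        let data = vec![7u8; 100_000];
        let mut out = Vec::new();
        let finished = copy_chunk(&mut &data[..], &mut out, data.len(), || false)?;
        assert!(finished && out == data);
        Ok(())
    }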
@@ -131,6 +147,13 @@ impl<'a> DropDownloadPipeline<'a, Response, File> {
Ok(true)
}

#[allow(dead_code)]
fn debug_skip_checksum(self) {
self.destination
.into_iter()
.for_each(|mut e| e.flush().unwrap());
}

fn finish(self) -> Result<Vec<Digest>, io::Error> {
let checksums = self
.destination
@@ -153,6 +176,8 @@ pub fn download_game_bucket(
return Ok(false);
}

let start = Instant::now();

let header = generate_authorization_header();

let url = generate_url(&["/api/v2/client/chunk"], &[])
@@ -195,9 +220,7 @@ pub fn download_game_bucket(
for (i, raw_length) in lengths.split(",").enumerate() {
let length = raw_length.parse::<usize>().unwrap_or(0);
let Some(drop) = bucket.drops.get(i) else {
warn!(
"invalid number of Content-Lengths recieved: {i}, {lengths}"
);
warn!("invalid number of Content-Lengths recieved: {i}, {lengths}");
return Err(ApplicationDownloadError::DownloadError);
};
if drop.length != length {
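Each value in the comma-separated length header is validated against the expected drop length before any bytes are written, so a malformed response fails fast instead of corrupting files mid-copy. A dependency-free sketch of that validation with hypothetical names:

    // Parse a comma-separated list of lengths and require it to match the expected
    // chunk lengths exactly; any mismatch aborts before data is written.
    fn validate_lengths(header: &str, expected: &[usize]) -> Result<(), String> {
        for (i, raw_length) in header.split(',').enumerate() {
            let length = raw_length.trim().parse::<usize>().unwrap_or(0);
            let Some(&want) = expected.get(i) else {
                return Err(format!("more lengths than expected chunks: {i}"));
            };
            if want != length {
                return Err(format!("chunk {i}: expected {want} bytes, header says {length}"));
            }
        }
        Ok(())
    }

    fn main() {
        assert!(validate_lengths("10,20,30", &[10, 20, 30]).is_ok());
        assert!(validate_lengths("10,25", &[10, 20]).is_err());
    }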
@@ -209,6 +232,10 @@ pub fn download_game_bucket(
}
}

let timestep = start.elapsed().as_millis();

debug!("took {}ms to start downloading", timestep);

let mut pipeline =
DropDownloadPipeline::new(response, bucket.drops.clone(), control_flag, progress)
.map_err(|e| ApplicationDownloadError::IoError(Arc::new(e)))?;

@@ -80,7 +80,13 @@ pub struct StatsUpdateEvent {

pub async fn fetch_library_logic(
state: tauri::State<'_, Mutex<AppState<'_>>>,
hard_fresh: Option<bool>,
) -> Result<Vec<Game>, RemoteAccessError> {
let do_hard_refresh = hard_fresh.unwrap_or(false);
if !do_hard_refresh && let Ok(library) = get_cached_object("library") {
return Ok(library);
}

let client = DROP_CLIENT_ASYNC.clone();
let response = generate_url(&["/api/v1/client/user/library"], &[])?;
let response = client
@@ -142,6 +148,7 @@ pub async fn fetch_library_logic(
}
pub async fn fetch_library_logic_offline(
_state: tauri::State<'_, Mutex<AppState<'_>>>,
_hard_refresh: Option<bool>,
) -> Result<Vec<Game>, RemoteAccessError> {
let mut games: Vec<Game> = get_cached_object("library")?;

@@ -521,9 +528,10 @@ pub fn push_game_update(
) {
if let Some(GameDownloadStatus::Installed { .. } | GameDownloadStatus::SetupRequired { .. }) =
&status.0
&& version.is_none() {
panic!("pushed game for installed game that doesn't have version information");
}
&& version.is_none()
{
panic!("pushed game for installed game that doesn't have version information");
}

app_handle
.emit(
@@ -1,4 +1,6 @@
use crate::database::models::data::{ApplicationTransientStatus, Database, GameDownloadStatus};
use crate::database::models::data::{
ApplicationTransientStatus, Database, DownloadType, DownloadableMetadata, GameDownloadStatus,
};

pub type GameStatusWithTransient = (
Option<GameDownloadStatus>,
@@ -8,10 +10,16 @@ pub struct GameStatusManager {}

impl GameStatusManager {
pub fn fetch_state(game_id: &String, database: &Database) -> GameStatusWithTransient {
let online_state = match database.applications.installed_game_version.get(game_id) {
Some(meta) => database.applications.transient_statuses.get(meta).cloned(),
None => None,
};
let online_state = database
.applications
.transient_statuses
.get(&DownloadableMetadata {
id: game_id.to_string(),
download_type: DownloadType::Game,
version: None,
})
.cloned();

let offline_state = database.applications.game_statuses.get(game_id).cloned();

if online_state.is_some() {
@@ -12,7 +12,7 @@ use crate::{
database::{
db::{borrow_db_checked, borrow_db_mut_checked},
models::data::DatabaseAuth,
}, error::{drop_server_error::DropServerError, remote_access_error::RemoteAccessError}, remote::{requests::make_authenticated_get, utils::{DROP_CLIENT_ASYNC, DROP_CLIENT_SYNC}}, AppState, AppStatus, User
}, error::{drop_server_error::DropServerError, remote_access_error::RemoteAccessError}, remote::{cache::clear_cached_object, requests::make_authenticated_get, utils::{DROP_CLIENT_ASYNC, DROP_CLIENT_SYNC}}, AppState, AppStatus, User
};

use super::{
@@ -159,6 +159,9 @@ pub async fn recieve_handshake(app: AppHandle, path: String) {
state_lock.status = app_status;
state_lock.user = user;

let _ = clear_cached_object("collections");
let _ = clear_cached_object("library");

drop(state_lock);

app.emit("auth/finished", ()).unwrap();
@@ -50,6 +50,12 @@ fn read_sync(base: &Path, key: &str) -> io::Result<Vec<u8>> {
Ok(file)
}

fn delete_sync(base: &Path, key: &str) -> io::Result<()> {
let cache_path = get_cache_path(base, key);
std::fs::remove_file(cache_path)?;
Ok(())
}

pub fn cache_object<D: Encode>(key: &str, data: &D) -> Result<(), RemoteAccessError> {
cache_object_db(key, data, &borrow_db_checked())
}
@@ -73,6 +79,17 @@ pub fn get_cached_object_db(
bitcode::decode::<D>(&bytes).map_err(|e| RemoteAccessError::Cache(io::Error::other(e)))?;
Ok(data)
}
pub fn clear_cached_object(key: &str) -> Result<(), RemoteAccessError> {
clear_cached_object_db(key, &borrow_db_checked())
}
pub fn clear_cached_object_db(
key: &str,
db: &Database,
) -> Result<(), RemoteAccessError> {
delete_sync(&db.cache_dir, key).map_err(RemoteAccessError::Cache)?;
Ok(())
}

#[derive(Encode, Decode)]
pub struct ObjectCache {
content_type: String,