fix: manifest sizing for slow backends

DecDuck
2025-08-15 16:49:18 +10:00
parent 913dc2f58d
commit 4fb9bb7563
4 changed files with 81 additions and 17 deletions
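
Previously each fill_buf() read became its own manifest chunk, so a slow backend that returns short reads (for example, streaming files out of a zip) produced manifests with many undersized chunks. Chunk generation now accumulates reads in an inner loop until a chunk reaches CHUNK_SIZE or the file runs out. The transmute-to-'static borrow and the background thread it enabled are removed, so generation runs synchronously; a regression test bounds the chunk count for the test archive at 20, and the package version is bumped to 2.1.0.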

__test__/debug.spec.mjs (new file)

@@ -0,0 +1,22 @@
+import test from "ava";
+import { DropletHandler, generateManifest } from "../index.js";
+
+test.skip("debug", async (t) => {
+  const handler = new DropletHandler();
+  console.log("created handler");
+
+  const manifest = JSON.parse(
+    await new Promise((r, e) =>
+      generateManifest(
+        handler,
+        "./assets/TheGame.zip",
+        (_, __) => {},
+        (_, __) => {},
+        (err, manifest) => (err ? e(err) : r(manifest))
+      )
+    )
+  );
+
+  return t.pass();
+});


@@ -102,7 +102,7 @@ test("read file offset", async (t) => {
   fs.rmSync(dirName, { recursive: true });
 });
 
-test("zip speed test", async (t) => {
+test.skip("zip speed test", async (t) => {
   t.timeout(100_000_000);
   const dropletHandler = new DropletHandler();
@@ -135,7 +135,31 @@ test("zip speed test", async (t) => {
   const roughAverage = totalRead / totalSeconds;
-  console.log(`total rough average: ${prettyBytes(roughAverage)}/s`)
+  console.log(`total rough average: ${prettyBytes(roughAverage)}/s`);
+
+  t.pass();
+});
+
+test("zip manifest test", async (t) => {
+  const dropletHandler = new DropletHandler();
+  const manifest = JSON.parse(
+    await new Promise((r, e) =>
+      generateManifest(
+        dropletHandler,
+        "./assets/TheGame.zip",
+        (_, __) => {},
+        (_, __) => {},
+        (err, manifest) => (err ? e(err) : r(manifest))
+      )
+    )
+  );
+
+  const file = manifest[Object.keys(manifest).at(0)];
+  const amount = file.ids.length;
+  if (amount > 20) {
+    return t.fail(`Zip manifest has ${amount} chunks, more than 20`);
+  }
 
   t.pass();
 });
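
The 20-chunk bound is the regression check for this fix: with one chunk per fill_buf() read, a slow zip backend split the same archive into far more chunks than a full-speed backend would.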

package.json

@ -1,6 +1,6 @@
{ {
"name": "@drop-oss/droplet", "name": "@drop-oss/droplet",
"version": "2.0.2", "version": "2.1.0",
"main": "index.js", "main": "index.js",
"types": "index.d.ts", "types": "index.d.ts",
"napi": { "napi": {


@ -1,7 +1,6 @@
use std::{ use std::{
collections::HashMap, collections::HashMap,
io::{BufRead, BufReader}, io::{BufRead, BufReader},
path::Path,
sync::Arc, sync::Arc,
thread, thread,
}; };
@@ -42,11 +41,11 @@ pub fn generate_manifest<'a>(
     log_sfn: ThreadsafeFunction<String>,
     callback_sfn: ThreadsafeFunction<String>,
 ) -> Result<()> {
-    let backend: &mut Box<dyn VersionBackend + Send> =
-        droplet_handler.create_backend_for_path(dir).ok_or(napi::Error::from_reason("Could not create backend for path."))?;
-    let backend: &'static mut Box<dyn VersionBackend + Send> =
-        unsafe { std::mem::transmute(backend) };
-    thread::spawn(move || {
+    let backend: &mut Box<dyn VersionBackend + Send> = droplet_handler
+        .create_backend_for_path(dir)
+        .ok_or(napi::Error::from_reason(
+            "Could not create backend for path.",
+        ))?;
 
     let files = backend.list_files();
     // Filepath to chunk data
@@ -56,8 +55,8 @@ pub fn generate_manifest<'a>(
     let mut i: i32 = 0;
     for version_file in files {
-        let raw_reader = backend.reader(&version_file).unwrap();
-        let mut reader = BufReader::with_capacity(CHUNK_SIZE, raw_reader);
+        let reader = backend.reader(&version_file).unwrap();
+        let mut reader = BufReader::with_capacity(8128, reader);
 
         let mut chunk_data = ChunkData {
             permissions: version_file.permission,
@@ -68,12 +67,28 @@ pub fn generate_manifest<'a>(
         let mut chunk_index = 0;
         loop {
+            let mut length = 0;
             let mut buffer: Vec<u8> = Vec::new();
-            reader.fill_buf().unwrap().clone_into(&mut buffer);
-            let length = buffer.len();
-            if length == 0 {
-                break;
+            let mut file_empty = false;
+
+            loop {
+                let read_buf = reader.fill_buf().unwrap();
+                let buf_length = read_buf.len();
+                length += buf_length;
+                if length >= CHUNK_SIZE {
+                    break;
+                }
+                // If we're out of data, add this chunk and then move onto the next file
+                if buf_length == 0 {
+                    file_empty = true;
+                    break;
+                }
+                buffer.extend_from_slice(read_buf);
+                // Consume only this read; consuming the cumulative `length`
+                // would exceed what fill_buf() returned.
+                reader.consume(buf_length);
             }
 
             let chunk_id = Uuid::new_v4();
@@ -88,10 +103,14 @@ pub fn generate_manifest<'a>(
                "Processed chunk {} for {}",
                chunk_index, &version_file.relative_filename
            );
            log_sfn.call(Ok(log_str), ThreadsafeFunctionCallMode::Blocking);
-           reader.consume(length);
            chunk_index += 1;
+
+           if file_empty {
+               break;
+           }
        }
 
        chunks.insert(version_file.relative_filename, chunk_data);
@@ -105,7 +124,6 @@ pub fn generate_manifest<'a>(
         Ok(json!(chunks).to_string()),
         ThreadsafeFunctionCallMode::Blocking,
     );
-    });
 
     Ok(())
 }
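
For reference, a self-contained sketch of the accumulation pattern the new inner loop implements. This is a sketch only: the CHUNK_SIZE value and the read_chunks/on_chunk names are illustrative, not the crate's API. Unlike the committed loop, it splits a fill_buf() result at the chunk boundary, so every chunk comes out at exactly CHUNK_SIZE except the last, regardless of how small the backend's reads are.

use std::io::{BufRead, BufReader, Read};

// Illustrative value; the crate defines its own CHUNK_SIZE constant.
const CHUNK_SIZE: usize = 16 * 1024 * 1024;

/// Split `source` into chunks of up to CHUNK_SIZE bytes, accumulating
/// short reads (as a slow backend produces) until each chunk is full.
fn read_chunks<R: Read>(source: R, mut on_chunk: impl FnMut(&[u8])) -> std::io::Result<()> {
    let mut reader = BufReader::new(source);
    loop {
        let mut buffer: Vec<u8> = Vec::with_capacity(CHUNK_SIZE);
        // Pull from fill_buf() until the chunk is full or the source is empty.
        loop {
            let read_buf = reader.fill_buf()?;
            if read_buf.is_empty() {
                break; // source exhausted
            }
            // Take no more than the space left in this chunk.
            let take = read_buf.len().min(CHUNK_SIZE - buffer.len());
            buffer.extend_from_slice(&read_buf[..take]);
            reader.consume(take); // consume exactly what was copied out
            if buffer.len() == CHUNK_SIZE {
                break; // chunk full
            }
        }
        if buffer.is_empty() {
            return Ok(()); // no more data
        }
        on_chunk(&buffer);
    }
}

Splitting at the boundary keeps chunk counts a pure function of file size, which is what a bound like the 20-chunk test assertion relies on.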