Mirror of https://github.com/Drop-OSS/droplet.git, synced 2025-11-09 20:12:18 +10:00
feat: no panik
8 Cargo.lock (generated)
@@ -37,6 +37,12 @@ version = "0.2.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
 
+[[package]]
+name = "anyhow"
+version = "1.0.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
+
 [[package]]
 name = "arrayvec"
 version = "0.7.6"
@@ -477,6 +483,7 @@ dependencies = [
 name = "droplet"
 version = "0.7.0"
 dependencies = [
+ "anyhow",
  "boa_engine",
  "dyn-clone",
  "flate2",
@@ -925,6 +932,7 @@ version = "3.0.0-beta.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ca1763658b41abbdf10caaa63b74e58f4ec62d52b889a558e0af6f3638cc9426"
 dependencies = [
+ "anyhow",
  "bitflags",
  "ctor",
  "futures-core",
Cargo.toml
@@ -9,11 +9,7 @@ crate-type = ["cdylib"]
 
 [dependencies]
 # Default enable napi4 feature, see https://nodejs.org/api/n-api.html#node-api-version-matrix
-napi = { version = "3.0.0-beta.11", default-features = false, features = [
-  "napi6",
-  "async",
-  "web_stream",
-] }
+napi = { version = "3.0.0-beta.11", default-features = false, features = ["napi6", "async", "web_stream", "error_anyhow"] }
 napi-derive = "3.0.0-beta.11"
 hex = "0.4.3"
 md5 = "0.7.0"
@@ -30,6 +26,7 @@ rhai = "1.22.2"
 mlua = { version = "0.11.2", features = ["luajit"] }
 boa_engine = "0.20.0"
 serde_json = "1.0.143"
+anyhow = "1.0.99"
 
 [package.metadata.patch]
 crates = ["rawzip"]
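The Cargo.toml change that drives the rest of the commit is the `error_anyhow` feature on `napi`: with it, `#[napi]` exports can return `anyhow::Result<T>`, and a propagated error surfaces as a JavaScript exception instead of a Rust panic. A minimal sketch of the shape this enables (the function and file name here are illustrative, not part of this commit):

```rust
use anyhow::{anyhow, Context};
use napi_derive::napi;

/// Hypothetical export: with `error_anyhow` enabled, whatever error `?`
/// propagates here is thrown to the JS caller as an exception.
#[napi]
pub fn read_manifest(path: String) -> anyhow::Result<String> {
  let raw = std::fs::read_to_string(&path)
    .with_context(|| format!("failed to read manifest at {path}"))?;
  if raw.is_empty() {
    return Err(anyhow!("manifest at {path} is empty"));
  }
  Ok(raw)
}
```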
10 src/lib.rs
@@ -1,10 +1,14 @@
 #![deny(clippy::all)]
+#![deny(clippy::unwrap_used)]
+#![deny(clippy::expect_used)]
+#![deny(clippy::panic)]
 #![feature(trait_alias)]
 
 use std::{any, io};
 
 pub mod manifest;
+pub mod script;
 pub mod ssl;
 pub mod version;
-pub mod script;
 
 #[macro_use]
 extern crate napi_derive;
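These three new crate-level lints are the enforcement mechanism behind the commit title: with `clippy::unwrap_used`, `clippy::expect_used`, and `clippy::panic` denied, any leftover panic path fails the build, which is what forces the `Result`-based rewrites in the files below. A small illustrative sketch (names are hypothetical):

```rust
#![deny(clippy::unwrap_used, clippy::expect_used, clippy::panic)]

use anyhow::Context;

// Rejected by the lints above:
// fn parse_port(s: &str) -> u16 {
//   s.parse().unwrap() // error: used `unwrap()` on a `Result` value
// }

// The compliant shape: propagate the error instead of panicking.
fn parse_port(s: &str) -> anyhow::Result<u16> {
  s.parse().with_context(|| format!("invalid port: {s}"))
}
```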
123 src/manifest.rs
@@ -35,7 +35,7 @@ pub fn generate_manifest<'a>(
   progress_sfn: ThreadsafeFunction<i32>,
   log_sfn: ThreadsafeFunction<String>,
   callback_sfn: ThreadsafeFunction<String>,
-) -> Result<()> {
+) -> anyhow::Result<()> {
   let backend: &mut Box<dyn VersionBackend + Send> = droplet_handler
     .create_backend_for_path(dir)
     .ok_or(napi::Error::from_reason(
@@ -49,83 +49,94 @@ pub fn generate_manifest<'a>(
     unsafe { std::mem::transmute(backend) };
 
   thread::spawn(move || {
-    let files = backend.list_files();
+    let callback_borrow = &callback_sfn;
 
-    // Filepath to chunk data
-    let mut chunks: HashMap<String, ChunkData> = HashMap::new();
+    let mut inner = move || -> Result<()> {
+      let files = backend.list_files()?;
 
-    let total: i32 = files.len() as i32;
-    let mut i: i32 = 0;
+      // Filepath to chunk data
+      let mut chunks: HashMap<String, ChunkData> = HashMap::new();
 
-    let mut buf = [0u8; 1024 * 16];
+      let total: i32 = files.len() as i32;
+      let mut i: i32 = 0;
 
-    for version_file in files {
-      let mut reader = backend.reader(&version_file, 0, 0).unwrap();
+      let mut buf = [0u8; 1024 * 16];
 
-      let mut chunk_data = ChunkData {
-        permissions: version_file.permission,
-        ids: Vec::new(),
-        checksums: Vec::new(),
-        lengths: Vec::new(),
-      };
+      for version_file in files {
+        let mut reader = backend.reader(&version_file, 0, 0)?;
 
-      let mut chunk_index = 0;
-      loop {
-        let mut length = 0;
-        let mut buffer: Vec<u8> = Vec::new();
-        let mut file_empty = false;
+        let mut chunk_data = ChunkData {
+          permissions: version_file.permission,
+          ids: Vec::new(),
+          checksums: Vec::new(),
+          lengths: Vec::new(),
+        };
 
-        loop {
-          let read = reader.read(&mut buf).unwrap();
+        let mut chunk_index = 0;
+        loop {
+          let mut length = 0;
+          let mut buffer: Vec<u8> = Vec::new();
+          let mut file_empty = false;
 
-          length += read;
+          loop {
+            let read = reader.read(&mut buf)?;
 
-          // If we're out of data, add this chunk and then move onto the next file
-          if read == 0 {
-            file_empty = true;
-            break;
-          }
+            length += read;
 
-          buffer.extend_from_slice(&buf[0..read]);
+            // If we're out of data, add this chunk and then move onto the next file
+            if read == 0 {
+              file_empty = true;
+              break;
+            }
 
-          if length >= CHUNK_SIZE {
-            break;
-          }
-        }
+            buffer.extend_from_slice(&buf[0..read]);
 
-        let chunk_id = Uuid::new_v4();
-        let checksum = md5::compute(buffer).0;
-        let checksum_string = hex::encode(checksum);
+            if length >= CHUNK_SIZE {
+              break;
+            }
+          }
 
-        chunk_data.ids.push(chunk_id.to_string());
-        chunk_data.checksums.push(checksum_string);
-        chunk_data.lengths.push(length);
+          let chunk_id = Uuid::new_v4();
+          let checksum = md5::compute(buffer).0;
+          let checksum_string = hex::encode(checksum);
 
-        let log_str = format!(
-          "Processed chunk {} for {}",
-          chunk_index, &version_file.relative_filename
-        );
+          chunk_data.ids.push(chunk_id.to_string());
+          chunk_data.checksums.push(checksum_string);
+          chunk_data.lengths.push(length);
 
-        log_sfn.call(Ok(log_str), ThreadsafeFunctionCallMode::Blocking);
+          let log_str = format!(
+            "Processed chunk {} for {}",
+            chunk_index, &version_file.relative_filename
+          );
 
-        chunk_index += 1;
+          log_sfn.call(Ok(log_str), ThreadsafeFunctionCallMode::Blocking);
 
-        if file_empty {
-          break;
-        }
-      }
+          chunk_index += 1;
 
-      chunks.insert(version_file.relative_filename, chunk_data);
+          if file_empty {
+            break;
+          }
+        }
 
-      i += 1;
-      let progress = i * 100 / total;
-      progress_sfn.call(Ok(progress), ThreadsafeFunctionCallMode::Blocking);
-    }
+        chunks.insert(version_file.relative_filename, chunk_data);
 
-    callback_sfn.call(
-      Ok(json!(chunks).to_string()),
-      ThreadsafeFunctionCallMode::Blocking,
-    );
+        i += 1;
+        let progress = i * 100 / total;
+        progress_sfn.call(Ok(progress), ThreadsafeFunctionCallMode::Blocking);
+      }
+
+      callback_borrow.call(
+        Ok(json!(chunks).to_string()),
+        ThreadsafeFunctionCallMode::Blocking,
+      );
+
+      Ok(())
+    };
+
+    let result = inner();
+    if let Err(generate_err) = result {
+      callback_borrow.call(Err(generate_err), ThreadsafeFunctionCallMode::Blocking);
+    }
   });
 
   Ok(())
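The heart of this hunk is the closure trick: a spawned thread has no caller to return an error to, so the body moves into an `inner` closure returning `Result`, `?` becomes usable throughout, and the single `if let Err` branch forwards failures to the JS callback rather than panicking the thread. The pattern in isolation, as a standalone sketch (the callback is a stand-in for the threadsafe functions above):

```rust
use anyhow::Result;
use std::thread;

// Sketch of the fallible-worker pattern: `on_done` stands in for the
// threadsafe JS callback; errors reach it instead of unwinding the thread.
fn spawn_worker(on_done: impl Fn(Result<String>) + Send + 'static) {
  thread::spawn(move || {
    let inner = move || -> Result<String> {
      let body = std::fs::read_to_string("manifest.json")?; // `?` now has a home
      Ok(body)
    };
    // Success and failure both funnel through the one callback.
    on_done(inner());
  });
}
```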
59 src/ssl.rs
@@ -1,3 +1,4 @@
+use anyhow::anyhow;
 use napi::Error;
 use rcgen::{
   CertificateParams, DistinguishedName, IsCa, KeyPair, KeyUsagePurpose, PublicKeyData,
@@ -10,7 +11,7 @@ use x509_parser::parse_x509_certificate;
 use x509_parser::pem::Pem;
 
 #[napi]
-pub fn generate_root_ca() -> Result<Vec<String>, Error> {
+pub fn generate_root_ca() -> anyhow::Result<Vec<String>> {
   let mut params = CertificateParams::default();
 
   let mut name = DistinguishedName::new();
@@ -22,7 +23,7 @@ pub fn generate_root_ca() -> Result<Vec<String>, Error> {
   params.not_before = OffsetDateTime::now_utc();
   params.not_after = OffsetDateTime::now_utc()
     .checked_add(Duration::days(365 * 1000))
-    .unwrap();
+    .ok_or(anyhow!("failed to calculate end date"))?;
 
   params.is_ca = IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
 
@@ -32,9 +33,8 @@ pub fn generate_root_ca() -> Result<Vec<String>, Error> {
     KeyUsagePurpose::DigitalSignature,
   ];
 
-  let key_pair = KeyPair::generate().map_err(|e| napi::Error::from_reason(e.to_string()))?;
-  let certificate = CertificateParams::self_signed(params, &key_pair)
-    .map_err(|e| napi::Error::from_reason(e.to_string()))?;
+  let key_pair = KeyPair::generate()?;
+  let certificate = CertificateParams::self_signed(params, &key_pair)?;
 
   // Returns certificate, then private key
   Ok(vec![certificate.pem(), key_pair.serialize_pem()])
@@ -46,13 +46,10 @@ pub fn generate_client_certificate(
   _client_name: String,
   root_ca: String,
   root_ca_private: String,
-) -> Result<Vec<String>, Error> {
-  let root_key_pair =
-    KeyPair::from_pem(&root_ca_private).map_err(|e| napi::Error::from_reason(e.to_string()))?;
-  let certificate_params = CertificateParams::from_ca_cert_pem(&root_ca)
-    .map_err(|e| napi::Error::from_reason(e.to_string()))?;
-  let root_ca = CertificateParams::self_signed(certificate_params, &root_key_pair)
-    .map_err(|e| napi::Error::from_reason(e.to_string()))?;
+) -> anyhow::Result<Vec<String>> {
+  let root_key_pair = KeyPair::from_pem(&root_ca_private)?;
+  let certificate_params = CertificateParams::from_ca_cert_pem(&root_ca)?;
+  let root_ca = CertificateParams::self_signed(certificate_params, &root_key_pair)?;
 
   let mut params = CertificateParams::default();
 
@@ -66,28 +63,24 @@ pub fn generate_client_certificate(
     KeyUsagePurpose::DataEncipherment,
   ];
 
-  let key_pair = KeyPair::generate_for(&rcgen::PKCS_ECDSA_P384_SHA384)
-    .map_err(|e| napi::Error::from_reason(e.to_string()))?;
-  let certificate = CertificateParams::signed_by(params, &key_pair, &root_ca, &root_key_pair)
-    .map_err(|e| napi::Error::from_reason(e.to_string()))?;
+  let key_pair = KeyPair::generate_for(&rcgen::PKCS_ECDSA_P384_SHA384)?;
+  let certificate = CertificateParams::signed_by(params, &key_pair, &root_ca, &root_key_pair)?;
 
   // Returns certificate, then private key
   Ok(vec![certificate.pem(), key_pair.serialize_pem()])
 }
 
 #[napi]
-pub fn verify_client_certificate(client_cert: String, root_ca: String) -> Result<bool, Error> {
+pub fn verify_client_certificate(client_cert: String, root_ca: String) -> anyhow::Result<bool> {
   let root_ca = Pem::iter_from_buffer(root_ca.as_bytes())
     .next()
-    .unwrap()
-    .unwrap();
-  let root_ca = root_ca.parse_x509().unwrap();
+    .ok_or(anyhow!("no certificates in root ca"))??;
+  let root_ca = root_ca.parse_x509()?;
 
   let client_cert = Pem::iter_from_buffer(client_cert.as_bytes())
     .next()
-    .unwrap()
-    .unwrap();
-  let client_cert = client_cert.parse_x509().unwrap();
+    .ok_or(anyhow!("No client certs in chain."))??;
+  let client_cert = client_cert.parse_x509()?;
 
   let valid = root_ca
     .verify_signature(Some(client_cert.public_key()))
@@ -97,31 +90,33 @@ pub fn verify_client_certificate(client_cert: String, root_ca: String) -> Result
 }
 
 #[napi]
-pub fn sign_nonce(private_key: String, nonce: String) -> Result<String, Error> {
+pub fn sign_nonce(private_key: String, nonce: String) -> anyhow::Result<String> {
   let rng = SystemRandom::new();
 
-  let key_pair = KeyPair::from_pem(&private_key).unwrap();
+  let key_pair = KeyPair::from_pem(&private_key)?;
 
   let key_pair = EcdsaKeyPair::from_pkcs8(
     &ring::signature::ECDSA_P384_SHA384_FIXED_SIGNING,
     &key_pair.serialize_der(),
     &rng,
   )
-  .unwrap();
+  .map_err(|e| napi::Error::from_reason(e.to_string()))?;
 
-  let signature = key_pair.sign(&rng, nonce.as_bytes()).unwrap();
+  let signature = key_pair
+    .sign(&rng, nonce.as_bytes())
+    .map_err(|e| napi::Error::from_reason(e.to_string()))?;
   let hex_signature = hex::encode(signature);
 
   Ok(hex_signature)
 }
 
 #[napi]
-pub fn verify_nonce(public_cert: String, nonce: String, signature: String) -> Result<bool, Error> {
-  let (_, pem) = x509_parser::pem::parse_x509_pem(public_cert.as_bytes()).unwrap();
-  let (_, spki) = parse_x509_certificate(&pem.contents).unwrap();
-  let public_key = SubjectPublicKeyInfo::from_der(spki.public_key().raw).unwrap();
+pub fn verify_nonce(public_cert: String, nonce: String, signature: String) -> anyhow::Result<bool> {
+  let (_, pem) = x509_parser::pem::parse_x509_pem(public_cert.as_bytes())?;
+  let (_, spki) = parse_x509_certificate(&pem.contents)?;
+  let public_key = SubjectPublicKeyInfo::from_der(spki.public_key().raw)?;
 
-  let raw_signature = hex::decode(signature).unwrap();
+  let raw_signature = hex::decode(signature)?;
 
   let valid = ring::signature::ECDSA_P384_SHA384_FIXED
     .verify(
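A detail worth noting in the PEM handling above: `Pem::iter_from_buffer(..).next()` yields an `Option<Result<Pem, _>>`, which is why the rewrite uses `ok_or(anyhow!(..))` followed by two `?`s — the first handles the missing-block case, the second unwraps the parse error. A standalone sketch of the idiom, assuming the same x509-parser API used in the diff:

```rust
use anyhow::anyhow;
use x509_parser::pem::Pem;

// Sketch of the `ok_or(...)??` idiom: the iterator item is an
// Option<Result<Pem, PEMError>>, so two `?`s are needed.
fn first_pem(buf: &[u8]) -> anyhow::Result<Pem> {
  Ok(
    Pem::iter_from_buffer(buf)
      .next()
      .ok_or(anyhow!("no PEM blocks in input"))??,
  )
}
```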
@@ -7,26 +7,29 @@ use std::{
   sync::Arc,
 };
 
+use anyhow::anyhow;
 use flate2::read::DeflateDecoder;
 use rawzip::{
-  CompressionMethod, FileReader, ZipArchive, ZipArchiveEntryWayfinder, ZipEntry,
-  ZipVerifier, RECOMMENDED_BUFFER_SIZE,
+  CompressionMethod, FileReader, ZipArchive, ZipArchiveEntryWayfinder, ZipEntry, ZipVerifier,
+  RECOMMENDED_BUFFER_SIZE,
 };
 
 use crate::version::types::{MinimumFileObject, VersionBackend, VersionFile};
 
-pub fn _list_files(vec: &mut Vec<PathBuf>, path: &Path) {
-  if metadata(path).unwrap().is_dir() {
-    let paths = fs::read_dir(path).unwrap();
+pub fn _list_files(vec: &mut Vec<PathBuf>, path: &Path) -> napi::Result<()> {
+  if metadata(path)?.is_dir() {
+    let paths = fs::read_dir(path)?;
     for path_result in paths {
-      let full_path = path_result.unwrap().path();
-      if metadata(&full_path).unwrap().is_dir() {
-        _list_files(vec, &full_path);
+      let full_path = path_result?.path();
+      if metadata(&full_path)?.is_dir() {
+        _list_files(vec, &full_path)?;
       } else {
         vec.push(full_path);
       }
     }
-  }
+  };
+
+  Ok(())
 }
 
 #[derive(Clone)]
@@ -34,23 +37,26 @@ pub struct PathVersionBackend {
   pub base_dir: PathBuf,
 }
 impl VersionBackend for PathVersionBackend {
-  fn list_files(&mut self) -> Vec<VersionFile> {
+  fn list_files(&mut self) -> anyhow::Result<Vec<VersionFile>> {
     let mut vec = Vec::new();
-    _list_files(&mut vec, &self.base_dir);
+    _list_files(&mut vec, &self.base_dir)?;
 
     let mut results = Vec::new();
 
     for pathbuf in vec.iter() {
-      let relative = pathbuf.strip_prefix(self.base_dir.clone()).unwrap();
+      let relative = pathbuf.strip_prefix(self.base_dir.clone())?;
 
       results.push(
-        self
-          .peek_file(relative.to_str().unwrap().to_owned())
-          .unwrap(),
+        self.peek_file(
+          relative
+            .to_str()
+            .ok_or(napi::Error::from_reason("Could not parse path"))?
+            .to_owned(),
+        )?,
       );
     }
 
-    results
+    Ok(results)
   }
 
   fn reader(
@@ -58,28 +64,28 @@ impl VersionBackend for PathVersionBackend {
     file: &VersionFile,
     start: u64,
     end: u64,
-  ) -> Option<Box<dyn MinimumFileObject + 'static>> {
-    let mut file = File::open(self.base_dir.join(file.relative_filename.clone())).ok()?;
+  ) -> anyhow::Result<Box<dyn MinimumFileObject + 'static>> {
+    let mut file = File::open(self.base_dir.join(file.relative_filename.clone()))?;
 
     if start != 0 {
-      file.seek(SeekFrom::Start(start)).ok()?;
+      file.seek(SeekFrom::Start(start))?;
     }
 
     if end != 0 {
-      return Some(Box::new(file.take(end - start)));
+      return Ok(Box::new(file.take(end - start)));
     }
 
-    return Some(Box::new(file));
+    Ok(Box::new(file))
   }
 
-  fn peek_file(&mut self, sub_path: String) -> Option<VersionFile> {
+  fn peek_file(&mut self, sub_path: String) -> anyhow::Result<VersionFile> {
     let pathbuf = self.base_dir.join(sub_path.clone());
     if !pathbuf.exists() {
-      return None;
+      return Err(anyhow!("Path doesn't exist."));
     };
 
-    let file = File::open(pathbuf.clone()).unwrap();
-    let metadata = file.try_clone().unwrap().metadata().unwrap();
+    let file = File::open(pathbuf.clone())?;
+    let metadata = file.try_clone()?.metadata()?;
     let permission_object = metadata.permissions();
     let permissions = {
       let perm: u32;
@@ -94,7 +100,7 @@ impl VersionBackend for PathVersionBackend {
       perm
     };
 
-    Some(VersionFile {
+    Ok(VersionFile {
       relative_filename: sub_path,
       permission: permissions,
       size: metadata.len(),
@@ -107,11 +113,11 @@ pub struct ZipVersionBackend {
   archive: Arc<ZipArchive<FileReader>>,
 }
 impl ZipVersionBackend {
-  pub fn new(archive: File) -> Self {
-    let archive = ZipArchive::from_file(archive, &mut [0u8; RECOMMENDED_BUFFER_SIZE]).unwrap();
-    Self {
+  pub fn new(archive: File) -> anyhow::Result<Self> {
+    let archive = ZipArchive::from_file(archive, &mut [0u8; RECOMMENDED_BUFFER_SIZE])?;
+    Ok(Self {
       archive: Arc::new(archive),
-    }
+    })
   }
 
   pub fn new_entry<'archive>(
@@ -120,27 +126,26 @@ impl ZipVersionBackend {
     compression_method: CompressionMethod,
     start: u64,
     end: u64,
-  ) -> ZipFileWrapper<'archive> {
+  ) -> anyhow::Result<ZipFileWrapper<'archive>> {
     let deflater: Box<dyn Read + Send + 'archive> = match compression_method {
       CompressionMethod::Store => Box::new(entry.reader()),
       CompressionMethod::Deflate => Box::new(DeflateDecoder::new(entry.reader())),
       CompressionMethod::Deflate64 => Box::new(DeflateDecoder::new(entry.reader())),
-      _ => panic!(
-        "unsupported decompression algorithm: {:?}",
-        compression_method
-      ),
+      _ => Err(anyhow!(
+        "unsupported decompression algorithm: {compression_method:?}"
+      ))?,
     };
 
     let mut verifier = entry.verifying_reader(deflater);
     if start != 0 {
-      io::copy(&mut (&mut verifier).take(start), &mut Sink::default()).unwrap();
+      io::copy(&mut (&mut verifier).take(start), &mut Sink::default())?;
     }
 
-    ZipFileWrapper {
+    Ok(ZipFileWrapper {
       reader: verifier,
       limit: (end - start) as usize,
       current: 0,
-    }
+    })
   }
 }
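The `panic!` in the unsupported-compression arm becomes `Err(anyhow!(..))?`, an idiom that early-returns from inside a `match` arm that must otherwise produce a value: applying `?` to an `Err` always diverges, so the arm type-checks against the others. A compact sketch (the method codes here are illustrative):

```rust
use anyhow::anyhow;

// `?` on an `Err` returns early, so the arm has type `!` (never)
// and can stand in for `&'static str` like the other arms.
fn pick_decoder(method: u16) -> anyhow::Result<&'static str> {
  let decoder = match method {
    0 => "store",
    8 => "deflate",
    other => Err(anyhow!("unsupported compression method: {other}"))?,
  };
  Ok(decoder)
}
```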
@@ -159,10 +164,8 @@ impl<'a> Read for ZipFileWrapper<'a> {
     let has_limit = self.limit != 0;
 
     // End this stream if the read is the right size
-    if has_limit {
-      if self.current >= self.limit {
-        return Ok(0);
-      }
+    if has_limit && self.current >= self.limit {
+      return Ok(0);
     }
 
     let read = self.reader.read(buf)?;
@@ -173,7 +176,7 @@ impl<'a> Read for ZipFileWrapper<'a> {
         return Ok(read - over);
       }
     }
-    return Ok(read);
+    Ok(read)
   }
 }
 //impl<'a> MinimumFileObject for ZipFileWrapper<'a> {}
@@ -182,40 +185,40 @@ impl ZipVersionBackend {
   fn find_wayfinder(
     &mut self,
     filename: &str,
-  ) -> Option<(ZipArchiveEntryWayfinder, CompressionMethod)> {
+  ) -> anyhow::Result<(ZipArchiveEntryWayfinder, CompressionMethod)> {
     let read_buffer = &mut [0u8; RECOMMENDED_BUFFER_SIZE];
     let mut entries = self.archive.entries(read_buffer);
     let entry = loop {
-      if let Some(v) = entries.next_entry().unwrap() {
-        if v.file_path().try_normalize().unwrap().as_ref() == filename {
-          break Some(v);
+      if let Some(v) = entries.next_entry()? {
+        if v.file_path().try_normalize()?.as_ref() == filename {
+          break Ok(v);
         }
       } else {
-        break None;
+        break Err(anyhow!("failed to fetch zip file header."));
      }
    }?;
 
    let wayfinder = entry.wayfinder();
 
-    Some((wayfinder, entry.compression_method()))
+    Ok((wayfinder, entry.compression_method()))
   }
 }
 impl VersionBackend for ZipVersionBackend {
-  fn list_files(&mut self) -> Vec<VersionFile> {
+  fn list_files(&mut self) -> anyhow::Result<Vec<VersionFile>> {
     let mut results = Vec::new();
     let read_buffer = &mut [0u8; RECOMMENDED_BUFFER_SIZE];
     let mut budget_iterator = self.archive.entries(read_buffer);
-    while let Some(entry) = budget_iterator.next_entry().unwrap() {
+    while let Some(entry) = budget_iterator.next_entry()? {
       if entry.is_dir() {
         continue;
       }
       results.push(VersionFile {
-        relative_filename: String::from(entry.file_path().try_normalize().unwrap()),
+        relative_filename: String::from(entry.file_path().try_normalize()?),
         permission: entry.mode().permissions(),
         size: entry.uncompressed_size_hint(),
       });
     }
-    results
+    Ok(results)
   }
 
   fn reader(
@@ -223,19 +226,21 @@ impl VersionBackend for ZipVersionBackend {
     file: &VersionFile,
     start: u64,
     end: u64,
-  ) -> Option<Box<dyn MinimumFileObject + '_>> {
+  ) -> anyhow::Result<Box<dyn MinimumFileObject + '_>> {
     let (wayfinder, compression_method) = self.find_wayfinder(&file.relative_filename)?;
-    let local_entry = self.archive.get_entry(wayfinder).unwrap();
+    let local_entry = self
+      .archive
+      .get_entry(wayfinder)?;
 
-    let wrapper = self.new_entry(local_entry, compression_method, start, end);
+    let wrapper = self.new_entry(local_entry, compression_method, start, end)?;
 
-    Some(Box::new(wrapper) as Box<dyn MinimumFileObject>)
+    Ok(Box::new(wrapper) as Box<dyn MinimumFileObject>)
   }
 
-  fn peek_file(&mut self, sub_path: String) -> Option<VersionFile> {
+  fn peek_file(&mut self, sub_path: String) -> anyhow::Result<VersionFile> {
     let (entry, _) = self.find_wayfinder(&sub_path)?;
 
-    Some(VersionFile {
+    Ok(VersionFile {
       relative_filename: sub_path,
       permission: 0,
       size: entry.uncompressed_size_hint(),
@@ -1,6 +1,4 @@
-use std::{
-  fmt::Debug, io::Read
-};
+use std::{fmt::Debug, io::Read};
 
 use dyn_clone::DynClone;
 use tokio::io::{self, AsyncRead};
@@ -12,7 +10,7 @@ pub struct VersionFile {
   pub size: u64,
 }
 
 pub trait MinimumFileObject: Read + Send {}
 impl<T: Read + Send> MinimumFileObject for T {}
 
 // Intentionally not a generic, because of types in read_file
@@ -30,16 +28,27 @@ impl<'a> AsyncRead for ReadToAsyncRead<'a> {
   ) -> std::task::Poll<io::Result<()>> {
     let mut read_buf = [0u8; ASYNC_READ_BUFFER_SIZE];
     let read_size = ASYNC_READ_BUFFER_SIZE.min(buf.remaining());
-    let read = self.inner.read(&mut read_buf[0..read_size]).unwrap();
-    buf.put_slice(&read_buf[0..read]);
-    std::task::Poll::Ready(Ok(()))
+    match self.inner.read(&mut read_buf[0..read_size]) {
+      Ok(read) => {
+        buf.put_slice(&read_buf[0..read]);
+        std::task::Poll::Ready(Ok(()))
+      }
+      Err(err) => {
+        std::task::Poll::Ready(Err(err))
+      },
+    }
   }
 }
 
 pub trait VersionBackend: DynClone {
-  fn list_files(&mut self) -> Vec<VersionFile>;
-  fn peek_file(&mut self, sub_path: String) -> Option<VersionFile>;
-  fn reader(&mut self, file: &VersionFile, start: u64, end: u64) -> Option<Box<dyn MinimumFileObject + '_>>;
+  fn list_files(&mut self) -> anyhow::Result<Vec<VersionFile>>;
+  fn peek_file(&mut self, sub_path: String) -> anyhow::Result<VersionFile>;
+  fn reader(
+    &mut self,
+    file: &VersionFile,
+    start: u64,
+    end: u64,
+  ) -> anyhow::Result<Box<dyn MinimumFileObject + '_>>;
 }
 
 dyn_clone::clone_trait_object!(VersionBackend);
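The `poll_read` rewrite is the one place `?` doesn't fit, since the return value must be wrapped in `Poll`; instead the `io::Result` is matched and the error surfaces as `Poll::Ready(Err(..))`. A self-contained sketch of the same adapter shape (assuming a tokio dependency; the type name is hypothetical):

```rust
use std::io::Read;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};

// A sync Read adapted to AsyncRead: read errors are forwarded as
// Poll::Ready(Err(..)) instead of panicking via unwrap.
struct SyncToAsync<R: Read + Unpin> {
  inner: R,
}

impl<R: Read + Unpin> AsyncRead for SyncToAsync<R> {
  fn poll_read(
    mut self: Pin<&mut Self>,
    _cx: &mut Context<'_>,
    buf: &mut ReadBuf<'_>,
  ) -> Poll<std::io::Result<()>> {
    let mut tmp = [0u8; 8192];
    let n = tmp.len().min(buf.remaining());
    match self.inner.read(&mut tmp[..n]) {
      Ok(read) => {
        buf.put_slice(&tmp[..read]);
        Poll::Ready(Ok(()))
      }
      Err(err) => Poll::Ready(Err(err)),
    }
  }
}
```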
@@ -1,7 +1,6 @@
-use std::{
-  collections::HashMap, fs::File, path::Path
-};
+use std::{collections::HashMap, fs::File, path::Path};
 
+use anyhow::anyhow;
 use napi::{bindgen_prelude::*, sys::napi_value__, tokio_stream::StreamExt};
 use tokio_util::codec::{BytesCodec, FramedRead};
@@ -15,7 +14,7 @@ use crate::version::{
 */
 pub fn create_backend_constructor<'a>(
   path: &Path,
-) -> Option<Box<dyn FnOnce() -> Box<dyn VersionBackend + Send + 'a>>> {
+) -> Option<Box<dyn FnOnce() -> Result<Box<dyn VersionBackend + Send + 'a>>>> {
   if !path.exists() {
     return None;
   }
@@ -23,12 +22,14 @@ pub fn create_backend_constructor<'a>(
   let is_directory = path.is_dir();
   if is_directory {
     let base_dir = path.to_path_buf();
-    return Some(Box::new(move || Box::new(PathVersionBackend { base_dir })));
+    return Some(Box::new(move || {
+      Ok(Box::new(PathVersionBackend { base_dir }))
+    }));
   };
 
   if path.to_string_lossy().ends_with(".zip") {
-    let f = File::open(path.to_path_buf()).unwrap();
-    return Some(Box::new(|| Box::new(ZipVersionBackend::new(f))));
+    let f = File::open(path.to_path_buf()).ok()?;
+    return Some(Box::new(|| Ok(Box::new(ZipVersionBackend::new(f)?))));
   }
 
   None
@@ -58,10 +59,13 @@ impl<'a> DropletHandler<'a> {
     let fs_path = Path::new(&path);
     let constructor = create_backend_constructor(fs_path)?;
 
-    let existing_backend = self.backend_cache.entry(path).or_insert_with(|| {
-      let backend = constructor();
-      backend
-    });
+    let existing_backend = match self.backend_cache.entry(path) {
+      std::collections::hash_map::Entry::Occupied(occupied_entry) => occupied_entry.into_mut(),
+      std::collections::hash_map::Entry::Vacant(vacant_entry) => {
+        let backend = constructor().ok()?;
+        vacant_entry.insert(backend)
+      }
+    };
 
     Some(existing_backend)
   }
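`or_insert_with` takes an infallible closure, so once the constructor returns a `Result` the cache lookup has to be written as an explicit `match` over the `Entry`, as above. The same pattern reduced to a sketch (types simplified, names hypothetical):

```rust
use std::collections::HashMap;

// Fallible get-or-create over a HashMap: bail with None when the
// constructor fails, instead of inserting a panicked placeholder.
fn get_or_create<'m>(
  cache: &'m mut HashMap<String, Vec<u8>>,
  key: String,
  make: impl FnOnce() -> Option<Vec<u8>>,
) -> Option<&'m mut Vec<u8>> {
  match cache.entry(key) {
    std::collections::hash_map::Entry::Occupied(e) => Some(e.into_mut()),
    std::collections::hash_map::Entry::Vacant(e) => Some(e.insert(make()?)),
  }
}
```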
@@ -80,7 +84,7 @@ impl<'a> DropletHandler<'a> {
     let backend = self
       .create_backend_for_path(path)
       .ok_or(napi::Error::from_reason("No backend for path"))?;
-    let files = backend.list_files();
+    let files = backend.list_files()?;
     Ok(files.into_iter().map(|e| e.relative_filename).collect())
   }
@@ -90,11 +94,9 @@ impl<'a> DropletHandler<'a> {
       .create_backend_for_path(path)
       .ok_or(napi::Error::from_reason("No backend for path"))?;
 
-    let file = backend
-      .peek_file(sub_path)
-      .ok_or(napi::Error::from_reason("Can't find file to peek"))?;
+    let file = backend.peek_file(sub_path)?;
 
-    return Ok(file.size.try_into().unwrap());
+    Ok(file.size)
   }
 
   #[napi]
@@ -106,28 +108,24 @@ impl<'a> DropletHandler<'a> {
     env: Env,
     start: Option<BigInt>,
     end: Option<BigInt>,
-  ) -> Result<JsDropStreamable> {
+  ) -> anyhow::Result<JsDropStreamable> {
     let stream = reference.share_with(env, |handler| {
       let backend = handler
         .create_backend_for_path(path)
-        .ok_or(napi::Error::from_reason("Failed to create backend."))?;
+        .ok_or(anyhow!("Failed to create backend."))?;
       let version_file = VersionFile {
         relative_filename: sub_path,
         permission: 0, // Shouldn't matter
         size: 0,       // Shouldn't matter
       };
-      // Use `?` operator for cleaner error propagation from `Option`
-      let reader = backend
-        .reader(
-          &version_file,
-          start.map(|e| e.get_u64().1).unwrap_or(0),
-          end.map(|e| e.get_u64().1).unwrap_or(0),
-        )
-        .ok_or(napi::Error::from_reason("Failed to create reader."))?;
+      let reader = backend.reader(
+        &version_file,
+        start.map(|e| e.get_u64().1).unwrap_or(0),
+        end.map(|e| e.get_u64().1).unwrap_or(0),
+      )?;
 
-      let async_reader = ReadToAsyncRead {
-        inner: reader,
-      };
+      let async_reader = ReadToAsyncRead { inner: reader };
 
       // Create a FramedRead stream with BytesCodec for chunking
       let stream = FramedRead::new(async_reader, BytesCodec::new())
@@ -137,12 +135,12 @@ impl<'a> DropletHandler<'a> {
         // Apply Result::map to transform Ok(BytesMut) to Ok(Vec<u8>)
         .map(|bytes| bytes.to_vec())
-        // Apply Result::map_err to transform Err(std::io::Error) to Err(napi::Error)
-        .map_err(|e| napi::Error::from(e)) // napi::Error implements From<tokio::io::Error>
+        .map_err(napi::Error::from) // napi::Error implements From<tokio::io::Error>
     });
     // Create the napi-rs ReadableStream from the tokio_stream::Stream
-    // The unwrap() here means if stream creation fails, it will panic.
-    // For a production system, consider returning Result<Option<...>> and handling this.
-    Ok(ReadableStream::create_with_stream_bytes(&env, stream).unwrap())
+    ReadableStream::create_with_stream_bytes(&env, stream)
   })?;
 
   Ok(JsDropStreamable { inner: stream })