Remove deprecated add member (#5611)

* chore: remove deprecated add member directly

* chore: cargo clippy

* chore: cargo clippy

* chore: cargo clippy
Zack
2024-06-24 14:19:36 +08:00
committed by GitHub
parent 7586a0ed48
commit d2ca41c8f5
14 changed files with 65 additions and 127 deletions


@@ -314,12 +314,12 @@ impl StorageService for StorageServiceImpl {
// When resuming an upload, check if the upload_id is empty.
// If the upload_id is empty, the upload has likely not been created yet.
// If the upload_id is not empty, verify which parts have already been uploaded.
- select_upload_file(conn, &workspace_id, &parent_dir, &file_id)?.and_then(|record| {
+ select_upload_file(conn, workspace_id, parent_dir, file_id)?.map(|record| {
if record.upload_id.is_empty() {
- Some((record, vec![]))
+ (record, vec![])
} else {
let parts = select_upload_parts(conn, &record.upload_id).unwrap_or_default();
- Some((record, parts))
+ (record, parts)
}
}),
)
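
A note on the first hunk: `clippy::bind_instead_of_map` fires when an `and_then` closure returns `Some(..)` on every branch, since `map` does the wrapping itself. A minimal sketch with simplified types (not the project's own):

fn main() {
  let record: Option<i32> = Some(7);
  // Every branch returns `Some(..)`, so `and_then` adds nothing over
  // `map`; clippy::bind_instead_of_map flags this form.
  let flagged = record.and_then(|r| if r > 0 { Some((r, true)) } else { Some((r, false)) });
  // The rewritten form lets `map` do the wrapping.
  let fixed = record.map(|r| if r > 0 { (r, true) } else { (r, false) });
  assert_eq!(flagged, fixed);
}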
@@ -358,7 +358,7 @@ async fn create_upload_record(
let content_type = mime_guess::from_path(&local_file_path)
.first_or_octet_stream()
.to_string();
- let file_id = format!("{}.{}", fxhash::hash(&chunked_bytes.data).to_string(), ext);
+ let file_id = format!("{}.{}", fxhash::hash(&chunked_bytes.data), ext);
let record = UploadFileTable {
workspace_id,
file_id,
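
The dropped `.to_string()` is `clippy::to_string_in_format_args`: `format!` already renders arguments through `Display`, so converting first only allocates a throwaway `String`. A minimal sketch with placeholder values:

fn main() {
  let hash = 12345usize;
  let ext = "png";
  // The extra `.to_string()` builds an intermediate String that
  // `format!` would render anyway (clippy::to_string_in_format_args).
  let flagged = format!("{}.{}", hash.to_string(), ext);
  let fixed = format!("{}.{}", hash, ext);
  assert_eq!(flagged, fixed);
}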
@@ -430,10 +430,10 @@ async fn start_upload(
upload_file.file_id,
chunked_bytes.iter().count()
);
- let mut iter = chunked_bytes.iter().enumerate();
+ let iter = chunked_bytes.iter().enumerate();
let mut completed_parts = Vec::new();
- while let Some((index, chunk_bytes)) = iter.next() {
+ for (index, chunk_bytes) in iter {
let part_number = index as i32 + 1;
trace!(
"[File] {} uploading part: {}, len:{}KB",
@@ -443,8 +443,8 @@ async fn start_upload(
);
// start uploading parts
match upload_part(
- &cloud_service,
- &user_service,
+ cloud_service,
+ user_service,
&upload_file.workspace_id,
&upload_file.parent_dir,
&upload_file.upload_id,
@@ -476,8 +476,8 @@ async fn start_upload(
// mark it as completed
complete_upload(
- &cloud_service,
- &user_service,
+ cloud_service,
+ user_service,
temp_storage,
&upload_file,
completed_parts,
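
These two hunks (and the `upload_part` body below) fix `clippy::needless_borrow`: the locals are already references, so `&cloud_service` builds a `&&T` that the compiler immediately auto-derefs away. A self-contained sketch with a stand-in service type (`Arc<String>` here, not the real trait object):

use std::sync::Arc;

// Stand-in for the real StorageCloudService trait object.
fn upload_part(cloud_service: &Arc<String>) {
  println!("uploading via {}", cloud_service);
}

fn start_upload(cloud_service: &Arc<String>) {
  // `cloud_service` is already `&Arc<String>`; `&cloud_service` would be
  // `&&Arc<String>`, which clippy::needless_borrow flags.
  upload_part(cloud_service);
}

fn main() {
  start_upload(&Arc::new("s3".to_string()));
}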
@@ -540,6 +540,7 @@ async fn resume_upload(
Ok(())
}
+ #[allow(clippy::too_many_arguments)]
#[instrument(level = "debug", skip_all)]
async fn upload_part(
cloud_service: &Arc<dyn StorageCloudService>,
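
The added `#[allow(clippy::too_many_arguments)]` is there because clippy warns once a signature passes its argument limit, seven by default; the attribute opts this one function out rather than bundling the parameters into a struct. Illustrative only:

// clippy::too_many_arguments fires at eight or more parameters by default;
// the allow silences it for this function alone.
#[allow(clippy::too_many_arguments)]
fn upload_part_sketch(a: i32, b: i32, c: i32, d: i32, e: i32, f: i32, g: i32, h: i32) -> i32 {
  a + b + c + d + e + f + g + h
}

fn main() {
  assert_eq!(upload_part_sketch(1, 1, 1, 1, 1, 1, 1, 1), 8);
}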
@@ -553,10 +554,10 @@ async fn upload_part(
) -> Result<UploadPartResponse, FlowyError> {
let resp = cloud_service
.upload_part(
- &workspace_id,
- &parent_dir,
- &upload_id,
- &file_id,
+ workspace_id,
+ parent_dir,
+ upload_id,
+ file_id,
part_number,
body,
)


@@ -13,7 +13,7 @@ use std::path::PathBuf;
use std::time::Duration;
pub fn test_database() -> (Database, PathBuf) {
- let db_path = temp_dir().join(&format!("test-{}.db", generate_random_string(8)));
+ let db_path = temp_dir().join(format!("test-{}.db", generate_random_string(8)));
(flowy_sqlite::init(&db_path).unwrap(), db_path)
}
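
The test fix is `clippy::needless_borrow` again: `Path::join` accepts `impl AsRef<Path>`, which the owned `String` from `format!` satisfies directly, so the `&` adds nothing. A runnable sketch with a placeholder suffix instead of `generate_random_string`:

use std::env::temp_dir;

fn main() {
  // `join` accepts `impl AsRef<Path>`; the String works as-is, so
  // `&format!(..)` is a needless borrow.
  let db_path = temp_dir().join(format!("test-{}.db", 8));
  println!("{}", db_path.display());
}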
@@ -102,7 +102,7 @@ async fn test_upload_part_test() {
// get all existing parts
let mut conn = db.get_connection().unwrap();
- let parts = select_upload_parts(&mut *conn, &upload_id).unwrap();
+ let parts = select_upload_parts(&mut conn, &upload_id).unwrap();
assert_eq!(parts.len(), 2);
assert_eq!(parts[0].part_num, 1);
assert_eq!(parts[1].part_num, 2);
@@ -112,7 +112,7 @@ async fn test_upload_part_test() {
delete_upload_file(conn, &upload_id).unwrap();
let mut conn = db.get_connection().unwrap();
- let parts = select_upload_parts(&mut *conn, &upload_id).unwrap();
+ let parts = select_upload_parts(&mut conn, &upload_id).unwrap();
assert!(parts.is_empty())
}
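
Both `select_upload_parts` hunks drop an explicit reborrow, which `clippy::explicit_auto_deref` flags: when a wrapper implements `DerefMut`, `&mut conn` already coerces to `&mut Target`, so `&mut *conn` spells out what the compiler does anyway. A sketch with a toy pooled-connection type (the real one comes from the sqlite pool):

use std::ops::{Deref, DerefMut};

// Toy stand-in for a pooled connection wrapper.
struct PooledConn(String);

impl Deref for PooledConn {
  type Target = String;
  fn deref(&self) -> &String {
    &self.0
  }
}

impl DerefMut for PooledConn {
  fn deref_mut(&mut self) -> &mut String {
    &mut self.0
  }
}

fn select_parts(conn: &mut String) {
  conn.push_str(" queried");
}

fn main() {
  let mut conn = PooledConn(String::from("conn"));
  select_parts(&mut *conn); // explicit reborrow, flagged
  select_parts(&mut conn); // deref coercion gets there on its own
  assert_eq!(conn.0, "conn queried queried");
}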
@@ -165,7 +165,7 @@ pub async fn create_upload_file_record(
let file_id = fxhash::hash(&chunked_bytes.data).to_string();
// Create UploadFileTable record
- let upload_file = UploadFileTable {
+ UploadFileTable {
workspace_id,
file_id,
upload_id,
@@ -175,7 +175,5 @@ pub async fn create_upload_file_record(
chunk_size: MIN_CHUNK_SIZE as i32,
num_chunk: chunked_bytes.offsets.len() as i32,
created_at: chrono::Utc::now().timestamp(),
- };
- upload_file
+ }
}
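
The final two hunks fix `clippy::let_and_return`: binding the freshly built `UploadFileTable` to a name only to return it on the next line is redundant, because the struct literal can stand as the tail expression. Sketch with a simplified record type:

struct UploadRecord {
  file_id: String,
}

// Flagged form: bind, then immediately return the binding.
fn build_flagged() -> UploadRecord {
  let record = UploadRecord { file_id: "abc".to_string() };
  record
}

// Fixed form: the literal is the tail expression.
fn build_fixed() -> UploadRecord {
  UploadRecord { file_id: "abc".to_string() }
}

fn main() {
  assert_eq!(build_flagged().file_id, build_fixed().file_id);
}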