refactor: File upload (#5542)

* chore: rename service

* refactor: upload

* chore: save upload meta data

* chore: add sql test

* chore: uploader

* chore: fix upload

* chore: cache file and remove after finish

* chore: retry upload

* chore: pause when network unreachable

* chore: add event test

* chore: add test

* chore: clippy

* chore: update client-api commit id

* chore: fix flutter test
This commit is contained in:
Nathan.fooo 2024-06-20 07:44:57 +08:00 committed by GitHub
parent fdaca36b87
commit b64da2c02f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
61 changed files with 2687 additions and 643 deletions

View File

@ -114,14 +114,14 @@ class DocumentService {
/// Upload a file to the cloud storage.
Future<FlowyResult<UploadedFilePB, FlowyError>> uploadFile({
required String localFilePath,
bool isAsync = true,
required String documentId,
}) async {
final workspace = await FolderEventReadCurrentWorkspace().send();
return workspace.fold((l) async {
final payload = UploadFileParamsPB(
workspaceId: l.id,
localFilePath: localFilePath,
isAsync: isAsync,
documentId: documentId,
);
final result = await DocumentEventUploadFile(payload).send();
return result;

View File

@ -53,13 +53,6 @@ extension PasteFromImage on EditorState {
await File(copyToPath).writeAsBytes(imageBytes);
final String? path;
if (context.mounted) {
showSnackBarMessage(
context,
LocaleKeys.document_imageBlock_imageIsUploading.tr(),
);
}
if (isLocalMode) {
path = await saveImageToLocalStorage(copyToPath);
} else {

View File

@ -49,9 +49,11 @@ Future<(String? path, String? errorMessage)> saveImageToCloudStorage(
);
}
final documentService = DocumentService();
Log.debug("Uploading image local path: $localImagePath");
final result = await documentService.uploadFile(
localFilePath: localImagePath,
isAsync: false,
// TODO(lucas): replace with actual documentId
documentId: "temp",
);
return result.fold(
(s) async {

View File

@ -172,7 +172,7 @@ checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "app-error"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -192,7 +192,7 @@ dependencies = [
[[package]]
name = "appflowy-ai-client"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
@ -772,7 +772,7 @@ dependencies = [
[[package]]
name = "client-api"
version = "0.2.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"again",
"anyhow",
@ -782,17 +782,16 @@ dependencies = [
"brotli",
"bytes",
"chrono",
"client-api-entity",
"client-websocket",
"collab",
"collab-entity",
"collab-rt-entity",
"collab-rt-protocol",
"database-entity",
"futures-core",
"futures-util",
"getrandom 0.2.10",
"gotrue",
"gotrue-entity",
"infra",
"mime",
"parking_lot 0.12.1",
"prost",
@ -816,10 +815,22 @@ dependencies = [
"yrs",
]
[[package]]
name = "client-api-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"collab-entity",
"collab-rt-entity",
"database-entity",
"gotrue-entity",
"shared-entity",
]
[[package]]
name = "client-websocket"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"futures-channel",
"futures-util",
@ -1059,7 +1070,7 @@ dependencies = [
[[package]]
name = "collab-rt-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -1084,7 +1095,7 @@ dependencies = [
[[package]]
name = "collab-rt-protocol"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"async-trait",
@ -1441,7 +1452,7 @@ checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308"
[[package]]
name = "database-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -1933,6 +1944,7 @@ dependencies = [
"flowy-server-pub",
"flowy-sqlite",
"flowy-storage",
"flowy-storage-pub",
"flowy-user",
"flowy-user-pub",
"futures",
@ -2058,7 +2070,7 @@ dependencies = [
"flowy-document-pub",
"flowy-error",
"flowy-notification",
"flowy-storage",
"flowy-storage-pub",
"futures",
"getrandom 0.2.10",
"indexmap 2.1.0",
@ -2266,6 +2278,7 @@ dependencies = [
"flowy-search-pub",
"flowy-server-pub",
"flowy-storage",
"flowy-storage-pub",
"flowy-user-pub",
"futures",
"futures-util",
@ -2324,14 +2337,16 @@ dependencies = [
name = "flowy-storage"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"chrono",
"flowy-error",
"flowy-sqlite",
"flowy-storage-pub",
"fxhash",
"lib-infra",
"mime",
"mime_guess",
"reqwest",
"serde",
"serde_json",
"tokio",
@ -2339,6 +2354,23 @@ dependencies = [
"url",
]
[[package]]
name = "flowy-storage-pub"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"client-api-entity",
"flowy-error",
"lib-infra",
"mime",
"mime_guess",
"serde",
"serde_json",
"tokio",
]
[[package]]
name = "flowy-user"
version = "0.1.0"
@ -2862,7 +2894,7 @@ dependencies = [
[[package]]
name = "gotrue"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"futures-util",
@ -2879,7 +2911,7 @@ dependencies = [
[[package]]
name = "gotrue-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -3311,12 +3343,14 @@ dependencies = [
[[package]]
name = "infra"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
"reqwest",
"serde",
"serde_json",
"tokio",
"tracing",
]
@ -5801,7 +5835,7 @@ dependencies = [
[[package]]
name = "shared-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",

View File

@ -52,7 +52,7 @@ collab-user = { version = "0.2" }
# Run the script:
# scripts/tool/update_client_api_rev.sh new_rev_id
# ⚠️⚠️⚠️️
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "430e3e15c9a1dc6aba2a9599d17d946a61ac7cae" }
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "6262816043efeede8823d7a7ea252083adf407e9" }
[dependencies]
serde_json.workspace = true

View File

@ -122,7 +122,6 @@ dependencies = [
"flowy-notification",
"flowy-server",
"flowy-server-pub",
"flowy-storage",
"flowy-user-pub",
"js-sys",
"lazy_static",
@ -216,7 +215,7 @@ checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "app-error"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -236,7 +235,7 @@ dependencies = [
[[package]]
name = "appflowy-ai-client"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
@ -562,7 +561,7 @@ dependencies = [
[[package]]
name = "client-api"
version = "0.2.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"again",
"anyhow",
@ -572,17 +571,16 @@ dependencies = [
"brotli",
"bytes",
"chrono",
"client-api-entity",
"client-websocket",
"collab",
"collab-entity",
"collab-rt-entity",
"collab-rt-protocol",
"database-entity",
"futures-core",
"futures-util",
"getrandom 0.2.12",
"gotrue",
"gotrue-entity",
"infra",
"mime",
"parking_lot 0.12.1",
"prost",
@ -606,10 +604,22 @@ dependencies = [
"yrs",
]
[[package]]
name = "client-api-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"collab-entity",
"collab-rt-entity",
"database-entity",
"gotrue-entity",
"shared-entity",
]
[[package]]
name = "client-websocket"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"futures-channel",
"futures-util",
@ -787,7 +797,7 @@ dependencies = [
[[package]]
name = "collab-rt-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -812,7 +822,7 @@ dependencies = [
[[package]]
name = "collab-rt-protocol"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"async-trait",
@ -981,7 +991,7 @@ dependencies = [
"cssparser-macros",
"dtoa-short",
"itoa",
"phf 0.8.0",
"phf 0.11.2",
"smallvec",
]
@ -1026,7 +1036,7 @@ checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5"
[[package]]
name = "database-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -1380,7 +1390,7 @@ dependencies = [
"flowy-document-pub",
"flowy-error",
"flowy-notification",
"flowy-storage",
"flowy-storage-pub",
"futures",
"getrandom 0.2.12",
"indexmap",
@ -1438,8 +1448,10 @@ dependencies = [
"fancy-regex 0.11.0",
"flowy-codegen",
"flowy-derive",
"flowy-sqlite",
"lib-dispatch",
"protobuf",
"r2d2",
"reqwest",
"serde",
"serde_json",
@ -1550,6 +1562,7 @@ dependencies = [
"flowy-search-pub",
"flowy-server-pub",
"flowy-storage",
"flowy-storage-pub",
"flowy-user-pub",
"futures",
"futures-util",
@ -1608,14 +1621,16 @@ dependencies = [
name = "flowy-storage"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"chrono",
"flowy-error",
"flowy-sqlite",
"flowy-storage-pub",
"fxhash",
"lib-infra",
"mime",
"mime_guess",
"reqwest",
"serde",
"serde_json",
"tokio",
@ -1623,6 +1638,23 @@ dependencies = [
"url",
]
[[package]]
name = "flowy-storage-pub"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"client-api-entity",
"flowy-error",
"lib-infra",
"mime",
"mime_guess",
"serde",
"serde_json",
"tokio",
]
[[package]]
name = "flowy-user-pub"
version = "0.1.0"
@ -1887,7 +1919,7 @@ dependencies = [
[[package]]
name = "gotrue"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"futures-util",
@ -1904,7 +1936,7 @@ dependencies = [
[[package]]
name = "gotrue-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -2205,12 +2237,14 @@ dependencies = [
[[package]]
name = "infra"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
"reqwest",
"serde",
"serde_json",
"tokio",
"tracing",
]
@ -2932,7 +2966,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3dfb61232e34fcb633f43d12c58f83c1df82962dcdfa565a4e866ffc17dafe12"
dependencies = [
"phf_macros",
"phf_macros 0.8.0",
"phf_shared 0.8.0",
"proc-macro-hack",
]
@ -2952,6 +2986,7 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc"
dependencies = [
"phf_macros 0.11.2",
"phf_shared 0.11.2",
]
@ -3019,6 +3054,19 @@ dependencies = [
"syn 1.0.109",
]
[[package]]
name = "phf_macros"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b"
dependencies = [
"phf_generator 0.11.2",
"phf_shared 0.11.2",
"proc-macro2",
"quote",
"syn 2.0.48",
]
[[package]]
name = "phf_shared"
version = "0.8.0"
@ -3903,7 +3951,7 @@ dependencies = [
[[package]]
name = "shared-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",

View File

@ -20,7 +20,6 @@ flowy-derive = { path = "../../rust-lib/build-tool/flowy-derive" }
flowy-codegen = { path = "../../rust-lib/build-tool/flowy-codegen" }
flowy-document = { path = "../../rust-lib/flowy-document" }
flowy-folder = { path = "../../rust-lib/flowy-folder" }
flowy-storage = { path = "../../rust-lib/flowy-storage" }
lib-infra = { path = "../../rust-lib/lib-infra" }
bytes = { version = "1.5" }
protobuf = { version = "2.28.0" }
@ -55,7 +54,7 @@ yrs = "0.18.8"
# Run the script:
# scripts/tool/update_client_api_rev.sh new_rev_id
# ⚠️⚠️⚠️️
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "430e3e15c9a1dc6aba2a9599d17d946a61ac7cae" }
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "6262816043efeede8823d7a7ea252083adf407e9" }
[profile.dev]
opt-level = 0

View File

@ -21,7 +21,6 @@ tokio-stream.workspace = true
af-user.workspace = true
af-persistence.workspace = true
flowy-storage = { workspace = true }
flowy-notification = { workspace = true, features = ["web_ts"] }
flowy-user-pub = { workspace = true }
flowy-server = { workspace = true }

View File

@ -156,14 +156,14 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.81"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"
checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "app-error"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -183,7 +183,7 @@ dependencies = [
[[package]]
name = "appflowy-ai-client"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
@ -746,7 +746,7 @@ dependencies = [
[[package]]
name = "client-api"
version = "0.2.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"again",
"anyhow",
@ -756,17 +756,16 @@ dependencies = [
"brotli",
"bytes",
"chrono",
"client-api-entity",
"client-websocket",
"collab",
"collab-entity",
"collab-rt-entity",
"collab-rt-protocol",
"database-entity",
"futures-core",
"futures-util",
"getrandom 0.2.12",
"gotrue",
"gotrue-entity",
"infra",
"mime",
"parking_lot 0.12.1",
"prost",
@ -790,10 +789,22 @@ dependencies = [
"yrs",
]
[[package]]
name = "client-api-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"collab-entity",
"collab-rt-entity",
"database-entity",
"gotrue-entity",
"shared-entity",
]
[[package]]
name = "client-websocket"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"futures-channel",
"futures-util",
@ -1042,7 +1053,7 @@ dependencies = [
[[package]]
name = "collab-rt-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -1067,7 +1078,7 @@ dependencies = [
[[package]]
name = "collab-rt-protocol"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"async-trait",
@ -1317,7 +1328,7 @@ dependencies = [
"cssparser-macros",
"dtoa-short",
"itoa 1.0.10",
"phf 0.11.2",
"phf 0.8.0",
"smallvec",
]
@ -1428,7 +1439,7 @@ checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5"
[[package]]
name = "database-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -1970,6 +1981,7 @@ dependencies = [
"flowy-server-pub",
"flowy-sqlite",
"flowy-storage",
"flowy-storage-pub",
"flowy-user",
"flowy-user-pub",
"futures",
@ -2095,7 +2107,7 @@ dependencies = [
"flowy-document-pub",
"flowy-error",
"flowy-notification",
"flowy-storage",
"flowy-storage-pub",
"futures",
"getrandom 0.2.12",
"indexmap 2.2.6",
@ -2303,6 +2315,7 @@ dependencies = [
"flowy-search-pub",
"flowy-server-pub",
"flowy-storage",
"flowy-storage-pub",
"flowy-user-pub",
"futures",
"futures-util",
@ -2361,14 +2374,16 @@ dependencies = [
name = "flowy-storage"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"chrono",
"flowy-error",
"flowy-sqlite",
"flowy-storage-pub",
"fxhash",
"lib-infra",
"mime",
"mime_guess",
"reqwest",
"serde",
"serde_json",
"tokio",
@ -2376,6 +2391,23 @@ dependencies = [
"url",
]
[[package]]
name = "flowy-storage-pub"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"client-api-entity",
"flowy-error",
"lib-infra",
"mime",
"mime_guess",
"serde",
"serde_json",
"tokio",
]
[[package]]
name = "flowy-user"
version = "0.1.0"
@ -2936,7 +2968,7 @@ dependencies = [
[[package]]
name = "gotrue"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"futures-util",
@ -2953,7 +2985,7 @@ dependencies = [
[[package]]
name = "gotrue-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -3390,12 +3422,14 @@ dependencies = [
[[package]]
name = "infra"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
"reqwest",
"serde",
"serde_json",
"tokio",
"tracing",
]
@ -4897,7 +4931,7 @@ checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2"
dependencies = [
"bytes",
"heck 0.4.1",
"itertools 0.11.0",
"itertools 0.10.5",
"log",
"multimap",
"once_cell",
@ -4918,7 +4952,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
dependencies = [
"anyhow",
"itertools 0.11.0",
"itertools 0.10.5",
"proc-macro2",
"quote",
"syn 2.0.55",
@ -5896,7 +5930,7 @@ dependencies = [
[[package]]
name = "shared-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",

View File

@ -52,7 +52,7 @@ collab-user = { version = "0.2" }
# Run the script:
# scripts/tool/update_client_api_rev.sh new_rev_id
# ⚠️⚠️⚠️️
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "430e3e15c9a1dc6aba2a9599d17d946a61ac7cae" }
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "6262816043efeede8823d7a7ea252083adf407e9" }
[dependencies]
serde_json.workspace = true

View File

@ -163,7 +163,7 @@ checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "app-error"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -183,7 +183,7 @@ dependencies = [
[[package]]
name = "appflowy-ai-client"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
@ -664,7 +664,7 @@ dependencies = [
[[package]]
name = "client-api"
version = "0.2.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"again",
"anyhow",
@ -674,17 +674,16 @@ dependencies = [
"brotli",
"bytes",
"chrono",
"client-api-entity",
"client-websocket",
"collab",
"collab-entity",
"collab-rt-entity",
"collab-rt-protocol",
"database-entity",
"futures-core",
"futures-util",
"getrandom 0.2.10",
"gotrue",
"gotrue-entity",
"infra",
"mime",
"parking_lot 0.12.1",
"prost",
@ -708,10 +707,22 @@ dependencies = [
"yrs",
]
[[package]]
name = "client-api-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"collab-entity",
"collab-rt-entity",
"database-entity",
"gotrue-entity",
"shared-entity",
]
[[package]]
name = "client-websocket"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"futures-channel",
"futures-util",
@ -920,7 +931,7 @@ dependencies = [
[[package]]
name = "collab-rt-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bincode",
@ -945,7 +956,7 @@ dependencies = [
[[package]]
name = "collab-rt-protocol"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"async-trait",
@ -1165,7 +1176,7 @@ dependencies = [
"cssparser-macros",
"dtoa-short",
"itoa",
"phf 0.11.2",
"phf 0.8.0",
"smallvec",
]
@ -1265,7 +1276,7 @@ checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308"
[[package]]
name = "database-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -1519,6 +1530,7 @@ dependencies = [
"flowy-server",
"flowy-server-pub",
"flowy-storage",
"flowy-storage-pub",
"flowy-user",
"flowy-user-pub",
"futures",
@ -1753,6 +1765,7 @@ dependencies = [
"flowy-server-pub",
"flowy-sqlite",
"flowy-storage",
"flowy-storage-pub",
"flowy-user",
"flowy-user-pub",
"futures",
@ -1879,7 +1892,7 @@ dependencies = [
"flowy-document-pub",
"flowy-error",
"flowy-notification",
"flowy-storage",
"flowy-storage-pub",
"futures",
"getrandom 0.2.10",
"indexmap 2.1.0",
@ -2092,6 +2105,7 @@ dependencies = [
"flowy-search-pub",
"flowy-server-pub",
"flowy-storage",
"flowy-storage-pub",
"flowy-user-pub",
"futures",
"futures-util",
@ -2154,19 +2168,40 @@ dependencies = [
name = "flowy-storage"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"chrono",
"flowy-error",
"flowy-sqlite",
"flowy-storage-pub",
"fxhash",
"lib-infra",
"mime",
"mime_guess",
"reqwest",
"rand 0.8.5",
"serde",
"serde_json",
"tokio",
"tracing",
"url",
"uuid",
]
[[package]]
name = "flowy-storage-pub"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bytes",
"client-api-entity",
"flowy-error",
"lib-infra",
"mime",
"mime_guess",
"serde",
"serde_json",
"tokio",
]
[[package]]
@ -2532,7 +2567,7 @@ dependencies = [
[[package]]
name = "gotrue"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"futures-util",
@ -2549,7 +2584,7 @@ dependencies = [
[[package]]
name = "gotrue-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",
@ -2914,12 +2949,14 @@ dependencies = [
[[package]]
name = "infra"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"bytes",
"reqwest",
"serde",
"serde_json",
"tokio",
"tracing",
]
@ -3790,7 +3827,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3dfb61232e34fcb633f43d12c58f83c1df82962dcdfa565a4e866ffc17dafe12"
dependencies = [
"phf_macros 0.8.0",
"phf_macros",
"phf_shared 0.8.0",
"proc-macro-hack",
]
@ -3810,7 +3847,6 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc"
dependencies = [
"phf_macros 0.11.2",
"phf_shared 0.11.2",
]
@ -3878,19 +3914,6 @@ dependencies = [
"syn 1.0.109",
]
[[package]]
name = "phf_macros"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b"
dependencies = [
"phf_generator 0.11.2",
"phf_shared 0.11.2",
"proc-macro2",
"quote",
"syn 2.0.47",
]
[[package]]
name = "phf_shared"
version = "0.8.0"
@ -4094,7 +4117,7 @@ checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2"
dependencies = [
"bytes",
"heck 0.4.1",
"itertools 0.11.0",
"itertools 0.10.5",
"log",
"multimap",
"once_cell",
@ -4115,7 +4138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
dependencies = [
"anyhow",
"itertools 0.11.0",
"itertools 0.10.5",
"proc-macro2",
"quote",
"syn 2.0.47",
@ -5012,7 +5035,7 @@ dependencies = [
[[package]]
name = "shared-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=430e3e15c9a1dc6aba2a9599d17d946a61ac7cae#430e3e15c9a1dc6aba2a9599d17d946a61ac7cae"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Cloud?rev=6262816043efeede8823d7a7ea252083adf407e9#6262816043efeede8823d7a7ea252083adf407e9"
dependencies = [
"anyhow",
"app-error",

View File

@ -31,6 +31,7 @@ members = [
"flowy-search-pub",
"flowy-chat",
"flowy-chat-pub",
"flowy-storage-pub",
]
resolver = "2"
@ -59,6 +60,7 @@ flowy-server-pub = { workspace = true, path = "flowy-server-pub" }
flowy-config = { workspace = true, path = "flowy-config" }
flowy-encrypt = { workspace = true, path = "flowy-encrypt" }
flowy-storage = { workspace = true, path = "flowy-storage" }
flowy-storage-pub = { workspace = true, path = "flowy-storage-pub" }
flowy-search = { workspace = true, path = "flowy-search" }
flowy-search-pub = { workspace = true, path = "flowy-search-pub" }
collab-integrate = { workspace = true, path = "collab-integrate" }
@ -95,7 +97,8 @@ validator = { version = "0.16.1", features = ["derive"] }
# Run the script.add_workspace_members:
# scripts/tool/update_client_api_rev.sh new_rev_id
# ⚠️⚠️⚠️️
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "430e3e15c9a1dc6aba2a9599d17d946a61ac7cae" }
client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "6262816043efeede8823d7a7ea252083adf407e9" }
client-api-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "6262816043efeede8823d7a7ea252083adf407e9" }
[profile.dev]
opt-level = 1

View File

@ -24,6 +24,7 @@ flowy-server-pub = { workspace = true }
flowy-notification = { workspace = true }
anyhow.workspace = true
flowy-storage = { workspace = true }
flowy-storage-pub = { workspace = true }
flowy-search = { workspace = true }
semver = "1.0.23"

View File

@ -9,7 +9,7 @@ use std::sync::Arc;
use std::time::Duration;
use nanoid::nanoid;
use parking_lot::RwLock;
use parking_lot::{Mutex, RwLock};
use semver::Version;
use tokio::select;
use tokio::time::sleep;
@ -37,7 +37,7 @@ pub struct EventIntegrationTest {
pub authenticator: Arc<RwLock<AuthenticatorPB>>,
pub appflowy_core: AppFlowyCore,
#[allow(dead_code)]
cleaner: Arc<Cleaner>,
cleaner: Arc<Mutex<Cleaner>>,
pub notification_sender: TestNotificationSender,
}
@ -52,10 +52,26 @@ impl EventIntegrationTest {
Self::new_with_user_data_path(temp_dir, name.to_string()).await
}
/// Builds a test instance from an existing [`AppFlowyCoreConfig`].
///
/// Used to simulate an app restart: the storage path inside `config` is kept,
/// so data persisted by a previous instance (e.g. unfinished uploads) is
/// reloaded when the core starts again.
pub async fn new_with_config(config: AppFlowyCoreConfig) -> Self {
  // Remember the data dir so the Cleaner can wipe it when the test drops.
  let clean_path = config.storage_path.clone();
  let inner = init_core(config).await;
  let notification_sender = TestNotificationSender::new();
  let authenticator = Arc::new(RwLock::new(AuthenticatorPB::Local));
  register_notification_sender(notification_sender.clone());

  // In case of dropping the runtime that runs the core, we need to forget the dispatcher
  std::mem::forget(inner.dispatcher());
  Self {
    appflowy_core: inner,
    authenticator,
    notification_sender,
    cleaner: Arc::new(Mutex::new(Cleaner::new(PathBuf::from(clean_path)))),
  }
}
pub async fn new_with_user_data_path(path_buf: PathBuf, name: String) -> Self {
let path = path_buf.to_str().unwrap().to_string();
let device_id = uuid::Uuid::new_v4().to_string();
let config = AppFlowyCoreConfig::new(
Version::new(0, 5, 8),
path.clone(),
@ -72,20 +88,11 @@ impl EventIntegrationTest {
// "lib_dispatch".to_string(),
],
);
Self::new_with_config(config).await
}
let inner = init_core(config).await;
let notification_sender = TestNotificationSender::new();
let authenticator = Arc::new(RwLock::new(AuthenticatorPB::Local));
register_notification_sender(notification_sender.clone());
// In case of dropping the runtime that runs the core, we need to forget the dispatcher
std::mem::forget(inner.dispatcher());
Self {
appflowy_core: inner,
authenticator,
notification_sender,
cleaner: Arc::new(Cleaner(path_buf)),
}
/// Keeps the user data directory on disk after this instance is dropped.
///
/// Needed when a test simulates a restart: the data must survive the drop so
/// a subsequent `new_with_config` can reload it.
pub fn set_no_cleanup(&mut self) {
  self.cleaner.lock().should_clean = false;
}
pub fn instance_name(&self) -> String {
@ -173,11 +180,18 @@ impl std::ops::Deref for EventIntegrationTest {
}
}
pub struct Cleaner(PathBuf);
#[derive(Clone)]
pub struct Cleaner {
dir: PathBuf,
should_clean: bool,
}
impl Cleaner {
pub fn new(dir: PathBuf) -> Self {
Cleaner(dir)
Self {
dir,
should_clean: true,
}
}
fn cleanup(dir: &PathBuf) {
@ -187,6 +201,8 @@ impl Cleaner {
impl Drop for Cleaner {
fn drop(&mut self) {
Self::cleanup(&self.0)
if self.should_clean {
Self::cleanup(&self.dir)
}
}
}

View File

@ -0,0 +1,138 @@
use crate::document::generate_random_bytes;
use event_integration_test::user_event::user_localhost_af_cloud;
use event_integration_test::EventIntegrationTest;
use flowy_storage_pub::storage::UploadStatus;
use std::env::temp_dir;
use std::time::Duration;
use tokio::fs;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
#[tokio::test]
async fn af_cloud_upload_file_test() {
  user_localhost_af_cloud().await;
  let test = EventIntegrationTest::new().await;
  test.af_cloud_sign_up().await;

  let workspace_id = test.get_current_workspace().await.id;
  let file_path = generate_file_with_bytes_len(1024).await.0;

  let mut rx = test.storage_manager.subscribe_upload_result();
  let created_upload = test
    .storage_manager
    .storage_service
    .create_upload(&workspace_id, "temp_test", &file_path)
    .await
    .unwrap();

  // Block until the upload for this file id reports Finish.
  // clippy::collapsible_if — one combined condition instead of nested ifs.
  while let Ok(result) = rx.recv().await {
    if result.file_id == created_upload.file_id && result.status == UploadStatus::Finish {
      break;
    }
  }

  let _ = fs::remove_file(file_path).await;
}
#[tokio::test]
async fn af_cloud_upload_big_file_test() {
  user_localhost_af_cloud().await;
  let mut test = EventIntegrationTest::new().await;
  test.af_cloud_sign_up().await;
  tokio::time::sleep(Duration::from_secs(6)).await;

  let workspace_id = test.get_current_workspace().await.id;
  let (file_path, upload_data) = generate_file_with_bytes_len(30 * 1024 * 1024).await;
  let created_upload = test
    .storage_manager
    .storage_service
    .create_upload(&workspace_id, "temp_test", &file_path)
    .await
    .unwrap();

  // Wait until the upload of this file actually starts.
  // clippy::collapsible_if — one combined condition instead of nested ifs.
  let mut rx = test.storage_manager.subscribe_upload_result();
  while let Ok(result) = rx.recv().await {
    if result.file_id == created_upload.file_id && result.status == UploadStatus::InProgress {
      break;
    }
  }

  // Simulate a restart
  let config = test.config.clone();
  test.set_no_cleanup();
  drop(test);
  tokio::time::sleep(Duration::from_secs(3)).await;

  // Restart the test. It will load unfinished uploads
  let test = EventIntegrationTest::new_with_config(config).await;
  let mut rx = test.storage_manager.subscribe_upload_result();
  while let Ok(result) = rx.recv().await {
    if result.file_id == created_upload.file_id && result.status == UploadStatus::Finish {
      break;
    }
  }

  // download the file and then compare the data.
  let file_service = test
    .server_provider
    .get_server()
    .unwrap()
    .file_storage()
    .unwrap();
  let file = file_service.get_object(created_upload.url).await.unwrap();
  assert_eq!(file.raw.to_vec(), upload_data);

  let _ = fs::remove_file(file_path).await;
}
#[tokio::test]
async fn af_cloud_upload_6_files_test() {
  user_localhost_af_cloud().await;
  let test = EventIntegrationTest::new().await;
  test.af_cloud_sign_up().await;
  let workspace_id = test.get_current_workspace().await.id;
  let mut rx = test.storage_manager.subscribe_upload_result();

  // Kick off six uploads of increasing size (MB).
  // clippy::useless_vec — iterate an array literal instead of allocating a Vec.
  let mut created_uploads = vec![];
  for file_size in [1, 2, 5, 8, 12, 20] {
    let file_path = generate_file_with_bytes_len(file_size * 1024 * 1024)
      .await
      .0;
    let created_upload = test
      .storage_manager
      .storage_service
      .create_upload(&workspace_id, "temp_test", &file_path)
      .await
      .unwrap();
    created_uploads.push(created_upload);
    let _ = fs::remove_file(file_path).await;
  }

  // Drain upload results until every tracked upload has finished.
  while let Ok(result) = rx.recv().await {
    if result.status == UploadStatus::Finish {
      created_uploads.retain(|upload| upload.file_id != result.file_id);
    }

    if created_uploads.is_empty() {
      break;
    }
  }
}
/// Writes `len` random bytes to a fresh file in the OS temp dir and returns
/// `(path, data)`. The caller is responsible for removing the file.
async fn generate_file_with_bytes_len(len: usize) -> (String, Vec<u8>) {
  let data = generate_random_bytes(len);
  // This is a file path, not a directory (the original `file_dir` name was
  // misleading).
  let path_buf = temp_dir().join(uuid::Uuid::new_v4().to_string());
  let file_path = path_buf.to_str().unwrap().to_string();
  let mut file = File::create(path_buf).await.unwrap();
  file.write_all(&data).await.unwrap();
  // tokio::fs::File performs writes on a background task; dropping it without
  // flushing may lose buffered data, so flush explicitly before returning.
  file.flush().await.unwrap();
  (file_path, data)
}

View File

@ -1 +1,2 @@
mod edit_test;
mod file_upload_test;

View File

@ -4,7 +4,7 @@ mod af_cloud_test;
// #[cfg(feature = "supabase_cloud_test")]
// mod supabase_test;
use rand::{distributions::Alphanumeric, Rng};
use rand::{distributions::Alphanumeric, thread_rng, Rng};
pub fn generate_random_string(len: usize) -> String {
let rng = rand::thread_rng();
@ -14,3 +14,12 @@ pub fn generate_random_string(len: usize) -> String {
.map(char::from)
.collect()
}
/// Generates `size` random alphanumeric bytes.
pub fn generate_random_bytes(size: usize) -> Vec<u8> {
  // rand 0.8: `Alphanumeric` samples `u8` directly, so collect bytes without
  // the original intermediate `String` allocation and `into_bytes` round-trip.
  thread_rng().sample_iter(&Alphanumeric).take(size).collect()
}

View File

@ -31,6 +31,7 @@ collab = { workspace = true }
diesel.workspace = true
uuid.workspace = true
flowy-storage = { workspace = true }
flowy-storage-pub = { workspace = true }
client-api.workspace = true
flowy-chat = { workspace = true }
flowy-chat-pub = { workspace = true }

View File

@ -8,7 +8,7 @@ use flowy_document::entities::{DocumentSnapshotData, DocumentSnapshotMeta};
use flowy_document::manager::{DocumentManager, DocumentSnapshotService, DocumentUserService};
use flowy_document_pub::cloud::DocumentCloudService;
use flowy_error::{FlowyError, FlowyResult};
use flowy_storage::ObjectStorageService;
use flowy_storage_pub::storage::StorageService;
use flowy_user::services::authenticate_user::AuthenticateUser;
pub struct DocumentDepsResolver();
@ -18,7 +18,7 @@ impl DocumentDepsResolver {
_database_manager: &Arc<DatabaseManager>,
collab_builder: Arc<AppFlowyCollabBuilder>,
cloud_service: Arc<dyn DocumentCloudService>,
storage_service: Weak<dyn ObjectStorageService>,
storage_service: Weak<dyn StorageService>,
) -> Arc<DocumentManager> {
let user_service: Arc<dyn DocumentUserService> =
Arc::new(DocumentUserImpl(authenticate_user.clone()));

View File

@ -0,0 +1,54 @@
use flowy_error::FlowyError;
use flowy_sqlite::DBConnection;
use flowy_storage::manager::{StorageManager, StorageUserService};
use flowy_storage_pub::cloud::StorageCloudService;
use flowy_user::services::authenticate_user::AuthenticateUser;
use std::sync::{Arc, Weak};
pub struct FileStorageResolver;

impl FileStorageResolver {
  /// Wires up a [`StorageManager`] from the given cloud service, backed by a
  /// user service that resolves session info through `authenticate_user` and
  /// uses `root` as the application root directory.
  pub fn resolve(
    authenticate_user: Weak<AuthenticateUser>,
    cloud_service: Arc<dyn StorageCloudService>,
    root: &str,
  ) -> Arc<StorageManager> {
    let user_service = Arc::new(FileStorageServiceImpl {
      user: authenticate_user,
      root_dir: root.to_owned(),
    });
    Arc::new(StorageManager::new(cloud_service, user_service))
  }
}
struct FileStorageServiceImpl {
user: Weak<AuthenticateUser>,
root_dir: String,
}
impl FileStorageServiceImpl {
fn upgrade_user(&self) -> Result<Arc<AuthenticateUser>, FlowyError> {
let user = self
.user
.upgrade()
.ok_or(FlowyError::internal().with_context("Unexpected error: UserSession is None"))?;
Ok(user)
}
}
impl StorageUserService for FileStorageServiceImpl {
  // Id of the currently signed-in user; errors if the session was dropped.
  fn user_id(&self) -> Result<i64, FlowyError> {
    self.upgrade_user()?.user_id()
  }

  // Id of the workspace the user currently has open.
  fn workspace_id(&self) -> Result<String, FlowyError> {
    self.upgrade_user()?.workspace_id()
  }

  // Sqlite connection scoped to the given user id.
  fn sqlite_connection(&self, uid: i64) -> Result<DBConnection, FlowyError> {
    self.upgrade_user()?.get_sqlite_connection(uid)
  }

  // Root dir captured at resolve time (see FileStorageResolver::resolve).
  fn get_application_root_dir(&self) -> &str {
    &self.root_dir
  }
}

View File

@ -12,5 +12,6 @@ mod folder_deps;
mod chat_deps;
mod database_deps;
pub mod file_storage_deps;
mod search_deps;
mod user_deps;

View File

@ -53,6 +53,7 @@ pub fn create_log_filter(level: String, with_crates: Vec<String>, platform: Plat
filters.push(format!("lib_infra={}", level));
filters.push(format!("flowy_search={}", level));
filters.push(format!("flowy_chat={}", level));
filters.push(format!("flowy_storage={}", level));
// Enable the frontend logs. DO NOT DISABLE.
// These logs are essential for debugging and verifying frontend behavior.
filters.push(format!("dart_ffi={}", level));

View File

@ -1,6 +1,5 @@
use client_api::entity::search_dto::SearchDocumentResponseItem;
use flowy_search_pub::cloud::SearchCloudService;
use flowy_storage::{ObjectIdentity, ObjectStorageService};
use std::sync::Arc;
use anyhow::Error;
@ -28,13 +27,14 @@ use flowy_database_pub::cloud::{
};
use flowy_document::deps::DocumentData;
use flowy_document_pub::cloud::{DocumentCloudService, DocumentSnapshot};
use flowy_error::FlowyError;
use flowy_error::{FlowyError, FlowyResult};
use flowy_folder_pub::cloud::{
FolderCloudService, FolderCollabParams, FolderData, FolderSnapshot, Workspace, WorkspaceRecord,
};
use flowy_server_pub::af_cloud_config::AFCloudConfiguration;
use flowy_server_pub::supabase_config::SupabaseConfiguration;
use flowy_storage::ObjectValue;
use flowy_storage_pub::cloud::{ObjectIdentity, ObjectValue, StorageCloudService};
use flowy_storage_pub::storage::{CompletedPartRequest, CreateUploadResponse, UploadPartResponse};
use flowy_user_pub::cloud::{UserCloudService, UserCloudServiceProvider};
use flowy_user_pub::entities::{Authenticator, UserTokenState};
use lib_infra::async_trait::async_trait;
@ -42,7 +42,8 @@ use lib_infra::future::FutureResult;
use crate::integrate::server::{Server, ServerProvider};
impl ObjectStorageService for ServerProvider {
#[async_trait]
impl StorageCloudService for ServerProvider {
fn get_object_url(&self, object_id: ObjectIdentity) -> FutureResult<String, FlowyError> {
let server = self.get_server();
FutureResult::new(async move {
@ -59,21 +60,85 @@ impl ObjectStorageService for ServerProvider {
})
}
fn delete_object(&self, url: String) -> FutureResult<(), FlowyError> {
fn delete_object(&self, url: &str) -> FutureResult<(), FlowyError> {
let server = self.get_server();
let url = url.to_string();
FutureResult::new(async move {
let storage = server?.file_storage().ok_or(FlowyError::internal())?;
storage.delete_object(url).await
storage.delete_object(&url).await
})
}
fn get_object(&self, url: String) -> FutureResult<flowy_storage::ObjectValue, FlowyError> {
fn get_object(&self, url: String) -> FutureResult<ObjectValue, FlowyError> {
let server = self.get_server();
FutureResult::new(async move {
let storage = server?.file_storage().ok_or(FlowyError::internal())?;
storage.get_object(url).await
})
}
/// Builds the url of the object identified by (workspace, parent dir, file id)
/// by delegating to the active server's file storage.
fn get_object_url_v1(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  file_id: &str,
) -> FlowyResult<String> {
  let server = self.get_server()?;
  let storage = server.file_storage().ok_or(FlowyError::internal())?;
  storage.get_object_url_v1(workspace_id, parent_dir, file_id)
}
/// Starts a multi-part upload on the active server's file storage and returns
/// the server's create-upload response.
async fn create_upload(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  file_id: &str,
  content_type: &str,
) -> Result<CreateUploadResponse, FlowyError> {
  // Apply `?` directly to get_server(): the extra `let server = ...; server?`
  // binding added nothing.
  let storage = self
    .get_server()?
    .file_storage()
    .ok_or(FlowyError::internal())?;
  storage
    .create_upload(workspace_id, parent_dir, file_id, content_type)
    .await
}
/// Uploads one part of a multi-part upload via the active server's file
/// storage and returns the part response.
async fn upload_part(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  upload_id: &str,
  file_id: &str,
  part_number: i32,
  body: Vec<u8>,
) -> Result<UploadPartResponse, FlowyError> {
  // Apply `?` directly to get_server(); no intermediate binding needed.
  let storage = self
    .get_server()?
    .file_storage()
    .ok_or(FlowyError::internal())?;
  storage
    .upload_part(
      workspace_id,
      parent_dir,
      upload_id,
      file_id,
      part_number,
      body,
    )
    .await
}
/// Completes a multi-part upload by submitting the collected parts to the
/// active server's file storage.
async fn complete_upload(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  upload_id: &str,
  file_id: &str,
  parts: Vec<CompletedPartRequest>,
) -> Result<(), FlowyError> {
  // Apply `?` directly to get_server(); no intermediate binding needed.
  let storage = self
    .get_server()?
    .file_storage()
    .ok_or(FlowyError::internal())?;
  storage
    .complete_upload(workspace_id, parent_dir, upload_id, file_id, parts)
    .await
}
}
impl UserCloudServiceProvider for ServerProvider {

View File

@ -1,7 +1,7 @@
use std::sync::Arc;
use anyhow::Context;
use tracing::event;
use tracing::{event, trace};
use collab_entity::CollabType;
use collab_integrate::collab_builder::AppFlowyCollabBuilder;
@ -9,13 +9,13 @@ use flowy_database2::DatabaseManager;
use flowy_document::manager::DocumentManager;
use flowy_error::{FlowyError, FlowyResult};
use flowy_folder::manager::{FolderInitDataSource, FolderManager};
use flowy_storage::manager::StorageManager;
use flowy_user::event_map::UserStatusCallback;
use flowy_user_pub::cloud::{UserCloudConfig, UserCloudServiceProvider};
use flowy_user_pub::entities::{Authenticator, UserProfile, UserWorkspace};
use lib_infra::future::{to_fut, Fut};
use crate::integrate::server::{Server, ServerProvider};
use crate::AppFlowyCoreConfig;
pub(crate) struct UserStatusCallbackImpl {
pub(crate) collab_builder: Arc<AppFlowyCollabBuilder>,
@ -23,8 +23,7 @@ pub(crate) struct UserStatusCallbackImpl {
pub(crate) database_manager: Arc<DatabaseManager>,
pub(crate) document_manager: Arc<DocumentManager>,
pub(crate) server_provider: Arc<ServerProvider>,
#[allow(dead_code)]
pub(crate) config: AppFlowyCoreConfig,
pub(crate) storage_manager: Arc<StorageManager>,
}
impl UserStatusCallback for UserStatusCallbackImpl {
@ -213,6 +212,8 @@ impl UserStatusCallback for UserStatusCallbackImpl {
}
fn did_update_network(&self, reachable: bool) {
trace!("Notify did update network: reachable: {}", reachable);
self.collab_builder.update_network(reachable);
self.storage_manager.update_network_reachable(reachable);
}
}

View File

@ -2,7 +2,6 @@
use flowy_search::folder::indexer::FolderIndexManagerImpl;
use flowy_search::services::manager::SearchManager;
use flowy_storage::ObjectStorageService;
use std::sync::{Arc, Weak};
use std::time::Duration;
use sysinfo::System;
@ -18,6 +17,7 @@ use flowy_folder::manager::FolderManager;
use flowy_server::af_cloud::define::ServerUser;
use flowy_sqlite::kv::StorePreferences;
use flowy_storage::manager::StorageManager;
use flowy_user::services::authenticate_user::AuthenticateUser;
use flowy_user::services::entities::UserConfig;
use flowy_user::user_manager::UserManager;
@ -30,6 +30,7 @@ use lib_log::stream_log::StreamLogSender;
use module::make_plugins;
use crate::config::AppFlowyCoreConfig;
use crate::deps_resolve::file_storage_deps::FileStorageResolver;
use crate::deps_resolve::*;
use crate::integrate::collab_interact::CollabInteractImpl;
use crate::integrate::log::init_log;
@ -59,6 +60,7 @@ pub struct AppFlowyCore {
pub store_preference: Arc<StorePreferences>,
pub search_manager: Arc<SearchManager>,
pub chat_manager: Arc<ChatManager>,
pub storage_manager: Arc<StorageManager>,
}
impl AppFlowyCore {
@ -140,7 +142,13 @@ impl AppFlowyCore {
collab_builder,
search_manager,
chat_manager,
storage_manager,
) = async {
let storage_manager = FileStorageResolver::resolve(
Arc::downgrade(&authenticate_user),
server_provider.clone(),
&user_config.storage_path,
);
/// The shared collab builder is used to build the [Collab] instance. The plugins will be loaded
/// on demand based on the [CollabPluginConfig].
let collab_builder = Arc::new(AppFlowyCollabBuilder::new(
@ -164,7 +172,7 @@ impl AppFlowyCore {
&database_manager,
collab_builder.clone(),
server_provider.clone(),
Arc::downgrade(&(server_provider.clone() as Arc<dyn ObjectStorageService>)),
Arc::downgrade(&storage_manager.storage_service),
);
let chat_manager =
@ -216,6 +224,7 @@ impl AppFlowyCore {
collab_builder,
search_manager,
chat_manager,
storage_manager,
)
}
.await;
@ -226,7 +235,7 @@ impl AppFlowyCore {
database_manager: database_manager.clone(),
document_manager: document_manager.clone(),
server_provider: server_provider.clone(),
config: config.clone(),
storage_manager: storage_manager.clone(),
};
let collab_interact_impl = CollabInteractImpl {
@ -267,6 +276,7 @@ impl AppFlowyCore {
store_preference,
search_manager,
chat_manager,
storage_manager,
}
}

View File

@ -14,7 +14,7 @@ collab-entity = { workspace = true }
collab-plugins = { workspace = true }
collab-integrate = { workspace = true }
flowy-document-pub = { workspace = true }
flowy-storage = { workspace = true }
flowy-storage-pub = { workspace = true }
flowy-derive.workspace = true
flowy-notification = { workspace = true }
flowy-error = { path = "../flowy-error", features = ["impl_from_serde", "impl_from_dispatch_error", "impl_from_collab_document", "impl_from_collab_persistence"] }

View File

@ -77,11 +77,12 @@ pub struct UploadFileParamsPB {
pub workspace_id: String,
#[pb(index = 2)]
#[validate(custom = "required_valid_path")]
pub local_file_path: String,
#[validate(custom = "required_not_empty_str")]
pub document_id: String,
#[pb(index = 3)]
pub is_async: bool,
#[validate(custom = "required_valid_path")]
pub local_file_path: String,
}
#[derive(Default, ProtoBuf, Validate)]

View File

@ -422,13 +422,13 @@ pub(crate) async fn upload_file_handler(
) -> DataResult<UploadedFilePB, FlowyError> {
let AFPluginData(UploadFileParamsPB {
workspace_id,
document_id,
local_file_path,
is_async,
}) = params;
let manager = upgrade_document(manager)?;
let url = manager
.upload_file(workspace_id, &local_file_path, is_async)
.upload_file(workspace_id, &document_id, &local_file_path)
.await?;
Ok(AFPluginData(UploadedFilePB {

View File

@ -13,16 +13,14 @@ use collab_document::document_data::default_document_data;
use collab_entity::CollabType;
use collab_plugins::CollabKVDB;
use dashmap::DashMap;
use flowy_storage::object_from_disk;
use lib_infra::util::timestamp;
use tokio::io::AsyncWriteExt;
use tracing::{error, trace};
use tracing::trace;
use tracing::{event, instrument};
use collab_integrate::collab_builder::{AppFlowyCollabBuilder, CollabBuilderConfig};
use flowy_document_pub::cloud::DocumentCloudService;
use flowy_error::{internal_error, ErrorCode, FlowyError, FlowyResult};
use flowy_storage::ObjectStorageService;
use flowy_storage_pub::storage::StorageService;
use lib_dispatch::prelude::af_spawn;
use crate::document::MutexDocument;
@ -53,7 +51,7 @@ pub struct DocumentManager {
documents: Arc<DashMap<String, Arc<MutexDocument>>>,
removing_documents: Arc<DashMap<String, Arc<MutexDocument>>>,
cloud_service: Arc<dyn DocumentCloudService>,
storage_service: Weak<dyn ObjectStorageService>,
storage_service: Weak<dyn StorageService>,
snapshot_service: Arc<dyn DocumentSnapshotService>,
}
@ -62,7 +60,7 @@ impl DocumentManager {
user_service: Arc<dyn DocumentUserService>,
collab_builder: Arc<AppFlowyCollabBuilder>,
cloud_service: Arc<dyn DocumentCloudService>,
storage_service: Weak<dyn ObjectStorageService>,
storage_service: Weak<dyn StorageService>,
snapshot_service: Arc<dyn DocumentSnapshotService>,
) -> Self {
Self {
@ -323,73 +321,30 @@ impl DocumentManager {
Ok(snapshot)
}
#[instrument(level = "debug", skip_all, err)]
pub async fn upload_file(
&self,
workspace_id: String,
document_id: &str,
local_file_path: &str,
is_async: bool,
) -> FlowyResult<String> {
let (object_identity, object_value) = object_from_disk(&workspace_id, local_file_path).await?;
let storage_service = self.storage_service_upgrade()?;
let url = storage_service.get_object_url(object_identity).await?;
let clone_url = url.clone();
match is_async {
false => storage_service.put_object(clone_url, object_value).await?,
true => {
// let the upload happen in the background
af_spawn(async move {
if let Err(e) = storage_service.put_object(clone_url, object_value).await {
error!("upload file failed: {}", e);
}
});
},
}
let url = storage_service
.create_upload(&workspace_id, document_id, local_file_path)
.await?
.url;
Ok(url)
}
pub async fn download_file(&self, local_file_path: String, url: String) -> FlowyResult<()> {
// TODO(nathan): save file when the current target is wasm
#[cfg(not(target_arch = "wasm32"))]
{
if tokio::fs::metadata(&local_file_path).await.is_ok() {
tracing::warn!("file already exist in user local disk: {}", local_file_path);
return Ok(());
}
let storage_service = self.storage_service_upgrade()?;
let object_value = storage_service.get_object(url).await?;
// create file if not exist
let mut file = tokio::fs::OpenOptions::new()
.create(true)
.truncate(true)
.write(true)
.open(&local_file_path)
.await?;
let n = file.write(&object_value.raw).await?;
tracing::info!("downloaded {} bytes to file: {}", n, local_file_path);
}
let storage_service = self.storage_service_upgrade()?;
storage_service.download_object(url, local_file_path)?;
Ok(())
}
pub async fn delete_file(&self, local_file_path: String, url: String) -> FlowyResult<()> {
// TODO(nathan): delete file when the current target is wasm
#[cfg(not(target_arch = "wasm32"))]
// delete file from local
tokio::fs::remove_file(local_file_path).await?;
// delete from cloud
let storage_service = self.storage_service_upgrade()?;
af_spawn(async move {
if let Err(e) = storage_service.delete_object(url).await {
// TODO: add WAL to log the delete operation.
// keep a list of files to be deleted, and retry later
error!("delete file failed: {}", e);
}
});
storage_service.delete_object(url, local_file_path)?;
Ok(())
}
@ -424,7 +379,7 @@ impl DocumentManager {
}
}
fn storage_service_upgrade(&self) -> FlowyResult<Arc<dyn ObjectStorageService>> {
fn storage_service_upgrade(&self) -> FlowyResult<Arc<dyn StorageService>> {
let storage_service = self.storage_service.upgrade().ok_or_else(|| {
FlowyError::internal().with_context("The file storage service is already dropped")
})?;
@ -438,7 +393,7 @@ impl DocumentManager {
}
/// Only expose this method for testing
#[cfg(debug_assertions)]
pub fn get_file_storage_service(&self) -> &Weak<dyn ObjectStorageService> {
pub fn get_file_storage_service(&self) -> &Weak<dyn StorageService> {
&self.storage_service
}

View File

@ -20,8 +20,10 @@ use flowy_document::entities::{DocumentSnapshotData, DocumentSnapshotMeta};
use flowy_document::manager::{DocumentManager, DocumentSnapshotService, DocumentUserService};
use flowy_document_pub::cloud::*;
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_storage::ObjectStorageService;
use flowy_storage_pub::chunked_byte::ChunkedBytes;
use flowy_storage_pub::storage::{CreatedUpload, StorageService};
use lib_infra::async_trait::async_trait;
use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
pub struct DocumentTest {
@ -32,7 +34,7 @@ impl DocumentTest {
pub fn new() -> Self {
let user = FakeUser::new();
let cloud_service = Arc::new(LocalTestDocumentCloudServiceImpl());
let file_storage = Arc::new(DocumentTestFileStorageService) as Arc<dyn ObjectStorageService>;
let file_storage = Arc::new(DocumentTestFileStorageService) as Arc<dyn StorageService>;
let document_snapshot = Arc::new(DocumentTestSnapshot);
let builder = Arc::new(AppFlowyCollabBuilder::new(
@ -173,27 +175,44 @@ impl DocumentCloudService for LocalTestDocumentCloudServiceImpl {
}
pub struct DocumentTestFileStorageService;
impl ObjectStorageService for DocumentTestFileStorageService {
fn get_object_url(
#[async_trait]
impl StorageService for DocumentTestFileStorageService {
fn upload_object(
&self,
_object_id: flowy_storage::ObjectIdentity,
_workspace_id: &str,
_local_file_path: &str,
) -> FutureResult<String, FlowyError> {
todo!()
}
fn put_object(
fn delete_object(&self, _url: String, _local_file_path: String) -> FlowyResult<()> {
todo!()
}
fn download_object(&self, _url: String, _local_file_path: String) -> FlowyResult<()> {
todo!()
}
fn create_upload(
&self,
_url: String,
_object_value: flowy_storage::ObjectValue,
) -> FutureResult<(), FlowyError> {
_workspace_id: &str,
_parent_dir: &str,
_local_file_path: &str,
) -> FutureResult<CreatedUpload, flowy_error::FlowyError> {
todo!()
}
fn delete_object(&self, _url: String) -> FutureResult<(), FlowyError> {
async fn start_upload(&self, _chunks: &ChunkedBytes, _record: &BoxAny) -> Result<(), FlowyError> {
todo!()
}
fn get_object(&self, _url: String) -> FutureResult<flowy_storage::ObjectValue, FlowyError> {
async fn resume_upload(
&self,
_workspace_id: &str,
_parent_dir: &str,
_file_id: &str,
) -> Result<(), FlowyError> {
todo!()
}
}

View File

@ -42,6 +42,7 @@ flowy-server-pub = { workspace = true }
flowy-search-pub = { workspace = true }
flowy-encrypt = { workspace = true }
flowy-storage = { workspace = true }
flowy-storage-pub = { workspace = true }
flowy-chat-pub = { workspace = true }
mime_guess = "2.0"
url = "2.4"

View File

@ -1,5 +1,8 @@
use flowy_error::FlowyError;
use flowy_storage::{ObjectIdentity, ObjectStorageService, ObjectValue};
use client_api::entity::{CompleteUploadRequest, CreateUploadRequest};
use flowy_error::{FlowyError, FlowyResult};
use flowy_storage_pub::cloud::{ObjectIdentity, ObjectValue, StorageCloudService};
use flowy_storage_pub::storage::{CompletedPartRequest, CreateUploadResponse, UploadPartResponse};
use lib_infra::async_trait::async_trait;
use lib_infra::future::FutureResult;
use crate::af_cloud::AFServer;
@ -12,7 +15,8 @@ impl<T> AFCloudFileStorageServiceImpl<T> {
}
}
impl<T> ObjectStorageService for AFCloudFileStorageServiceImpl<T>
#[async_trait]
impl<T> StorageCloudService for AFCloudFileStorageServiceImpl<T>
where
T: AFServer,
{
@ -36,7 +40,8 @@ where
})
}
fn delete_object(&self, url: String) -> FutureResult<(), FlowyError> {
fn delete_object(&self, url: &str) -> FutureResult<(), FlowyError> {
let url = url.to_string();
let try_get_client = self.0.try_get_client();
FutureResult::new(async move {
let client = try_get_client?;
@ -56,4 +61,84 @@ where
})
})
}
/// Builds the blob url for (workspace, parent dir, file id) via the client.
fn get_object_url_v1(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  file_id: &str,
) -> FlowyResult<String> {
  let client = self.0.try_get_client()?;
  Ok(client.get_blob_url_v1(workspace_id, parent_dir, file_id))
}
/// Creates a multi-part upload session on the AppFlowy Cloud server.
async fn create_upload(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  file_id: &str,
  content_type: &str,
) -> Result<CreateUploadResponse, FlowyError> {
  // Apply `?` directly to try_get_client(): the original intermediate
  // `try_get_client` binding added nothing.
  let client = self.0.try_get_client()?;
  let req = CreateUploadRequest {
    file_id: file_id.to_string(),
    parent_dir: parent_dir.to_string(),
    content_type: content_type.to_string(),
  };
  let resp = client.create_upload(workspace_id, req).await?;
  Ok(resp)
}
/// Uploads a single part of a multi-part upload to the AppFlowy Cloud server.
///
/// NOTE(review): the client call takes `file_id` before `upload_id` — the
/// reverse of this trait method's parameter order. The argument order below
/// matches the original code; presumably it mirrors the client API signature
/// — confirm against `client.upload_part` before changing it.
async fn upload_part(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  upload_id: &str,
  file_id: &str,
  part_number: i32,
  body: Vec<u8>,
) -> Result<UploadPartResponse, FlowyError> {
  // Apply `?` directly to try_get_client(); no intermediate binding needed.
  let client = self.0.try_get_client()?;
  let resp = client
    .upload_part(
      workspace_id,
      parent_dir,
      file_id,
      upload_id,
      part_number,
      body,
    )
    .await?;
  Ok(resp)
}
/// Finalizes a multipart upload by handing the collected part receipts back
/// to the server, which assembles the parts into the final object.
async fn complete_upload(
  &self,
  workspace_id: &str,
  parent_dir: &str,
  upload_id: &str,
  file_id: &str,
  parts: Vec<CompletedPartRequest>,
) -> Result<(), FlowyError> {
  let client = self.0.try_get_client()?;
  let request = CompleteUploadRequest {
    file_id: file_id.to_string(),
    parent_dir: parent_dir.to_string(),
    upload_id: upload_id.to_string(),
    parts,
  };
  client.complete_upload(workspace_id, request).await?;
  Ok(())
}
}

View File

@ -12,7 +12,6 @@ use client_api::ws::{
use client_api::{Client, ClientConfiguration};
use flowy_chat_pub::cloud::ChatCloudService;
use flowy_search_pub::cloud::SearchCloudService;
use flowy_storage::ObjectStorageService;
use rand::Rng;
use semver::Version;
use tokio::select;
@ -28,6 +27,7 @@ use flowy_document_pub::cloud::DocumentCloudService;
use flowy_error::{ErrorCode, FlowyError};
use flowy_folder_pub::cloud::FolderCloudService;
use flowy_server_pub::af_cloud_config::AFCloudConfiguration;
use flowy_storage_pub::cloud::StorageCloudService;
use flowy_user_pub::cloud::{UserCloudService, UserUpdate};
use flowy_user_pub::entities::UserTokenState;
use lib_dispatch::prelude::af_spawn;
@ -252,7 +252,7 @@ impl AppFlowyServer for AppFlowyCloudServer {
Ok(channel.map(|c| (c, connect_state_recv, self.ws_client.is_connected())))
}
fn file_storage(&self) -> Option<Arc<dyn ObjectStorageService>> {
fn file_storage(&self) -> Option<Arc<dyn StorageCloudService>> {
let client = AFServerImpl {
client: self.get_client(),
};

View File

@ -1,5 +1,4 @@
use flowy_search_pub::cloud::SearchCloudService;
use flowy_storage::ObjectStorageService;
use std::sync::Arc;
use parking_lot::RwLock;
@ -9,6 +8,7 @@ use flowy_database_pub::cloud::DatabaseCloudService;
use flowy_document_pub::cloud::DocumentCloudService;
use flowy_error::FlowyError;
use flowy_folder_pub::cloud::FolderCloudService;
use flowy_storage_pub::cloud::StorageCloudService;
// use flowy_user::services::database::{
// get_user_profile, get_user_workspace, open_collab_db, open_user_db,
// };
@ -68,7 +68,7 @@ impl AppFlowyServer for LocalServer {
Arc::new(LocalServerDocumentCloudServiceImpl())
}
fn file_storage(&self) -> Option<Arc<dyn ObjectStorageService>> {
fn file_storage(&self) -> Option<Arc<dyn StorageCloudService>> {
None
}

View File

@ -2,7 +2,6 @@ use client_api::ws::ConnectState;
use client_api::ws::WSConnectStateReceiver;
use client_api::ws::WebSocketChannel;
use flowy_search_pub::cloud::SearchCloudService;
use flowy_storage::ObjectStorageService;
use std::sync::Arc;
use anyhow::Error;
@ -17,6 +16,7 @@ use crate::default_impl::DefaultChatCloudServiceImpl;
use flowy_database_pub::cloud::DatabaseCloudService;
use flowy_document_pub::cloud::DocumentCloudService;
use flowy_folder_pub::cloud::FolderCloudService;
use flowy_storage_pub::cloud::StorageCloudService;
use flowy_user_pub::cloud::UserCloudService;
use flowy_user_pub::entities::UserTokenState;
@ -144,7 +144,7 @@ pub trait AppFlowyServer: Send + Sync + 'static {
Ok(None)
}
fn file_storage(&self) -> Option<Arc<dyn ObjectStorageService>>;
fn file_storage(&self) -> Option<Arc<dyn StorageCloudService>>;
}
pub struct EncryptionImpl {

View File

@ -1,7 +1,5 @@
use std::borrow::Cow;
use anyhow::Error;
use flowy_storage::StorageObject;
use flowy_storage_pub::cloud::StorageObject;
use hyper::header::CONTENT_TYPE;
use reqwest::header::IntoHeaderName;
use reqwest::multipart::{Form, Part};
@ -9,12 +7,14 @@ use reqwest::{
header::{HeaderMap, HeaderValue},
Client, Method, RequestBuilder,
};
use std::borrow::Cow;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
use url::Url;
use crate::supabase::file_storage::{DeleteObjects, FileOptions, NewBucket, RequestBody};
#[allow(dead_code)]
pub struct StorageRequestBuilder {
pub url: Url,
headers: HeaderMap,
@ -23,6 +23,7 @@ pub struct StorageRequestBuilder {
body: RequestBody,
}
#[allow(dead_code)]
impl StorageRequestBuilder {
pub fn new(url: Url, headers: HeaderMap, client: Client) -> Self {
Self {

View File

@ -1,141 +1,13 @@
use std::sync::{Arc, Weak};
#![allow(clippy::all)]
#![allow(unknown_lints)]
#![allow(unused_attributes)]
use std::sync::Weak;
use anyhow::{anyhow, Error};
use reqwest::{
header::{HeaderMap, HeaderValue},
Client,
};
use url::Url;
use flowy_encrypt::{decrypt_data, encrypt_data};
use flowy_error::FlowyError;
use flowy_server_pub::supabase_config::SupabaseConfiguration;
use flowy_storage::{FileStoragePlan, ObjectStorageService};
use lib_infra::future::FutureResult;
use crate::supabase::file_storage::builder::StorageRequestBuilder;
use crate::AppFlowyEncryption;
pub struct SupabaseFileStorage {
url: Url,
headers: HeaderMap,
client: Client,
#[allow(dead_code)]
encryption: ObjectEncryption,
#[allow(dead_code)]
storage_plan: Arc<dyn FileStoragePlan>,
}
impl ObjectStorageService for SupabaseFileStorage {
fn get_object_url(
&self,
_object_id: flowy_storage::ObjectIdentity,
) -> FutureResult<String, FlowyError> {
todo!()
}
fn put_object(
&self,
_url: String,
_object_value: flowy_storage::ObjectValue,
) -> FutureResult<(), FlowyError> {
todo!()
}
fn delete_object(&self, _url: String) -> FutureResult<(), FlowyError> {
todo!()
}
fn get_object(&self, _url: String) -> FutureResult<flowy_storage::ObjectValue, FlowyError> {
todo!()
}
// fn create_object(&self, object: StorageObject) -> FutureResult<String, FlowyError> {
// let mut storage = self.storage();
// let storage_plan = Arc::downgrade(&self.storage_plan);
// FutureResult::new(async move {
// let plan = storage_plan
// .upgrade()
// .ok_or(anyhow!("Storage plan is not available"))?;
// plan.check_upload_object(&object).await?;
// storage = storage.upload_object("data", object);
// let url = storage.url.to_string();
// storage.build().await?.send().await?.success().await?;
// Ok(url)
// })
// }
// fn delete_object_by_url(&self, object_url: String) -> FutureResult<(), FlowyError> {
// let storage = self.storage();
// FutureResult::new(async move {
// let url = Url::parse(&object_url)?;
// let location = get_object_location_from(&url)?;
// storage
// .delete_object(location.bucket_id, location.file_name)
// .build()
// .await?
// .send()
// .await?
// .success()
// .await?;
// Ok(())
// })
// }
// fn get_object_by_url(&self, object_url: String) -> FutureResult<Bytes, FlowyError> {
// let storage = self.storage();
// FutureResult::new(async move {
// let url = Url::parse(&object_url)?;
// let location = get_object_location_from(&url)?;
// let bytes = storage
// .get_object(location.bucket_id, location.file_name)
// .build()
// .await?
// .send()
// .await?
// .get_bytes()
// .await?;
// Ok(bytes)
// })
// }
}
impl SupabaseFileStorage {
pub fn new(
config: &SupabaseConfiguration,
encryption: Weak<dyn AppFlowyEncryption>,
storage_plan: Arc<dyn FileStoragePlan>,
) -> Result<Self, Error> {
let mut headers = HeaderMap::new();
let url = format!("{}/storage/v1", config.url);
let auth = format!("Bearer {}", config.anon_key);
headers.insert(
"Authorization",
HeaderValue::from_str(&auth).expect("Authorization is invalid"),
);
headers.insert(
"apikey",
HeaderValue::from_str(&config.anon_key).expect("apikey value is invalid"),
);
let encryption = ObjectEncryption::new(encryption);
Ok(Self {
url: Url::parse(&url)?,
headers,
client: Client::new(),
encryption,
storage_plan,
})
}
pub fn storage(&self) -> StorageRequestBuilder {
StorageRequestBuilder::new(self.url.clone(), self.headers.clone(), self.client.clone())
}
}
use flowy_encrypt::{decrypt_data, encrypt_data};
#[allow(dead_code)]
struct ObjectEncryption {
@ -143,6 +15,7 @@ struct ObjectEncryption {
}
impl ObjectEncryption {
#[allow(dead_code)]
fn new(encryption: Weak<dyn AppFlowyEncryption>) -> Self {
Self { encryption }
}

View File

@ -1,8 +1,7 @@
use bytes::Bytes;
use flowy_storage_pub::cloud::ObjectValueSupabase;
use serde::{Deserialize, Serialize};
use flowy_storage::ObjectValueSupabase;
use crate::supabase;
#[derive(Debug, Clone, Serialize, Deserialize)]

View File

@ -3,7 +3,7 @@ use std::sync::Weak;
use parking_lot::RwLock;
use flowy_error::FlowyError;
use flowy_storage::{FileStoragePlan, StorageObject};
use flowy_storage_pub::cloud::{FileStoragePlan, StorageObject};
use lib_infra::future::FutureResult;
use crate::supabase::api::RESTfulPostgresServer;

View File

@ -1,5 +1,4 @@
use flowy_search_pub::cloud::SearchCloudService;
use flowy_storage::ObjectStorageService;
use std::collections::HashMap;
use std::sync::{Arc, Weak};
@ -11,6 +10,7 @@ use flowy_database_pub::cloud::DatabaseCloudService;
use flowy_document_pub::cloud::DocumentCloudService;
use flowy_folder_pub::cloud::FolderCloudService;
use flowy_server_pub::supabase_config::SupabaseConfiguration;
use flowy_storage_pub::cloud::StorageCloudService;
use flowy_user_pub::cloud::UserCloudService;
use crate::supabase::api::{
@ -18,8 +18,7 @@ use crate::supabase::api::{
SupabaseCollabStorageImpl, SupabaseDatabaseServiceImpl, SupabaseDocumentServiceImpl,
SupabaseFolderServiceImpl, SupabaseServerServiceImpl, SupabaseUserServiceImpl,
};
use crate::supabase::file_storage::core::SupabaseFileStorage;
use crate::supabase::file_storage::FileStoragePlanImpl;
use crate::{AppFlowyEncryption, AppFlowyServer};
/// https://www.pgbouncer.org/features.html
@ -63,10 +62,10 @@ pub struct SupabaseServer {
#[allow(dead_code)]
config: SupabaseConfiguration,
device_id: String,
#[allow(dead_code)]
uid: Arc<RwLock<Option<i64>>>,
collab_update_sender: Arc<CollabUpdateSenderByOid>,
restful_postgres: Arc<RwLock<Option<Arc<RESTfulPostgresServer>>>>,
file_storage: Arc<RwLock<Option<Arc<SupabaseFileStorage>>>>,
encryption: Weak<dyn AppFlowyEncryption>,
}
@ -87,23 +86,11 @@ impl SupabaseServer {
} else {
None
};
let file_storage = if enable_sync {
let plan = FileStoragePlanImpl::new(
Arc::downgrade(&uid),
restful_postgres.as_ref().map(Arc::downgrade),
);
Some(Arc::new(
SupabaseFileStorage::new(&config, encryption.clone(), Arc::new(plan)).unwrap(),
))
} else {
None
};
Self {
config,
device_id,
collab_update_sender,
restful_postgres: Arc::new(RwLock::new(restful_postgres)),
file_storage: Arc::new(RwLock::new(file_storage)),
encryption,
uid,
}
@ -119,19 +106,8 @@ impl AppFlowyServer for SupabaseServer {
let postgres = RESTfulPostgresServer::new(self.config.clone(), self.encryption.clone());
*self.restful_postgres.write() = Some(Arc::new(postgres));
}
if self.file_storage.read().is_none() {
let plan = FileStoragePlanImpl::new(
Arc::downgrade(&self.uid),
self.restful_postgres.read().as_ref().map(Arc::downgrade),
);
let file_storage =
SupabaseFileStorage::new(&self.config, self.encryption.clone(), Arc::new(plan)).unwrap();
*self.file_storage.write() = Some(Arc::new(file_storage));
}
} else {
*self.restful_postgres.write() = None;
*self.file_storage.write() = None;
}
}
@ -188,12 +164,8 @@ impl AppFlowyServer for SupabaseServer {
)))
}
fn file_storage(&self) -> Option<Arc<dyn ObjectStorageService>> {
self
.file_storage
.read()
.clone()
.map(|s| s as Arc<dyn ObjectStorageService>)
fn file_storage(&self) -> Option<Arc<dyn StorageCloudService>> {
None
}
fn search_service(&self) -> Option<Arc<dyn SearchCloudService>> {

View File

@ -5,7 +5,7 @@ use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
mod af_cloud_test;
mod supabase_test;
// mod supabase_test;
pub fn setup_log() {
static START: Once = Once::new();

View File

@ -1,4 +1,3 @@
use flowy_storage::ObjectStorageService;
use std::collections::HashMap;
use std::sync::Arc;
@ -16,10 +15,8 @@ use flowy_server::supabase::api::{
SupabaseFolderServiceImpl, SupabaseServerServiceImpl, SupabaseUserServiceImpl,
};
use flowy_server::supabase::define::{USER_DEVICE_ID, USER_EMAIL, USER_UUID};
use flowy_server::supabase::file_storage::core::SupabaseFileStorage;
use flowy_server::{AppFlowyEncryption, EncryptionImpl};
use flowy_server_pub::supabase_config::SupabaseConfiguration;
use flowy_storage::{FileStoragePlan, StorageObject};
use flowy_user_pub::cloud::UserCloudService;
use lib_infra::future::FutureResult;
@ -63,7 +60,7 @@ pub fn folder_service() -> Arc<dyn FolderCloudService> {
}
#[allow(dead_code)]
pub fn file_storage_service() -> Arc<dyn ObjectStorageService> {
pub fn file_storage_service() -> Arc<dyn ObjectStorageCloudService> {
let encryption_impl: Arc<dyn AppFlowyEncryption> = Arc::new(EncryptionImpl::new(None));
let config = SupabaseConfiguration::from_env().unwrap();
Arc::new(
@ -163,19 +160,3 @@ pub fn third_party_sign_up_param(uuid: String) -> HashMap<String, String> {
}
pub struct TestFileStoragePlan;
impl FileStoragePlan for TestFileStoragePlan {
fn storage_size(&self) -> FutureResult<u64, FlowyError> {
// 1 GB
FutureResult::new(async { Ok(1024 * 1024 * 1024) })
}
fn maximum_file_size(&self) -> FutureResult<u64, FlowyError> {
// 5 MB
FutureResult::new(async { Ok(5 * 1024 * 1024) })
}
fn check_upload_object(&self, _object: &StorageObject) -> FutureResult<(), FlowyError> {
FutureResult::new(async { Ok(()) })
}
}

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
-- `up.sql` creates BOTH tables; the original down migration only dropped
-- `upload_file_table`, leaving `upload_file_part` behind after a rollback.
drop table upload_file_table;
drop table upload_file_part;

View File

@ -0,0 +1,20 @@
-- Your SQL goes here
-- One row per pending multipart upload of a local file. The composite key
-- (workspace_id, parent_dir, file_id) matches the lookup used by resume_upload.
CREATE TABLE upload_file_table (
workspace_id TEXT NOT NULL,
file_id TEXT NOT NULL,
parent_dir TEXT NOT NULL,
-- Path of the cached temp copy that is deleted once the upload finishes.
local_file_path TEXT NOT NULL,
content_type TEXT NOT NULL,
chunk_size INTEGER NOT NULL,
num_chunk INTEGER NOT NULL,
-- Server-issued multipart upload id; empty until create_upload succeeds.
upload_id TEXT NOT NULL DEFAULT '',
created_at BIGINT NOT NULL,
PRIMARY KEY (workspace_id, parent_dir, file_id)
);
-- One row per part already uploaded, so an interrupted upload can resume
-- without re-sending completed parts. e_tag is the receipt from the server.
CREATE TABLE upload_file_part (
upload_id TEXT NOT NULL,
e_tag TEXT NOT NULL,
part_num INTEGER NOT NULL,
PRIMARY KEY (upload_id, e_tag)
);

View File

@ -32,6 +32,28 @@ diesel::table! {
}
}
diesel::table! {
    // Receipt for one uploaded part of a multipart upload; mirrors the
    // `upload_file_part` SQL migration.
    upload_file_part (upload_id, e_tag) {
        upload_id -> Text,
        e_tag -> Text,
        part_num -> Integer,
    }
}

diesel::table! {
    // Metadata for a pending multipart upload; mirrors the
    // `upload_file_table` SQL migration.
    upload_file_table (workspace_id, file_id, parent_dir) {
        workspace_id -> Text,
        file_id -> Text,
        parent_dir -> Text,
        local_file_path -> Text,
        content_type -> Text,
        chunk_size -> Integer,
        num_chunk -> Integer,
        upload_id -> Text,
        created_at -> BigInt,
    }
}
diesel::table! {
user_data_migration_records (id) {
id -> Integer,
@ -80,11 +102,13 @@ diesel::table! {
}
diesel::allow_tables_to_appear_in_same_query!(
chat_message_table,
chat_table,
collab_snapshot,
user_data_migration_records,
user_table,
user_workspace_table,
workspace_members_table,
chat_message_table,
chat_table,
collab_snapshot,
upload_file_part,
upload_file_table,
user_data_migration_records,
user_table,
user_workspace_table,
workspace_members_table,
);

View File

@ -0,0 +1,19 @@
[package]
name = "flowy-storage-pub"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
lib-infra.workspace = true
serde_json.workspace = true
serde.workspace = true
async-trait.workspace = true
mime = "0.3.17"
flowy-error = { workspace = true, features = ["impl_from_reqwest"] }
bytes.workspace = true
mime_guess = "2.0.4"
client-api-entity = { workspace = true }
tokio = { workspace = true, features = ["sync", "io-util"] }
anyhow = "1.0.86"

View File

@ -0,0 +1,239 @@
use anyhow::anyhow;
use bytes::Bytes;
use std::fmt::Display;
use std::ops::Deref;
use std::path::Path;
use tokio::io::AsyncReadExt;
/// In Amazon S3, the minimum chunk size for multipart uploads is 5 MB,except for the last part,
/// which can be smaller.(https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
pub const MIN_CHUNK_SIZE: usize = 5 * 1024 * 1024; // Minimum Chunk Size 5 MB
/// A byte buffer together with precomputed half-open `(start, end)` chunk
/// ranges, used to feed multipart uploads one part at a time.
pub struct ChunkedBytes {
  // The full payload.
  pub data: Bytes,
  // Chunk size in bytes; the checked constructors enforce >= MIN_CHUNK_SIZE.
  pub chunk_size: i32,
  // Half-open `(start, end)` byte ranges into `data`, one per chunk.
  pub offsets: Vec<(usize, usize)>,
  // Chunk index that `iter()` starts from; used to resume partial uploads.
  pub current_offset: i32,
}
// Dereferences to the underlying `Bytes` so callers can read the whole
// payload (e.g. `len()`) without going through `.data`.
impl Deref for ChunkedBytes {
  type Target = Bytes;

  fn deref(&self) -> &Self::Target {
    &self.data
  }
}

// Human-readable summary for logs; intentionally omits the raw data.
impl Display for ChunkedBytes {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(
      f,
      "ChunkedBytes: chunk_size: {}, offsets: {:?}, current_offset: {}",
      self.chunk_size, self.offsets, self.current_offset
    )
  }
}
impl ChunkedBytes {
pub fn from_bytes_with_chunk_size(data: Bytes, chunk_size: i32) -> Result<Self, anyhow::Error> {
if chunk_size < MIN_CHUNK_SIZE as i32 {
return Err(anyhow!(
"Chunk size should be greater than or equal to {}",
MIN_CHUNK_SIZE
));
}
let offsets = split_into_chunks(&data, chunk_size as usize);
Ok(ChunkedBytes {
data,
offsets,
chunk_size,
current_offset: 0,
})
}
/// Used to create a `ChunkedBytes` from a `Bytes` object. The default chunk size is 5 MB.
pub fn from_bytes(data: Bytes) -> Self {
let chunk_size = MIN_CHUNK_SIZE as i32;
let offsets = split_into_chunks(&data, MIN_CHUNK_SIZE);
ChunkedBytes {
data,
offsets,
chunk_size,
current_offset: 0,
}
}
pub async fn from_file<P: AsRef<Path>>(
file_path: P,
chunk_size: i32,
) -> Result<Self, tokio::io::Error> {
let mut file = tokio::fs::File::open(file_path).await?;
let mut buffer = Vec::new();
file.read_to_end(&mut buffer).await?;
let data = Bytes::from(buffer);
let offsets = split_into_chunks(&data, chunk_size as usize);
Ok(ChunkedBytes {
data,
offsets,
chunk_size,
current_offset: 0,
})
}
pub fn set_current_offset(&mut self, offset: i32) {
self.current_offset = offset;
}
pub fn iter(&self) -> ChunkedBytesIterator {
ChunkedBytesIterator {
chunked_data: self,
current_index: self.current_offset as usize,
}
}
}
/// Iterator over the chunks of a [`ChunkedBytes`], starting at whatever chunk
/// index the parent was positioned at when `iter()` was called.
pub struct ChunkedBytesIterator<'a> {
  chunked_data: &'a ChunkedBytes,
  current_index: usize,
}

impl<'a> Iterator for ChunkedBytesIterator<'a> {
  type Item = Bytes;

  // Each item is a cheap zero-copy slice of the shared underlying buffer.
  fn next(&mut self) -> Option<Self::Item> {
    let &(start, end) = self.chunked_data.offsets.get(self.current_index)?;
    self.current_index += 1;
    Some(self.chunked_data.data.slice(start..end))
  }
}
// Function to split input bytes into several chunks and return offsets
/// Computes half-open `(start, end)` byte ranges covering `data` in steps of
/// `chunk_size`; only the final range may be shorter than `chunk_size`.
/// Assumes `chunk_size > 0` (callers validate against `MIN_CHUNK_SIZE`).
pub fn split_into_chunks(data: &Bytes, chunk_size: usize) -> Vec<(usize, usize)> {
  let total = data.len();
  let mut ranges = Vec::new();
  let mut begin = 0;
  while begin < total {
    let finish = total.min(begin + chunk_size);
    ranges.push((begin, finish));
    begin = finish;
  }
  ranges
}
// Function to get chunk data using chunk number
/// Returns the bytes of chunk `chunk_number` as described by `offsets`
/// (a zero-copy slice of `data`).
///
/// # Errors
/// Fails when `chunk_number` is outside the range of `offsets`.
pub async fn get_chunk(
  data: Bytes,
  chunk_number: usize,
  offsets: &[(usize, usize)],
) -> Result<Bytes, anyhow::Error> {
  match offsets.get(chunk_number) {
    Some(&(start, end)) => Ok(data.slice(start..end)),
    None => Err(anyhow!("Chunk number out of range")),
  }
}
#[cfg(test)]
mod tests {
  use crate::chunked_byte::{ChunkedBytes, MIN_CHUNK_SIZE};
  use bytes::Bytes;
  use std::env::temp_dir;
  use tokio::io::AsyncWriteExt;

  // Input smaller than one chunk yields exactly one (short) chunk.
  #[tokio::test]
  async fn test_chunked_bytes_less_than_chunk_size() {
    let data = Bytes::from(vec![0; 1024 * 1024]); // 1 MB of zeroes
    let chunked_data =
      ChunkedBytes::from_bytes_with_chunk_size(data.clone(), MIN_CHUNK_SIZE as i32).unwrap();

    // Check if the offsets are correct
    assert_eq!(chunked_data.offsets.len(), 1); // Should have 1 chunk
    assert_eq!(chunked_data.offsets[0], (0, 1024 * 1024));

    // Check if the data can be iterated correctly
    let mut iter = chunked_data.iter();
    assert_eq!(iter.next().unwrap().len(), 1024 * 1024);
    assert!(iter.next().is_none());
  }

  // 15 MB splits into exactly three full 5 MB chunks.
  #[tokio::test]
  async fn test_chunked_bytes_from_bytes() {
    let data = Bytes::from(vec![0; 15 * 1024 * 1024]); // 15 MB of zeroes
    let chunked_data =
      ChunkedBytes::from_bytes_with_chunk_size(data.clone(), MIN_CHUNK_SIZE as i32).unwrap();

    // Check if the offsets are correct
    assert_eq!(chunked_data.offsets.len(), 3); // Should have 3 chunks
    assert_eq!(chunked_data.offsets[0], (0, 5 * 1024 * 1024));
    assert_eq!(chunked_data.offsets[1], (5 * 1024 * 1024, 10 * 1024 * 1024));
    assert_eq!(
      chunked_data.offsets[2],
      (10 * 1024 * 1024, 15 * 1024 * 1024)
    );

    // Check if the data can be iterated correctly
    let mut iter = chunked_data.iter();
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024);
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024);
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024);
    assert!(iter.next().is_none());
  }

  // The file-based constructor must produce the same chunking as the
  // in-memory one for identical content.
  #[tokio::test]
  async fn test_chunked_bytes_from_file() {
    // Create a temporary file with 15 MB of zeroes
    let mut file_path = temp_dir();
    file_path.push("test_file");
    let mut file = tokio::fs::File::create(&file_path).await.unwrap();
    file.write_all(&vec![0; 15 * 1024 * 1024]).await.unwrap();
    file.flush().await.unwrap();

    // Read the file into ChunkedBytes
    let chunked_data = ChunkedBytes::from_file(&file_path, MIN_CHUNK_SIZE as i32)
      .await
      .unwrap();

    // Check if the offsets are correct
    assert_eq!(chunked_data.offsets.len(), 3); // Should have 3 chunks
    assert_eq!(chunked_data.offsets[0], (0, 5 * 1024 * 1024));
    assert_eq!(chunked_data.offsets[1], (5 * 1024 * 1024, 10 * 1024 * 1024));
    assert_eq!(
      chunked_data.offsets[2],
      (10 * 1024 * 1024, 15 * 1024 * 1024)
    );

    // Check if the data can be iterated correctly
    let mut iter = chunked_data.iter();
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024);
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024);
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024);
    assert!(iter.next().is_none());

    // Clean up the temporary file
    tokio::fs::remove_file(file_path).await.unwrap();
  }

  // set_current_offset(1) makes iteration skip the first chunk — this is the
  // resume-upload path.
  #[tokio::test]
  async fn test_chunked_bytes_with_current_offset() {
    let data = Bytes::from(vec![0; 15 * 1024 * 1024]); // 15 MB of zeroes
    let mut chunked_data =
      ChunkedBytes::from_bytes_with_chunk_size(data.clone(), MIN_CHUNK_SIZE as i32).unwrap();

    // Set the current offset to the second chunk
    chunked_data.set_current_offset(1);

    // Check if the iterator starts from the second chunk
    let mut iter = chunked_data.iter();
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024); // Second chunk
    assert_eq!(iter.next().unwrap().len(), 5 * 1024 * 1024); // Third chunk
    assert!(iter.next().is_none());
  }
}

View File

@ -0,0 +1,176 @@
use crate::storage::{CompletedPartRequest, CreateUploadResponse, UploadPartResponse};
use async_trait::async_trait;
use bytes::Bytes;
use flowy_error::{FlowyError, FlowyResult};
use lib_infra::future::FutureResult;
use mime::Mime;
#[async_trait]
pub trait StorageCloudService: Send + Sync {
  /// Resolves the remote URL for an existing storage object.
  /// (The original doc comment said "Creates a new storage object", a
  /// copy-paste from `put_object`.)
  ///
  /// # Returns
  /// - `Ok(url)`: the object's URL.
  /// - `Err(Error)`: An error occurred during the operation.
  fn get_object_url(&self, object_id: ObjectIdentity) -> FutureResult<String, FlowyError>;

  /// Creates a new storage object at `url` with the given value.
  ///
  /// # Parameters
  /// - `url`: url of the object to be created.
  ///
  /// # Returns
  /// - `Ok()`
  /// - `Err(Error)`: An error occurred during the operation.
  fn put_object(&self, url: String, object_value: ObjectValue) -> FutureResult<(), FlowyError>;

  /// Deletes a storage object by its URL.
  ///
  /// # Parameters
  /// - `url`: url of the object to be deleted.
  ///
  /// # Returns
  /// - `Ok()`
  /// - `Err(Error)`: An error occurred during the operation.
  fn delete_object(&self, url: &str) -> FutureResult<(), FlowyError>;

  /// Fetches a storage object by its URL.
  ///
  /// # Parameters
  /// - `url`: url of the object
  ///
  /// # Returns
  /// - `Ok(File)`: The returned file object.
  /// - `Err(Error)`: An error occurred during the operation.
  fn get_object(&self, url: String) -> FutureResult<ObjectValue, FlowyError>;

  /// Synchronously builds the URL for an object addressed by
  /// `(workspace_id, parent_dir, file_id)` — the v1 addressing scheme used
  /// by the multipart-upload flow below.
  fn get_object_url_v1(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    file_id: &str,
  ) -> FlowyResult<String>;

  /// Registers a new multipart upload; the response carries the upload id
  /// used by `upload_part` and `complete_upload`.
  async fn create_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    file_id: &str,
    content_type: &str,
  ) -> Result<CreateUploadResponse, FlowyError>;

  /// Uploads one part of a multipart upload and returns its receipt (e-tag).
  async fn upload_part(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    upload_id: &str,
    file_id: &str,
    part_number: i32,
    body: Vec<u8>,
  ) -> Result<UploadPartResponse, FlowyError>;

  /// Finalizes a multipart upload from the collected part receipts.
  async fn complete_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    upload_id: &str,
    file_id: &str,
    parts: Vec<CompletedPartRequest>,
  ) -> Result<(), FlowyError>;
}
/// Quota/limit policy for file storage (legacy, used by the Supabase path).
pub trait FileStoragePlan: Send + Sync + 'static {
  /// Total storage quota in bytes.
  fn storage_size(&self) -> FutureResult<u64, FlowyError>;
  /// Maximum size of a single file in bytes.
  fn maximum_file_size(&self) -> FutureResult<u64, FlowyError>;
  /// Validates `object` against the plan before uploading.
  fn check_upload_object(&self, object: &StorageObject) -> FutureResult<(), FlowyError>;
}
/// Addresses a storage object: workspace, file id, and file extension.
pub struct ObjectIdentity {
  pub workspace_id: String,
  pub file_id: String,
  // File extension (no leading dot assumed — TODO confirm against callers).
  pub ext: String,
}

/// Raw object payload plus its MIME type.
#[derive(Clone)]
pub struct ObjectValue {
  pub raw: Bytes,
  pub mime: Mime,
}
/// A named object to be stored in a workspace (legacy Supabase model).
pub struct StorageObject {
  pub workspace_id: String,
  pub file_name: String,
  pub value: ObjectValueSupabase,
}

/// Object payload source: either a path on disk or in-memory bytes with an
/// explicit MIME type.
pub enum ObjectValueSupabase {
  File { file_path: String },
  Bytes { bytes: Bytes, mime: String },
}
impl ObjectValueSupabase {
  /// Returns the value's MIME type: guessed from the file extension for the
  /// `File` variant (falling back to `application/octet-stream`), and taken
  /// verbatim for the `Bytes` variant.
  pub fn mime_type(&self) -> String {
    match self {
      ObjectValueSupabase::File { file_path } => {
        let guess = mime_guess::from_path(file_path).first_or_octet_stream();
        guess.to_string()
      },
      ObjectValueSupabase::Bytes { mime, .. } => mime.to_owned(),
    }
  }
}
impl StorageObject {
  /// Creates a `StorageObject` from a file.
  ///
  /// # Parameters
  /// * `workspace_id`: The workspace the object belongs to.
  /// * `file_name`: The name of the storage object.
  /// * `file_path`: The file path to the storage object's data.
  pub fn from_file<T: ToString>(workspace_id: &str, file_name: &str, file_path: T) -> Self {
    Self {
      workspace_id: workspace_id.to_string(),
      file_name: file_name.to_string(),
      value: ObjectValueSupabase::File {
        file_path: file_path.to_string(),
      },
    }
  }

  /// Creates a `StorageObject` from bytes.
  ///
  /// # Parameters
  /// * `workspace_id`: The workspace the object belongs to.
  /// * `file_name`: The name of the storage object.
  /// * `bytes`: The byte data of the storage object.
  /// * `mime`: The MIME type of the storage object.
  pub fn from_bytes<B: Into<Bytes>>(
    workspace_id: &str,
    file_name: &str,
    bytes: B,
    mime: String,
  ) -> Self {
    let bytes = bytes.into();
    Self {
      workspace_id: workspace_id.to_string(),
      file_name: file_name.to_string(),
      value: ObjectValueSupabase::Bytes { bytes, mime },
    }
  }

  /// Gets the file size of the `StorageObject` in bytes.
  ///
  /// # Panics
  /// Panics when the `File` variant points at a path whose metadata cannot
  /// be read (missing file, permission error). The original used a bare
  /// `unwrap()` on this I/O call; `expect` now states the invariant.
  pub fn file_size(&self) -> u64 {
    match &self.value {
      ObjectValueSupabase::File { file_path } => std::fs::metadata(file_path)
        .expect("StorageObject::file_size: file must exist and be readable")
        .len(),
      ObjectValueSupabase::Bytes { bytes, .. } => bytes.len() as u64,
    }
  }
}

View File

@ -0,0 +1,3 @@
pub mod chunked_byte;
pub mod cloud;
pub mod storage;

View File

@ -0,0 +1,53 @@
use crate::chunked_byte::ChunkedBytes;
use async_trait::async_trait;
pub use client_api_entity::{CompletedPartRequest, CreateUploadResponse, UploadPartResponse};
use flowy_error::{FlowyError, FlowyResult};
use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
#[async_trait]
pub trait StorageService: Send + Sync {
  /// Uploads the file at `local_file_path` and resolves to its remote URL.
  fn upload_object(
    &self,
    workspace_id: &str,
    local_file_path: &str,
  ) -> FutureResult<String, FlowyError>;

  /// Deletes the object at `url`. `local_file_path` identifies the cached
  /// local copy — presumably removed too; confirm against the implementation.
  fn delete_object(&self, url: String, local_file_path: String) -> FlowyResult<()>;

  /// Downloads the object at `url` into `local_file_path`.
  fn download_object(&self, url: String, local_file_path: String) -> FlowyResult<()>;

  /// Registers a new upload for `local_file_path` and returns the remote URL
  /// plus the generated file id (see [`CreatedUpload`]).
  fn create_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    local_file_path: &str,
  ) -> FutureResult<CreatedUpload, FlowyError>;

  /// Uploads all chunks of `chunks` for the upload described by `record`.
  /// `record` is an implementation-defined payload carried as a `BoxAny`.
  async fn start_upload(&self, chunks: &ChunkedBytes, record: &BoxAny) -> Result<(), FlowyError>;

  /// Resumes a previously interrupted upload identified by
  /// `(workspace_id, parent_dir, file_id)`.
  async fn resume_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    file_id: &str,
  ) -> Result<(), FlowyError>;
}
/// Result of registering a new upload: the object's eventual URL and the
/// generated file id.
pub struct CreatedUpload {
  pub url: String,
  pub file_id: String,
}

/// Progress notification for an in-flight upload.
#[derive(Debug, Clone)]
pub struct UploadResult {
  pub file_id: String,
  pub status: UploadStatus,
}

/// Lifecycle state of an upload.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum UploadStatus {
  Finish,
  Failed,
  InProgress,
}

View File

@ -3,21 +3,24 @@ name = "flowy-storage"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
reqwest = { version = "0.11", features = ["json", "stream"] }
flowy-storage-pub.workspace = true
serde_json.workspace = true
serde.workspace = true
async-trait.workspace = true
bytes.workspace = true
mime_guess = "2.0"
lib-infra = { workspace = true }
url = "2.2.2"
flowy-error = { workspace = true, features = ["impl_from_reqwest"] }
mime = "0.3.17"
tokio = { workspace = true, features = ["sync", "io-util"]}
flowy-error = { workspace = true, features = ["impl_from_reqwest", "impl_from_sqlite"] }
tokio = { workspace = true, features = ["sync", "io-util"] }
tracing.workspace = true
flowy-sqlite.workspace = true
mime_guess = "2.0.4"
fxhash = "0.2.1"
anyhow = "1.0.86"
chrono = "0.4.33"
[dev-dependencies]
tokio = { workspace = true, features = ["full"] }
uuid = "1.6.1"
rand = { version = "0.8", features = ["std_rng"] }

View File

@ -0,0 +1,88 @@
use std::path::{Path, PathBuf};
use tokio::fs::{self, File};
use tokio::io::{self, AsyncReadExt, AsyncWriteExt};
use tracing::error;
/// [FileTempStorage] is used to store the temporary files for uploading. After the file is uploaded,
/// the file will be deleted.
pub struct FileTempStorage {
  // Directory holding the temporary upload files; created in `new` if absent.
  storage_dir: PathBuf,
}
impl FileTempStorage {
  /// Creates a new `FileTempStorage` with the specified temporary directory.
  /// Directory creation is best-effort: a failure is logged and construction
  /// proceeds (subsequent file operations will surface the error).
  pub fn new(storage_dir: PathBuf) -> Self {
    if !storage_dir.exists() {
      if let Err(err) = std::fs::create_dir_all(&storage_dir) {
        error!("Failed to create temporary storage directory: {:?}", err);
      }
    }

    FileTempStorage { storage_dir }
  }

  /// Generates a temporary file path using the given file name.
  /// Note: names collide if two uploads share a file name — TODO confirm
  /// callers pass unique names.
  fn generate_temp_file_path_with_name(&self, file_name: &str) -> PathBuf {
    self.storage_dir.join(file_name)
  }

  /// Creates a temporary file from an existing local file path.
  /// The source file is COPIED (not moved); the caller still owns the
  /// original. Returns the temp file's path as a `String`.
  pub async fn create_temp_file_from_existing(
    &self,
    existing_file_path: &Path,
  ) -> io::Result<String> {
    let file_name = existing_file_path
      .file_name()
      .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "Invalid file name"))?
      .to_str()
      .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "Invalid file name"))?;

    let temp_file_path = self.generate_temp_file_path_with_name(file_name);
    fs::copy(existing_file_path, &temp_file_path).await?;
    Ok(
      temp_file_path
        .to_str()
        .ok_or(io::Error::new(
          io::ErrorKind::InvalidInput,
          "Invalid file path",
        ))?
        .to_owned(),
    )
  }

  /// Creates a temporary file from bytes and a specified file name.
  #[allow(dead_code)]
  pub async fn create_temp_file_from_bytes(
    &self,
    file_name: &str,
    data: &[u8],
  ) -> io::Result<PathBuf> {
    let temp_file_path = self.generate_temp_file_path_with_name(file_name);
    let mut file = File::create(&temp_file_path).await?;
    file.write_all(data).await?;
    Ok(temp_file_path)
  }

  /// Writes data to the specified temporary file, truncating any existing
  /// content (`File::create` semantics).
  #[allow(dead_code)]
  pub async fn write_to_temp_file(&self, file_path: &Path, data: &[u8]) -> io::Result<()> {
    let mut file = File::create(file_path).await?;
    file.write_all(data).await?;
    Ok(())
  }

  /// Reads the full contents of the specified temporary file.
  #[allow(dead_code)]
  pub async fn read_from_temp_file(&self, file_path: &Path) -> io::Result<Vec<u8>> {
    let mut file = File::open(file_path).await?;
    let mut data = Vec::new();
    file.read_to_end(&mut data).await?;
    Ok(data)
  }

  /// Deletes the specified temporary file (called once an upload finishes).
  pub async fn delete_temp_file<T: AsRef<Path>>(&self, file_path: T) -> io::Result<()> {
    fs::remove_file(file_path).await?;
    Ok(())
  }
}

View File

@ -1,154 +1,4 @@
if_native! {
mod native;
pub use native::*;
}
if_wasm! {
mod wasm;
pub use wasm::*;
}
use bytes::Bytes;
use flowy_error::FlowyError;
use lib_infra::future::FutureResult;
use lib_infra::{conditional_send_sync_trait, if_native, if_wasm};
use mime::Mime;
pub struct ObjectIdentity {
pub workspace_id: String,
pub file_id: String,
pub ext: String,
}
#[derive(Clone)]
pub struct ObjectValue {
pub raw: Bytes,
pub mime: Mime,
}
conditional_send_sync_trait! {
  "Provides a service for object storage. The trait includes methods for CRUD operations on storage objects.";
  ObjectStorageService {
    /// Returns the storage URL for the given object identity.
    ///
    /// # Parameters
    /// - `object_id`: identity (workspace, file id, extension) of the object.
    ///
    /// # Returns
    /// - `Ok(String)`: the URL under which the object is (or will be) stored.
    /// - `Err(Error)`: An error occurred during the operation.
    fn get_object_url(&self, object_id: ObjectIdentity) -> FutureResult<String, FlowyError>;

    /// Creates a new storage object at the given URL.
    ///
    /// # Parameters
    /// - `url`: url of the object to be created.
    /// - `object_value`: bytes and MIME type to store.
    ///
    /// # Returns
    /// - `Ok()`
    /// - `Err(Error)`: An error occurred during the operation.
    fn put_object(&self, url: String, object_value: ObjectValue) -> FutureResult<(), FlowyError>;

    /// Deletes a storage object by its URL.
    ///
    /// # Parameters
    /// - `url`: url of the object to be deleted.
    ///
    /// # Returns
    /// - `Ok()`
    /// - `Err(Error)`: An error occurred during the operation.
    fn delete_object(&self, url: String) -> FutureResult<(), FlowyError>;

    /// Fetches a storage object by its URL.
    ///
    /// # Parameters
    /// - `url`: url of the object
    ///
    /// # Returns
    /// - `Ok(File)`: The returned file object.
    /// - `Err(Error)`: An error occurred during the operation.
    fn get_object(&self, url: String) -> FutureResult<ObjectValue, FlowyError>;
  }
}
/// Quota/plan information for file storage: total usage, per-file limit, and a
/// pre-upload check against the plan's constraints.
pub trait FileStoragePlan: Send + Sync + 'static {
  /// Total storage currently used, in bytes.
  fn storage_size(&self) -> FutureResult<u64, FlowyError>;
  /// Maximum allowed size of a single file, in bytes.
  fn maximum_file_size(&self) -> FutureResult<u64, FlowyError>;
  /// Verifies that `object` may be uploaded under the current plan.
  fn check_upload_object(&self, object: &StorageObject) -> FutureResult<(), FlowyError>;
}
/// An object to upload (Supabase backend): its workspace, display name, and a
/// payload that is either an on-disk file path or in-memory bytes.
pub struct StorageObject {
  pub workspace_id: String,
  pub file_name: String,
  pub value: ObjectValueSupabase,
}
/// Payload of a Supabase storage object: a path to read from disk, or bytes
/// already held in memory together with their MIME type.
pub enum ObjectValueSupabase {
  File { file_path: String },
  Bytes { bytes: Bytes, mime: String },
}

impl ObjectValueSupabase {
  /// Returns the MIME type of the payload. For in-memory bytes the stored type
  /// is used; for a file path it is guessed from the extension, falling back to
  /// `application/octet-stream`.
  pub fn mime_type(&self) -> String {
    match self {
      Self::Bytes { mime, .. } => mime.clone(),
      Self::File { file_path } => {
        let guess = mime_guess::from_path(file_path);
        guess.first_or_octet_stream().to_string()
      },
    }
  }
}
impl StorageObject {
  /// Creates a `StorageObject` from a file.
  ///
  /// # Parameters
  ///
  /// * `workspace_id`: The workspace that owns the object.
  /// * `file_name`: The name of the storage object.
  /// * `file_path`: The file path to the storage object's data.
  ///
  pub fn from_file<T: ToString>(workspace_id: &str, file_name: &str, file_path: T) -> Self {
    Self {
      workspace_id: workspace_id.to_string(),
      file_name: file_name.to_string(),
      value: ObjectValueSupabase::File {
        file_path: file_path.to_string(),
      },
    }
  }

  /// Creates a `StorageObject` from bytes.
  ///
  /// # Parameters
  ///
  /// * `workspace_id`: The workspace that owns the object.
  /// * `file_name`: The name of the storage object.
  /// * `bytes`: The byte data of the storage object.
  /// * `mime`: The MIME type of the storage object.
  ///
  pub fn from_bytes<B: Into<Bytes>>(
    workspace_id: &str,
    file_name: &str,
    bytes: B,
    mime: String,
  ) -> Self {
    let bytes = bytes.into();
    Self {
      workspace_id: workspace_id.to_string(),
      file_name: file_name.to_string(),
      value: ObjectValueSupabase::Bytes { bytes, mime },
    }
  }

  /// Gets the file size of the `StorageObject`.
  ///
  /// # Returns
  ///
  /// The file size in bytes. For the `File` variant, returns 0 when the file is
  /// missing or unreadable instead of panicking (the previous `unwrap()` would
  /// abort on any I/O error).
  pub fn file_size(&self) -> u64 {
    match &self.value {
      ObjectValueSupabase::File { file_path } => std::fs::metadata(file_path)
        .map(|metadata| metadata.len())
        .unwrap_or(0),
      ObjectValueSupabase::Bytes { bytes, .. } => bytes.len() as u64,
    }
  }
}
mod file_cache;
pub mod manager;
pub mod sqlite_sql;
mod uploader;

View File

@ -0,0 +1,648 @@
use crate::file_cache::FileTempStorage;
use crate::sqlite_sql::{
batch_select_upload_file, delete_upload_file, insert_upload_file, insert_upload_part,
select_upload_file, select_upload_parts, update_upload_file_upload_id, UploadFilePartTable,
UploadFileTable,
};
use crate::uploader::{FileUploader, FileUploaderRunner, Signal, UploadTask, UploadTaskQueue};
use async_trait::async_trait;
use flowy_error::{FlowyError, FlowyResult};
use flowy_sqlite::DBConnection;
use flowy_storage_pub::chunked_byte::{ChunkedBytes, MIN_CHUNK_SIZE};
use flowy_storage_pub::cloud::{ObjectIdentity, ObjectValue, StorageCloudService};
use flowy_storage_pub::storage::{
CompletedPartRequest, CreatedUpload, StorageService, UploadPartResponse, UploadResult,
UploadStatus,
};
use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
use lib_infra::util::timestamp;
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::sync::watch;
use tracing::{debug, error, info, instrument, trace};
/// User-related facilities the storage layer depends on: identity, current
/// workspace, sqlite access, and the application's root directory.
pub trait StorageUserService: Send + Sync + 'static {
  /// Id of the signed-in user.
  fn user_id(&self) -> Result<i64, FlowyError>;
  /// Id of the user's current workspace.
  fn workspace_id(&self) -> Result<String, FlowyError>;
  /// A sqlite connection scoped to the given user id.
  fn sqlite_connection(&self, uid: i64) -> Result<DBConnection, FlowyError>;
  /// Root directory of the application on disk.
  fn get_application_root_dir(&self) -> &str;
}
/// Owns the upload pipeline: the public `StorageService`, the background
/// `FileUploader` that drains the task queue, and a broadcast channel that
/// publishes `UploadResult` progress events.
pub struct StorageManager {
  pub storage_service: Arc<dyn StorageService>,
  uploader: Arc<FileUploader>,
  broadcast: tokio::sync::broadcast::Sender<UploadResult>,
}
impl Drop for StorageManager {
  fn drop(&mut self) {
    // Log teardown to help diagnose an unexpectedly dropped upload pipeline.
    info!("[File] StorageManager is dropped");
  }
}
impl StorageManager {
  /// Builds the storage manager and starts the background upload machinery:
  /// a temp-file cache under `<app-root>/cache_files`, a priority task queue
  /// drained by a `FileUploader` (driven by `FileUploaderRunner`), and a
  /// broadcast channel for `UploadResult` events.
  pub fn new(
    cloud_service: Arc<dyn StorageCloudService>,
    user_service: Arc<dyn StorageUserService>,
  ) -> Self {
    let temp_storage_path = PathBuf::from(format!(
      "{}/cache_files",
      user_service.get_application_root_dir()
    ));
    let temp_storage = Arc::new(FileTempStorage::new(temp_storage_path));
    let (notifier, notifier_rx) = watch::channel(Signal::Proceed);
    let (broadcast, _) = tokio::sync::broadcast::channel::<UploadResult>(100);
    let task_queue = Arc::new(UploadTaskQueue::new(notifier));
    let storage_service = Arc::new(StorageServiceImpl {
      cloud_service,
      user_service: user_service.clone(),
      temp_storage,
      task_queue: task_queue.clone(),
      upload_status_notifier: broadcast.clone(),
    });
    let uploader = Arc::new(FileUploader::new(storage_service.clone(), task_queue));
    tokio::spawn(FileUploaderRunner::run(
      Arc::downgrade(&uploader),
      notifier_rx,
    ));
    // Requeue uploads persisted by a previous run. The weak reference lets this
    // delayed task become a no-op if the manager is dropped in the meantime.
    let weak_uploader = Arc::downgrade(&uploader);
    tokio::spawn(async move {
      // Start uploading after 30 seconds
      tokio::time::sleep(Duration::from_secs(30)).await;
      if let Some(uploader) = weak_uploader.upgrade() {
        if let Err(err) = prepare_upload_task(uploader, user_service).await {
          error!("prepare upload task failed: {}", err);
        }
      }
    });
    Self {
      storage_service,
      uploader,
      broadcast,
    }
  }

  /// Pauses uploads while the network is unreachable and resumes them once it
  /// is reachable again.
  pub fn update_network_reachable(&self, reachable: bool) {
    if reachable {
      self.uploader.resume();
    } else {
      self.uploader.pause();
    }
  }

  /// Subscribes to upload progress/completion events.
  pub fn subscribe_upload_result(&self) -> tokio::sync::broadcast::Receiver<UploadResult> {
    self.broadcast.subscribe()
  }
}
/// Loads up to 100 persisted upload records from sqlite and queues each one as
/// a background task so interrupted uploads resume after a restart.
async fn prepare_upload_task(
  uploader: Arc<FileUploader>,
  user_service: Arc<dyn StorageUserService>,
) -> FlowyResult<()> {
  let uid = user_service.user_id()?;
  let conn = user_service.sqlite_connection(uid)?;
  let mut tasks = Vec::new();
  for upload_file in batch_select_upload_file(conn, 100)? {
    tasks.push(UploadTask::BackgroundTask {
      workspace_id: upload_file.workspace_id,
      file_id: upload_file.file_id,
      parent_dir: upload_file.parent_dir,
      created_at: upload_file.created_at,
      retry_count: 0,
    });
  }
  info!("prepare upload task: {}", tasks.len());
  uploader.queue_tasks(tasks).await;
  Ok(())
}
/// Default `StorageService` implementation backed by the cloud storage API, a
/// sqlite-persisted upload queue, and a local temp-file cache.
pub struct StorageServiceImpl {
  cloud_service: Arc<dyn StorageCloudService>,
  user_service: Arc<dyn StorageUserService>,
  // Cached copies of files awaiting upload; removed once the upload completes.
  temp_storage: Arc<FileTempStorage>,
  task_queue: Arc<UploadTaskQueue>,
  // Broadcasts `UploadResult` progress events to subscribers.
  upload_status_notifier: tokio::sync::broadcast::Sender<UploadResult>,
}
#[async_trait]
impl StorageService for StorageServiceImpl {
  /// Uploads the file at `local_file_path` in a single request and returns the
  /// URL it was stored under.
  fn upload_object(
    &self,
    workspace_id: &str,
    local_file_path: &str,
  ) -> FutureResult<String, FlowyError> {
    let cloud_service = self.cloud_service.clone();
    let workspace_id = workspace_id.to_string();
    let local_file_path = local_file_path.to_string();
    FutureResult::new(async move {
      let (object_identity, object_value) =
        object_from_disk(&workspace_id, &local_file_path).await?;
      let url = cloud_service.get_object_url(object_identity).await?;
      match cloud_service.put_object(url.clone(), object_value).await {
        Ok(_) => {
          debug!("[File] success uploaded file to cloud: {}", url);
        },
        Err(err) => {
          error!("[File] upload file failed: {}", err);
          return Err(err);
        },
      }
      Ok(url)
    })
  }

  /// Deletes the local copy and the remote object in a background task.
  fn delete_object(&self, url: String, local_file_path: String) -> FlowyResult<()> {
    let cloud_service = self.cloud_service.clone();
    tokio::spawn(async move {
      match tokio::fs::remove_file(&local_file_path).await {
        Ok(_) => {
          debug!("[File] deleted file from local disk: {}", local_file_path)
        },
        Err(err) => {
          error!("[File] delete file at {} failed: {}", local_file_path, err);
        },
      }
      // Only log cloud deletion as successful when it actually succeeded; the
      // previous code emitted the success log unconditionally.
      match cloud_service.delete_object(&url).await {
        Ok(_) => {
          debug!("[File] deleted file from cloud: {}", url);
        },
        Err(e) => {
          // TODO: add WAL to log the delete operation.
          // keep a list of files to be deleted, and retry later
          error!("[File] delete file failed: {}", e);
        },
      }
    });
    Ok(())
  }

  /// Downloads `url` to `local_file_path` in a background task, skipping files
  /// that already exist on disk.
  fn download_object(&self, url: String, local_file_path: String) -> FlowyResult<()> {
    let cloud_service = self.cloud_service.clone();
    tokio::spawn(async move {
      if tokio::fs::metadata(&local_file_path).await.is_ok() {
        tracing::warn!("file already exist in user local disk: {}", local_file_path);
        return Ok(());
      }
      let object_value = cloud_service.get_object(url).await?;
      let mut file = tokio::fs::OpenOptions::new()
        .create(true)
        .truncate(true)
        .write(true)
        .open(&local_file_path)
        .await?;
      // `write_all` guarantees the whole payload reaches disk; plain `write`
      // may return after writing only part of the buffer.
      match file.write_all(&object_value.raw).await {
        Ok(()) => {
          info!(
            "downloaded {} bytes to file: {}",
            object_value.raw.len(),
            local_file_path
          );
        },
        Err(err) => {
          error!("write file failed: {}", err);
        },
      }
      Ok::<_, FlowyError>(())
    });
    Ok(())
  }

  /// Caches the file, records the upload in sqlite, queues it for background
  /// uploading, and returns the eventual URL plus the file id.
  fn create_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    file_path: &str,
  ) -> FutureResult<CreatedUpload, FlowyError> {
    // Validate inputs before doing any I/O.
    if workspace_id.is_empty() {
      return FutureResult::new(async {
        Err(FlowyError::internal().with_context("workspace id is empty"))
      });
    }

    if parent_dir.is_empty() {
      return FutureResult::new(async {
        Err(FlowyError::internal().with_context("parent dir is empty"))
      });
    }

    if file_path.is_empty() {
      return FutureResult::new(async {
        Err(FlowyError::internal().with_context("local file path is empty"))
      });
    }

    let workspace_id = workspace_id.to_string();
    let parent_dir = parent_dir.to_string();
    let file_path = file_path.to_string();
    let temp_storage = self.temp_storage.clone();
    let task_queue = self.task_queue.clone();
    let user_service = self.user_service.clone();
    let cloud_service = self.cloud_service.clone();
    FutureResult::new(async move {
      // Copy into temp storage so the original can be moved/deleted while the
      // upload is still pending.
      let local_file_path = temp_storage
        .create_temp_file_from_existing(Path::new(&file_path))
        .await
        .map_err(|err| {
          error!("[File] create temp file failed: {}", err);
          FlowyError::internal()
            .with_context(format!("create temp file for upload file failed: {}", err))
        })?;

      // 1. create a file record and chunk the file
      let (chunks, record) =
        create_upload_record(workspace_id, parent_dir, local_file_path).await?;

      // 2. save the record to sqlite
      let conn = user_service.sqlite_connection(user_service.user_id()?)?;
      insert_upload_file(conn, &record)?;

      // 3. generate url for given file
      let url = cloud_service.get_object_url_v1(
        &record.workspace_id,
        &record.parent_dir,
        &record.file_id,
      )?;
      let file_id = record.file_id.clone();
      task_queue
        .queue_task(UploadTask::Task {
          chunks,
          record,
          retry_count: 0,
        })
        .await;

      Ok::<_, FlowyError>(CreatedUpload { url, file_id })
    })
  }

  /// Runs the multipart upload for a queued task. Errors are logged rather than
  /// propagated; the caller requeues on failure via its own bookkeeping.
  async fn start_upload(&self, chunks: &ChunkedBytes, record: &BoxAny) -> Result<(), FlowyError> {
    let file_record = record.downcast_ref::<UploadFileTable>().ok_or_else(|| {
      FlowyError::internal().with_context("failed to downcast record to UploadFileTable")
    })?;

    if let Err(err) = start_upload(
      &self.cloud_service,
      &self.user_service,
      &self.temp_storage,
      chunks,
      file_record,
      self.upload_status_notifier.clone(),
    )
    .await
    {
      error!("[File] start upload failed: {}", err);
    }

    Ok(())
  }

  /// Resumes an interrupted upload using the persisted record and the parts
  /// that were already uploaded.
  async fn resume_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    file_id: &str,
  ) -> Result<(), FlowyError> {
    // Gathering the upload record and parts from the sqlite database.
    let record = {
      let mut conn = self
        .user_service
        .sqlite_connection(self.user_service.user_id()?)?;
      conn.immediate_transaction(|conn| {
        Ok::<_, FlowyError>(
          // When resuming an upload, check if the upload_id is empty.
          // If the upload_id is empty, the upload has likely not been created yet.
          // If the upload_id is not empty, verify which parts have already been uploaded.
          // (`map` replaces the previous `and_then` whose arms both returned `Some`.)
          select_upload_file(conn, workspace_id, parent_dir, file_id)?.map(|record| {
            if record.upload_id.is_empty() {
              (record, vec![])
            } else {
              let parts = select_upload_parts(conn, &record.upload_id).unwrap_or_default();
              (record, parts)
            }
          }),
        )
      })?
    };

    if let Some((upload_file, parts)) = record {
      resume_upload(
        &self.cloud_service,
        &self.user_service,
        &self.temp_storage,
        upload_file,
        parts,
        self.upload_status_notifier.clone(),
      )
      .await?;
    } else {
      error!("[File] resume upload failed: record not found");
    }
    Ok(())
  }
}
/// Reads the file at `local_file_path`, splits it into chunks, and builds the
/// sqlite record that describes the upload.
///
/// The file id combines a fxhash of the content with the file extension, so
/// identical content yields the same id. `upload_id` is left empty here and is
/// filled in once the upload is created on the server.
async fn create_upload_record(
  workspace_id: String,
  parent_dir: String,
  local_file_path: String,
) -> FlowyResult<(ChunkedBytes, UploadFileTable)> {
  // read file and chunk it base on CHUNK_SIZE. We use MIN_CHUNK_SIZE as the minimum chunk size
  let chunked_bytes = ChunkedBytes::from_file(&local_file_path, MIN_CHUNK_SIZE as i32).await?;
  let ext = Path::new(&local_file_path)
    .extension()
    .and_then(std::ffi::OsStr::to_str)
    .unwrap_or("")
    .to_owned();
  let content_type = mime_guess::from_path(&local_file_path)
    .first_or_octet_stream()
    .to_string();
  // The hash formats directly; the previous `.to_string()` inside `format!`
  // was a redundant conversion (clippy).
  let file_id = format!("{}.{}", fxhash::hash(&chunked_bytes.data), ext);
  let record = UploadFileTable {
    workspace_id,
    file_id,
    upload_id: "".to_string(),
    parent_dir,
    local_file_path,
    content_type,
    chunk_size: chunked_bytes.chunk_size,
    num_chunk: chunked_bytes.offsets.len() as i32,
    created_at: timestamp(),
  };
  Ok((chunked_bytes, record))
}
/// Drives a multipart upload for `upload_file`: creates the upload on the
/// server when no `upload_id` exists yet, uploads every chunk yielded by
/// `chunked_bytes`, and finally completes the upload (which also cleans up the
/// sqlite record and temp file). Progress is published through `notifier`
/// (`InProgress` here, `Finish` inside `complete_upload`).
#[instrument(level = "debug", skip_all, err)]
async fn start_upload(
  cloud_service: &Arc<dyn StorageCloudService>,
  user_service: &Arc<dyn StorageUserService>,
  temp_storage: &Arc<FileTempStorage>,
  chunked_bytes: &ChunkedBytes,
  upload_file: &UploadFileTable,
  notifier: tokio::sync::broadcast::Sender<UploadResult>,
) -> FlowyResult<()> {
  // Work on a local copy so a freshly created upload_id can be stored on it.
  let mut upload_file = upload_file.clone();
  if upload_file.upload_id.is_empty() {
    // 1. create upload
    trace!(
      "[File] create upload for workspace: {}, parent_dir: {}, file_id: {}",
      upload_file.workspace_id,
      upload_file.parent_dir,
      upload_file.file_id
    );
    let create_upload_resp = cloud_service
      .create_upload(
        &upload_file.workspace_id,
        &upload_file.parent_dir,
        &upload_file.file_id,
        &upload_file.content_type,
      )
      .await?;
    // 2. update upload_id
    // Persist the server-assigned id so a restart can resume this upload.
    let conn = user_service.sqlite_connection(user_service.user_id()?)?;
    update_upload_file_upload_id(
      conn,
      &upload_file.workspace_id,
      &upload_file.parent_dir,
      &upload_file.file_id,
      &create_upload_resp.upload_id,
    )?;
    trace!(
      "[File] {} update upload_id: {}",
      upload_file.file_id,
      create_upload_resp.upload_id
    );
    // temporary store the upload_id
    upload_file.upload_id = create_upload_resp.upload_id;
  }
  let _ = notifier.send(UploadResult {
    file_id: upload_file.file_id.clone(),
    status: UploadStatus::InProgress,
  });

  // 3. start uploading parts
  trace!(
    "[File] {} start uploading parts: {}",
    upload_file.file_id,
    chunked_bytes.iter().count()
  );
  let mut iter = chunked_bytes.iter().enumerate();
  // Parts uploaded by this call, sent to the server at completion time.
  // NOTE(review): on a resumed upload the parts recorded in sqlite are not
  // added to this list, and `part_number` restarts at 1 even though
  // `ChunkedBytes` may have been advanced past already-uploaded chunks via
  // `set_current_offset` — confirm the iterator/offset semantics make the
  // numbering and the completion request correct.
  let mut completed_parts = Vec::new();

  while let Some((index, chunk_bytes)) = iter.next() {
    let part_number = index as i32 + 1;
    trace!(
      "[File] {} uploading part: {}, len:{}KB",
      upload_file.file_id,
      part_number,
      chunk_bytes.len() / 1000,
    );
    // start uploading parts
    match upload_part(
      &cloud_service,
      &user_service,
      &upload_file.workspace_id,
      &upload_file.parent_dir,
      &upload_file.upload_id,
      &upload_file.file_id,
      part_number,
      chunk_bytes.to_vec(),
    )
    .await
    {
      Ok(resp) => {
        trace!(
          "[File] {} upload {} part success, total:{},",
          upload_file.file_id,
          part_number,
          chunked_bytes.offsets.len()
        );
        // gather completed part
        completed_parts.push(CompletedPartRequest {
          e_tag: resp.e_tag,
          part_number: resp.part_num,
        });
      },
      Err(err) => {
        // Propagate so the uploader can requeue this task with a retry count.
        error!("[File] {} upload part failed: {}", upload_file.file_id, err);
        return Err(err);
      },
    }
  }

  // mark it as completed
  complete_upload(
    &cloud_service,
    &user_service,
    temp_storage,
    &upload_file,
    completed_parts,
    notifier,
  )
  .await?;

  trace!("[File] {} upload completed", upload_file.file_id);
  Ok(())
}
/// Re-reads the cached file from disk and continues its multipart upload,
/// skipping the chunks whose parts were already uploaded (one chunk per
/// recorded part).
#[instrument(level = "debug", skip_all, err)]
async fn resume_upload(
  cloud_service: &Arc<dyn StorageCloudService>,
  user_service: &Arc<dyn StorageUserService>,
  temp_storage: &Arc<FileTempStorage>,
  upload_file: UploadFileTable,
  parts: Vec<UploadFilePartTable>,
  notifier: tokio::sync::broadcast::Sender<UploadResult>,
) -> FlowyResult<()> {
  trace!(
    "[File] resume upload for workspace: {}, parent_dir: {}, file_id: {}, local_file_path:{}",
    upload_file.workspace_id,
    upload_file.parent_dir,
    upload_file.file_id,
    upload_file.local_file_path
  );
  match ChunkedBytes::from_file(&upload_file.local_file_path, MIN_CHUNK_SIZE as i32).await {
    Ok(mut chunked_bytes) => {
      // When there were any parts already uploaded, skip those parts by setting the current offset.
      chunked_bytes.set_current_offset(parts.len() as i32);
      start_upload(
        cloud_service,
        user_service,
        temp_storage,
        &chunked_bytes,
        &upload_file,
        notifier,
      )
      .await?;
    },
    Err(err) => {
      // Reading the cached file failed; handle a missing file specially.
      match err.kind() {
        ErrorKind::NotFound => {
          // The cached copy is gone, so drop the stale record to stop retries.
          // NOTE(review): deletion is keyed on `upload_id`, which is empty when
          // the upload was never created on the server; in that case the
          // filter matches every record with an empty upload_id — confirm this
          // is intended.
          error!("[File] file not found: {}", upload_file.local_file_path);
          if let Ok(uid) = user_service.user_id() {
            if let Ok(conn) = user_service.sqlite_connection(uid) {
              delete_upload_file(conn, &upload_file.upload_id)?;
            }
          }
        },
        _ => {
          error!("[File] read file failed: {}", err);
        },
      }
    },
  }
  Ok(())
}
/// Uploads a single part and, on success, records it in sqlite so an
/// interrupted upload can later resume without re-sending this part.
#[instrument(level = "debug", skip_all)]
async fn upload_part(
  cloud_service: &Arc<dyn StorageCloudService>,
  user_service: &Arc<dyn StorageUserService>,
  workspace_id: &str,
  parent_dir: &str,
  upload_id: &str,
  file_id: &str,
  part_number: i32,
  body: Vec<u8>,
) -> Result<UploadPartResponse, FlowyError> {
  // The string parameters are already `&str`; re-borrowing them (previously
  // `&workspace_id`, etc.) produced `&&str` (clippy: needless_borrow).
  let resp = cloud_service
    .upload_part(
      workspace_id,
      parent_dir,
      upload_id,
      file_id,
      part_number,
      body,
    )
    .await?;

  // save uploaded part to sqlite
  let conn = user_service.sqlite_connection(user_service.user_id()?)?;
  insert_upload_part(
    conn,
    &UploadFilePartTable {
      upload_id: upload_id.to_string(),
      e_tag: resp.e_tag.clone(),
      part_num: resp.part_num,
    },
  )?;

  Ok(resp)
}
/// Finalizes a multipart upload on the server, then cleans up local state:
/// broadcasts `UploadStatus::Finish`, deletes the sqlite record, and removes
/// the cached temp file.
///
/// NOTE(review): a failure from the cloud `complete_upload` call is only
/// logged and this function still returns `Ok(())`, so the caller's
/// subsequent "upload completed" trace can be misleading. The sqlite record
/// is kept in that case, so the upload is retried as a background task on the
/// next launch — confirm this best-effort behavior is intended.
async fn complete_upload(
  cloud_service: &Arc<dyn StorageCloudService>,
  user_service: &Arc<dyn StorageUserService>,
  temp_storage: &Arc<FileTempStorage>,
  upload_file: &UploadFileTable,
  parts: Vec<CompletedPartRequest>,
  notifier: tokio::sync::broadcast::Sender<UploadResult>,
) -> Result<(), FlowyError> {
  match cloud_service
    .complete_upload(
      &upload_file.workspace_id,
      &upload_file.parent_dir,
      &upload_file.upload_id,
      &upload_file.file_id,
      parts,
    )
    .await
  {
    Ok(_) => {
      info!("[File] completed upload file: {}", upload_file.upload_id);
      trace!("[File] delete upload record from sqlite");
      let _ = notifier.send(UploadResult {
        file_id: upload_file.file_id.clone(),
        status: UploadStatus::Finish,
      });
      let conn = user_service.sqlite_connection(user_service.user_id()?)?;
      delete_upload_file(conn, &upload_file.upload_id)?;
      // Temp-file removal is best-effort; the upload itself already succeeded.
      if let Err(err) = temp_storage
        .delete_temp_file(&upload_file.local_file_path)
        .await
      {
        error!("[File] delete temp file failed: {}", err);
      }
    },
    Err(err) => {
      error!("[File] complete upload failed: {}", err);
    },
  }
  Ok(())
}
/// Reads a file from disk and converts it into the identity/value pair used by
/// the object-storage service. The file id is a fxhash of the content, so
/// identical content maps to the same object; the MIME type is guessed from
/// the path.
pub async fn object_from_disk(
  workspace_id: &str,
  local_file_path: &str,
) -> Result<(ObjectIdentity, ObjectValue), FlowyError> {
  let path = Path::new(local_file_path);
  let ext = path
    .extension()
    .and_then(std::ffi::OsStr::to_str)
    .unwrap_or("")
    .to_owned();
  let mut content = Vec::new();
  let n = tokio::fs::File::open(local_file_path)
    .await?
    .read_to_end(&mut content)
    .await?;
  info!("read {} bytes from file: {}", n, local_file_path);
  let mime = mime_guess::from_path(local_file_path).first_or_octet_stream();
  let file_id = fxhash::hash(&content).to_string();
  Ok((
    ObjectIdentity {
      workspace_id: workspace_id.to_owned(),
      file_id,
      ext,
    },
    ObjectValue {
      raw: content.into(),
      mime,
    },
  ))
}

View File

@ -1,34 +0,0 @@
use crate::{ObjectIdentity, ObjectValue};
use flowy_error::FlowyError;
use std::path::Path;
use tokio::io::AsyncReadExt;
use tracing::info;
/// Reads a file from disk and converts it into the identity/value pair used by
/// the object-storage service. The file id is derived from a fxhash of the
/// content, so identical content maps to the same object; the MIME type is
/// guessed from the path.
pub async fn object_from_disk(
  workspace_id: &str,
  local_file_path: &str,
) -> Result<(ObjectIdentity, ObjectValue), FlowyError> {
  // Extension without the dot; empty string when the path has none.
  let ext = Path::new(local_file_path)
    .extension()
    .and_then(std::ffi::OsStr::to_str)
    .unwrap_or("")
    .to_owned();
  let mut file = tokio::fs::File::open(local_file_path).await?;
  let mut content = Vec::new();
  let n = file.read_to_end(&mut content).await?;
  info!("read {} bytes from file: {}", n, local_file_path);
  let mime = mime_guess::from_path(local_file_path).first_or_octet_stream();
  let hash = fxhash::hash(&content);
  Ok((
    ObjectIdentity {
      workspace_id: workspace_id.to_owned(),
      file_id: hash.to_string(),
      ext,
    },
    ObjectValue {
      raw: content.into(),
      mime,
    },
  ))
}

View File

@ -0,0 +1,161 @@
use flowy_error::{FlowyError, FlowyResult};
use flowy_sqlite::schema::{upload_file_part, upload_file_table};
use flowy_sqlite::{
diesel, AsChangeset, BoolExpressionMethods, DBConnection, ExpressionMethods, Identifiable,
Insertable, OptionalExtension, QueryDsl, Queryable, RunQueryDsl, SqliteConnection,
};
use tracing::warn;
/// Sqlite row describing a pending multipart upload, keyed by
/// (workspace_id, parent_dir, file_id).
#[derive(Queryable, Insertable, AsChangeset, Identifiable, Debug, Clone)]
#[diesel(table_name = upload_file_table)]
#[diesel(primary_key(workspace_id, parent_dir, file_id))]
pub struct UploadFileTable {
  pub workspace_id: String,
  // Content-hash-derived id including the file extension.
  pub file_id: String,
  pub parent_dir: String,
  // Path of the cached copy in temp storage that will be uploaded.
  pub local_file_path: String,
  pub content_type: String,
  // Size of each chunk in bytes.
  pub chunk_size: i32,
  // Total number of chunks the file was split into.
  pub num_chunk: i32,
  // Server-assigned multipart upload id; empty until the upload is created.
  pub upload_id: String,
  pub created_at: i64,
}

/// Sqlite row recording one successfully uploaded part of a multipart upload,
/// keyed by (upload_id, part_num).
#[derive(Queryable, Insertable, AsChangeset, Identifiable, Debug)]
#[diesel(table_name = upload_file_part)]
#[diesel(primary_key(upload_id, part_num))]
pub struct UploadFilePartTable {
  pub upload_id: String,
  pub e_tag: String,
  pub part_num: i32,
}
/// Returns whether an upload record exists for the given
/// (workspace, parent dir, file id) triple.
pub fn is_upload_file_exist(
  conn: &mut SqliteConnection,
  workspace_id: &str,
  parent_dir: &str,
  file_id: &str,
) -> FlowyResult<bool> {
  let key_filter = upload_file_table::workspace_id
    .eq(workspace_id)
    .and(upload_file_table::parent_dir.eq(parent_dir))
    .and(upload_file_table::file_id.eq(file_id));
  let row = upload_file_table::dsl::upload_file_table
    .filter(key_filter)
    .first::<UploadFileTable>(conn)
    .optional()?;
  Ok(row.is_some())
}
/// Persists a new upload record.
pub fn insert_upload_file(
  mut conn: DBConnection,
  upload_file: &UploadFileTable,
) -> FlowyResult<()> {
  let _rows = diesel::insert_into(upload_file_table::table)
    .values(upload_file)
    .execute(&mut *conn)?;
  Ok(())
}
/// Stores the server-assigned `upload_id` on the matching upload record.
pub fn update_upload_file_upload_id(
  mut conn: DBConnection,
  workspace_id: &str,
  parent_dir: &str,
  file_id: &str,
  upload_id: &str,
) -> FlowyResult<()> {
  let key_filter = upload_file_table::workspace_id
    .eq(workspace_id)
    .and(upload_file_table::parent_dir.eq(parent_dir))
    .and(upload_file_table::file_id.eq(file_id));
  diesel::update(upload_file_table::dsl::upload_file_table.filter(key_filter))
    .set(upload_file_table::upload_id.eq(upload_id))
    .execute(&mut *conn)?;
  Ok(())
}
/// Records a successfully uploaded part.
pub fn insert_upload_part(
  mut conn: DBConnection,
  upload_part: &UploadFilePartTable,
) -> FlowyResult<()> {
  let _rows = diesel::insert_into(upload_file_part::table)
    .values(upload_part)
    .execute(&mut *conn)?;
  Ok(())
}
/// Returns the highest-numbered part recorded for `upload_id`, if any.
pub fn select_latest_upload_part(
  mut conn: DBConnection,
  upload_id: &str,
) -> FlowyResult<Option<UploadFilePartTable>> {
  let latest = upload_file_part::dsl::upload_file_part
    .filter(upload_file_part::upload_id.eq(upload_id))
    .order(upload_file_part::part_num.desc())
    .first::<UploadFilePartTable>(&mut *conn)
    .optional()?;
  Ok(latest)
}
/// Returns all recorded parts for `upload_id`, ordered by part number.
///
/// The explicit ORDER BY matters: without it SQLite gives no ordering
/// guarantee, while callers (and the tests) rely on parts coming back in
/// ascending `part_num` order.
pub fn select_upload_parts(
  conn: &mut SqliteConnection,
  upload_id: &str,
) -> FlowyResult<Vec<UploadFilePartTable>> {
  let results = upload_file_part::dsl::upload_file_part
    .filter(upload_file_part::upload_id.eq(upload_id))
    .order(upload_file_part::part_num.asc())
    .load::<UploadFilePartTable>(conn)?;
  Ok(results)
}
/// Returns up to `limit` upload records, newest first.
pub fn batch_select_upload_file(
  mut conn: DBConnection,
  limit: i32,
) -> FlowyResult<Vec<UploadFileTable>> {
  let rows = upload_file_table::dsl::upload_file_table
    .order(upload_file_table::created_at.desc())
    .limit(i64::from(limit))
    .load::<UploadFileTable>(&mut conn)?;
  Ok(rows)
}
/// Looks up the upload record for the given (workspace, parent dir, file id)
/// triple, if one exists.
pub fn select_upload_file(
  conn: &mut SqliteConnection,
  workspace_id: &str,
  parent_dir: &str,
  file_id: &str,
) -> FlowyResult<Option<UploadFileTable>> {
  let key_filter = upload_file_table::workspace_id
    .eq(workspace_id)
    .and(upload_file_table::parent_dir.eq(parent_dir))
    .and(upload_file_table::file_id.eq(file_id));
  let row = upload_file_table::dsl::upload_file_table
    .filter(key_filter)
    .first::<UploadFileTable>(conn)
    .optional()?;
  Ok(row)
}
/// Deletes the upload record identified by `upload_id` together with its
/// recorded parts, inside a single transaction.
///
/// NOTE(review): rows are matched on `upload_id` alone. Records whose upload
/// has not been created on the server store an empty `upload_id`, so calling
/// this with an empty id would match every such pending record — confirm
/// callers never do that.
pub fn delete_upload_file(mut conn: DBConnection, upload_id: &str) -> FlowyResult<()> {
  conn.immediate_transaction(|conn| {
    diesel::delete(
      upload_file_table::dsl::upload_file_table.filter(upload_file_table::upload_id.eq(upload_id)),
    )
    .execute(&mut *conn)?;

    // Part deletion is best-effort: a failure is logged but does not abort
    // the transaction.
    if let Err(err) = diesel::delete(
      upload_file_part::dsl::upload_file_part.filter(upload_file_part::upload_id.eq(upload_id)),
    )
    .execute(&mut *conn)
    {
      warn!("Failed to delete upload parts: {:?}", err)
    }
    Ok::<_, FlowyError>(())
  })?;
  Ok(())
}

View File

@ -0,0 +1,294 @@
use crate::sqlite_sql::UploadFileTable;
use crate::uploader::UploadTask::BackgroundTask;
use flowy_storage_pub::chunked_byte::ChunkedBytes;
use flowy_storage_pub::storage::StorageService;
use lib_infra::box_any::BoxAny;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::fmt::Display;
use std::sync::atomic::{AtomicBool, AtomicU8};
use std::sync::{Arc, Weak};
use std::time::Duration;
use tokio::sync::{watch, RwLock};
use tracing::{info, trace};
/// Control messages that drive the `FileUploaderRunner` loop.
#[derive(Clone)]
pub enum Signal {
  // Shut the runner loop down.
  Stop,
  // Process the next queued task immediately.
  Proceed,
  // Process the next queued task after the given number of seconds.
  ProceedAfterSecs(u64),
}

/// Priority queue of pending upload tasks; every push notifies the runner
/// through the watch channel.
pub struct UploadTaskQueue {
  tasks: RwLock<BinaryHeap<UploadTask>>,
  notifier: watch::Sender<Signal>,
}
impl UploadTaskQueue {
pub fn new(notifier: watch::Sender<Signal>) -> Self {
Self {
tasks: Default::default(),
notifier,
}
}
pub async fn queue_task(&self, task: UploadTask) {
trace!("[File] Queued task: {}", task);
self.tasks.write().await.push(task);
let _ = self.notifier.send(Signal::Proceed);
}
}
/// Drains the shared `UploadTaskQueue`, limiting concurrent uploads and
/// supporting pause/resume (used when the network becomes unreachable).
pub struct FileUploader {
  storage_service: Arc<dyn StorageService>,
  queue: Arc<UploadTaskQueue>,
  // Maximum number of uploads allowed to run at the same time.
  max_uploads: u8,
  // Number of uploads currently in flight.
  current_uploads: AtomicU8,
  // While true, `process_next` is a no-op until `resume` is called.
  pause_sync: AtomicBool,
}
impl Drop for FileUploader {
  fn drop(&mut self) {
    // Tell the runner loop to exit so it does not outlive the uploader.
    let _ = self.queue.notifier.send(Signal::Stop);
  }
}
impl FileUploader {
  /// Creates an uploader that allows at most 3 concurrent uploads and starts
  /// unpaused.
  pub fn new(storage_service: Arc<dyn StorageService>, queue: Arc<UploadTaskQueue>) -> Self {
    Self {
      storage_service,
      queue,
      max_uploads: 3,
      current_uploads: Default::default(),
      pause_sync: Default::default(),
    }
  }

  /// Queues a batch of tasks, waking the runner once.
  pub async fn queue_tasks(&self, tasks: Vec<UploadTask>) {
    let mut queue_lock = self.queue.tasks.write().await;
    for task in tasks {
      queue_lock.push(task);
    }
    let _ = self.queue.notifier.send(Signal::Proceed);
  }

  /// Pauses processing; queued tasks are kept but `process_next` becomes a
  /// no-op until `resume` is called.
  pub fn pause(&self) {
    self
      .pause_sync
      .store(true, std::sync::atomic::Ordering::SeqCst);
  }

  /// Resumes processing and nudges the runner after a short delay.
  pub fn resume(&self) {
    self
      .pause_sync
      .store(false, std::sync::atomic::Ordering::SeqCst);
    let _ = self.queue.notifier.send(Signal::ProceedAfterSecs(3));
  }

  /// Pops and runs the highest-priority task, requeuing it with an
  /// incremented retry count on failure. Always returns `None`; the runner is
  /// driven by the `Signal`s this method sends, not by its return value.
  pub async fn process_next(&self) -> Option<()> {
    // Do not proceed if the uploader is paused.
    // Load with SeqCst to match the ordering used by `pause`/`resume`; the
    // previous `Relaxed` load was inconsistent with those stores.
    if self.pause_sync.load(std::sync::atomic::Ordering::SeqCst) {
      return None;
    }

    trace!(
      "[File] Max concurrent uploads: {}, current: {}",
      self.max_uploads,
      self
        .current_uploads
        .load(std::sync::atomic::Ordering::SeqCst)
    );

    if self
      .current_uploads
      .load(std::sync::atomic::Ordering::SeqCst)
      >= self.max_uploads
    {
      // If the current uploads count is greater than or equal to the max uploads, do not proceed.
      let _ = self.queue.notifier.send(Signal::ProceedAfterSecs(10));
      return None;
    }

    let task = self.queue.tasks.write().await.pop()?;
    if task.retry_count() > 5 {
      // If the task has been retried more than 5 times, we should not retry it anymore.
      // The task is dropped here; its sqlite record remains, so it will be
      // picked up again as a background task by `prepare_upload_task` on the
      // next launch.
      let _ = self.queue.notifier.send(Signal::ProceedAfterSecs(2));
      return None;
    }

    // increment the current uploads count
    self
      .current_uploads
      .fetch_add(1, std::sync::atomic::Ordering::SeqCst);
    match task {
      UploadTask::Task {
        chunks,
        record,
        mut retry_count,
      } => {
        let record = BoxAny::new(record);
        if let Err(err) = self.storage_service.start_upload(&chunks, &record).await {
          info!(
            "Failed to upload file: {}, retry_count:{}",
            err, retry_count
          );
          // Safe to unwrap: the BoxAny was created from an UploadFileTable above.
          let record = record.unbox_or_error().unwrap();
          retry_count += 1;
          self.queue.tasks.write().await.push(UploadTask::Task {
            chunks,
            record,
            retry_count,
          });
        }
      },
      UploadTask::BackgroundTask {
        workspace_id,
        parent_dir,
        file_id,
        created_at,
        mut retry_count,
      } => {
        if let Err(err) = self
          .storage_service
          .resume_upload(&workspace_id, &parent_dir, &file_id)
          .await
        {
          info!(
            "Failed to resume upload file: {}, retry_count:{}",
            err, retry_count
          );
          retry_count += 1;
          self.queue.tasks.write().await.push(BackgroundTask {
            workspace_id,
            parent_dir,
            file_id,
            created_at,
            retry_count,
          });
        }
      },
    }
    self
      .current_uploads
      .fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
    let _ = self.queue.notifier.send(Signal::ProceedAfterSecs(2));

    None
  }
}
/// Event loop that drives a `FileUploader` from the `Signal`s sent on the
/// watch channel. It exits when the channel closes, `Signal::Stop` arrives, or
/// the uploader has been dropped.
pub struct FileUploaderRunner;

impl FileUploaderRunner {
  pub async fn run(weak_uploader: Weak<FileUploader>, mut notifier: watch::Receiver<Signal>) {
    loop {
      // stops the runner if the notifier was closed.
      if notifier.changed().await.is_err() {
        break;
      }

      // Holding only a Weak reference lets the loop shut down once the owning
      // StorageManager (and thus the uploader) is dropped.
      if let Some(uploader) = weak_uploader.upgrade() {
        let value = notifier.borrow().clone();
        match value {
          Signal::Stop => break,
          Signal::Proceed => {
            tokio::spawn(async move {
              uploader.process_next().await;
            });
          },
          Signal::ProceedAfterSecs(secs) => {
            // Delay before processing, e.g. retry backoff or resume grace period.
            tokio::time::sleep(Duration::from_secs(secs)).await;
            tokio::spawn(async move {
              uploader.process_next().await;
            });
          },
        }
      } else {
        break;
      }
    }
  }
}
/// A unit of upload work, prioritized by the `Ord` impl below.
pub enum UploadTask {
  /// A freshly created upload whose file content is already chunked in memory.
  Task {
    chunks: ChunkedBytes,
    record: UploadFileTable,
    retry_count: u8,
  },
  /// An upload restored from sqlite (e.g. after a restart); the file is
  /// re-read from disk when the task runs (see `resume_upload`).
  BackgroundTask {
    workspace_id: String,
    file_id: String,
    parent_dir: String,
    created_at: i64,
    retry_count: u8,
  },
}
impl UploadTask {
  /// Number of times this task has already been retried.
  pub fn retry_count(&self) -> u8 {
    match self {
      Self::Task { retry_count, .. } | Self::BackgroundTask { retry_count, .. } => *retry_count,
    }
  }
}
impl Display for UploadTask {
  /// Renders the task kind plus the file id it targets, for trace logging.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      Self::BackgroundTask { file_id, .. } => write!(f, "BackgroundTask: {}", file_id),
      Self::Task { record, .. } => write!(f, "Task: {}", record.file_id),
    }
  }
}
impl Eq for UploadTask {}

impl PartialEq for UploadTask {
  /// Two `Task`s are equal when they point at the same local file; two
  /// `BackgroundTask`s when they target the same workspace/file pair. Tasks of
  /// different kinds are never equal.
  ///
  /// NOTE(review): equality is keyed differently from `Ord` below (which
  /// compares `created_at`), so `a == b` does not imply
  /// `a.cmp(&b) == Ordering::Equal` — this technically violates the `Ord`
  /// contract; confirm it is acceptable for the `BinaryHeap` usage.
  fn eq(&self, other: &Self) -> bool {
    match (self, other) {
      (Self::Task { record: lhs, .. }, Self::Task { record: rhs, .. }) => {
        lhs.local_file_path == rhs.local_file_path
      },
      (
        Self::BackgroundTask {
          workspace_id: l_workspace_id,
          file_id: l_file_id,
          ..
        },
        Self::BackgroundTask {
          workspace_id: r_workspace_id,
          file_id: r_file_id,
          ..
        },
      ) => l_workspace_id == r_workspace_id && l_file_id == r_file_id,
      _ => false,
    }
  }
}
impl PartialOrd for UploadTask {
  // Delegates to the total order defined by `Ord`.
  fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    Some(self.cmp(other))
  }
}
impl Ord for UploadTask {
  /// Priority order for the upload `BinaryHeap` (a max-heap: "greater" pops
  /// first): a `Task` always outranks a `BackgroundTask`, and within a variant
  /// a larger `created_at` (more recent) ranks higher, so newer uploads are
  /// processed first.
  fn cmp(&self, other: &Self) -> Ordering {
    match (self, other) {
      (Self::Task { record: lhs, .. }, Self::Task { record: rhs, .. }) => {
        lhs.created_at.cmp(&rhs.created_at)
      },
      // A BackgroundTask compared against a Task is always lower priority.
      (_, Self::Task { .. }) => Ordering::Less,
      (Self::Task { .. }, _) => Ordering::Greater,
      (
        Self::BackgroundTask {
          created_at: lhs, ..
        },
        Self::BackgroundTask {
          created_at: rhs, ..
        },
      ) => lhs.cmp(rhs),
    }
  }
}

View File

@ -1,12 +0,0 @@
use crate::{ObjectIdentity, ObjectValue};
use flowy_error::FlowyError;
/// Disk-backed object loading is not available on wasm32; always returns a
/// "not supported" error.
pub async fn object_from_disk(
  _workspace_id: &str,
  _local_file_path: &str,
) -> Result<(ObjectIdentity, ObjectValue), FlowyError> {
  // A plain &str suffices here; the previous `format!` had no interpolation
  // (clippy: useless_format).
  Err(FlowyError::not_support().with_context("object_from_disk is not implemented for wasm32"))
}

View File

@ -0,0 +1,181 @@
use flowy_sqlite::Database;
use flowy_storage::sqlite_sql::{
batch_select_upload_file, delete_upload_file, insert_upload_file, insert_upload_part,
select_latest_upload_part, select_upload_parts, UploadFilePartTable, UploadFileTable,
};
use flowy_storage_pub::chunked_byte::{ChunkedBytes, MIN_CHUNK_SIZE};
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use std::env::temp_dir;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::time::Duration;
/// Creates a throwaway sqlite database in the OS temp dir and returns it with
/// its path. The random suffix keeps concurrent tests from sharing a file.
pub fn test_database() -> (Database, PathBuf) {
  // `join` takes `impl AsRef<Path>`; borrowing the `format!` result was a
  // needless borrow (clippy).
  let db_path = temp_dir().join(format!("test-{}.db", generate_random_string(8)));
  (flowy_sqlite::init(&db_path).unwrap(), db_path)
}
/// Inserts five upload records, verifies `batch_select_upload_file` returns
/// them newest-first, then deletes them all.
#[tokio::test]
async fn test_insert_new_upload() {
  let (db, _) = test_database();

  let workspace_id = uuid::Uuid::new_v4().to_string();

  // test insert one upload file record
  let mut upload_ids = vec![];
  for _i in 0..5 {
    let upload_id = uuid::Uuid::new_v4().to_string();
    let local_file_path = create_temp_file_with_random_content(8 * 1024 * 1024).unwrap();
    let upload_file =
      create_upload_file_record(workspace_id.clone(), upload_id.clone(), local_file_path).await;
    upload_ids.push(upload_file.upload_id.clone());

    // insert
    let conn = db.get_connection().unwrap();
    insert_upload_file(conn, &upload_file).unwrap();
    // Sleep so each record gets a distinct `created_at` — presumably the
    // timestamp has second granularity; confirm before shortening this delay.
    tokio::time::sleep(Duration::from_secs(1)).await;
  }
  // `batch_select_upload_file` orders newest-first, so the expected order is
  // the reverse of insertion order.
  upload_ids.reverse();

  // select
  let conn = db.get_connection().unwrap();
  let records = batch_select_upload_file(conn, 100).unwrap();
  assert_eq!(records.len(), 5);
  // compare the upload id order is the same as upload_ids
  for i in 0..5 {
    assert_eq!(records[i].upload_id, upload_ids[i]);

    // delete
    let conn = db.get_connection().unwrap();
    delete_upload_file(conn, &records[i].upload_id).unwrap();
  }

  let conn = db.get_connection().unwrap();
  let records = batch_select_upload_file(conn, 100).unwrap();
  assert!(records.is_empty());
}
#[tokio::test]
async fn test_upload_part_test() {
  let (db, _) = test_database();
  let workspace_id = uuid::Uuid::new_v4().to_string();

  // One upload record backed by a 20 MB temp file.
  let upload_id = uuid::Uuid::new_v4().to_string();
  let local_file_path = create_temp_file_with_random_content(20 * 1024 * 1024).unwrap();
  let upload_file =
    create_upload_file_record(workspace_id.clone(), upload_id.clone(), local_file_path).await;

  let conn = db.get_connection().unwrap();
  insert_upload_file(conn, &upload_file).unwrap();
  tokio::time::sleep(Duration::from_secs(1)).await;

  // Record two completed parts for this upload, in order.
  for (e_tag, part_num) in [("1", 1), ("2", 2)] {
    let uploaded_part = UploadFilePartTable {
      upload_id: upload_id.clone(),
      e_tag: e_tag.to_string(),
      part_num,
    };
    let conn = db.get_connection().unwrap();
    insert_upload_part(conn, &uploaded_part).unwrap();
  }

  // The latest recorded part must be part 2.
  let conn = db.get_connection().unwrap();
  let latest = select_latest_upload_part(conn, &upload_id)
    .unwrap()
    .unwrap();
  assert_eq!(latest.part_num, 2);

  // Both parts come back, ordered by part number.
  let mut conn = db.get_connection().unwrap();
  let parts = select_upload_parts(&mut *conn, &upload_id).unwrap();
  assert_eq!(parts.len(), 2);
  assert_eq!(parts[0].part_num, 1);
  assert_eq!(parts[1].part_num, 2);

  // Deleting the upload file removes all of its recorded parts as well.
  let conn = db.get_connection().unwrap();
  delete_upload_file(conn, &upload_id).unwrap();
  let mut conn = db.get_connection().unwrap();
  let parts = select_upload_parts(&mut *conn, &upload_id).unwrap();
  assert!(parts.is_empty())
}
/// Returns a random alphanumeric string of exactly `len` characters.
pub fn generate_random_string(len: usize) -> String {
  thread_rng()
    .sample_iter(&Alphanumeric)
    .take(len)
    .map(char::from)
    .collect()
}
fn create_temp_file_with_random_content(
size_in_bytes: usize,
) -> Result<String, Box<dyn std::error::Error>> {
// Generate a random string of the specified size
let content: String = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(size_in_bytes)
.map(char::from)
.collect();
// Create a temporary file path
let file_path = std::env::temp_dir().join("test.txt");
// Write the content to the temporary file
let mut file = File::create(&file_path)?;
file.write_all(content.as_bytes())?;
// Return the file path
Ok(file_path.to_str().unwrap().to_string())
}
/// Builds an [`UploadFileTable`] row describing the file at `local_file_path`.
///
/// Chunks the file with [`MIN_CHUNK_SIZE`], guesses the content type from the
/// path (falling back to `application/octet-stream`), and derives the file id
/// by hashing the file bytes, so identical content yields the same id.
pub async fn create_upload_file_record(
  workspace_id: String,
  upload_id: String,
  local_file_path: String,
) -> UploadFileTable {
  let chunked_bytes = ChunkedBytes::from_file(&local_file_path, MIN_CHUNK_SIZE as i32)
    .await
    .unwrap();

  let content_type = mime_guess::from_path(&local_file_path)
    .first_or_octet_stream()
    .to_string();

  // Content-addressed id: hash of the full file bytes.
  let file_id = fxhash::hash(&chunked_bytes.data).to_string();

  // Return the struct directly (clippy::let_and_return).
  UploadFileTable {
    workspace_id,
    file_id,
    upload_id,
    parent_dir: "test".to_string(),
    local_file_path,
    content_type,
    chunk_size: MIN_CHUNK_SIZE as i32,
    num_chunk: chunked_bytes.offsets.len() as i32,
    created_at: chrono::Utc::now().timestamp(),
  }
}

View File

@ -88,6 +88,10 @@ impl AuthenticateUser {
PathBuf::from(self.user_paths.user_data_dir(uid)).join("indexes")
}
/// Returns the application's root data directory as a path string
/// (the directory that contains the per-user data dirs).
pub fn get_application_root_dir(&self) -> &str {
self.user_paths.root()
}
pub fn close_db(&self) -> FlowyResult<()> {
let session = self.get_session()?;
info!("Close db for user: {}", session.user_id);

View File

@ -63,6 +63,11 @@ impl UserPaths {
/// Data directory for the user identified by `uid`: `<root>/<uid>`.
pub(crate) fn user_data_dir(&self, uid: i64) -> String {
format!("{}/{}", self.root, uid)
}
/// The root directory of the application's data,
/// i.e. the parent directory of every per-user data dir.
pub(crate) fn root(&self) -> &str {
&self.root
}
}
impl UserDBPath for UserPaths {