test: Import folder test (#4321)

* fix: import old version appflowy data

* chore: add 037 test

* chore: add default appflowy cloud url

* chore: bump collab
This commit is contained in:
Nathan.fooo 2024-01-07 11:12:05 +08:00 committed by GitHub
parent 2557e4f3cc
commit 76416cfdba
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
33 changed files with 643 additions and 251 deletions

View File

@ -137,7 +137,7 @@ AuthenticatorType currentCloudType() {
Future<void> setAppFlowyCloudUrl(Option<String> url) async {
await url.fold(
() => getIt<KeyValueStorage>().remove(KVKeys.kAppflowyCloudBaseURL),
() => getIt<KeyValueStorage>().set(KVKeys.kAppflowyCloudBaseURL, ""),
(s) => getIt<KeyValueStorage>().set(KVKeys.kAppflowyCloudBaseURL, s),
);
}
@ -233,7 +233,7 @@ Future<String> getAppFlowyCloudUrl() async {
final result =
await getIt<KeyValueStorage>().get(KVKeys.kAppflowyCloudBaseURL);
return result.fold(
() => "",
() => "https://beta.appflowy.cloud",
(url) => url,
);
}

View File

@ -30,13 +30,13 @@ class AppFlowyCloudURLsBloc
await setAppFlowyCloudUrl(none());
} else {
validateUrl(state.updatedServerUrl).fold(
(error) => emit(state.copyWith(urlError: Some(error))),
(_) async {
(url) async {
if (state.config.base_url != state.updatedServerUrl) {
await setAppFlowyCloudUrl(Some(state.updatedServerUrl));
}
add(const AppFlowyCloudURLsEvent.didSaveConfig());
},
(err) => emit(state.copyWith(urlError: Some(err))),
);
}
},
@ -80,16 +80,16 @@ class AppFlowyCloudURLsState with _$AppFlowyCloudURLsState {
);
}
Either<String, ()> validateUrl(String url) {
Either<String, String> validateUrl(String url) {
try {
// Use Uri.parse to validate the url.
final uri = Uri.parse(url);
if (uri.isScheme('HTTP') || uri.isScheme('HTTPS')) {
return right(());
return left(uri.toString());
} else {
return left(LocaleKeys.settings_menu_invalidCloudURLScheme.tr());
return right(LocaleKeys.settings_menu_invalidCloudURLScheme.tr());
}
} catch (e) {
return left(e.toString());
return right(e.toString());
}
}

View File

@ -21,6 +21,16 @@ class SettingAppFlowyCloudView extends StatelessWidget {
final VoidCallback didResetServerUrl;
const SettingAppFlowyCloudView({required this.didResetServerUrl, super.key});
@override
Widget build(BuildContext context) {
return CustomAppFlowyCloudView(didResetServerUrl: didResetServerUrl);
}
}
class CustomAppFlowyCloudView extends StatelessWidget {
final VoidCallback didResetServerUrl;
const CustomAppFlowyCloudView({required this.didResetServerUrl, super.key});
@override
Widget build(BuildContext context) {
return FutureBuilder<Either<CloudSettingPB, FlowyError>>(

View File

@ -780,7 +780,7 @@ dependencies = [
[[package]]
name = "collab"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"async-trait",
@ -800,7 +800,7 @@ dependencies = [
[[package]]
name = "collab-database"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"async-trait",
@ -827,10 +827,11 @@ dependencies = [
[[package]]
name = "collab-document"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"collab",
"collab-entity",
"nanoid",
"parking_lot",
"serde",
@ -844,7 +845,7 @@ dependencies = [
[[package]]
name = "collab-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"bytes",
@ -858,11 +859,12 @@ dependencies = [
[[package]]
name = "collab-folder"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"chrono",
"collab",
"collab-entity",
"parking_lot",
"serde",
"serde_json",
@ -893,7 +895,7 @@ dependencies = [
[[package]]
name = "collab-plugins"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"async-trait",
@ -923,7 +925,7 @@ dependencies = [
[[package]]
name = "collab-user"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"collab",

View File

@ -67,13 +67,13 @@ client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "215
# To switch to the local path, run:
# scripts/tool/update_collab_source.sh
# ⚠️⚠️⚠️️
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }

View File

@ -686,7 +686,7 @@ dependencies = [
[[package]]
name = "collab"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"async-trait",
@ -706,7 +706,7 @@ dependencies = [
[[package]]
name = "collab-database"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"async-trait",
@ -733,10 +733,11 @@ dependencies = [
[[package]]
name = "collab-document"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"collab",
"collab-entity",
"nanoid",
"parking_lot",
"serde",
@ -750,7 +751,7 @@ dependencies = [
[[package]]
name = "collab-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"bytes",
@ -764,11 +765,12 @@ dependencies = [
[[package]]
name = "collab-folder"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"chrono",
"collab",
"collab-entity",
"parking_lot",
"serde",
"serde_json",
@ -799,7 +801,7 @@ dependencies = [
[[package]]
name = "collab-plugins"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"async-trait",
@ -829,7 +831,7 @@ dependencies = [
[[package]]
name = "collab-user"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=b566a65b3eb3d0abff7c09371cad309341865cde#b566a65b3eb3d0abff7c09371cad309341865cde"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=5eea65abae724f9260a42f96c143308104e8c63#5eea65abae724f9260a42f96c143308104e8c63c"
dependencies = [
"anyhow",
"collab",
@ -1426,6 +1428,7 @@ dependencies = [
"nanoid",
"parking_lot",
"protobuf",
"rand 0.8.5",
"serde",
"serde_json",
"tempdir",

View File

@ -117,10 +117,10 @@ client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "215
# To switch to the local path, run:
# scripts/tool/update_collab_source.sh
# ⚠️⚠️⚠️️
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "b566a65b3eb3d0abff7c09371cad309341865cde" }
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "5eea65abae724f9260a42f96c143308104e8c63" }

View File

@ -8,7 +8,6 @@ use collab_entity::{CollabObject, CollabType};
use collab_plugins::connect_state::{CollabConnectReachability, CollabConnectState};
use collab_plugins::local_storage::kv::snapshot::SnapshotPersistence;
use collab_plugins::local_storage::rocksdb::rocksdb_plugin::{RocksdbBackup, RocksdbDiskPlugin};
use collab_plugins::local_storage::rocksdb::snapshot_plugin::CollabSnapshotPlugin;
use collab_plugins::local_storage::CollabPersistenceConfig;
use parking_lot::{Mutex, RwLock};
use tracing::trace;
@ -230,8 +229,11 @@ impl AppFlowyCollabBuilder {
.with_doc_state(collab_doc_state)
.with_plugin(RocksdbDiskPlugin::new_with_config(
uid,
object_id.to_string(),
object_type.clone(),
collab_db.clone(),
persistence_config.clone(),
self.snapshot_persistence.lock().as_ref().map(Arc::clone),
))
.with_device_id(self.device_id.clone())
.build()?,
@ -252,7 +254,7 @@ impl AppFlowyCollabBuilder {
.await
.get_plugins(CollabPluginProviderContext::AppFlowyCloud {
uid,
collab_object: collab_object.clone(),
collab_object,
local_collab,
})
.await;
@ -272,7 +274,7 @@ impl AppFlowyCollabBuilder {
.await
.get_plugins(CollabPluginProviderContext::Supabase {
uid,
collab_object: collab_object.clone(),
collab_object,
local_collab,
local_collab_db,
})
@ -284,20 +286,6 @@ impl AppFlowyCollabBuilder {
CollabPluginProviderType::Local => {},
}
}
if let Some(snapshot_persistence) = self.snapshot_persistence.lock().as_ref() {
if persistence_config.enable_snapshot {
let snapshot_plugin = CollabSnapshotPlugin::new(
uid,
collab_object,
snapshot_persistence.clone(),
collab_db,
persistence_config.snapshot_per_update,
);
// tracing::trace!("add snapshot plugin: {}", object_id);
collab.lock().add_plugin(Arc::new(snapshot_plugin));
}
}
}
collab.lock().initialize();

View File

@ -41,6 +41,7 @@ collab-folder = { version = "0.1.0" }
collab-database = { version = "0.1.0" }
collab-plugins = { version = "0.1.0" }
collab-entity = { version = "0.1.0" }
rand = { version = "0.8.5", features = [] }
[dev-dependencies]
dotenv = "0.15.0"

View File

@ -1,3 +1,4 @@
use collab::core::collab_plugin::EncodedCollab;
use std::collections::HashMap;
use serde_json::Value;
@ -36,6 +37,18 @@ impl DocumentEventTest {
Self { event_test: core }
}
/// Fetches the document identified by `doc_id` from the document manager and
/// returns its collab state serialized with the v1 encoding.
///
/// Panics (via `unwrap`) if the document cannot be fetched — acceptable here
/// because this helper is only used from integration tests.
pub async fn get_encoded_v1(&self, doc_id: &str) -> EncodedCollab {
let doc = self
.event_test
.appflowy_core
.document_manager
.get_document(doc_id)
.await
.unwrap();
// Hold the lock only long enough to encode the current collab state.
let guard = doc.lock();
guard.get_collab().encode_collab_v1()
}
pub async fn create_document(&self) -> ViewPB {
let core = &self.event_test;
let current_workspace = core.get_current_workspace().await;
@ -88,7 +101,12 @@ impl DocumentEventTest {
children_map.get(&children_id).map(|c| c.children.clone())
}
pub async fn get_block_text_delta(&self, doc_id: &str, text_id: &str) -> Option<String> {
/// Returns the external text id of the block `block_id` in document `doc_id`.
/// Yields `None` when the block does not exist or has no external id.
pub async fn get_text_id(&self, doc_id: &str, block_id: &str) -> Option<String> {
let block = self.get_block(doc_id, block_id).await?;
block.external_id
}
pub async fn get_delta(&self, doc_id: &str, text_id: &str) -> Option<String> {
let document_data = self.get_document_data(doc_id).await;
document_data.meta.text_map.get(text_id).cloned()
}
@ -199,6 +217,33 @@ impl DocumentEventTest {
.await;
}
/// Sends the `GetDocumentSnapshotMeta` event for `doc_id` and returns the
/// list of snapshot metadata entries recorded for that document.
pub async fn get_document_snapshot_metas(&self, doc_id: &str) -> Vec<DocumentSnapshotMetaPB> {
let core = &self.event_test;
let payload = OpenDocumentPayloadPB {
document_id: doc_id.to_string(),
};
EventBuilder::new(core.clone())
.event(DocumentEvent::GetDocumentSnapshotMeta)
.payload(payload)
.async_send()
.await
.parse::<RepeatedDocumentSnapshotMetaPB>()
.items
}
/// Resolves a snapshot's full payload by sending the `GetDocumentSnapshot`
/// event with the metadata returned by `get_document_snapshot_metas`.
pub async fn get_document_snapshot(
&self,
snapshot_meta: DocumentSnapshotMetaPB,
) -> DocumentSnapshotPB {
let core = &self.event_test;
EventBuilder::new(core.clone())
.event(DocumentEvent::GetDocumentSnapshot)
.payload(snapshot_meta)
.async_send()
.await
.parse::<DocumentSnapshotPB>()
}
/// Insert a new text block at the index of parent's children.
/// return the new block id.
pub async fn insert_index(

View File

@ -124,12 +124,15 @@ pub fn document_data_from_document_doc_state(
doc_id: &str,
doc_state: CollabDocState,
) -> DocumentData {
Document::from_doc_state(CollabOrigin::Empty, doc_state, doc_id, vec![])
.unwrap()
document_from_document_doc_state(doc_id, doc_state)
.get_document_data()
.unwrap()
}
pub fn document_from_document_doc_state(doc_id: &str, doc_state: CollabDocState) -> Document {
Document::from_doc_state(CollabOrigin::Empty, doc_state, doc_id, vec![]).unwrap()
}
#[cfg(feature = "single_thread")]
async fn init_core(config: AppFlowyCoreConfig) -> AppFlowyCore {
// let runtime = tokio::runtime::Runtime::new().unwrap();

View File

@ -1,3 +1,4 @@
use crate::document::generate_random_string;
use collab_document::blocks::json_str_to_hashmap;
use event_integration::document::document_event::DocumentEventTest;
use event_integration::document::utils::*;
@ -73,14 +74,33 @@ async fn insert_text_block_test() {
let view = test.create_document().await;
let text = "Hello World";
let block_id = test.insert_index(&view.id, text, 1, None).await;
let block = test.get_block(&view.id, &block_id).await;
assert!(block.is_some());
let block = block.unwrap();
assert!(block.external_id.is_some());
let external_id = block.external_id.unwrap();
let delta = test.get_block_text_delta(&view.id, &external_id).await;
let text_id = test.get_text_id(&view.id, &block_id).await.unwrap();
let delta = test.get_delta(&view.id, &text_id).await;
assert_eq!(delta.unwrap(), json!([{ "insert": text }]).to_string());
}
/// Inserts ~500kb of random text into a fresh document and asserts that the
/// v1-encoded doc state stays under 1mb, guarding against encoding blow-up.
#[tokio::test]
async fn document_size_test() {
  let test = DocumentEventTest::new().await;
  let view = test.create_document().await;
  let max_size = 1024 * 1024; // 1mb
  let total_string_size = 500 * 1024; // 500kb
  let string_size = 1000;
  let iter_len = total_string_size / string_size;
  // Insert `iter_len` random strings of `string_size` characters each.
  for _ in 0..iter_len {
    let s = generate_random_string(string_size);
    test.insert_index(&view.id, &s, 1, None).await;
  }
  let encoded_v1 = test.get_encoded_v1(&view.id).await;
  // Idiomatic form of the original `if len > max { panic!(...) }`: fails with
  // the same message when the encoded state exceeds the budget.
  assert!(
    encoded_v1.doc_state.len() <= max_size,
    "The document size is too large. {}",
    encoded_v1.doc_state.len()
  );
  println!("The document size is {}", encoded_v1.doc_state.len());
}
#[tokio::test]
async fn update_block_test() {
@ -111,16 +131,11 @@ async fn apply_text_delta_test() {
let view = test.create_document().await;
let text = "Hello World";
let block_id = test.insert_index(&view.id, text, 1, None).await;
let update_delta = json!([{ "retain": 5 }, { "insert": "!" }]).to_string();
test
.apply_delta_for_block(&view.id, &block_id, update_delta)
.await;
let block = test.get_block(&view.id, &block_id).await;
let text_id = block.unwrap().external_id.unwrap();
let block_delta = test.get_block_text_delta(&view.id, &text_id).await;
let text_id = test.get_text_id(&view.id, &block_id).await.unwrap();
let delta = test.get_delta(&view.id, &text_id).await;
assert_eq!(
block_delta.unwrap(),
json!([{ "insert": "Hello! World" }]).to_string()
delta.unwrap(),
json!([{ "insert": "Hello World" }]).to_string()
);
}

View File

@ -1 +1,2 @@
mod test;
mod edit_test;
mod snapshot_test;

View File

@ -0,0 +1,44 @@
use event_integration::document::document_event::DocumentEventTest;
use event_integration::document_data_from_document_doc_state;
use std::time::Duration;
use tokio::task::yield_now;
/// Applies 60 small edits to a new document and verifies that exactly one
/// snapshot is persisted, and that its encoded state parses back into
/// document data.
#[tokio::test]
async fn create_document_snapshot_test() {
let test = DocumentEventTest::new().await;
let view = test.create_document().await;
for i in 0..60 {
test.insert_index(&view.id, &i.to_string(), 1, None).await;
// Yield so background tasks (e.g. snapshot generation) get a chance to run.
yield_now().await;
}
// wait for the snapshot to save to disk
tokio::time::sleep(Duration::from_secs(2)).await;
let snapshot_metas = test.get_document_snapshot_metas(&view.id).await;
assert_eq!(snapshot_metas.len(), 1);
// Every recorded snapshot must round-trip through the document decoder.
for snapshot_meta in snapshot_metas {
let data = test.get_document_snapshot(snapshot_meta).await;
let _ = document_data_from_document_doc_state(&view.id, data.encoded_v1);
}
}
/// Applies many edits (400 inserts with periodic pauses) and verifies the
/// snapshot store caps the retained snapshots per document at 5.
#[tokio::test]
async fn maximum_document_snapshot_tests() {
let test = DocumentEventTest::new().await;
let view = test.create_document().await;
for i in 0..400 {
test.insert_index(&view.id, &i.to_string(), 1, None).await;
// Pause periodically so snapshots have time to be generated along the way.
if i % 50 == 0 {
tokio::time::sleep(Duration::from_secs(1)).await;
}
yield_now().await;
}
// wait for the snapshot to save to disk
tokio::time::sleep(Duration::from_secs(1)).await;
let snapshot_metas = test.get_document_snapshot_metas(&view.id).await;
// The default maximum number of retained snapshots per document is 5.
assert_eq!(snapshot_metas.len(), 5);
}

View File

@ -3,3 +3,14 @@ mod local_test;
mod af_cloud_test;
#[cfg(feature = "supabase_cloud_test")]
mod supabase_test;
use rand::{distributions::Alphanumeric, Rng};
/// Builds a random string of exactly `len` alphanumeric characters.
pub fn generate_random_string(len: usize) -> String {
  let mut rng = rand::thread_rng();
  let mut out = String::with_capacity(len);
  for _ in 0..len {
    out.push(char::from(rng.sample(&Alphanumeric)));
  }
  out
}

View File

@ -1,8 +1,8 @@
use std::ops::Deref;
use event_integration::event_builder::EventBuilder;
use flowy_document::entities::{OpenDocumentPayloadPB, RepeatedDocumentSnapshotPB};
use flowy_document::event_map::DocumentEvent::GetDocumentSnapshots;
use flowy_document::entities::{OpenDocumentPayloadPB, RepeatedDocumentSnapshotMetaPB};
use flowy_document::event_map::DocumentEvent::GetDocumentSnapshotMeta;
use flowy_folder::entities::ViewPB;
use crate::util::FlowySupabaseTest;
@ -28,15 +28,15 @@ impl FlowySupabaseDocumentTest {
}
#[allow(dead_code)]
pub async fn get_document_snapshots(&self, view_id: &str) -> RepeatedDocumentSnapshotPB {
pub async fn get_document_snapshots(&self, view_id: &str) -> RepeatedDocumentSnapshotMetaPB {
EventBuilder::new(self.inner.deref().clone())
.event(GetDocumentSnapshots)
.event(GetDocumentSnapshotMeta)
.payload(OpenDocumentPayloadPB {
document_id: view_id.to_string(),
})
.async_send()
.await
.parse::<RepeatedDocumentSnapshotPB>()
.parse::<RepeatedDocumentSnapshotMetaPB>()
}
}

View File

@ -9,6 +9,47 @@ use flowy_user::errors::ErrorCode;
use serde_json::{json, Value};
use std::env::temp_dir;
/// Imports a pre-0.4.0 ("037") user data folder into a fresh AppFlowy Cloud
/// account and verifies the old workspace is migrated into the expected view
/// hierarchy.
#[tokio::test]
async fn import_appflowy_data_need_migration_test() {
// In 037, the workspace array will be migrated to view.
let import_container_name = "037_local".to_string();
let (cleaner, user_db_path) =
unzip_history_user_db("./tests/asset", &import_container_name).unwrap();
// Fixture content of the 037 data folder:
// Getting started
// Document1
// Document2(fav)
user_localhost_af_cloud().await;
let test = EventIntegrationTest::new_with_name(DEFAULT_NAME).await;
let _ = test.af_cloud_sign_up().await;
test
.import_appflowy_data(
user_db_path.to_str().unwrap().to_string(),
Some(import_container_name.clone()),
)
.await
.unwrap();
// after import, the structure is:
// workspace:
// view: Getting Started
// view: 037_local
// view: Getting Started
// view: Document1
// view: Document2
let views = test.get_all_workspace_views().await;
assert_eq!(views.len(), 2);
assert_eq!(views[1].name, import_container_name);
let child_views = test.get_view(&views[1].id).await.child_views;
assert_eq!(child_views.len(), 1);
let child_views = test.get_view(&child_views[0].id).await.child_views;
assert_eq!(child_views.len(), 2);
assert_eq!(child_views[0].name, "Document1");
assert_eq!(child_views[1].name, "Document2");
// Drop the cleaner explicitly so the unzipped fixture is removed
// (presumably a cleanup guard — confirm against `unzip_history_user_db`).
drop(cleaner);
}
#[tokio::test]
async fn import_appflowy_data_folder_into_new_view_test() {
let import_container_name = "040_local".to_string();

View File

@ -1,36 +1,20 @@
use collab_entity::CollabType;
use collab_integrate::{CollabSnapshot, PersistenceError, SnapshotPersistence};
use diesel::dsl::count_star;
use diesel::internal::derives::multiconnection::chrono;
use diesel::internal::derives::multiconnection::chrono::Local;
use diesel::SqliteConnection;
use flowy_error::FlowyError;
use flowy_sqlite::{
insert_or_ignore_into,
prelude::*,
schema::{collab_snapshot, collab_snapshot::dsl},
};
use flowy_user::manager::UserManager;
use lib_infra::util::timestamp;
use std::sync::Weak;
use tracing::debug;
pub struct SnapshotDBImpl(pub Weak<UserManager>);
impl SnapshotPersistence for SnapshotDBImpl {
fn get_snapshots(&self, uid: i64, object_id: &str) -> Vec<CollabSnapshot> {
match self.0.upgrade() {
None => vec![],
Some(user_session) => user_session
.db_pool(uid)
.and_then(|pool| Ok(pool.get()?))
.and_then(|mut conn| {
CollabSnapshotTableSql::get_all_snapshots(object_id, &mut conn)
.map(|rows| rows.into_iter().map(|row| row.into()).collect())
})
.unwrap_or_else(|_| vec![]),
}
}
fn create_snapshot(
&self,
uid: i64,
@ -51,7 +35,7 @@ impl SnapshotPersistence for SnapshotDBImpl {
.map_err(|e| PersistenceError::Internal(e.into()))?;
// Save the snapshot data to disk
let result = CollabSnapshotTableSql::create(
let result = CollabSnapshotSql::create(
CollabSnapshotRow::new(object_id.clone(), collab_type.to_string(), encoded_v1),
&mut conn,
)
@ -68,14 +52,14 @@ impl SnapshotPersistence for SnapshotDBImpl {
#[derive(PartialEq, Clone, Debug, Queryable, Identifiable, Insertable)]
#[diesel(table_name = collab_snapshot)]
struct CollabSnapshotRow {
id: String,
pub(crate) struct CollabSnapshotRow {
pub(crate) id: String,
object_id: String,
title: String,
desc: String,
collab_type: String,
timestamp: i64,
data: Vec<u8>,
pub(crate) timestamp: i64,
pub(crate) data: Vec<u8>,
}
impl CollabSnapshotRow {
@ -101,56 +85,95 @@ impl From<CollabSnapshotRow> for CollabSnapshot {
}
}
struct CollabSnapshotTableSql;
impl CollabSnapshotTableSql {
fn create(row: CollabSnapshotRow, conn: &mut SqliteConnection) -> Result<(), FlowyError> {
let values = (
dsl::id.eq(row.id),
dsl::object_id.eq(row.object_id),
dsl::title.eq(row.title),
dsl::desc.eq(row.desc),
dsl::collab_type.eq(row.collab_type),
dsl::data.eq(row.data),
dsl::timestamp.eq(row.timestamp),
);
let _ = insert_or_ignore_into(dsl::collab_snapshot)
.values(values)
.execute(conn)?;
/// Lightweight snapshot descriptor: identifies a stored snapshot without
/// carrying its (potentially large) encoded payload.
pub struct CollabSnapshotMeta {
// Primary key of the snapshot row.
pub id: String,
// Id of the collab object the snapshot belongs to.
pub object_id: String,
// Creation time of the snapshot (unix timestamp).
pub timestamp: i64,
}
// Count the total number of rows
// If there are more than 10 rows, delete snapshots older than 5 days
let total_rows: i64 = dsl::collab_snapshot.select(count_star()).first(conn)?;
if total_rows > 10 {
let five_days_ago = Local::now() - chrono::Duration::days(5);
let _ =
diesel::delete(dsl::collab_snapshot.filter(dsl::timestamp.lt(five_days_ago.timestamp())))
.execute(conn)?;
};
pub(crate) struct CollabSnapshotSql;
impl CollabSnapshotSql {
/// Inserts a snapshot row and prunes the oldest snapshot for the same
/// object once more than 5 are stored, all inside a single immediate
/// transaction so insert + prune are atomic.
pub(crate) fn create(
row: CollabSnapshotRow,
conn: &mut SqliteConnection,
) -> Result<(), FlowyError> {
conn.immediate_transaction::<_, Error, _>(|conn| {
// Insert the new snapshot
insert_into(dsl::collab_snapshot)
.values((
dsl::id.eq(row.id),
dsl::object_id.eq(&row.object_id),
dsl::title.eq(row.title),
dsl::desc.eq(row.desc),
dsl::collab_type.eq(row.collab_type),
dsl::data.eq(row.data),
dsl::timestamp.eq(row.timestamp),
))
.execute(conn)?;
// Count the total number of snapshots for the specific object_id
let total_snapshots: i64 = dsl::collab_snapshot
.filter(dsl::object_id.eq(&row.object_id))
.select(count_star())
.first(conn)?;
// If there are more than 5 snapshots, delete the oldest one.
// Only a single row is pruned per insert (limit 1), which keeps the
// steady-state count at 5 as long as create() is the only writer.
if total_snapshots > 5 {
let ids_to_delete: Vec<String> = dsl::collab_snapshot
.filter(dsl::object_id.eq(&row.object_id))
.order(dsl::timestamp.asc())
.select(dsl::id)
.limit(1)
.load(conn)?;
debug!(
"Delete {} snapshots for object_id: {}",
ids_to_delete.len(),
row.object_id
);
for id in ids_to_delete {
delete(dsl::collab_snapshot.filter(dsl::id.eq(id))).execute(conn)?;
}
}
Ok(())
})?;
Ok(())
}
fn get_all_snapshots(
pub(crate) fn get_all_snapshots(
object_id: &str,
conn: &mut SqliteConnection,
) -> Result<Vec<CollabSnapshotRow>, FlowyError> {
let sql = dsl::collab_snapshot
.filter(dsl::object_id.eq(object_id))
.into_boxed();
) -> Result<Vec<CollabSnapshotMeta>, FlowyError> {
let results = collab_snapshot::table
.filter(collab_snapshot::object_id.eq(object_id))
.select((
collab_snapshot::id,
collab_snapshot::object_id,
collab_snapshot::timestamp,
))
.load::<(String, String, i64)>(conn)
.expect("Error loading collab_snapshot");
let rows = sql
.order(dsl::timestamp.asc())
.load::<CollabSnapshotRow>(conn)?;
// Map the results to CollabSnapshotMeta
let snapshots: Vec<CollabSnapshotMeta> = results
.into_iter()
.map(|(id, object_id, timestamp)| CollabSnapshotMeta {
id,
object_id,
timestamp,
})
.collect();
Ok(rows)
Ok(snapshots)
}
#[allow(dead_code)]
fn get_latest_snapshot(
pub(crate) fn get_snapshot(
object_id: &str,
conn: &mut SqliteConnection,
) -> Option<CollabSnapshotRow> {
let sql = dsl::collab_snapshot
.filter(dsl::object_id.eq(object_id))
.filter(dsl::id.eq(object_id))
.into_boxed();
sql
@ -160,7 +183,7 @@ impl CollabSnapshotTableSql {
}
#[allow(dead_code)]
fn delete(
pub(crate) fn delete(
object_id: &str,
snapshot_ids: Option<Vec<String>>,
conn: &mut SqliteConnection,

View File

@ -1,11 +1,13 @@
use std::sync::{Arc, Weak};
use crate::deps_resolve::CollabSnapshotSql;
use collab_integrate::collab_builder::AppFlowyCollabBuilder;
use collab_integrate::CollabKVDB;
use flowy_database2::DatabaseManager;
use flowy_document::manager::{DocumentManager, DocumentUser};
use flowy_document::entities::{DocumentSnapshotData, DocumentSnapshotMeta};
use flowy_document::manager::{DocumentManager, DocumentSnapshotService, DocumentUserService};
use flowy_document_deps::cloud::DocumentCloudService;
use flowy_error::FlowyError;
use flowy_error::{FlowyError, FlowyResult};
use flowy_storage::FileStorageService;
use flowy_user::manager::UserManager;
@ -18,18 +20,67 @@ impl DocumentDepsResolver {
cloud_service: Arc<dyn DocumentCloudService>,
storage_service: Weak<dyn FileStorageService>,
) -> Arc<DocumentManager> {
let user: Arc<dyn DocumentUser> = Arc::new(DocumentUserImpl(user_manager));
let user_service: Arc<dyn DocumentUserService> =
Arc::new(DocumentUserImpl(user_manager.clone()));
let snapshot_service = Arc::new(DocumentSnapshotImpl(user_manager));
Arc::new(DocumentManager::new(
user.clone(),
user_service.clone(),
collab_builder,
cloud_service,
storage_service,
snapshot_service,
))
}
}
// Bridges the document snapshot service to the user's sqlite database via a
// weak reference to the UserManager (weak so it does not keep the session alive).
struct DocumentSnapshotImpl(Weak<UserManager>);
impl DocumentSnapshotImpl {
// Upgrades the weak reference, failing with an internal error once the
// user session has been dropped.
pub fn get_user_manager(&self) -> FlowyResult<Arc<UserManager>> {
self
.0
.upgrade()
.ok_or(FlowyError::internal().with_context("Unexpected error: UserSession is None"))
}
}
impl DocumentSnapshotService for DocumentSnapshotImpl {
// Lists snapshot metadata for `document_id` from the current user's database.
fn get_document_snapshot_metas(
&self,
document_id: &str,
) -> FlowyResult<Vec<DocumentSnapshotMeta>> {
let user_manager = self.get_user_manager()?;
let uid = user_manager.user_id()?;
let mut db = user_manager.db_connection(uid)?;
CollabSnapshotSql::get_all_snapshots(document_id, &mut db).map(|rows| {
rows
.into_iter()
.map(|row| DocumentSnapshotMeta {
snapshot_id: row.id,
object_id: row.object_id,
created_at: row.timestamp,
})
.collect()
})
}
// Loads the full snapshot payload for `snapshot_id`.
fn get_document_snapshot(&self, snapshot_id: &str) -> FlowyResult<DocumentSnapshotData> {
let user_manager = self.get_user_manager()?;
let uid = user_manager.user_id()?;
let mut db = user_manager.db_connection(uid)?;
CollabSnapshotSql::get_snapshot(snapshot_id, &mut db)
.map(|row| DocumentSnapshotData {
// NOTE(review): `row.id` is the snapshot row id, not the document's
// object id — looks like it should be `row.object_id`; confirm whether
// the private visibility of that field forced this. TODO verify.
object_id: row.id,
encoded_v1: row.data,
})
.ok_or(
FlowyError::record_not_found().with_context(format!("Snapshot {} not found", snapshot_id)),
)
}
}
struct DocumentUserImpl(Weak<UserManager>);
impl DocumentUser for DocumentUserImpl {
impl DocumentUserService for DocumentUserImpl {
fn user_id(&self) -> Result<i64, FlowyError> {
self
.0

View File

@ -383,21 +383,30 @@ impl TryInto<ConvertDataParams> for ConvertDataPayloadPB {
}
#[derive(Debug, Default, ProtoBuf)]
pub struct RepeatedDocumentSnapshotPB {
pub struct RepeatedDocumentSnapshotMetaPB {
#[pb(index = 1)]
pub items: Vec<DocumentSnapshotPB>,
pub items: Vec<DocumentSnapshotMetaPB>,
}
#[derive(Debug, Default, ProtoBuf)]
pub struct DocumentSnapshotMetaPB {
#[pb(index = 1)]
pub snapshot_id: String,
#[pb(index = 2)]
pub object_id: String,
#[pb(index = 3)]
pub created_at: i64,
}
#[derive(Debug, Default, ProtoBuf)]
pub struct DocumentSnapshotPB {
#[pb(index = 1)]
pub snapshot_id: i64,
pub object_id: String,
#[pb(index = 2)]
pub snapshot_desc: String,
#[pb(index = 3)]
pub created_at: i64,
pub encoded_v1: Vec<u8>,
}
#[derive(Debug, Default, ProtoBuf)]
@ -452,3 +461,14 @@ impl TryInto<TextDeltaParams> for TextDeltaPayloadPB {
})
}
}
pub struct DocumentSnapshotMeta {
pub snapshot_id: String,
pub object_id: String,
pub created_at: i64,
}
pub struct DocumentSnapshotData {
pub object_id: String,
pub encoded_v1: Vec<u8>,
}

View File

@ -39,7 +39,7 @@ pub(crate) async fn create_document_handler(
) -> FlowyResult<()> {
let manager = upgrade_document(manager)?;
let params: CreateDocumentParams = data.into_inner().try_into()?;
let uid = manager.user.user_id()?;
let uid = manager.user_service.user_id()?;
manager
.create_document(uid, &params.document_id, params.initial_data)
.await?;
@ -208,15 +208,25 @@ pub(crate) async fn can_undo_redo_handler(
})
}
pub(crate) async fn get_snapshot_handler(
pub(crate) async fn get_snapshot_meta_handler(
data: AFPluginData<OpenDocumentPayloadPB>,
manager: AFPluginState<Weak<DocumentManager>>,
) -> DataResult<RepeatedDocumentSnapshotPB, FlowyError> {
) -> DataResult<RepeatedDocumentSnapshotMetaPB, FlowyError> {
let manager = upgrade_document(manager)?;
let params: OpenDocumentParams = data.into_inner().try_into()?;
let doc_id = params.document_id;
let snapshots = manager.get_document_snapshots(&doc_id, 10).await?;
data_result_ok(RepeatedDocumentSnapshotPB { items: snapshots })
let snapshots = manager.get_document_snapshot_meta(&doc_id, 10).await?;
data_result_ok(RepeatedDocumentSnapshotMetaPB { items: snapshots })
}
pub(crate) async fn get_snapshot_data_handler(
data: AFPluginData<DocumentSnapshotMetaPB>,
manager: AFPluginState<Weak<DocumentManager>>,
) -> DataResult<DocumentSnapshotPB, FlowyError> {
let manager = upgrade_document(manager)?;
let params = data.into_inner();
let snapshot = manager.get_document_snapshot(&params.snapshot_id).await?;
data_result_ok(snapshot)
}
impl From<BlockActionPB> for BlockAction {

View File

@ -5,7 +5,7 @@ use strum_macros::Display;
use flowy_derive::{Flowy_Event, ProtoBuf_Enum};
use lib_dispatch::prelude::AFPlugin;
use crate::event_handler::get_snapshot_handler;
use crate::event_handler::get_snapshot_meta_handler;
use crate::{event_handler::*, manager::DocumentManager};
pub fn init(document_manager: Weak<DocumentManager>) -> AFPlugin {
@ -24,7 +24,14 @@ pub fn init(document_manager: Weak<DocumentManager>) -> AFPlugin {
.event(DocumentEvent::Redo, redo_handler)
.event(DocumentEvent::Undo, undo_handler)
.event(DocumentEvent::CanUndoRedo, can_undo_redo_handler)
.event(DocumentEvent::GetDocumentSnapshots, get_snapshot_handler)
.event(
DocumentEvent::GetDocumentSnapshotMeta,
get_snapshot_meta_handler,
)
.event(
DocumentEvent::GetDocumentSnapshot,
get_snapshot_data_handler,
)
.event(DocumentEvent::CreateText, create_text_handler)
.event(DocumentEvent::ApplyTextDeltaEvent, apply_text_delta_handler)
.event(DocumentEvent::ConvertDocument, convert_document_handler)
@ -73,8 +80,11 @@ pub enum DocumentEvent {
)]
CanUndoRedo = 8,
#[event(input = "OpenDocumentPayloadPB", output = "RepeatedDocumentSnapshotPB")]
GetDocumentSnapshots = 9,
#[event(
input = "OpenDocumentPayloadPB",
output = "RepeatedDocumentSnapshotMetaPB"
)]
GetDocumentSnapshotMeta = 9,
#[event(input = "TextDeltaPayloadPB")]
CreateText = 10,
@ -95,4 +105,7 @@ pub enum DocumentEvent {
output = "ConvertDataToJsonResponsePB"
)]
ConvertDataToJSON = 13,
#[event(input = "DocumentSnapshotMetaPB", output = "DocumentSnapshotPB")]
GetDocumentSnapshot = 14,
}

View File

@ -21,39 +21,51 @@ use flowy_error::{internal_error, ErrorCode, FlowyError, FlowyResult};
use flowy_storage::FileStorageService;
use crate::document::MutexDocument;
use crate::entities::DocumentSnapshotPB;
use crate::entities::{
DocumentSnapshotData, DocumentSnapshotMeta, DocumentSnapshotMetaPB, DocumentSnapshotPB,
};
use crate::reminder::DocumentReminderAction;
pub trait DocumentUser: Send + Sync {
pub trait DocumentUserService: Send + Sync {
fn user_id(&self) -> Result<i64, FlowyError>;
fn workspace_id(&self) -> Result<String, FlowyError>;
fn token(&self) -> Result<Option<String>, FlowyError>; // unused now.
fn collab_db(&self, uid: i64) -> Result<Weak<CollabKVDB>, FlowyError>;
}
pub trait DocumentSnapshotService: Send + Sync {
fn get_document_snapshot_metas(
&self,
document_id: &str,
) -> FlowyResult<Vec<DocumentSnapshotMeta>>;
fn get_document_snapshot(&self, snapshot_id: &str) -> FlowyResult<DocumentSnapshotData>;
}
pub struct DocumentManager {
pub user: Arc<dyn DocumentUser>,
pub user_service: Arc<dyn DocumentUserService>,
collab_builder: Arc<AppFlowyCollabBuilder>,
documents: Arc<Mutex<LruCache<String, Arc<MutexDocument>>>>,
#[allow(dead_code)]
cloud_service: Arc<dyn DocumentCloudService>,
storage_service: Weak<dyn FileStorageService>,
snapshot_service: Arc<dyn DocumentSnapshotService>,
}
impl DocumentManager {
pub fn new(
user: Arc<dyn DocumentUser>,
user_service: Arc<dyn DocumentUserService>,
collab_builder: Arc<AppFlowyCollabBuilder>,
cloud_service: Arc<dyn DocumentCloudService>,
storage_service: Weak<dyn FileStorageService>,
snapshot_service: Arc<dyn DocumentSnapshotService>,
) -> Self {
let documents = Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(10).unwrap())));
Self {
user,
user_service,
collab_builder,
documents,
cloud_service,
storage_service,
snapshot_service,
}
}
@ -100,7 +112,7 @@ impl DocumentManager {
} else {
let result: Result<CollabDocState, FlowyError> = self
.cloud_service
.get_document_doc_state(doc_id, &self.user.workspace_id()?)
.get_document_doc_state(doc_id, &self.user_service.workspace_id()?)
.await;
match result {
@ -140,11 +152,11 @@ impl DocumentManager {
// Try to get the document from the cloud service
doc_state = self
.cloud_service
.get_document_doc_state(doc_id, &self.user.workspace_id()?)
.get_document_doc_state(doc_id, &self.user_service.workspace_id()?)
.await?;
}
let uid = self.user.user_id()?;
let uid = self.user_service.user_id()?;
event!(tracing::Level::DEBUG, "Initialize document: {}", doc_id);
let collab = self
.collab_for_document(uid, doc_id, doc_state, true)
@ -165,10 +177,10 @@ impl DocumentManager {
if !self.is_doc_exist(doc_id)? {
updates = self
.cloud_service
.get_document_doc_state(doc_id, &self.user.workspace_id()?)
.get_document_doc_state(doc_id, &self.user_service.workspace_id()?)
.await?;
}
let uid = self.user.user_id()?;
let uid = self.user_service.user_id()?;
let collab = self
.collab_for_document(uid, doc_id, updates, false)
.await?;
@ -190,8 +202,8 @@ impl DocumentManager {
}
pub fn delete_document(&self, doc_id: &str) -> FlowyResult<()> {
let uid = self.user.user_id()?;
if let Some(db) = self.user.collab_db(uid)?.upgrade() {
let uid = self.user_service.user_id()?;
if let Some(db) = self.user_service.collab_db(uid)?.upgrade() {
let _ = db.with_write_txn(|txn| {
txn.delete_doc(uid, &doc_id)?;
Ok(())
@ -204,25 +216,46 @@ impl DocumentManager {
}
/// Return the list of snapshots of the document.
pub async fn get_document_snapshots(
pub async fn get_document_snapshot_meta(
&self,
document_id: &str,
limit: usize,
) -> FlowyResult<Vec<DocumentSnapshotPB>> {
let workspace_id = self.user.workspace_id()?;
let snapshots = self
.cloud_service
.get_document_snapshots(document_id, limit, &workspace_id)
.await?
_limit: usize,
) -> FlowyResult<Vec<DocumentSnapshotMetaPB>> {
let metas = self
.snapshot_service
.get_document_snapshot_metas(document_id)?
.into_iter()
.map(|snapshot| DocumentSnapshotPB {
snapshot_id: snapshot.snapshot_id,
snapshot_desc: "".to_string(),
created_at: snapshot.created_at,
.map(|meta| DocumentSnapshotMetaPB {
snapshot_id: meta.snapshot_id,
object_id: meta.object_id,
created_at: meta.created_at,
})
.collect::<Vec<_>>();
Ok(snapshots)
// let snapshots = self
// .cloud_service
// .get_document_snapshots(document_id, limit, &workspace_id)
// .await?
// .into_iter()
// .map(|snapshot| DocumentSnapshotPB {
// snapshot_id: snapshot.snapshot_id,
// snapshot_desc: "".to_string(),
// created_at: snapshot.created_at,
// })
// .collect::<Vec<_>>();
Ok(metas)
}
pub async fn get_document_snapshot(&self, snapshot_id: &str) -> FlowyResult<DocumentSnapshotPB> {
let snapshot = self
.snapshot_service
.get_document_snapshot(snapshot_id)
.map(|snapshot| DocumentSnapshotPB {
object_id: snapshot.object_id,
encoded_v1: snapshot.encoded_v1,
})?;
Ok(snapshot)
}
async fn collab_for_document(
@ -232,7 +265,7 @@ impl DocumentManager {
doc_state: CollabDocState,
sync_enable: bool,
) -> FlowyResult<Arc<MutexCollab>> {
let db = self.user.collab_db(uid)?;
let db = self.user_service.collab_db(uid)?;
let collab = self
.collab_builder
.build_with_config(
@ -249,8 +282,8 @@ impl DocumentManager {
}
fn is_doc_exist(&self, doc_id: &str) -> FlowyResult<bool> {
let uid = self.user.user_id()?;
if let Some(collab_db) = self.user.collab_db(uid)?.upgrade() {
let uid = self.user_service.user_id()?;
if let Some(collab_db) = self.user_service.collab_db(uid)?.upgrade() {
let read_txn = collab_db.read_txn();
Ok(read_txn.is_exist(uid, doc_id))
} else {

View File

@ -14,7 +14,11 @@ async fn undo_redo_test() {
// create a document
_ = test
.create_document(test.user.user_id().unwrap(), &doc_id, Some(data.clone()))
.create_document(
test.user_service.user_id().unwrap(),
&doc_id,
Some(data.clone()),
)
.await;
// open a document

View File

@ -13,7 +13,7 @@ async fn restore_document() {
// create a document
let doc_id: String = gen_document_id();
let data = default_document_data();
let uid = test.user.user_id().unwrap();
let uid = test.user_service.user_id().unwrap();
test
.create_document(uid, &doc_id, Some(data.clone()))
.await
@ -51,7 +51,7 @@ async fn restore_document() {
#[tokio::test]
async fn document_apply_insert_action() {
let test = DocumentTest::new();
let uid = test.user.user_id().unwrap();
let uid = test.user_service.user_id().unwrap();
let doc_id: String = gen_document_id();
let data = default_document_data();
@ -105,7 +105,7 @@ async fn document_apply_insert_action() {
async fn document_apply_update_page_action() {
let test = DocumentTest::new();
let doc_id: String = gen_document_id();
let uid = test.user.user_id().unwrap();
let uid = test.user_service.user_id().unwrap();
let data = default_document_data();
// create a document
@ -147,7 +147,7 @@ async fn document_apply_update_page_action() {
#[tokio::test]
async fn document_apply_update_action() {
let test = DocumentTest::new();
let uid = test.user.user_id().unwrap();
let uid = test.user_service.user_id().unwrap();
let doc_id: String = gen_document_id();
let data = default_document_data();

View File

@ -19,9 +19,10 @@ use collab_integrate::collab_builder::{
};
use collab_integrate::CollabKVDB;
use flowy_document::document::MutexDocument;
use flowy_document::manager::{DocumentManager, DocumentUser};
use flowy_document::entities::{DocumentSnapshotData, DocumentSnapshotMeta};
use flowy_document::manager::{DocumentManager, DocumentSnapshotService, DocumentUserService};
use flowy_document_deps::cloud::*;
use flowy_error::{ErrorCode, FlowyError};
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_storage::{FileStorageService, StorageObject};
use lib_infra::async_trait::async_trait;
use lib_infra::future::{to_fut, Fut, FutureResult};
@ -35,11 +36,13 @@ impl DocumentTest {
let user = FakeUser::new();
let cloud_service = Arc::new(LocalTestDocumentCloudServiceImpl());
let file_storage = Arc::new(DocumentTestFileStorageService) as Arc<dyn FileStorageService>;
let document_snapshot = Arc::new(DocumentTestSnapshot);
let manager = DocumentManager::new(
Arc::new(user),
default_collab_builder(),
cloud_service,
Arc::downgrade(&file_storage),
document_snapshot,
);
Self { inner: manager }
}
@ -69,7 +72,7 @@ impl FakeUser {
}
}
impl DocumentUser for FakeUser {
impl DocumentUserService for FakeUser {
fn user_id(&self) -> Result<i64, FlowyError> {
Ok(1)
}
@ -110,7 +113,7 @@ pub async fn create_and_open_empty_document() -> (DocumentTest, Arc<MutexDocumen
let test = DocumentTest::new();
let doc_id: String = gen_document_id();
let data = default_document_data();
let uid = test.user.user_id().unwrap();
let uid = test.user_service.user_id().unwrap();
// create a document
test
.create_document(uid, &doc_id, Some(data.clone()))
@ -196,3 +199,17 @@ impl CollabCloudPluginProvider for DefaultCollabStorageProvider {
false
}
}
struct DocumentTestSnapshot;
impl DocumentSnapshotService for DocumentTestSnapshot {
fn get_document_snapshot_metas(
&self,
_document_id: &str,
) -> FlowyResult<Vec<DocumentSnapshotMeta>> {
todo!()
}
fn get_document_snapshot(&self, _snapshot_id: &str) -> FlowyResult<DocumentSnapshotData> {
todo!()
}
}

View File

@ -44,7 +44,7 @@ use crate::services::db::UserDB;
use crate::services::entities::{ResumableSignUp, Session, UserConfig, UserPaths};
use crate::services::user_awareness::UserAwarenessDataSource;
use crate::services::user_encryption::validate_encryption_sign;
use crate::services::user_sql::{UserTable, UserTableChangeset};
use crate::services::user_sql::{select_user_profile, UserTable, UserTableChangeset};
use crate::services::user_workspace::save_user_workspaces;
use crate::{errors::FlowyError, notification::*};
@ -211,23 +211,7 @@ impl UserManager {
self.database.get_pool(session.user_id),
) {
(Ok(collab_db), Ok(sqlite_pool)) => {
// ⚠The order of migrations is crucial. If you're adding a new migration, please ensure
// it's appended to the end of the list.
let migrations: Vec<Box<dyn UserDataMigration>> = vec![
Box::new(HistoricalEmptyDocumentMigration),
Box::new(FavoriteV1AndWorkspaceArrayMigration),
Box::new(WorkspaceTrashMapToSectionMigration),
];
match UserLocalDataMigration::new(session.clone(), collab_db, sqlite_pool)
.run(migrations, &user.authenticator)
{
Ok(applied_migrations) => {
if !applied_migrations.is_empty() {
info!("Did apply migrations: {:?}", applied_migrations);
}
},
Err(e) => error!("User data migration failed: {:?}", e),
}
run_collab_data_migration(&session, &user, collab_db, sqlite_pool);
},
_ => error!("Failed to get collab db or sqlite pool"),
}
@ -522,18 +506,7 @@ impl UserManager {
/// Fetches the user profile for the given user ID.
pub async fn get_user_profile_from_disk(&self, uid: i64) -> Result<UserProfile, FlowyError> {
let user: UserProfile = user_table::dsl::user_table
.filter(user_table::id.eq(&uid.to_string()))
.first::<UserTable>(&mut *(self.db_connection(uid)?))
.map_err(|err| {
FlowyError::record_not_found().with_context(format!(
"Can't find the user profile for user id: {}, error: {:?}",
uid, err
))
})?
.into();
Ok(user)
select_user_profile(uid, self.db_connection(uid)?)
}
#[tracing::instrument(level = "info", skip_all, err)]
@ -907,3 +880,28 @@ fn save_user_token(uid: i64, pool: Arc<ConnectionPool>, token: String) -> FlowyR
let changeset = UserTableChangeset::new(params);
upsert_user_profile_change(uid, pool, changeset)
}
pub(crate) fn run_collab_data_migration(
session: &Session,
user: &UserProfile,
collab_db: Arc<CollabKVDB>,
sqlite_pool: Arc<ConnectionPool>,
) {
// ⚠The order of migrations is crucial. If you're adding a new migration, please ensure
// it's appended to the end of the list.
let migrations: Vec<Box<dyn UserDataMigration>> = vec![
Box::new(HistoricalEmptyDocumentMigration),
Box::new(FavoriteV1AndWorkspaceArrayMigration),
Box::new(WorkspaceTrashMapToSectionMigration),
];
match UserLocalDataMigration::new(session.clone(), collab_db, sqlite_pool)
.run(migrations, &user.authenticator)
{
Ok(applied_migrations) => {
if !applied_migrations.is_empty() {
info!("Did apply migrations: {:?}", applied_migrations);
}
},
Err(e) => error!("User data migration failed: {:?}", e),
}
}

View File

@ -26,15 +26,8 @@ impl UserDataMigration for FavoriteV1AndWorkspaceArrayMigration {
&self,
session: &Session,
collab_db: &Arc<CollabKVDB>,
authenticator: &Authenticator,
_authenticator: &Authenticator,
) -> FlowyResult<()> {
// Note on `favorite` Struct Refactoring and Migration:
// - The `favorite` struct has already undergone refactoring prior to the launch of the AppFlowy cloud version.
// - Consequently, if a user is utilizing the AppFlowy cloud version, there is no need to perform any migration for the `favorite` struct.
// - This migration step is only necessary for users who are transitioning from a local version of AppFlowy to the cloud version.
if !matches!(authenticator, Authenticator::Local) {
return Ok(());
}
collab_db
.with_write_txn(|write_txn| {
if let Ok(collab) = load_collab(session.user_id, write_txn, &session.user_workspace.id) {

View File

@ -1,13 +1,18 @@
use crate::migrations::session_migration::migrate_session_with_user_uuid;
use crate::manager::run_collab_data_migration;
use crate::services::data_import::importer::load_collab_by_oid;
use crate::services::db::UserDBPath;
use crate::services::entities::{Session, UserPaths};
use crate::services::user_awareness::awareness_oid_from_user_uuid;
use crate::services::user_sql::select_user_profile;
use anyhow::anyhow;
use collab::core::collab::{CollabDocState, MutexCollab};
use collab::core::origin::CollabOrigin;
use collab::preclude::Collab;
use collab::core::transaction::DocTransactionExtension;
use collab::preclude::updates::decoder::Decode;
use collab::preclude::{Collab, Doc, Transact, Update};
use collab_database::database::{
is_database_collab, mut_database_views_with_collab, reset_inline_view_id,
};
@ -47,18 +52,34 @@ pub(crate) fn get_appflowy_data_folder_import_context(path: &str) -> anyhow::Res
let user_paths = UserPaths::new(path.to_string());
let other_store_preferences = Arc::new(StorePreferences::new(path)?);
migrate_session_with_user_uuid("appflowy_session_cache", &other_store_preferences);
let session = other_store_preferences
let imported_session = other_store_preferences
.get_object::<Session>("appflowy_session_cache")
.ok_or(anyhow!(
"Can't find the session cache in the appflowy data folder at path: {}",
path
))?;
let collab_db_path = user_paths.collab_db_path(session.user_id);
let collab_db = Arc::new(CollabKVDB::open(collab_db_path)?);
let collab_db_path = user_paths.collab_db_path(imported_session.user_id);
let sqlite_db_path = user_paths.sqlite_db_path(imported_session.user_id);
let imported_sqlite_db = flowy_sqlite::init(sqlite_db_path).map_err(|e| {
FlowyError::internal().with_context(format!("open import sqlite db failed, {:?}", e))
})?;
let imported_collab_db = Arc::new(CollabKVDB::open(collab_db_path)?);
let imported_user = select_user_profile(
imported_session.user_id,
imported_sqlite_db.get_connection()?,
)?;
run_collab_data_migration(
&imported_session,
&imported_user,
imported_collab_db.clone(),
imported_sqlite_db.get_pool(),
);
Ok(ImportContext {
imported_session: session,
imported_collab_db: collab_db,
imported_session,
imported_collab_db,
container_name: None,
})
}
@ -368,9 +389,25 @@ where
W: CollabKVAction<'a>,
PersistenceError: From<W::Error>,
{
let txn = collab.transact();
if let Err(err) = w_txn.create_new_doc(new_uid, &new_object_id, &txn) {
tracing::error!("import collab:{} failed: {:?}", new_object_id, err);
if let Ok(update) = Update::decode_v1(&collab.encode_collab_v1().doc_state) {
let doc = Doc::new();
{
let mut txn = doc.transact_mut();
txn.apply_update(update);
drop(txn);
}
let encoded_collab = doc.get_encoded_collab_v1();
if let Err(err) = w_txn.flush_doc(
new_uid,
&new_object_id,
encoded_collab.state_vector.to_vec(),
encoded_collab.doc_state.to_vec(),
) {
tracing::error!("import collab:{} failed: {:?}", new_object_id, err);
}
} else {
event!(tracing::Level::ERROR, "decode v1 failed");
}
}
@ -419,6 +456,7 @@ where
None,
)
.map_err(|err| PersistenceError::InvalidData(err.to_string()))?;
let other_folder_data = other_folder
.get_folder_data()
.ok_or(PersistenceError::Internal(anyhow!(
@ -613,9 +651,14 @@ pub async fn upload_imported_data(
.join(", "),
size_counter
);
user_cloud_service
if let Err(e) = user_cloud_service
.batch_create_collab_object(workspace_id, objects)
.await?;
.await
{
tracing::error!("batch create collab object failed: {:?}", e);
}
objects = Vec::new();
size_counter = 0;
}
@ -641,9 +684,13 @@ pub async fn upload_imported_data(
.join(", "),
size_counter
);
user_cloud_service
if let Err(e) = user_cloud_service
.batch_create_collab_object(workspace_id, objects)
.await?;
.await
{
tracing::error!("batch create collab object failed: {:?}", e);
}
}
Ok(())
}

View File

@ -22,7 +22,7 @@ use crate::services::user_sql::UserTable;
use crate::services::workspace_sql::UserWorkspaceTable;
pub trait UserDBPath: Send + Sync + 'static {
fn user_db_path(&self, uid: i64) -> PathBuf;
fn sqlite_db_path(&self, uid: i64) -> PathBuf;
fn collab_db_path(&self, uid: i64) -> PathBuf;
fn collab_db_history(&self, uid: i64, create_if_not_exist: bool) -> std::io::Result<PathBuf>;
}
@ -134,7 +134,7 @@ impl UserDB {
}
pub(crate) fn get_pool(&self, user_id: i64) -> Result<Arc<ConnectionPool>, FlowyError> {
let pool = self.open_user_db(self.paths.user_db_path(user_id), user_id)?;
let pool = self.open_sqlite_db(self.paths.sqlite_db_path(user_id), user_id)?;
Ok(pool)
}
@ -143,7 +143,7 @@ impl UserDB {
Ok(collab_db)
}
pub fn open_user_db(
pub fn open_sqlite_db(
&self,
db_path: impl AsRef<Path>,
user_id: i64,

View File

@ -196,7 +196,7 @@ impl UserPaths {
}
impl UserDBPath for UserPaths {
fn user_db_path(&self, uid: i64) -> PathBuf {
fn sqlite_db_path(&self, uid: i64) -> PathBuf {
PathBuf::from(self.user_data_dir(uid))
}

View File

@ -1,9 +1,13 @@
use diesel::RunQueryDsl;
use flowy_error::FlowyError;
use std::str::FromStr;
use flowy_sqlite::schema::user_table;
use flowy_user_deps::cloud::UserUpdate;
use flowy_user_deps::entities::*;
use flowy_sqlite::schema::user_table;
use flowy_sqlite::{query_dsl::*, DBConnection, ExpressionMethods};
/// The order of the fields in the struct must be the same as the order of the fields in the table.
/// Check out the [schema.rs] for table schema.
#[derive(Clone, Default, Queryable, Identifiable, Insertable)]
@ -126,3 +130,18 @@ impl From<UserUpdate> for UserTableChangeset {
}
}
}
pub fn select_user_profile(uid: i64, mut conn: DBConnection) -> Result<UserProfile, FlowyError> {
let user: UserProfile = user_table::dsl::user_table
.filter(user_table::id.eq(&uid.to_string()))
.first::<UserTable>(&mut *conn)
.map_err(|err| {
FlowyError::record_not_found().with_context(format!(
"Can't find the user profile for user id: {}, error: {:?}",
uid, err
))
})?
.into();
Ok(user)
}