add conflict resolver

This commit is contained in:
appflowy 2022-01-20 23:51:11 +08:00
parent 324dc53e5f
commit d1c5df4b88
31 changed files with 1078 additions and 721 deletions

View File

@ -7,9 +7,10 @@ use crate::{
util::sqlx_ext::{map_sqlx_error, DBTransaction, SqlBuilder}, util::sqlx_ext::{map_sqlx_error, DBTransaction, SqlBuilder},
}; };
use backend_service::errors::{invalid_params, ServerError}; use backend_service::errors::{invalid_params, ServerError};
use bytes::Bytes;
use chrono::Utc; use chrono::Utc;
use flowy_collaboration::{ use flowy_collaboration::{
client_document::default::initial_delta,
entities::revision::{RepeatedRevision, Revision}, entities::revision::{RepeatedRevision, Revision},
protobuf::CreateDocParams as CreateDocParamsPB, protobuf::CreateDocParams as CreateDocParamsPB,
}; };
@ -90,9 +91,9 @@ pub(crate) async fn create_view(
.await .await
.map_err(map_sqlx_error)?; .map_err(map_sqlx_error)?;
let delta_data = Bytes::from(params.view_data); let initial_delta_data = initial_delta().to_bytes();
let md5 = format!("{:x}", md5::compute(&delta_data)); let md5 = format!("{:x}", md5::compute(&initial_delta_data));
let revision = Revision::new(&view.id, 0, 0, delta_data, user_id, md5); let revision = Revision::new(&view.id, 0, 0, initial_delta_data, user_id, md5);
let repeated_revision = RepeatedRevision::new(vec![revision]); let repeated_revision = RepeatedRevision::new(vec![revision]);
let mut create_doc_params = CreateDocParamsPB::new(); let mut create_doc_params = CreateDocParamsPB::new();
create_doc_params.set_revisions(repeated_revision.try_into().unwrap()); create_doc_params.set_revisions(repeated_revision.try_into().unwrap());

View File

@ -6,16 +6,18 @@ use flowy_document::context::DocumentContext;
use flowy_sync::RevisionWebSocket; use flowy_sync::RevisionWebSocket;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use flowy_collaboration::folder::FolderPad; use flowy_collaboration::{entities::ws_data::ServerRevisionWSData, folder::FolderPad};
use parking_lot::RwLock; use parking_lot::RwLock;
use std::{collections::HashMap, sync::Arc}; use std::{collections::HashMap, convert::TryInto, sync::Arc};
use tokio::sync::RwLock as TokioRwLock;
use crate::{ use crate::{
dart_notification::{send_dart_notification, WorkspaceNotification}, dart_notification::{send_dart_notification, WorkspaceNotification},
entities::workspace::RepeatedWorkspace, entities::workspace::RepeatedWorkspace,
errors::FlowyResult, errors::FlowyResult,
module::{FolderCouldServiceV1, WorkspaceUser}, module::{FolderCouldServiceV1, WorkspaceDatabase, WorkspaceUser},
services::{ services::{
folder_editor::FolderEditor,
persistence::FolderPersistence, persistence::FolderPersistence,
set_current_workspace, set_current_workspace,
AppController, AppController,
@ -33,25 +35,29 @@ pub struct FolderManager {
pub user: Arc<dyn WorkspaceUser>, pub user: Arc<dyn WorkspaceUser>,
pub(crate) cloud_service: Arc<dyn FolderCouldServiceV1>, pub(crate) cloud_service: Arc<dyn FolderCouldServiceV1>,
pub(crate) persistence: Arc<FolderPersistence>, pub(crate) persistence: Arc<FolderPersistence>,
pub workspace_controller: Arc<WorkspaceController>, pub(crate) workspace_controller: Arc<WorkspaceController>,
pub(crate) app_controller: Arc<AppController>, pub(crate) app_controller: Arc<AppController>,
pub(crate) view_controller: Arc<ViewController>, pub(crate) view_controller: Arc<ViewController>,
pub(crate) trash_controller: Arc<TrashController>, pub(crate) trash_controller: Arc<TrashController>,
ws_sender: Arc<dyn RevisionWebSocket>, web_socket: Arc<dyn RevisionWebSocket>,
folder_editor: Arc<TokioRwLock<Option<Arc<FolderEditor>>>>,
} }
impl FolderManager { impl FolderManager {
pub(crate) fn new( pub fn new(
user: Arc<dyn WorkspaceUser>, user: Arc<dyn WorkspaceUser>,
cloud_service: Arc<dyn FolderCouldServiceV1>, cloud_service: Arc<dyn FolderCouldServiceV1>,
persistence: Arc<FolderPersistence>, database: Arc<dyn WorkspaceDatabase>,
flowy_document: Arc<DocumentContext>, flowy_document: Arc<DocumentContext>,
ws_sender: Arc<dyn RevisionWebSocket>, web_socket: Arc<dyn RevisionWebSocket>,
) -> Self { ) -> Self {
if let Ok(token) = user.token() { if let Ok(token) = user.token() {
INIT_FOLDER_FLAG.write().insert(token, false); INIT_FOLDER_FLAG.write().insert(token, false);
} }
let folder_editor = Arc::new(TokioRwLock::new(None));
let persistence = Arc::new(FolderPersistence::new(database.clone(), folder_editor.clone()));
let trash_controller = Arc::new(TrashController::new( let trash_controller = Arc::new(TrashController::new(
persistence.clone(), persistence.clone(),
cloud_service.clone(), cloud_service.clone(),
@ -88,7 +94,8 @@ impl FolderManager {
app_controller, app_controller,
view_controller, view_controller,
trash_controller, trash_controller,
ws_sender, web_socket,
folder_editor,
} }
} }
@ -101,7 +108,21 @@ impl FolderManager {
// } // }
// } // }
pub async fn did_receive_ws_data(&self, _data: Bytes) {} pub async fn did_receive_ws_data(&self, data: Bytes) {
let result: Result<ServerRevisionWSData, protobuf::ProtobufError> = data.try_into();
match result {
Ok(data) => match self.folder_editor.read().await.clone() {
None => {},
Some(editor) => match editor.receive_ws_data(data).await {
Ok(_) => {},
Err(e) => tracing::error!("Folder receive data error: {:?}", e),
},
},
Err(e) => {
tracing::error!("Folder ws data parser failed: {:?}", e);
},
}
}
pub async fn initialize(&self, user_id: &str) -> FlowyResult<()> { pub async fn initialize(&self, user_id: &str) -> FlowyResult<()> {
if let Some(is_init) = INIT_FOLDER_FLAG.read().get(user_id) { if let Some(is_init) = INIT_FOLDER_FLAG.read().get(user_id) {
@ -110,6 +131,12 @@ impl FolderManager {
} }
} }
let _ = self.persistence.initialize(user_id).await?; let _ = self.persistence.initialize(user_id).await?;
let token = self.user.token()?;
let pool = self.persistence.db_pool()?;
let folder_editor = FolderEditor::new(user_id, &token, pool, self.web_socket.clone()).await?;
*self.folder_editor.write().await = Some(Arc::new(folder_editor));
let _ = self.app_controller.initialize()?; let _ = self.app_controller.initialize()?;
let _ = self.view_controller.initialize()?; let _ = self.view_controller.initialize()?;
INIT_FOLDER_FLAG.write().insert(user_id.to_owned(), true); INIT_FOLDER_FLAG.write().insert(user_id.to_owned(), true);
@ -121,7 +148,7 @@ impl FolderManager {
self.initialize(user_id).await self.initialize(user_id).await
} }
pub async fn clear(&self) { self.persistence.user_did_logout() } pub async fn clear(&self) { *self.folder_editor.write().await = None; }
} }
struct DefaultFolderBuilder(); struct DefaultFolderBuilder();

View File

@ -8,17 +8,10 @@ use crate::{
}, },
errors::FlowyError, errors::FlowyError,
event::WorkspaceEvent, event::WorkspaceEvent,
services::{ services::{app::event_handler::*, trash::event_handler::*, view::event_handler::*, workspace::event_handler::*},
app::event_handler::*,
persistence::FolderPersistence,
trash::event_handler::*,
view::event_handler::*,
workspace::event_handler::*,
},
}; };
use flowy_database::DBConnection; use flowy_database::DBConnection;
use flowy_document::context::DocumentContext;
use flowy_sync::RevisionWebSocket;
use lib_dispatch::prelude::*; use lib_dispatch::prelude::*;
use lib_infra::future::FutureResult; use lib_infra::future::FutureResult;
use lib_sqlite::ConnectionPool; use lib_sqlite::ConnectionPool;
@ -41,23 +34,6 @@ pub trait WorkspaceDatabase: Send + Sync {
} }
} }
pub fn init_folder(
user: Arc<dyn WorkspaceUser>,
database: Arc<dyn WorkspaceDatabase>,
flowy_document: Arc<DocumentContext>,
cloud_service: Arc<dyn FolderCouldServiceV1>,
ws_sender: Arc<dyn RevisionWebSocket>,
) -> Arc<FolderManager> {
let persistence = Arc::new(FolderPersistence::new(user.clone(), database.clone()));
Arc::new(FolderManager::new(
user,
cloud_service,
persistence,
flowy_document,
ws_sender,
))
}
pub fn create(folder: Arc<FolderManager>) -> Module { pub fn create(folder: Arc<FolderManager>) -> Module {
let mut module = Module::new() let mut module = Module::new()
.name("Flowy-Workspace") .name("Flowy-Workspace")

View File

@ -50,23 +50,29 @@ impl AppController {
} }
pub(crate) async fn create_app_on_local(&self, app: App) -> Result<App, FlowyError> { pub(crate) async fn create_app_on_local(&self, app: App) -> Result<App, FlowyError> {
let _ = self.persistence.begin_transaction(|transaction| { let _ = self
.persistence
.begin_transaction(|transaction| {
let _ = transaction.create_app(app.clone())?; let _ = transaction.create_app(app.clone())?;
let _ = notify_apps_changed(&app.workspace_id, self.trash_controller.clone(), &transaction)?; let _ = notify_apps_changed(&app.workspace_id, self.trash_controller.clone(), &transaction)?;
Ok(()) Ok(())
})?; })
.await?;
Ok(app) Ok(app)
} }
pub(crate) async fn read_app(&self, params: AppId) -> Result<App, FlowyError> { pub(crate) async fn read_app(&self, params: AppId) -> Result<App, FlowyError> {
let app = self.persistence.begin_transaction(|transaction| { let app = self
.persistence
.begin_transaction(|transaction| {
let app = transaction.read_app(&params.app_id)?; let app = transaction.read_app(&params.app_id)?;
let trash_ids = self.trash_controller.read_trash_ids(&transaction)?; let trash_ids = self.trash_controller.read_trash_ids(&transaction)?;
if trash_ids.contains(&app.id) { if trash_ids.contains(&app.id) {
return Err(FlowyError::record_not_found()); return Err(FlowyError::record_not_found());
} }
Ok(app) Ok(app)
})?; })
.await?;
let _ = self.read_app_on_server(params)?; let _ = self.read_app_on_server(params)?;
Ok(app) Ok(app)
} }
@ -75,11 +81,14 @@ impl AppController {
let changeset = AppChangeset::new(params.clone()); let changeset = AppChangeset::new(params.clone());
let app_id = changeset.id.clone(); let app_id = changeset.id.clone();
let app = self.persistence.begin_transaction(|transaction| { let app = self
.persistence
.begin_transaction(|transaction| {
let _ = transaction.update_app(changeset)?; let _ = transaction.update_app(changeset)?;
let app = transaction.read_app(&app_id)?; let app = transaction.read_app(&app_id)?;
Ok(app) Ok(app)
})?; })
.await?;
send_dart_notification(&app_id, WorkspaceNotification::AppUpdated) send_dart_notification(&app_id, WorkspaceNotification::AppUpdated)
.payload(app) .payload(app)
.send(); .send();
@ -87,14 +96,17 @@ impl AppController {
Ok(()) Ok(())
} }
pub(crate) fn read_local_apps(&self, ids: Vec<String>) -> Result<Vec<App>, FlowyError> { pub(crate) async fn read_local_apps(&self, ids: Vec<String>) -> Result<Vec<App>, FlowyError> {
let apps = self.persistence.begin_transaction(|transaction| { let apps = self
.persistence
.begin_transaction(|transaction| {
let mut apps = vec![]; let mut apps = vec![];
for id in ids { for id in ids {
apps.push(transaction.read_app(&id)?); apps.push(transaction.read_app(&id)?);
} }
Ok(apps) Ok(apps)
})?; })
.await?;
Ok(apps) Ok(apps)
} }
} }
@ -131,7 +143,10 @@ impl AppController {
tokio::spawn(async move { tokio::spawn(async move {
match server.read_app(&token, params).await { match server.read_app(&token, params).await {
Ok(Some(app)) => { Ok(Some(app)) => {
match persistence.begin_transaction(|transaction| transaction.create_app(app.clone())) { match persistence
.begin_transaction(|transaction| transaction.create_app(app.clone()))
.await
{
Ok(_) => { Ok(_) => {
send_dart_notification(&app.id, WorkspaceNotification::AppUpdated) send_dart_notification(&app.id, WorkspaceNotification::AppUpdated)
.payload(app) .payload(app)
@ -175,17 +190,20 @@ async fn handle_trash_event(
) { ) {
match event { match event {
TrashEvent::NewTrash(identifiers, ret) | TrashEvent::Putback(identifiers, ret) => { TrashEvent::NewTrash(identifiers, ret) | TrashEvent::Putback(identifiers, ret) => {
let result = persistence.begin_transaction(|transaction| { let result = persistence
.begin_transaction(|transaction| {
for identifier in identifiers.items { for identifier in identifiers.items {
let app = transaction.read_app(&identifier.id)?; let app = transaction.read_app(&identifier.id)?;
let _ = notify_apps_changed(&app.workspace_id, trash_controller.clone(), &transaction)?; let _ = notify_apps_changed(&app.workspace_id, trash_controller.clone(), &transaction)?;
} }
Ok(()) Ok(())
}); })
.await;
let _ = ret.send(result).await; let _ = ret.send(result).await;
}, },
TrashEvent::Delete(identifiers, ret) => { TrashEvent::Delete(identifiers, ret) => {
let result = persistence.begin_transaction(|transaction| { let result = persistence
.begin_transaction(|transaction| {
let mut notify_ids = HashSet::new(); let mut notify_ids = HashSet::new();
for identifier in identifiers.items { for identifier in identifiers.items {
let app = transaction.read_app(&identifier.id)?; let app = transaction.read_app(&identifier.id)?;
@ -197,7 +215,8 @@ async fn handle_trash_event(
let _ = notify_apps_changed(&notify_id, trash_controller.clone(), &transaction)?; let _ = notify_apps_changed(&notify_id, trash_controller.clone(), &transaction)?;
} }
Ok(()) Ok(())
}); })
.await;
let _ = ret.send(result).await; let _ = ret.send(result).await;
}, },
} }

View File

@ -26,7 +26,8 @@ pub(crate) async fn delete_app_handler(
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: AppId = data.into_inner().try_into()?; let params: AppId = data.into_inner().try_into()?;
let trash = app_controller let trash = app_controller
.read_local_apps(vec![params.app_id])? .read_local_apps(vec![params.app_id])
.await?
.into_iter() .into_iter()
.map(|app| app.into()) .map(|app| app.into())
.collect::<Vec<Trash>>(); .collect::<Vec<Trash>>();

View File

@ -0,0 +1,99 @@
use crate::services::{persistence::FOLDER_ID, web_socket::make_folder_ws_manager};
use flowy_collaboration::{
entities::{revision::Revision, ws_data::ServerRevisionWSData},
folder::{FolderChange, FolderPad},
};
use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::{
RevisionCache,
RevisionCloudService,
RevisionManager,
RevisionObjectBuilder,
RevisionWebSocket,
RevisionWebSocketManager,
};
use lib_infra::future::FutureResult;
use lib_sqlite::ConnectionPool;
use parking_lot::RwLock;
use std::sync::Arc;
pub struct FolderEditor {
user_id: String,
pub(crate) folder: Arc<RwLock<FolderPad>>,
rev_manager: Arc<RevisionManager>,
ws_manager: Arc<RevisionWebSocketManager>,
}
impl FolderEditor {
pub async fn new(
user_id: &str,
token: &str,
pool: Arc<ConnectionPool>,
web_socket: Arc<dyn RevisionWebSocket>,
) -> FlowyResult<Self> {
let cache = Arc::new(RevisionCache::new(user_id, FOLDER_ID, pool));
let mut rev_manager = RevisionManager::new(user_id, FOLDER_ID, cache);
let cloud = Arc::new(FolderRevisionCloudServiceImpl {
token: token.to_string(),
});
let folder_pad = Arc::new(RwLock::new(rev_manager.load::<FolderPadBuilder>(cloud).await?));
let rev_manager = Arc::new(rev_manager);
let ws_manager = make_folder_ws_manager(rev_manager.clone(), web_socket, folder_pad.clone()).await;
let user_id = user_id.to_owned();
Ok(Self {
user_id,
folder: folder_pad,
rev_manager,
ws_manager,
})
}
pub async fn receive_ws_data(&self, data: ServerRevisionWSData) -> FlowyResult<()> {
let _ = self.ws_manager.ws_passthrough_tx.send(data).await.map_err(|e| {
let err_msg = format!("{} passthrough error: {}", FOLDER_ID, e);
FlowyError::internal().context(err_msg)
})?;
Ok(())
}
pub(crate) fn apply_change(&self, change: FolderChange) -> FlowyResult<()> {
let FolderChange { delta, md5 } = change;
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let delta_data = delta.to_bytes();
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
delta_data,
&self.user_id,
md5,
);
let _ = futures::executor::block_on(async { self.rev_manager.add_local_revision(&revision).await })?;
Ok(())
}
}
struct FolderPadBuilder();
impl RevisionObjectBuilder for FolderPadBuilder {
type Output = FolderPad;
fn build_with_revisions(_object_id: &str, revisions: Vec<Revision>) -> FlowyResult<Self::Output> {
let pad = FolderPad::from_revisions(revisions)?;
Ok(pad)
}
}
struct FolderRevisionCloudServiceImpl {
#[allow(dead_code)]
token: String,
// server: Arc<dyn FolderCouldServiceV2>,
}
impl RevisionCloudService for FolderRevisionCloudServiceImpl {
#[tracing::instrument(level = "debug", skip(self))]
fn fetch_object(&self, _user_id: &str, _object_id: &str) -> FutureResult<Vec<Revision>, FlowyError> {
FutureResult::new(async move { Ok(vec![]) })
}
}

View File

@ -4,7 +4,9 @@ pub(crate) use view::controller::*;
pub(crate) use workspace::controller::*; pub(crate) use workspace::controller::*;
pub(crate) mod app; pub(crate) mod app;
pub(crate) mod folder_editor;
pub(crate) mod persistence; pub(crate) mod persistence;
pub(crate) mod trash; pub(crate) mod trash;
pub(crate) mod view; pub(crate) mod view;
mod web_socket;
pub(crate) mod workspace; pub(crate) mod workspace;

View File

@ -1,11 +1,8 @@
use crate::{ use crate::{
module::WorkspaceDatabase, module::WorkspaceDatabase,
services::persistence::{AppTableSql, TrashTableSql, ViewTableSql, WorkspaceTableSql, FOLDER_ID}, services::persistence::{AppTableSql, TrashTableSql, ViewTableSql, WorkspaceTableSql},
};
use flowy_collaboration::{
entities::revision::{md5, Revision},
folder::FolderPad,
}; };
use flowy_collaboration::{entities::revision::md5, folder::FolderPad};
use flowy_core_data_model::entities::{ use flowy_core_data_model::entities::{
app::{App, RepeatedApp}, app::{App, RepeatedApp},
view::{RepeatedView, View}, view::{RepeatedView, View},
@ -13,7 +10,6 @@ use flowy_core_data_model::entities::{
}; };
use flowy_database::kv::KV; use flowy_database::kv::KV;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::{RevisionCache, RevisionManager};
use std::sync::Arc; use std::sync::Arc;
pub(crate) const V1_MIGRATION: &str = "FOLDER_V1_MIGRATION"; pub(crate) const V1_MIGRATION: &str = "FOLDER_V1_MIGRATION";

View File

@ -6,13 +6,13 @@ use flowy_collaboration::{
entities::revision::{Revision, RevisionState}, entities::revision::{Revision, RevisionState},
folder::FolderPad, folder::FolderPad,
}; };
use parking_lot::RwLock;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::RwLock;
pub use version_1::{app_sql::*, trash_sql::*, v1_impl::V1Transaction, view_sql::*, workspace_sql::*}; pub use version_1::{app_sql::*, trash_sql::*, v1_impl::V1Transaction, view_sql::*, workspace_sql::*};
use crate::{ use crate::{
module::{WorkspaceDatabase, WorkspaceUser}, module::{WorkspaceDatabase, WorkspaceUser},
services::persistence::{migration::FolderMigration, version_2::v2_impl::FolderEditor}, services::{folder_editor::FolderEditor, persistence::migration::FolderMigration},
}; };
use flowy_core_data_model::entities::{ use flowy_core_data_model::entities::{
app::App, app::App,
@ -22,7 +22,8 @@ use flowy_core_data_model::entities::{
workspace::Workspace, workspace::Workspace,
}; };
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::{mk_revision_disk_cache, RevisionCache, RevisionManager, RevisionRecord}; use flowy_sync::{mk_revision_disk_cache, RevisionRecord};
use lib_sqlite::ConnectionPool;
pub const FOLDER_ID: &str = "flowy_folder"; pub const FOLDER_ID: &str = "flowy_folder";
@ -50,16 +51,13 @@ pub trait FolderPersistenceTransaction {
} }
pub struct FolderPersistence { pub struct FolderPersistence {
user: Arc<dyn WorkspaceUser>,
database: Arc<dyn WorkspaceDatabase>, database: Arc<dyn WorkspaceDatabase>,
folder_editor: RwLock<Option<Arc<FolderEditor>>>, folder_editor: Arc<RwLock<Option<Arc<FolderEditor>>>>,
} }
impl FolderPersistence { impl FolderPersistence {
pub fn new(user: Arc<dyn WorkspaceUser>, database: Arc<dyn WorkspaceDatabase>) -> Self { pub fn new(database: Arc<dyn WorkspaceDatabase>, folder_editor: Arc<RwLock<Option<Arc<FolderEditor>>>>) -> Self {
let folder_editor = RwLock::new(None);
Self { Self {
user,
database, database,
folder_editor, folder_editor,
} }
@ -89,21 +87,17 @@ impl FolderPersistence {
conn.immediate_transaction::<_, FlowyError, _>(|| f(Box::new(V1Transaction(&conn)))) conn.immediate_transaction::<_, FlowyError, _>(|| f(Box::new(V1Transaction(&conn))))
} }
pub fn begin_transaction<F, O>(&self, f: F) -> FlowyResult<O> pub async fn begin_transaction<F, O>(&self, f: F) -> FlowyResult<O>
where where
F: FnOnce(Arc<dyn FolderPersistenceTransaction>) -> FlowyResult<O>, F: FnOnce(Arc<dyn FolderPersistenceTransaction>) -> FlowyResult<O>,
{ {
match self.folder_editor.read().clone() { match self.folder_editor.read().await.clone() {
None => { None => Err(FlowyError::internal().context("FolderEditor should be initialized after user login in.")),
tracing::error!("FolderEditor should be initialized after user login in.");
let editor = futures::executor::block_on(async { self.init_folder_editor().await })?;
f(editor)
},
Some(editor) => f(editor), Some(editor) => f(editor),
} }
} }
pub fn user_did_logout(&self) { *self.folder_editor.write() = None; } pub fn db_pool(&self) -> FlowyResult<Arc<ConnectionPool>> { self.database.db_pool() }
pub async fn initialize(&self, user_id: &str) -> FlowyResult<()> { pub async fn initialize(&self, user_id: &str) -> FlowyResult<()> {
let migrations = FolderMigration::new(user_id, self.database.clone()); let migrations = FolderMigration::new(user_id, self.database.clone());
@ -112,20 +106,9 @@ impl FolderPersistence {
self.save_folder(user_id, migrated_folder).await?; self.save_folder(user_id, migrated_folder).await?;
} }
let _ = self.init_folder_editor().await?;
Ok(()) Ok(())
} }
async fn init_folder_editor(&self) -> FlowyResult<Arc<FolderEditor>> {
let user_id = self.user.user_id()?;
let token = self.user.token()?;
let pool = self.database.db_pool()?;
let folder_editor = FolderEditor::new(&user_id, &token, pool).await?;
let editor = Arc::new(folder_editor);
*self.folder_editor.write() = Some(editor.clone());
Ok(editor)
}
pub async fn save_folder(&self, user_id: &str, folder: FolderPad) -> FlowyResult<()> { pub async fn save_folder(&self, user_id: &str, folder: FolderPad) -> FlowyResult<()> {
let pool = self.database.db_pool()?; let pool = self.database.db_pool()?;
let delta_data = folder.delta().to_bytes(); let delta_data = folder.delta().to_bytes();

View File

@ -1,81 +1,32 @@
use crate::services::persistence::{ use crate::services::{
AppChangeset, folder_editor::FolderEditor,
FolderPersistenceTransaction, persistence::{AppChangeset, FolderPersistenceTransaction, ViewChangeset, WorkspaceChangeset},
ViewChangeset,
WorkspaceChangeset,
FOLDER_ID,
};
use flowy_collaboration::{
entities::revision::Revision,
folder::{FolderChange, FolderPad},
}; };
use flowy_core_data_model::entities::{ use flowy_core_data_model::entities::{
app::App, app::App,
prelude::{RepeatedTrash, Trash, View, Workspace}, prelude::{RepeatedTrash, Trash, View, Workspace},
}; };
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::{RevisionCache, RevisionCloudService, RevisionManager, RevisionObjectBuilder};
use lib_infra::future::FutureResult;
use lib_sqlite::ConnectionPool;
use parking_lot::RwLock;
use std::sync::Arc; use std::sync::Arc;
pub struct FolderEditor {
user_id: String,
folder_pad: Arc<RwLock<FolderPad>>,
rev_manager: Arc<RevisionManager>,
}
impl FolderEditor {
pub async fn new(user_id: &str, token: &str, pool: Arc<ConnectionPool>) -> FlowyResult<Self> {
let cache = Arc::new(RevisionCache::new(user_id, FOLDER_ID, pool));
let mut rev_manager = RevisionManager::new(user_id, FOLDER_ID, cache);
let cloud = Arc::new(FolderRevisionCloudServiceImpl {
token: token.to_string(),
});
let folder_pad = Arc::new(RwLock::new(rev_manager.load::<FolderPadBuilder>(cloud).await?));
let rev_manager = Arc::new(rev_manager);
let user_id = user_id.to_owned();
Ok(Self {
user_id,
folder_pad,
rev_manager,
})
}
fn apply_change(&self, change: FolderChange) -> FlowyResult<()> {
let FolderChange { delta, md5 } = change;
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let delta_data = delta.to_bytes();
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
delta_data,
&self.user_id,
md5,
);
let _ = futures::executor::block_on(async { self.rev_manager.add_local_revision(&revision).await })?;
Ok(())
}
}
impl FolderPersistenceTransaction for FolderEditor { impl FolderPersistenceTransaction for FolderEditor {
fn create_workspace(&self, _user_id: &str, workspace: Workspace) -> FlowyResult<()> { fn create_workspace(&self, _user_id: &str, workspace: Workspace) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().create_workspace(workspace)? { if let Some(change) = self.folder.write().create_workspace(workspace)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
} }
fn read_workspaces(&self, _user_id: &str, workspace_id: Option<String>) -> FlowyResult<Vec<Workspace>> { fn read_workspaces(&self, _user_id: &str, workspace_id: Option<String>) -> FlowyResult<Vec<Workspace>> {
let workspaces = self.folder_pad.read().read_workspaces(workspace_id)?; let workspaces = self.folder.read().read_workspaces(workspace_id)?;
Ok(workspaces) Ok(workspaces)
} }
fn update_workspace(&self, changeset: WorkspaceChangeset) -> FlowyResult<()> { fn update_workspace(&self, changeset: WorkspaceChangeset) -> FlowyResult<()> {
if let Some(change) = self if let Some(change) = self
.folder_pad .folder
.write() .write()
.update_workspace(&changeset.id, changeset.name, changeset.desc)? .update_workspace(&changeset.id, changeset.name, changeset.desc)?
{ {
@ -85,14 +36,14 @@ impl FolderPersistenceTransaction for FolderEditor {
} }
fn delete_workspace(&self, workspace_id: &str) -> FlowyResult<()> { fn delete_workspace(&self, workspace_id: &str) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().delete_workspace(workspace_id)? { if let Some(change) = self.folder.write().delete_workspace(workspace_id)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
} }
fn create_app(&self, app: App) -> FlowyResult<()> { fn create_app(&self, app: App) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().create_app(app)? { if let Some(change) = self.folder.write().create_app(app)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
@ -100,7 +51,7 @@ impl FolderPersistenceTransaction for FolderEditor {
fn update_app(&self, changeset: AppChangeset) -> FlowyResult<()> { fn update_app(&self, changeset: AppChangeset) -> FlowyResult<()> {
if let Some(change) = self if let Some(change) = self
.folder_pad .folder
.write() .write()
.update_app(&changeset.id, changeset.name, changeset.desc)? .update_app(&changeset.id, changeset.name, changeset.desc)?
{ {
@ -110,12 +61,12 @@ impl FolderPersistenceTransaction for FolderEditor {
} }
fn read_app(&self, app_id: &str) -> FlowyResult<App> { fn read_app(&self, app_id: &str) -> FlowyResult<App> {
let app = self.folder_pad.read().read_app(app_id)?; let app = self.folder.read().read_app(app_id)?;
Ok(app) Ok(app)
} }
fn read_workspace_apps(&self, workspace_id: &str) -> FlowyResult<Vec<App>> { fn read_workspace_apps(&self, workspace_id: &str) -> FlowyResult<Vec<App>> {
let workspaces = self.folder_pad.read().read_workspaces(Some(workspace_id.to_owned()))?; let workspaces = self.folder.read().read_workspaces(Some(workspace_id.to_owned()))?;
match workspaces.first() { match workspaces.first() {
None => { None => {
Err(FlowyError::record_not_found().context(format!("can't find workspace with id {}", workspace_id))) Err(FlowyError::record_not_found().context(format!("can't find workspace with id {}", workspace_id)))
@ -125,92 +76,68 @@ impl FolderPersistenceTransaction for FolderEditor {
} }
fn delete_app(&self, app_id: &str) -> FlowyResult<App> { fn delete_app(&self, app_id: &str) -> FlowyResult<App> {
let app = self.folder_pad.read().read_app(app_id)?; let app = self.folder.read().read_app(app_id)?;
if let Some(change) = self.folder_pad.write().delete_app(app_id)? { if let Some(change) = self.folder.write().delete_app(app_id)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(app) Ok(app)
} }
fn create_view(&self, view: View) -> FlowyResult<()> { fn create_view(&self, view: View) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().create_view(view)? { if let Some(change) = self.folder.write().create_view(view)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
} }
fn read_view(&self, view_id: &str) -> FlowyResult<View> { fn read_view(&self, view_id: &str) -> FlowyResult<View> {
let view = self.folder_pad.read().read_view(view_id)?; let view = self.folder.read().read_view(view_id)?;
Ok(view) Ok(view)
} }
fn read_views(&self, belong_to_id: &str) -> FlowyResult<Vec<View>> { fn read_views(&self, belong_to_id: &str) -> FlowyResult<Vec<View>> {
let views = self.folder_pad.read().read_views(belong_to_id)?; let views = self.folder.read().read_views(belong_to_id)?;
Ok(views) Ok(views)
} }
fn update_view(&self, changeset: ViewChangeset) -> FlowyResult<()> { fn update_view(&self, changeset: ViewChangeset) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().update_view( if let Some(change) =
&changeset.id, self.folder
changeset.name, .write()
changeset.desc, .update_view(&changeset.id, changeset.name, changeset.desc, changeset.modified_time)?
changeset.modified_time, {
)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
} }
fn delete_view(&self, view_id: &str) -> FlowyResult<()> { fn delete_view(&self, view_id: &str) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().delete_view(view_id)? { if let Some(change) = self.folder.write().delete_view(view_id)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
} }
fn create_trash(&self, trashes: Vec<Trash>) -> FlowyResult<()> { fn create_trash(&self, trashes: Vec<Trash>) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().create_trash(trashes)? { if let Some(change) = self.folder.write().create_trash(trashes)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
} }
fn read_trash(&self, trash_id: Option<String>) -> FlowyResult<RepeatedTrash> { fn read_trash(&self, trash_id: Option<String>) -> FlowyResult<RepeatedTrash> {
let trash = self.folder_pad.read().read_trash(trash_id)?; let trash = self.folder.read().read_trash(trash_id)?;
Ok(RepeatedTrash { items: trash }) Ok(RepeatedTrash { items: trash })
} }
fn delete_trash(&self, trash_ids: Option<Vec<String>>) -> FlowyResult<()> { fn delete_trash(&self, trash_ids: Option<Vec<String>>) -> FlowyResult<()> {
if let Some(change) = self.folder_pad.write().delete_trash(trash_ids)? { if let Some(change) = self.folder.write().delete_trash(trash_ids)? {
let _ = self.apply_change(change)?; let _ = self.apply_change(change)?;
} }
Ok(()) Ok(())
} }
} }
struct FolderPadBuilder();
impl RevisionObjectBuilder for FolderPadBuilder {
type Output = FolderPad;
fn build_with_revisions(_object_id: &str, revisions: Vec<Revision>) -> FlowyResult<Self::Output> {
let pad = FolderPad::from_revisions(revisions)?;
Ok(pad)
}
}
struct FolderRevisionCloudServiceImpl {
#[allow(dead_code)]
token: String,
// server: Arc<dyn FolderCouldServiceV2>,
}
impl RevisionCloudService for FolderRevisionCloudServiceImpl {
#[tracing::instrument(level = "debug", skip(self))]
fn fetch_object(&self, _user_id: &str, _object_id: &str) -> FutureResult<Vec<Revision>, FlowyError> {
FutureResult::new(async move { Ok(vec![]) })
}
}
impl<T> FolderPersistenceTransaction for Arc<T> impl<T> FolderPersistenceTransaction for Arc<T>
where where
T: FolderPersistenceTransaction + ?Sized, T: FolderPersistenceTransaction + ?Sized,

View File

@ -34,7 +34,9 @@ impl TrashController {
#[tracing::instrument(level = "debug", skip(self), fields(putback) err)] #[tracing::instrument(level = "debug", skip(self), fields(putback) err)]
pub async fn putback(&self, trash_id: &str) -> FlowyResult<()> { pub async fn putback(&self, trash_id: &str) -> FlowyResult<()> {
let (tx, mut rx) = mpsc::channel::<FlowyResult<()>>(1); let (tx, mut rx) = mpsc::channel::<FlowyResult<()>>(1);
let trash = self.persistence.begin_transaction(|transaction| { let trash = self
.persistence
.begin_transaction(|transaction| {
let mut repeated_trash = transaction.read_trash(Some(trash_id.to_owned()))?; let mut repeated_trash = transaction.read_trash(Some(trash_id.to_owned()))?;
let _ = transaction.delete_trash(Some(vec![trash_id.to_owned()]))?; let _ = transaction.delete_trash(Some(vec![trash_id.to_owned()]))?;
notify_trash_changed(transaction.read_trash(None)?); notify_trash_changed(transaction.read_trash(None)?);
@ -43,7 +45,8 @@ impl TrashController {
return Err(FlowyError::internal().context("Try to put back trash is not exists")); return Err(FlowyError::internal().context("Try to put back trash is not exists"));
} }
Ok(repeated_trash.pop().unwrap()) Ok(repeated_trash.pop().unwrap())
})?; })
.await?;
let identifier = TrashId { let identifier = TrashId {
id: trash.id, id: trash.id,
@ -63,11 +66,14 @@ impl TrashController {
#[tracing::instrument(level = "debug", skip(self) err)] #[tracing::instrument(level = "debug", skip(self) err)]
pub async fn restore_all(&self) -> FlowyResult<()> { pub async fn restore_all(&self) -> FlowyResult<()> {
let repeated_trash = self.persistence.begin_transaction(|transaction| { let repeated_trash = self
.persistence
.begin_transaction(|transaction| {
let trash = transaction.read_trash(None); let trash = transaction.read_trash(None);
let _ = transaction.delete_trash(None); let _ = transaction.delete_trash(None);
trash trash
})?; })
.await?;
let identifiers: RepeatedTrashId = repeated_trash.items.clone().into(); let identifiers: RepeatedTrashId = repeated_trash.items.clone().into();
let (tx, mut rx) = mpsc::channel::<FlowyResult<()>>(1); let (tx, mut rx) = mpsc::channel::<FlowyResult<()>>(1);
@ -83,7 +89,8 @@ impl TrashController {
pub async fn delete_all(&self) -> FlowyResult<()> { pub async fn delete_all(&self) -> FlowyResult<()> {
let repeated_trash = self let repeated_trash = self
.persistence .persistence
.begin_transaction(|transaction| transaction.read_trash(None))?; .begin_transaction(|transaction| transaction.read_trash(None))
.await?;
let trash_identifiers: RepeatedTrashId = repeated_trash.items.clone().into(); let trash_identifiers: RepeatedTrashId = repeated_trash.items.clone().into();
let _ = self.delete_with_identifiers(trash_identifiers.clone()).await?; let _ = self.delete_with_identifiers(trash_identifiers.clone()).await?;
@ -97,7 +104,8 @@ impl TrashController {
let _ = self.delete_with_identifiers(trash_identifiers.clone()).await?; let _ = self.delete_with_identifiers(trash_identifiers.clone()).await?;
let repeated_trash = self let repeated_trash = self
.persistence .persistence
.begin_transaction(|transaction| transaction.read_trash(None))?; .begin_transaction(|transaction| transaction.read_trash(None))
.await?;
notify_trash_changed(repeated_trash); notify_trash_changed(repeated_trash);
let _ = self.delete_trash_on_server(trash_identifiers)?; let _ = self.delete_trash_on_server(trash_identifiers)?;
@ -117,14 +125,17 @@ impl TrashController {
Err(e) => log::error!("{}", e), Err(e) => log::error!("{}", e),
}, },
} }
let _ = self.persistence.begin_transaction(|transaction| { let _ = self
.persistence
.begin_transaction(|transaction| {
let ids = trash_identifiers let ids = trash_identifiers
.items .items
.into_iter() .into_iter()
.map(|item| item.id) .map(|item| item.id)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
transaction.delete_trash(Some(ids)) transaction.delete_trash(Some(ids))
})?; })
.await?;
Ok(()) Ok(())
} }
@ -153,12 +164,15 @@ impl TrashController {
.as_str(), .as_str(),
); );
let _ = self.persistence.begin_transaction(|transaction| { let _ = self
.persistence
.begin_transaction(|transaction| {
let _ = transaction.create_trash(repeated_trash.clone())?; let _ = transaction.create_trash(repeated_trash.clone())?;
let _ = self.create_trash_on_server(repeated_trash); let _ = self.create_trash_on_server(repeated_trash);
notify_trash_changed(transaction.read_trash(None)?); notify_trash_changed(transaction.read_trash(None)?);
Ok(()) Ok(())
})?; })
.await?;
let _ = self.notify.send(TrashEvent::NewTrash(identifiers.into(), tx)); let _ = self.notify.send(TrashEvent::NewTrash(identifiers.into(), tx));
let _ = rx.recv().await.unwrap()?; let _ = rx.recv().await.unwrap()?;
@ -167,10 +181,11 @@ impl TrashController {
pub fn subscribe(&self) -> broadcast::Receiver<TrashEvent> { self.notify.subscribe() } pub fn subscribe(&self) -> broadcast::Receiver<TrashEvent> { self.notify.subscribe() }
pub fn read_trash(&self) -> Result<RepeatedTrash, FlowyError> { pub async fn read_trash(&self) -> Result<RepeatedTrash, FlowyError> {
let repeated_trash = self let repeated_trash = self
.persistence .persistence
.begin_transaction(|transaction| transaction.read_trash(None))?; .begin_transaction(|transaction| transaction.read_trash(None))
.await?;
let _ = self.read_trash_on_server()?; let _ = self.read_trash_on_server()?;
Ok(repeated_trash) Ok(repeated_trash)
} }
@ -229,10 +244,12 @@ impl TrashController {
match server.read_trash(&token).await { match server.read_trash(&token).await {
Ok(repeated_trash) => { Ok(repeated_trash) => {
tracing::debug!("Remote trash count: {}", repeated_trash.items.len()); tracing::debug!("Remote trash count: {}", repeated_trash.items.len());
let result = persistence.begin_transaction(|transaction| { let result = persistence
.begin_transaction(|transaction| {
let _ = transaction.create_trash(repeated_trash.items.clone())?; let _ = transaction.create_trash(repeated_trash.items.clone())?;
transaction.read_trash(None) transaction.read_trash(None)
}); })
.await;
match result { match result {
Ok(repeated_trash) => { Ok(repeated_trash) => {

View File

@ -10,7 +10,7 @@ use std::sync::Arc;
pub(crate) async fn read_trash_handler( pub(crate) async fn read_trash_handler(
controller: Unit<Arc<TrashController>>, controller: Unit<Arc<TrashController>>,
) -> DataResult<RepeatedTrash, FlowyError> { ) -> DataResult<RepeatedTrash, FlowyError> {
let repeated_trash = controller.read_trash()?; let repeated_trash = controller.read_trash().await?;
data_result(repeated_trash) data_result(repeated_trash)
} }

View File

@ -106,36 +106,43 @@ impl ViewController {
pub(crate) async fn create_view_on_local(&self, view: View) -> Result<(), FlowyError> { pub(crate) async fn create_view_on_local(&self, view: View) -> Result<(), FlowyError> {
let trash_controller = self.trash_controller.clone(); let trash_controller = self.trash_controller.clone();
self.persistence.begin_transaction(|transaction| { self.persistence
.begin_transaction(|transaction| {
let belong_to_id = view.belong_to_id.clone(); let belong_to_id = view.belong_to_id.clone();
let _ = transaction.create_view(view)?; let _ = transaction.create_view(view)?;
let _ = notify_views_changed(&belong_to_id, trash_controller, &transaction)?; let _ = notify_views_changed(&belong_to_id, trash_controller, &transaction)?;
Ok(()) Ok(())
}) })
.await
} }
#[tracing::instrument(skip(self, params), fields(view_id = %params.view_id), err)] #[tracing::instrument(skip(self, params), fields(view_id = %params.view_id), err)]
pub(crate) async fn read_view(&self, params: ViewId) -> Result<View, FlowyError> { pub(crate) async fn read_view(&self, params: ViewId) -> Result<View, FlowyError> {
let view = self.persistence.begin_transaction(|transaction| { let view = self
.persistence
.begin_transaction(|transaction| {
let view = transaction.read_view(&params.view_id)?; let view = transaction.read_view(&params.view_id)?;
let trash_ids = self.trash_controller.read_trash_ids(&transaction)?; let trash_ids = self.trash_controller.read_trash_ids(&transaction)?;
if trash_ids.contains(&view.id) { if trash_ids.contains(&view.id) {
return Err(FlowyError::record_not_found()); return Err(FlowyError::record_not_found());
} }
Ok(view) Ok(view)
})?; })
.await?;
let _ = self.read_view_on_server(params); let _ = self.read_view_on_server(params);
Ok(view) Ok(view)
} }
pub(crate) fn read_local_views(&self, ids: Vec<String>) -> Result<Vec<View>, FlowyError> { pub(crate) async fn read_local_views(&self, ids: Vec<String>) -> Result<Vec<View>, FlowyError> {
self.persistence.begin_transaction(|transaction| { self.persistence
.begin_transaction(|transaction| {
let mut views = vec![]; let mut views = vec![];
for view_id in ids { for view_id in ids {
views.push(transaction.read_view(&view_id)?); views.push(transaction.read_view(&view_id)?);
} }
Ok(views) Ok(views)
}) })
.await
} }
#[tracing::instrument(level = "debug", skip(self), err)] #[tracing::instrument(level = "debug", skip(self), err)]
@ -170,7 +177,8 @@ impl ViewController {
pub(crate) async fn duplicate_view(&self, doc_id: &str) -> Result<(), FlowyError> { pub(crate) async fn duplicate_view(&self, doc_id: &str) -> Result<(), FlowyError> {
let view = self let view = self
.persistence .persistence
.begin_transaction(|transaction| transaction.read_view(doc_id))?; .begin_transaction(|transaction| transaction.read_view(doc_id))
.await?;
let editor = self.document_ctx.controller.open_document(doc_id).await?; let editor = self.document_ctx.controller.open_document(doc_id).await?;
let document_json = editor.document_json().await?; let document_json = editor.document_json().await?;
@ -201,16 +209,20 @@ impl ViewController {
// belong_to_id will be the app_id or view_id. // belong_to_id will be the app_id or view_id.
#[tracing::instrument(level = "debug", skip(self), err)] #[tracing::instrument(level = "debug", skip(self), err)]
pub(crate) async fn read_views_belong_to(&self, belong_to_id: &str) -> Result<RepeatedView, FlowyError> { pub(crate) async fn read_views_belong_to(&self, belong_to_id: &str) -> Result<RepeatedView, FlowyError> {
self.persistence.begin_transaction(|transaction| { self.persistence
.begin_transaction(|transaction| {
read_belonging_views_on_local(belong_to_id, self.trash_controller.clone(), &transaction) read_belonging_views_on_local(belong_to_id, self.trash_controller.clone(), &transaction)
}) })
.await
} }
#[tracing::instrument(level = "debug", skip(self, params), err)] #[tracing::instrument(level = "debug", skip(self, params), err)]
pub(crate) async fn update_view(&self, params: UpdateViewParams) -> Result<View, FlowyError> { pub(crate) async fn update_view(&self, params: UpdateViewParams) -> Result<View, FlowyError> {
let changeset = ViewChangeset::new(params.clone()); let changeset = ViewChangeset::new(params.clone());
let view_id = changeset.id.clone(); let view_id = changeset.id.clone();
let view = self.persistence.begin_transaction(|transaction| { let view = self
.persistence
.begin_transaction(|transaction| {
let _ = transaction.update_view(changeset)?; let _ = transaction.update_view(changeset)?;
let view = transaction.read_view(&view_id)?; let view = transaction.read_view(&view_id)?;
send_dart_notification(&view_id, WorkspaceNotification::ViewUpdated) send_dart_notification(&view_id, WorkspaceNotification::ViewUpdated)
@ -218,7 +230,8 @@ impl ViewController {
.send(); .send();
let _ = notify_views_changed(&view.belong_to_id, self.trash_controller.clone(), &transaction)?; let _ = notify_views_changed(&view.belong_to_id, self.trash_controller.clone(), &transaction)?;
Ok(view) Ok(view)
})?; })
.await?;
let _ = self.update_view_on_server(params); let _ = self.update_view_on_server(params);
Ok(view) Ok(view)
@ -229,13 +242,14 @@ impl ViewController {
Ok(doc) Ok(doc)
} }
pub(crate) fn latest_visit_view(&self) -> FlowyResult<Option<View>> { pub(crate) async fn latest_visit_view(&self) -> FlowyResult<Option<View>> {
match KV::get_str(LATEST_VIEW_ID) { match KV::get_str(LATEST_VIEW_ID) {
None => Ok(None), None => Ok(None),
Some(view_id) => { Some(view_id) => {
let view = self let view = self
.persistence .persistence
.begin_transaction(|transaction| transaction.read_view(&view_id))?; .begin_transaction(|transaction| transaction.read_view(&view_id))
.await?;
Ok(Some(view)) Ok(Some(view))
}, },
} }
@ -277,7 +291,10 @@ impl ViewController {
tokio::spawn(async move { tokio::spawn(async move {
match server.read_view(&token, params).await { match server.read_view(&token, params).await {
Ok(Some(view)) => { Ok(Some(view)) => {
match persistence.begin_transaction(|transaction| transaction.create_view(view.clone())) { match persistence
.begin_transaction(|transaction| transaction.create_view(view.clone()))
.await
{
Ok(_) => { Ok(_) => {
send_dart_notification(&view.id, WorkspaceNotification::ViewUpdated) send_dart_notification(&view.id, WorkspaceNotification::ViewUpdated)
.payload(view.clone()) .payload(view.clone())
@ -324,29 +341,34 @@ async fn handle_trash_event(
) { ) {
match event { match event {
TrashEvent::NewTrash(identifiers, ret) => { TrashEvent::NewTrash(identifiers, ret) => {
let result = persistence.begin_transaction(|transaction| { let result = persistence
.begin_transaction(|transaction| {
let views = read_local_views_with_transaction(identifiers, &transaction)?; let views = read_local_views_with_transaction(identifiers, &transaction)?;
for view in views { for view in views {
let _ = notify_views_changed(&view.belong_to_id, trash_can.clone(), &transaction)?; let _ = notify_views_changed(&view.belong_to_id, trash_can.clone(), &transaction)?;
notify_dart(view, WorkspaceNotification::ViewDeleted); notify_dart(view, WorkspaceNotification::ViewDeleted);
} }
Ok(()) Ok(())
}); })
.await;
let _ = ret.send(result).await; let _ = ret.send(result).await;
}, },
TrashEvent::Putback(identifiers, ret) => { TrashEvent::Putback(identifiers, ret) => {
let result = persistence.begin_transaction(|transaction| { let result = persistence
.begin_transaction(|transaction| {
let views = read_local_views_with_transaction(identifiers, &transaction)?; let views = read_local_views_with_transaction(identifiers, &transaction)?;
for view in views { for view in views {
let _ = notify_views_changed(&view.belong_to_id, trash_can.clone(), &transaction)?; let _ = notify_views_changed(&view.belong_to_id, trash_can.clone(), &transaction)?;
notify_dart(view, WorkspaceNotification::ViewRestored); notify_dart(view, WorkspaceNotification::ViewRestored);
} }
Ok(()) Ok(())
}); })
.await;
let _ = ret.send(result).await; let _ = ret.send(result).await;
}, },
TrashEvent::Delete(identifiers, ret) => { TrashEvent::Delete(identifiers, ret) => {
let result = persistence.begin_transaction(|transaction| { let result = persistence
.begin_transaction(|transaction| {
let mut notify_ids = HashSet::new(); let mut notify_ids = HashSet::new();
for identifier in identifiers.items { for identifier in identifiers.items {
let view = transaction.read_view(&identifier.id)?; let view = transaction.read_view(&identifier.id)?;
@ -360,7 +382,8 @@ async fn handle_trash_event(
} }
Ok(()) Ok(())
}); })
.await;
let _ = ret.send(result).await; let _ = ret.send(result).await;
}, },
} }

View File

@ -72,7 +72,8 @@ pub(crate) async fn delete_view_handler(
} }
let trash = view_controller let trash = view_controller
.read_local_views(params.items)? .read_local_views(params.items)
.await?
.into_iter() .into_iter()
.map(|view| view.into()) .map(|view| view.into())
.collect::<Vec<Trash>>(); .collect::<Vec<Trash>>();

View File

@ -0,0 +1,85 @@
use crate::services::persistence::FOLDER_ID;
use bytes::Bytes;
use flowy_collaboration::{
entities::{
revision::RevisionRange,
ws_data::{ClientRevisionWSData, NewDocumentUser, ServerRevisionWSDataType},
},
folder::FolderPad,
};
use flowy_error::FlowyError;
use flowy_sync::{
CompositeWSSinkDataProvider,
RevisionManager,
RevisionWSSinkDataProvider,
RevisionWSSteamConsumer,
RevisionWebSocket,
RevisionWebSocketManager,
};
use lib_infra::future::FutureResult;
use parking_lot::RwLock;
use std::{sync::Arc, time::Duration};
/// Builds the websocket manager that keeps the folder object in sync with the
/// server, wiring the outbound (sink) and inbound (stream) sides to the shared
/// revision machinery.
pub(crate) async fn make_folder_ws_manager(
    rev_manager: Arc<RevisionManager>,
    web_socket: Arc<dyn RevisionWebSocket>,
    folder_pad: Arc<RwLock<FolderPad>>,
) -> Arc<RevisionWebSocketManager> {
    // One provider is shared by both sides: the stream consumer feeds acks and
    // pulled revisions into it, the sink adapter drains it onto the socket.
    let shared_provider = Arc::new(CompositeWSSinkDataProvider::new(FOLDER_ID, rev_manager.clone()));
    let stream_consumer = Arc::new(FolderWSStreamConsumerAdapter {
        object_id: FOLDER_ID.to_string(),
        folder_pad,
        rev_manager,
        sink_provider: shared_provider.clone(),
    });
    let data_provider = Arc::new(FolderWSSinkDataProviderAdapter(shared_provider));
    Arc::new(RevisionWebSocketManager::new(
        FOLDER_ID,
        web_socket,
        data_provider,
        stream_consumer,
        // Ping interval keeping the connection alive.
        Duration::from_millis(2000),
    ))
}
/// Adapts the shared `CompositeWSSinkDataProvider` to the
/// `RevisionWSSinkDataProvider` trait expected by `RevisionWebSocketManager`.
pub(crate) struct FolderWSSinkDataProviderAdapter(Arc<CompositeWSSinkDataProvider>);
impl RevisionWSSinkDataProvider for FolderWSSinkDataProviderAdapter {
    /// Returns the next outbound websocket payload, if any is queued.
    fn next(&self) -> FutureResult<Option<ClientRevisionWSData>, FlowyError> {
        // Clone the Arc so the provider can be moved into the async block.
        let sink_provider = self.0.clone();
        FutureResult::new(async move { sink_provider.next().await })
    }
}
/// Consumes websocket messages pushed by the server for the folder object.
struct FolderWSStreamConsumerAdapter {
    // Identifier of the synced object (set to FOLDER_ID by the constructor).
    object_id: String,
    // In-memory folder document; presumably updated when server pushes are
    // handled — currently unused because `receive_push_revision` is a stub.
    folder_pad: Arc<RwLock<FolderPad>>,
    // Local revision history, queried when the server pulls a revision range.
    rev_manager: Arc<RevisionManager>,
    // Shared outbound queue; acks and pulled revisions are fed back through it.
    sink_provider: Arc<CompositeWSSinkDataProvider>,
}
impl RevisionWSSteamConsumer for FolderWSStreamConsumerAdapter {
fn receive_push_revision(&self, bytes: Bytes) -> FutureResult<(), FlowyError> { todo!() }
fn receive_ack(&self, id: String, ty: ServerRevisionWSDataType) -> FutureResult<(), FlowyError> {
let sink_provider = self.sink_provider.clone();
FutureResult::new(async move { sink_provider.ack_data(id, ty).await })
}
fn receive_new_user_connect(&self, _new_user: NewDocumentUser) -> FutureResult<(), FlowyError> {
FutureResult::new(async move { Ok(()) })
}
fn pull_revisions_in_range(&self, range: RevisionRange) -> FutureResult<(), FlowyError> {
let rev_manager = self.rev_manager.clone();
let sink_provider = self.sink_provider.clone();
let object_id = self.object_id.clone();
FutureResult::new(async move {
let revisions = rev_manager.get_revisions_in_range(range).await?;
let data = ClientRevisionWSData::from_revisions(&object_id, revisions);
sink_provider.push_data(data).await;
Ok(())
})
}
}

View File

@ -41,10 +41,13 @@ impl WorkspaceController {
let workspace = self.create_workspace_on_server(params.clone()).await?; let workspace = self.create_workspace_on_server(params.clone()).await?;
let user_id = self.user.user_id()?; let user_id = self.user.user_id()?;
let token = self.user.token()?; let token = self.user.token()?;
let workspaces = self.persistence.begin_transaction(|transaction| { let workspaces = self
.persistence
.begin_transaction(|transaction| {
let _ = transaction.create_workspace(&user_id, workspace.clone())?; let _ = transaction.create_workspace(&user_id, workspace.clone())?;
transaction.read_workspaces(&user_id, None) transaction.read_workspaces(&user_id, None)
})?; })
.await?;
let repeated_workspace = RepeatedWorkspace { items: workspaces }; let repeated_workspace = RepeatedWorkspace { items: workspaces };
send_dart_notification(&token, WorkspaceNotification::UserCreateWorkspace) send_dart_notification(&token, WorkspaceNotification::UserCreateWorkspace)
.payload(repeated_workspace) .payload(repeated_workspace)
@ -57,11 +60,14 @@ impl WorkspaceController {
pub(crate) async fn update_workspace(&self, params: UpdateWorkspaceParams) -> Result<(), FlowyError> { pub(crate) async fn update_workspace(&self, params: UpdateWorkspaceParams) -> Result<(), FlowyError> {
let changeset = WorkspaceChangeset::new(params.clone()); let changeset = WorkspaceChangeset::new(params.clone());
let workspace_id = changeset.id.clone(); let workspace_id = changeset.id.clone();
let workspace = self.persistence.begin_transaction(|transaction| { let workspace = self
.persistence
.begin_transaction(|transaction| {
let _ = transaction.update_workspace(changeset)?; let _ = transaction.update_workspace(changeset)?;
let user_id = self.user.user_id()?; let user_id = self.user.user_id()?;
self.read_local_workspace(workspace_id.clone(), &user_id, &transaction) self.read_local_workspace(workspace_id.clone(), &user_id, &transaction)
})?; })
.await?;
send_dart_notification(&workspace_id, WorkspaceNotification::WorkspaceUpdated) send_dart_notification(&workspace_id, WorkspaceNotification::WorkspaceUpdated)
.payload(workspace) .payload(workspace)
@ -75,10 +81,13 @@ impl WorkspaceController {
pub(crate) async fn delete_workspace(&self, workspace_id: &str) -> Result<(), FlowyError> { pub(crate) async fn delete_workspace(&self, workspace_id: &str) -> Result<(), FlowyError> {
let user_id = self.user.user_id()?; let user_id = self.user.user_id()?;
let token = self.user.token()?; let token = self.user.token()?;
let repeated_workspace = self.persistence.begin_transaction(|transaction| { let repeated_workspace = self
.persistence
.begin_transaction(|transaction| {
let _ = transaction.delete_workspace(workspace_id)?; let _ = transaction.delete_workspace(workspace_id)?;
self.read_local_workspaces(None, &user_id, &transaction) self.read_local_workspaces(None, &user_id, &transaction)
})?; })
.await?;
send_dart_notification(&token, WorkspaceNotification::UserDeleteWorkspace) send_dart_notification(&token, WorkspaceNotification::UserDeleteWorkspace)
.payload(repeated_workspace) .payload(repeated_workspace)
.send(); .send();
@ -91,7 +100,8 @@ impl WorkspaceController {
if let Some(workspace_id) = params.workspace_id { if let Some(workspace_id) = params.workspace_id {
let workspace = self let workspace = self
.persistence .persistence
.begin_transaction(|transaction| self.read_local_workspace(workspace_id, &user_id, &transaction))?; .begin_transaction(|transaction| self.read_local_workspace(workspace_id, &user_id, &transaction))
.await?;
set_current_workspace(&workspace.id); set_current_workspace(&workspace.id);
Ok(workspace) Ok(workspace)
} else { } else {
@ -101,9 +111,12 @@ impl WorkspaceController {
pub(crate) async fn read_current_workspace_apps(&self) -> Result<RepeatedApp, FlowyError> { pub(crate) async fn read_current_workspace_apps(&self) -> Result<RepeatedApp, FlowyError> {
let workspace_id = get_current_workspace()?; let workspace_id = get_current_workspace()?;
let repeated_app = self.persistence.begin_transaction(|transaction| { let repeated_app = self
.persistence
.begin_transaction(|transaction| {
read_local_workspace_apps(&workspace_id, self.trash_controller.clone(), &transaction) read_local_workspace_apps(&workspace_id, self.trash_controller.clone(), &transaction)
})?; })
.await?;
// TODO: read from server // TODO: read from server
Ok(repeated_app) Ok(repeated_app)
} }

View File

@ -52,15 +52,19 @@ pub(crate) async fn read_workspaces_handler(
let workspace_controller = folder.workspace_controller.clone(); let workspace_controller = folder.workspace_controller.clone();
let trash_controller = folder.trash_controller.clone(); let trash_controller = folder.trash_controller.clone();
let workspaces = folder.persistence.begin_transaction(|transaction| { let workspaces = folder
.persistence
.begin_transaction(|transaction| {
let mut workspaces = let mut workspaces =
workspace_controller.read_local_workspaces(params.workspace_id.clone(), &user_id, &transaction)?; workspace_controller.read_local_workspaces(params.workspace_id.clone(), &user_id, &transaction)?;
for workspace in workspaces.iter_mut() { for workspace in workspaces.iter_mut() {
let apps = read_local_workspace_apps(&workspace.id, trash_controller.clone(), &transaction)?.into_inner(); let apps =
read_local_workspace_apps(&workspace.id, trash_controller.clone(), &transaction)?.into_inner();
workspace.apps.items = apps; workspace.apps.items = apps;
} }
Ok(workspaces) Ok(workspaces)
})?; })
.await?;
let _ = read_workspaces_on_server(folder, user_id, params); let _ = read_workspaces_on_server(folder, user_id, params);
data_result(workspaces) data_result(workspaces)
} }
@ -75,13 +79,16 @@ pub async fn read_cur_workspace_handler(
workspace_id: Some(workspace_id.clone()), workspace_id: Some(workspace_id.clone()),
}; };
let workspace = folder.persistence.begin_transaction(|transaction| { let workspace = folder
.persistence
.begin_transaction(|transaction| {
folder folder
.workspace_controller .workspace_controller
.read_local_workspace(workspace_id, &user_id, &transaction) .read_local_workspace(workspace_id, &user_id, &transaction)
})?; })
.await?;
let latest_view: Option<View> = folder.view_controller.latest_visit_view().unwrap_or(None); let latest_view: Option<View> = folder.view_controller.latest_visit_view().await.unwrap_or(None);
let setting = CurrentWorkspaceSetting { workspace, latest_view }; let setting = CurrentWorkspaceSetting { workspace, latest_view };
let _ = read_workspaces_on_server(folder, user_id, params); let _ = read_workspaces_on_server(folder, user_id, params);
data_result(setting) data_result(setting)
@ -98,7 +105,8 @@ fn read_workspaces_on_server(
tokio::spawn(async move { tokio::spawn(async move {
let workspaces = server.read_workspace(&token, params).await?; let workspaces = server.read_workspace(&token, params).await?;
let _ = persistence.begin_transaction(|transaction| { let _ = persistence
.begin_transaction(|transaction| {
tracing::debug!("Save {} workspace", workspaces.len()); tracing::debug!("Save {} workspace", workspaces.len());
for workspace in &workspaces.items { for workspace in &workspaces.items {
let m_workspace = workspace.clone(); let m_workspace = workspace.clone();
@ -122,7 +130,8 @@ fn read_workspaces_on_server(
} }
} }
Ok(()) Ok(())
})?; })
.await?;
send_dart_notification(&token, WorkspaceNotification::WorkspaceListUpdated) send_dart_notification(&token, WorkspaceNotification::WorkspaceListUpdated)
.payload(workspaces) .payload(workspaces)

View File

@ -97,13 +97,18 @@ impl DocumentController {
} }
pub async fn did_receive_ws_data(&self, data: Bytes) { pub async fn did_receive_ws_data(&self, data: Bytes) {
let data: ServerRevisionWSData = data.try_into().unwrap(); let result: Result<ServerRevisionWSData, protobuf::ProtobufError> = data.try_into();
match self.ws_receivers.get(&data.object_id) { match result {
Ok(data) => match self.ws_receivers.get(&data.object_id) {
None => tracing::error!("Can't find any source handler for {:?}", data.object_id), None => tracing::error!("Can't find any source handler for {:?}", data.object_id),
Some(handler) => match handler.receive_ws_data(data).await { Some(handler) => match handler.receive_ws_data(data).await {
Ok(_) => {}, Ok(_) => {},
Err(e) => tracing::error!("{}", e), Err(e) => tracing::error!("{}", e),
}, },
},
Err(e) => {
tracing::error!("Document ws data parser failed: {:?}", e);
},
} }
} }

View File

@ -7,11 +7,11 @@ use flowy_collaboration::{
util::make_delta_from_revisions, util::make_delta_from_revisions,
}; };
use flowy_error::FlowyError; use flowy_error::FlowyError;
use flowy_sync::RevisionManager; use flowy_sync::{DeltaMD5, RevisionManager, TransformDeltas};
use futures::stream::StreamExt; use futures::stream::StreamExt;
use lib_ot::{ use lib_ot::{
core::{Interval, OperationTransformable}, core::{Interval, OperationTransformable},
rich_text::{RichTextAttribute, RichTextDelta}, rich_text::{RichTextAttribute, RichTextAttributes, RichTextDelta},
}; };
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::{oneshot, RwLock}; use tokio::sync::{oneshot, RwLock};
@ -72,62 +72,36 @@ impl EditorCommandQueue {
let _ = self.save_local_delta(delta, md5).await?; let _ = self.save_local_delta(delta, md5).await?;
let _ = ret.send(Ok(())); let _ = ret.send(Ok(()));
}, },
EditorCommand::ComposeRemoteDelta { EditorCommand::ComposeRemoteDelta { client_delta, ret } => {
revisions,
client_delta,
server_delta,
ret,
} => {
let mut document = self.document.write().await; let mut document = self.document.write().await;
let _ = document.compose_delta(client_delta.clone())?; let _ = document.compose_delta(client_delta.clone())?;
let md5 = document.md5(); let md5 = document.md5();
for revision in &revisions { drop(document);
let _ = self.rev_manager.add_remote_revision(revision).await?; let _ = ret.send(Ok(md5));
}
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let doc_id = self.rev_manager.object_id.clone();
let user_id = self.user.user_id()?;
let (client_revision, server_revision) = make_client_and_server_revision(
&doc_id,
&user_id,
base_rev_id,
rev_id,
client_delta,
Some(server_delta),
md5,
);
let _ = self.rev_manager.add_remote_revision(&client_revision).await?;
let _ = ret.send(Ok(server_revision));
}, },
EditorCommand::OverrideDelta { revisions, delta, ret } => { EditorCommand::ResetDelta { delta, ret } => {
let mut document = self.document.write().await; let mut document = self.document.write().await;
let _ = document.set_delta(delta); let _ = document.set_delta(delta);
let md5 = document.md5(); let md5 = document.md5();
drop(document); drop(document);
let _ = ret.send(Ok(md5));
let repeated_revision = RepeatedRevision::new(revisions);
assert_eq!(repeated_revision.last().unwrap().md5, md5);
let _ = self.rev_manager.reset_object(repeated_revision).await?;
let _ = ret.send(Ok(()));
}, },
EditorCommand::TransformRevision { revisions, ret } => { EditorCommand::TransformDelta { delta, ret } => {
let f = || async { let f = || async {
let new_delta = make_delta_from_revisions(revisions)?;
let read_guard = self.document.read().await; let read_guard = self.document.read().await;
let mut server_prime: Option<RichTextDelta> = None; let mut server_prime: Option<RichTextDelta> = None;
let client_prime: RichTextDelta; let client_prime: RichTextDelta;
// The document is empty if its text is equal to the initial text. // The document is empty if its text is equal to the initial text.
if read_guard.is_empty::<NewlineDoc>() { if read_guard.is_empty::<NewlineDoc>() {
// Do nothing // Do nothing
client_prime = new_delta; client_prime = delta;
} else { } else {
let (s_prime, c_prime) = read_guard.delta().transform(&new_delta)?; let (s_prime, c_prime) = read_guard.delta().transform(&delta)?;
client_prime = c_prime; client_prime = c_prime;
server_prime = Some(s_prime); server_prime = Some(s_prime);
} }
drop(read_guard); drop(read_guard);
Ok::<TransformDeltas, CollaborateError>(TransformDeltas { Ok::<TransformDeltas<RichTextAttributes>, CollaborateError>(TransformDeltas {
client_prime, client_prime,
server_prime, server_prime,
}) })
@ -251,19 +225,16 @@ pub(crate) enum EditorCommand {
ret: Ret<()>, ret: Ret<()>,
}, },
ComposeRemoteDelta { ComposeRemoteDelta {
revisions: Vec<Revision>,
client_delta: RichTextDelta, client_delta: RichTextDelta,
server_delta: RichTextDelta, ret: Ret<DeltaMD5>,
ret: Ret<Option<Revision>>,
}, },
OverrideDelta { ResetDelta {
revisions: Vec<Revision>,
delta: RichTextDelta, delta: RichTextDelta,
ret: Ret<()>, ret: Ret<DeltaMD5>,
}, },
TransformRevision { TransformDelta {
revisions: Vec<Revision>, delta: RichTextDelta,
ret: Ret<TransformDeltas>, ret: Ret<TransformDeltas<RichTextAttributes>>,
}, },
Insert { Insert {
index: usize, index: usize,
@ -310,8 +281,8 @@ impl std::fmt::Debug for EditorCommand {
let s = match self { let s = match self {
EditorCommand::ComposeLocalDelta { .. } => "ComposeLocalDelta", EditorCommand::ComposeLocalDelta { .. } => "ComposeLocalDelta",
EditorCommand::ComposeRemoteDelta { .. } => "ComposeRemoteDelta", EditorCommand::ComposeRemoteDelta { .. } => "ComposeRemoteDelta",
EditorCommand::OverrideDelta { .. } => "OverrideDelta", EditorCommand::ResetDelta { .. } => "ResetDelta",
EditorCommand::TransformRevision { .. } => "TransformRevision", EditorCommand::TransformDelta { .. } => "TransformDelta",
EditorCommand::Insert { .. } => "Insert", EditorCommand::Insert { .. } => "Insert",
EditorCommand::Delete { .. } => "Delete", EditorCommand::Delete { .. } => "Delete",
EditorCommand::Format { .. } => "Format", EditorCommand::Format { .. } => "Format",
@ -326,8 +297,3 @@ impl std::fmt::Debug for EditorCommand {
f.write_str(s) f.write_str(s)
} }
} }
pub(crate) struct TransformDeltas {
pub client_prime: RichTextDelta,
pub server_prime: Option<RichTextDelta>,
}

View File

@ -1,32 +1,37 @@
use crate::{ use crate::{
core::{EditorCommand, TransformDeltas, SYNC_INTERVAL_IN_MILLIS}, core::{EditorCommand, SYNC_INTERVAL_IN_MILLIS},
DocumentWSReceiver, DocumentWSReceiver,
}; };
use async_trait::async_trait; use async_trait::async_trait;
use bytes::Bytes; use bytes::Bytes;
use flowy_collaboration::{ use flowy_collaboration::{
entities::{ entities::{
revision::{RepeatedRevision, Revision, RevisionRange}, revision::RevisionRange,
ws_data::{ClientRevisionWSData, NewDocumentUser, ServerRevisionWSData, ServerRevisionWSDataType}, ws_data::{ClientRevisionWSData, NewDocumentUser, ServerRevisionWSData, ServerRevisionWSDataType},
}, },
errors::CollaborateResult, errors::CollaborateResult,
}; };
use flowy_error::{internal_error, FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError};
use flowy_sync::{ use flowy_sync::{
CompositeWSSinkDataProvider,
DeltaMD5,
ResolverTarget,
RevisionConflictResolver,
RevisionManager, RevisionManager,
RevisionWSSinkDataProvider, RevisionWSSinkDataProvider,
RevisionWSSteamConsumer, RevisionWSSteamConsumer,
RevisionWebSocket, RevisionWebSocket,
RevisionWebSocketManager, RevisionWebSocketManager,
TransformDeltas,
}; };
use lib_infra::future::FutureResult; use lib_infra::future::{BoxResultFuture, FutureResult};
use lib_ot::{core::Delta, rich_text::RichTextAttributes};
use lib_ws::WSConnectState; use lib_ws::WSConnectState;
use std::{collections::VecDeque, convert::TryFrom, sync::Arc, time::Duration}; use std::{sync::Arc, time::Duration};
use tokio::sync::{ use tokio::sync::{
broadcast, broadcast,
mpsc::{Receiver, Sender}, mpsc::{Receiver, Sender},
oneshot, oneshot,
RwLock,
}; };
pub(crate) type EditorCommandSender = Sender<EditorCommand>; pub(crate) type EditorCommandSender = Sender<EditorCommand>;
@ -39,19 +44,25 @@ pub(crate) async fn make_document_ws_manager(
rev_manager: Arc<RevisionManager>, rev_manager: Arc<RevisionManager>,
web_socket: Arc<dyn RevisionWebSocket>, web_socket: Arc<dyn RevisionWebSocket>,
) -> Arc<RevisionWebSocketManager> { ) -> Arc<RevisionWebSocketManager> {
let shared_sink = Arc::new(SharedWSSinkDataProvider::new(rev_manager.clone())); let composite_sink_provider = Arc::new(CompositeWSSinkDataProvider::new(&doc_id, rev_manager.clone()));
let ws_stream_consumer = Arc::new(DocumentWebSocketSteamConsumerAdapter { let resolver_target = Arc::new(DocumentRevisionResolver { edit_cmd_tx });
let resolver = RevisionConflictResolver::<RichTextAttributes>::new(
&user_id,
resolver_target,
Arc::new(composite_sink_provider.clone()),
rev_manager.clone(),
);
let ws_stream_consumer = Arc::new(DocumentWSSteamConsumerAdapter {
object_id: doc_id.clone(), object_id: doc_id.clone(),
edit_cmd_tx, resolver: Arc::new(resolver),
rev_manager: rev_manager.clone(),
shared_sink: shared_sink.clone(),
}); });
let data_provider = Arc::new(DocumentWSSinkDataProviderAdapter(shared_sink));
let sink_provider = Arc::new(DocumentWSSinkDataProviderAdapter(composite_sink_provider));
let ping_duration = Duration::from_millis(SYNC_INTERVAL_IN_MILLIS); let ping_duration = Duration::from_millis(SYNC_INTERVAL_IN_MILLIS);
let ws_manager = Arc::new(RevisionWebSocketManager::new( let ws_manager = Arc::new(RevisionWebSocketManager::new(
&doc_id, &doc_id,
web_socket, web_socket,
data_provider, sink_provider,
ws_stream_consumer, ws_stream_consumer,
ping_duration, ping_duration,
)); ));
@ -77,31 +88,20 @@ fn listen_document_ws_state(
}); });
} }
pub(crate) struct DocumentWebSocketSteamConsumerAdapter { pub(crate) struct DocumentWSSteamConsumerAdapter {
pub(crate) object_id: String, object_id: String,
pub(crate) edit_cmd_tx: EditorCommandSender, resolver: Arc<RevisionConflictResolver<RichTextAttributes>>,
pub(crate) rev_manager: Arc<RevisionManager>,
pub(crate) shared_sink: Arc<SharedWSSinkDataProvider>,
} }
impl RevisionWSSteamConsumer for DocumentWebSocketSteamConsumerAdapter { impl RevisionWSSteamConsumer for DocumentWSSteamConsumerAdapter {
fn receive_push_revision(&self, bytes: Bytes) -> FutureResult<(), FlowyError> { fn receive_push_revision(&self, bytes: Bytes) -> FutureResult<(), FlowyError> {
let rev_manager = self.rev_manager.clone(); let resolver = self.resolver.clone();
let edit_cmd_tx = self.edit_cmd_tx.clone(); FutureResult::new(async move { resolver.receive_bytes(bytes).await })
let shared_sink = self.shared_sink.clone();
let object_id = self.object_id.clone();
FutureResult::new(async move {
if let Some(server_composed_revision) = handle_remote_revision(edit_cmd_tx, rev_manager, bytes).await? {
let data = ClientRevisionWSData::from_revisions(&object_id, vec![server_composed_revision]);
shared_sink.push_back(data).await;
}
Ok(())
})
} }
fn receive_ack(&self, id: String, ty: ServerRevisionWSDataType) -> FutureResult<(), FlowyError> { fn receive_ack(&self, id: String, ty: ServerRevisionWSDataType) -> FutureResult<(), FlowyError> {
let shared_sink = self.shared_sink.clone(); let resolver = self.resolver.clone();
FutureResult::new(async move { shared_sink.ack(id, ty).await }) FutureResult::new(async move { resolver.ack_revision(id, ty).await })
} }
fn receive_new_user_connect(&self, _new_user: NewDocumentUser) -> FutureResult<(), FlowyError> { fn receive_new_user_connect(&self, _new_user: NewDocumentUser) -> FutureResult<(), FlowyError> {
@ -110,202 +110,71 @@ impl RevisionWSSteamConsumer for DocumentWebSocketSteamConsumerAdapter {
} }
fn pull_revisions_in_range(&self, range: RevisionRange) -> FutureResult<(), FlowyError> { fn pull_revisions_in_range(&self, range: RevisionRange) -> FutureResult<(), FlowyError> {
let rev_manager = self.rev_manager.clone(); let resolver = self.resolver.clone();
let shared_sink = self.shared_sink.clone(); FutureResult::new(async move { resolver.send_revisions(range).await })
let object_id = self.object_id.clone();
FutureResult::new(async move {
let revisions = rev_manager.get_revisions_in_range(range).await?;
let data = ClientRevisionWSData::from_revisions(&object_id, revisions);
shared_sink.push_back(data).await;
Ok(())
})
} }
} }
pub(crate) struct DocumentWSSinkDataProviderAdapter(pub(crate) Arc<SharedWSSinkDataProvider>); pub(crate) struct DocumentWSSinkDataProviderAdapter(pub(crate) Arc<CompositeWSSinkDataProvider>);
impl RevisionWSSinkDataProvider for DocumentWSSinkDataProviderAdapter { impl RevisionWSSinkDataProvider for DocumentWSSinkDataProviderAdapter {
fn next(&self) -> FutureResult<Option<ClientRevisionWSData>, FlowyError> { fn next(&self) -> FutureResult<Option<ClientRevisionWSData>, FlowyError> {
let shared_sink = self.0.clone(); let sink_provider = self.0.clone();
FutureResult::new(async move { shared_sink.next().await }) FutureResult::new(async move { sink_provider.next().await })
} }
} }
async fn transform_pushed_revisions( struct DocumentRevisionResolver {
revisions: Vec<Revision>, edit_cmd_tx: EditorCommandSender,
edit_cmd_tx: &EditorCommandSender, }
) -> FlowyResult<TransformDeltas> {
let (ret, rx) = oneshot::channel::<CollaborateResult<TransformDeltas>>(); impl ResolverTarget<RichTextAttributes> for DocumentRevisionResolver {
edit_cmd_tx fn compose_delta(&self, delta: Delta<RichTextAttributes>) -> BoxResultFuture<DeltaMD5, FlowyError> {
.send(EditorCommand::TransformRevision { revisions, ret }) let tx = self.edit_cmd_tx.clone();
Box::pin(async move {
let (ret, rx) = oneshot::channel();
tx.send(EditorCommand::ComposeRemoteDelta {
client_delta: delta,
ret,
})
.await
.map_err(internal_error)?;
let md5 = rx.await.map_err(|e| {
FlowyError::internal().context(format!("handle EditorCommand::ComposeRemoteDelta failed: {}", e))
})??;
Ok(md5)
})
}
fn transform_delta(
&self,
delta: Delta<RichTextAttributes>,
) -> BoxResultFuture<flowy_sync::TransformDeltas<RichTextAttributes>, FlowyError> {
let tx = self.edit_cmd_tx.clone();
Box::pin(async move {
let (ret, rx) = oneshot::channel::<CollaborateResult<TransformDeltas<RichTextAttributes>>>();
tx.send(EditorCommand::TransformDelta { delta, ret })
.await .await
.map_err(internal_error)?; .map_err(internal_error)?;
let transform_delta = rx let transform_delta = rx
.await .await
.map_err(|e| FlowyError::internal().context(format!("transform_pushed_revisions failed: {}", e)))??; .map_err(|e| FlowyError::internal().context(format!("TransformDelta failed: {}", e)))??;
Ok(transform_delta) Ok(transform_delta)
}
#[tracing::instrument(level = "debug", skip(edit_cmd_tx, rev_manager, bytes), err)]
pub(crate) async fn handle_remote_revision(
edit_cmd_tx: EditorCommandSender,
rev_manager: Arc<RevisionManager>,
bytes: Bytes,
) -> FlowyResult<Option<Revision>> {
let mut revisions = RepeatedRevision::try_from(bytes)?.into_inner();
if revisions.is_empty() {
return Ok(None);
}
let first_revision = revisions.first().unwrap();
if let Some(local_revision) = rev_manager.get_revision(first_revision.rev_id).await {
if local_revision.md5 == first_revision.md5 {
// The local revision is equal to the pushed revision. Just ignore it.
revisions = revisions.split_off(1);
if revisions.is_empty() {
return Ok(None);
}
} else {
return Ok(None);
}
}
let TransformDeltas {
client_prime,
server_prime,
} = transform_pushed_revisions(revisions.clone(), &edit_cmd_tx).await?;
match server_prime {
None => {
// The server_prime is None means the client local revisions conflict with the
// server, and it needs to override the client delta.
let (ret, rx) = oneshot::channel();
let _ = edit_cmd_tx
.send(EditorCommand::OverrideDelta {
revisions,
delta: client_prime,
ret,
}) })
.await; }
let _ = rx.await.map_err(|e| {
FlowyError::internal().context(format!("handle EditorCommand::OverrideDelta failed: {}", e)) fn reset_delta(&self, delta: Delta<RichTextAttributes>) -> BoxResultFuture<DeltaMD5, FlowyError> {
})??; let tx = self.edit_cmd_tx.clone();
Ok(None) Box::pin(async move {
},
Some(server_prime) => {
let (ret, rx) = oneshot::channel(); let (ret, rx) = oneshot::channel();
edit_cmd_tx let _ = tx
.send(EditorCommand::ComposeRemoteDelta { .send(EditorCommand::ResetDelta { delta, ret })
revisions,
client_delta: client_prime,
server_delta: server_prime,
ret,
})
.await .await
.map_err(internal_error)?; .map_err(internal_error)?;
let result = rx.await.map_err(|e| { let md5 = rx.await.map_err(|e| {
FlowyError::internal().context(format!("handle EditorCommand::ComposeRemoteDelta failed: {}", e)) FlowyError::internal().context(format!("handle EditorCommand::OverrideDelta failed: {}", e))
})??; })??;
Ok(result) Ok(md5)
}, })
}
}
#[derive(Clone)]
enum SourceType {
Shared,
Revision,
}
#[derive(Clone)]
pub(crate) struct SharedWSSinkDataProvider {
shared: Arc<RwLock<VecDeque<ClientRevisionWSData>>>,
rev_manager: Arc<RevisionManager>,
source_ty: Arc<RwLock<SourceType>>,
}
impl SharedWSSinkDataProvider {
pub(crate) fn new(rev_manager: Arc<RevisionManager>) -> Self {
SharedWSSinkDataProvider {
shared: Arc::new(RwLock::new(VecDeque::new())),
rev_manager,
source_ty: Arc::new(RwLock::new(SourceType::Shared)),
}
}
#[allow(dead_code)]
pub(crate) async fn push_front(&self, data: ClientRevisionWSData) { self.shared.write().await.push_front(data); }
async fn push_back(&self, data: ClientRevisionWSData) { self.shared.write().await.push_back(data); }
async fn next(&self) -> FlowyResult<Option<ClientRevisionWSData>> {
let source_ty = self.source_ty.read().await.clone();
match source_ty {
SourceType::Shared => match self.shared.read().await.front() {
None => {
*self.source_ty.write().await = SourceType::Revision;
Ok(None)
},
Some(data) => {
tracing::debug!("[SharedWSSinkDataProvider]: {}:{:?}", data.object_id, data.ty);
Ok(Some(data.clone()))
},
},
SourceType::Revision => {
if !self.shared.read().await.is_empty() {
*self.source_ty.write().await = SourceType::Shared;
return Ok(None);
}
match self.rev_manager.next_sync_revision().await? {
Some(rev) => {
let doc_id = rev.object_id.clone();
Ok(Some(ClientRevisionWSData::from_revisions(&doc_id, vec![rev])))
},
None => {
//
let doc_id = self.rev_manager.object_id.clone();
let latest_rev_id = self.rev_manager.rev_id();
Ok(Some(ClientRevisionWSData::ping(&doc_id, latest_rev_id)))
},
}
},
}
}
async fn ack(&self, id: String, _ty: ServerRevisionWSDataType) -> FlowyResult<()> {
// let _ = self.rev_manager.ack_revision(id).await?;
let source_ty = self.source_ty.read().await.clone();
match source_ty {
SourceType::Shared => {
let should_pop = match self.shared.read().await.front() {
None => false,
Some(val) => {
let expected_id = val.id();
if expected_id == id {
true
} else {
tracing::error!("The front element's {} is not equal to the {}", expected_id, id);
false
}
},
};
if should_pop {
let _ = self.shared.write().await.pop_front();
}
},
SourceType::Revision => {
match id.parse::<i64>() {
Ok(rev_id) => {
let _ = self.rev_manager.ack_revision(rev_id).await?;
},
Err(e) => {
tracing::error!("Parse rev_id from {} failed. {}", id, e);
},
};
},
}
Ok(())
} }
} }

View File

@ -4,7 +4,7 @@ use flowy_collaboration::entities::ws_data::ClientRevisionWSData;
use flowy_core::{ use flowy_core::{
controller::FolderManager, controller::FolderManager,
errors::{internal_error, FlowyError}, errors::{internal_error, FlowyError},
module::{init_folder, FolderCouldServiceV1, WorkspaceDatabase, WorkspaceUser}, module::{FolderCouldServiceV1, WorkspaceDatabase, WorkspaceUser},
}; };
use flowy_database::ConnectionPool; use flowy_database::ConnectionPool;
use flowy_document::context::DocumentContext; use flowy_document::context::DocumentContext;
@ -18,8 +18,8 @@ use flowy_user::services::UserSession;
use lib_ws::{WSMessageReceiver, WSModule, WebSocketRawMessage}; use lib_ws::{WSMessageReceiver, WSModule, WebSocketRawMessage};
use std::{convert::TryInto, sync::Arc}; use std::{convert::TryInto, sync::Arc};
pub struct CoreDepsResolver(); pub struct FolderDepsResolver();
impl CoreDepsResolver { impl FolderDepsResolver {
pub fn resolve( pub fn resolve(
local_server: Option<Arc<LocalServer>>, local_server: Option<Arc<LocalServer>>,
user_session: Arc<UserSession>, user_session: Arc<UserSession>,
@ -29,13 +29,20 @@ impl CoreDepsResolver {
) -> Arc<FolderManager> { ) -> Arc<FolderManager> {
let user: Arc<dyn WorkspaceUser> = Arc::new(WorkspaceUserImpl(user_session.clone())); let user: Arc<dyn WorkspaceUser> = Arc::new(WorkspaceUserImpl(user_session.clone()));
let database: Arc<dyn WorkspaceDatabase> = Arc::new(WorkspaceDatabaseImpl(user_session)); let database: Arc<dyn WorkspaceDatabase> = Arc::new(WorkspaceDatabaseImpl(user_session));
let ws_sender = Arc::new(FolderWebSocketImpl(ws_conn.clone())); let web_socket = Arc::new(FolderWebSocketImpl(ws_conn.clone()));
let cloud_service: Arc<dyn FolderCouldServiceV1> = match local_server { let cloud_service: Arc<dyn FolderCouldServiceV1> = match local_server {
None => Arc::new(CoreHttpCloudService::new(server_config.clone())), None => Arc::new(CoreHttpCloudService::new(server_config.clone())),
Some(local_server) => local_server, Some(local_server) => local_server,
}; };
let folder_manager = init_folder(user, database, flowy_document.clone(), cloud_service, ws_sender); let folder_manager = Arc::new(FolderManager::new(
user,
cloud_service,
database,
flowy_document.clone(),
web_socket,
));
let receiver = Arc::new(FolderWSMessageReceiverImpl(folder_manager.clone())); let receiver = Arc::new(FolderWSMessageReceiverImpl(folder_manager.clone()));
ws_conn.add_ws_message_receiver(receiver).unwrap(); ws_conn.add_ws_message_receiver(receiver).unwrap();

View File

@ -1,7 +1,7 @@
mod core_deps;
mod document_deps; mod document_deps;
mod folder_deps;
mod user_deps; mod user_deps;
pub use core_deps::*;
pub use document_deps::*; pub use document_deps::*;
pub use folder_deps::*;
pub use user_deps::*; pub use user_deps::*;

View File

@ -250,7 +250,7 @@ fn mk_core_context(
server_config: &ClientServerConfiguration, server_config: &ClientServerConfiguration,
ws_conn: &Arc<FlowyWebSocketConnect>, ws_conn: &Arc<FlowyWebSocketConnect>,
) -> Arc<FolderManager> { ) -> Arc<FolderManager> {
CoreDepsResolver::resolve( FolderDepsResolver::resolve(
local_server.clone(), local_server.clone(),
user_session.clone(), user_session.clone(),
server_config, server_config,

View File

@ -0,0 +1,182 @@
use crate::RevisionManager;
use bytes::Bytes;
use flowy_collaboration::{
entities::{
revision::{RepeatedRevision, Revision, RevisionRange},
ws_data::{ClientRevisionWSData, ServerRevisionWSDataType},
},
util::make_delta_from_revisions,
};
use flowy_error::{FlowyError, FlowyResult};
use lib_infra::future::BoxResultFuture;
use lib_ot::core::{Attributes, Delta};
use serde::de::DeserializeOwned;
use std::{convert::TryFrom, sync::Arc};
use tokio::sync::oneshot;
pub type DeltaMD5 = String;
pub trait ResolverTarget<T>
where
T: Attributes + Send + Sync,
{
fn compose_delta(&self, delta: Delta<T>) -> BoxResultFuture<DeltaMD5, FlowyError>;
fn transform_delta(&self, delta: Delta<T>) -> BoxResultFuture<TransformDeltas<T>, FlowyError>;
fn reset_delta(&self, delta: Delta<T>) -> BoxResultFuture<DeltaMD5, FlowyError>;
}
pub trait ResolverRevisionSink: Send + Sync {
fn send(&self, revisions: Vec<Revision>) -> BoxResultFuture<(), FlowyError>;
fn ack(&self, rev_id: String, ty: ServerRevisionWSDataType) -> BoxResultFuture<(), FlowyError>;
}
pub struct RevisionConflictResolver<T>
where
T: Attributes + Send + Sync,
{
user_id: String,
target: Arc<dyn ResolverTarget<T>>,
rev_sink: Arc<dyn ResolverRevisionSink>,
rev_manager: Arc<RevisionManager>,
}
impl<T> RevisionConflictResolver<T>
where
T: Attributes + Send + Sync,
// DeserializeOwned + serde::Serialize,
{
pub fn new(
user_id: &str,
target: Arc<dyn ResolverTarget<T>>,
rev_sink: Arc<dyn ResolverRevisionSink>,
rev_manager: Arc<RevisionManager>,
) -> Self {
let user_id = user_id.to_owned();
Self {
user_id,
target,
rev_sink,
rev_manager,
}
}
pub async fn receive_bytes(&self, bytes: Bytes) -> FlowyResult<()> {
let repeated_revision = RepeatedRevision::try_from(bytes)?;
if repeated_revision.is_empty() {
return Ok(());
}
// match self.handle_revision(repeated_revision).await? {
// None => {},
// Some(server_revision) => {
// self.rev_sink.send(vec![server_revision]);
// },
// }
Ok(())
}
pub async fn ack_revision(&self, rev_id: String, ty: ServerRevisionWSDataType) -> FlowyResult<()> {
self.rev_sink.ack(rev_id, ty).await
}
pub async fn send_revisions(&self, range: RevisionRange) -> FlowyResult<()> {
let revisions = self.rev_manager.get_revisions_in_range(range).await?;
self.rev_sink.send(revisions).await;
Ok(())
}
// async fn handle_revision(&self, repeated_revision: RepeatedRevision) ->
// FlowyResult<Option<Revision>> { let mut revisions =
// repeated_revision.into_inner(); let first_revision =
// revisions.first().unwrap(); if let Some(local_revision) =
// self.rev_manager.get_revision(first_revision.rev_id).await { if
// local_revision.md5 == first_revision.md5 { // The local
// revision is equal to the pushed revision. Just ignore it.
// revisions = revisions.split_off(1); if revisions.is_empty() {
// return Ok(None);
// }
// } else {
// return Ok(None);
// }
// }
//
// let new_delta = make_delta_from_revisions(revisions.clone())?;
//
// let TransformDeltas {
// client_prime,
// server_prime,
// } = self.target.transform_delta(new_delta).await?;
//
// match server_prime {
// None => {
// // The server_prime is None means the client local revisions
// conflict with the // server, and it needs to override the
// client delta. let md5 =
// self.target.reset_delta(client_prime).await?; let
// repeated_revision = RepeatedRevision::new(revisions);
// assert_eq!(repeated_revision.last().unwrap().md5, md5); let _
// = self.rev_manager.reset_object(repeated_revision).await?;
// Ok(None) },
// Some(server_prime) => {
// let md5 = self.target.compose_delta(client_prime.clone()).await?;
// for revision in &revisions {
// let _ =
// self.rev_manager.add_remote_revision(revision).await?; }
// let (client_revision, server_revision) =
// make_client_and_server_revision( &self.user_id,
// &self.rev_manager,
// client_prime,
// Some(server_prime),
// md5,
// );
// let _ =
// self.rev_manager.add_remote_revision(&client_revision).await?;
// Ok(server_revision)
// },
// }
// }
}
fn make_client_and_server_revision<T>(
user_id: &str,
rev_manager: &Arc<RevisionManager>,
client_delta: Delta<T>,
server_delta: Option<Delta<T>>,
md5: String,
) -> (Revision, Option<Revision>)
where
T: Attributes + serde::Serialize,
{
let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair();
let client_revision = Revision::new(
&rev_manager.object_id,
base_rev_id,
rev_id,
client_delta.to_bytes(),
&user_id,
md5.clone(),
);
match server_delta {
None => (client_revision, None),
Some(server_delta) => {
let server_revision = Revision::new(
&rev_manager.object_id,
base_rev_id,
rev_id,
server_delta.to_bytes(),
&user_id,
md5,
);
(client_revision, Some(server_revision))
},
}
}
pub struct TransformDeltas<T>
where
T: Attributes,
{
pub client_prime: Delta<T>,
pub server_prime: Option<Delta<T>>,
}

View File

@ -1,8 +1,10 @@
mod cache; mod cache;
mod conflict_resolve;
mod rev_manager; mod rev_manager;
mod ws_manager; mod ws_manager;
pub use cache::*; pub use cache::*;
pub use conflict_resolve::*;
pub use rev_manager::*; pub use rev_manager::*;
pub use ws_manager::*; pub use ws_manager::*;

View File

@ -1,19 +1,21 @@
use crate::{ResolverRevisionSink, RevisionManager};
use async_stream::stream; use async_stream::stream;
use bytes::Bytes; use bytes::Bytes;
use flowy_collaboration::entities::{ use flowy_collaboration::entities::{
revision::{RevId, RevisionRange}, revision::{RevId, Revision, RevisionRange},
ws_data::{ClientRevisionWSData, NewDocumentUser, ServerRevisionWSData, ServerRevisionWSDataType}, ws_data::{ClientRevisionWSData, NewDocumentUser, ServerRevisionWSData, ServerRevisionWSDataType},
}; };
use flowy_error::{internal_error, FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError, FlowyResult};
use futures_util::stream::StreamExt; use futures_util::stream::StreamExt;
use lib_infra::future::FutureResult; use lib_infra::future::{BoxResultFuture, FutureResult};
use lib_ws::WSConnectState; use lib_ws::WSConnectState;
use std::{convert::TryFrom, sync::Arc}; use std::{collections::VecDeque, convert::TryFrom, sync::Arc};
use tokio::{ use tokio::{
sync::{ sync::{
broadcast, broadcast,
mpsc, mpsc,
mpsc::{Receiver, Sender}, mpsc::{Receiver, Sender},
RwLock,
}, },
task::spawn_blocking, task::spawn_blocking,
time::{interval, Duration}, time::{interval, Duration},
@ -34,14 +36,14 @@ pub trait RevisionWSSinkDataProvider: Send + Sync {
} }
pub type WSStateReceiver = tokio::sync::broadcast::Receiver<WSConnectState>; pub type WSStateReceiver = tokio::sync::broadcast::Receiver<WSConnectState>;
pub trait RevisionWebSocket: Send + Sync { pub trait RevisionWebSocket: Send + Sync + 'static {
fn send(&self, data: ClientRevisionWSData) -> Result<(), FlowyError>; fn send(&self, data: ClientRevisionWSData) -> Result<(), FlowyError>;
fn subscribe_state_changed(&self) -> WSStateReceiver; fn subscribe_state_changed(&self) -> WSStateReceiver;
} }
pub struct RevisionWebSocketManager { pub struct RevisionWebSocketManager {
pub object_id: String, pub object_id: String,
data_provider: Arc<dyn RevisionWSSinkDataProvider>, sink_provider: Arc<dyn RevisionWSSinkDataProvider>,
stream_consumer: Arc<dyn RevisionWSSteamConsumer>, stream_consumer: Arc<dyn RevisionWSSteamConsumer>,
web_socket: Arc<dyn RevisionWebSocket>, web_socket: Arc<dyn RevisionWebSocket>,
pub ws_passthrough_tx: Sender<ServerRevisionWSData>, pub ws_passthrough_tx: Sender<ServerRevisionWSData>,
@ -54,7 +56,7 @@ impl RevisionWebSocketManager {
pub fn new( pub fn new(
object_id: &str, object_id: &str,
web_socket: Arc<dyn RevisionWebSocket>, web_socket: Arc<dyn RevisionWebSocket>,
data_provider: Arc<dyn RevisionWSSinkDataProvider>, sink_provider: Arc<dyn RevisionWSSinkDataProvider>,
stream_consumer: Arc<dyn RevisionWSSteamConsumer>, stream_consumer: Arc<dyn RevisionWSSteamConsumer>,
ping_duration: Duration, ping_duration: Duration,
) -> Self { ) -> Self {
@ -64,7 +66,7 @@ impl RevisionWebSocketManager {
let (state_passthrough_tx, _) = broadcast::channel(2); let (state_passthrough_tx, _) = broadcast::channel(2);
let mut manager = RevisionWebSocketManager { let mut manager = RevisionWebSocketManager {
object_id, object_id,
data_provider, sink_provider,
stream_consumer, stream_consumer,
web_socket, web_socket,
ws_passthrough_tx, ws_passthrough_tx,
@ -80,7 +82,7 @@ impl RevisionWebSocketManager {
let ws_msg_rx = self.ws_passthrough_rx.take().expect("Only take once"); let ws_msg_rx = self.ws_passthrough_rx.take().expect("Only take once");
let sink = RevisionWSSink::new( let sink = RevisionWSSink::new(
&self.object_id, &self.object_id,
self.data_provider.clone(), self.sink_provider.clone(),
self.web_socket.clone(), self.web_socket.clone(),
self.stop_sync_tx.subscribe(), self.stop_sync_tx.subscribe(),
ping_duration, ping_duration,
@ -172,10 +174,7 @@ impl RevisionWSStream {
async fn handle_message(&self, msg: ServerRevisionWSData) -> FlowyResult<()> { async fn handle_message(&self, msg: ServerRevisionWSData) -> FlowyResult<()> {
let ServerRevisionWSData { object_id: _, ty, data } = msg; let ServerRevisionWSData { object_id: _, ty, data } = msg;
let bytes = spawn_blocking(move || Bytes::from(data)) let bytes = Bytes::from(data);
.await
.map_err(internal_error)?;
tracing::trace!("[RevisionWSStream]: new message: {:?}", ty); tracing::trace!("[RevisionWSStream]: new message: {:?}", ty);
match ty { match ty {
ServerRevisionWSDataType::ServerPushRev => { ServerRevisionWSDataType::ServerPushRev => {
@ -280,3 +279,110 @@ async fn tick(sender: mpsc::Sender<()>, duration: Duration) {
interval.tick().await; interval.tick().await;
} }
} }
#[derive(Clone)]
enum Source {
Custom,
Revision,
}
#[derive(Clone)]
pub struct CompositeWSSinkDataProvider {
object_id: String,
container: Arc<RwLock<VecDeque<ClientRevisionWSData>>>,
rev_manager: Arc<RevisionManager>,
source: Arc<RwLock<Source>>,
}
impl CompositeWSSinkDataProvider {
pub fn new(object_id: &str, rev_manager: Arc<RevisionManager>) -> Self {
CompositeWSSinkDataProvider {
object_id: object_id.to_owned(),
container: Arc::new(RwLock::new(VecDeque::new())),
rev_manager,
source: Arc::new(RwLock::new(Source::Custom)),
}
}
pub async fn push_data(&self, data: ClientRevisionWSData) { self.container.write().await.push_back(data); }
pub async fn next(&self) -> FlowyResult<Option<ClientRevisionWSData>> {
let source = self.source.read().await.clone();
let data = match source {
Source::Custom => match self.container.read().await.front() {
None => {
*self.source.write().await = Source::Revision;
Ok(None)
},
Some(data) => Ok(Some(data.clone())),
},
Source::Revision => {
if !self.container.read().await.is_empty() {
*self.source.write().await = Source::Custom;
return Ok(None);
}
match self.rev_manager.next_sync_revision().await? {
Some(rev) => Ok(Some(ClientRevisionWSData::from_revisions(&self.object_id, vec![rev]))),
None => Ok(Some(ClientRevisionWSData::ping(
&self.object_id,
self.rev_manager.rev_id(),
))),
}
},
};
if let Ok(Some(data)) = &data {
tracing::trace!("[CompositeWSSinkDataProvider]: {}:{:?}", data.object_id, data.ty);
}
data
}
pub async fn ack_data(&self, id: String, _ty: ServerRevisionWSDataType) -> FlowyResult<()> {
let source = self.source.read().await.clone();
match source {
Source::Custom => {
let should_pop = match self.container.read().await.front() {
None => false,
Some(val) => {
let expected_id = val.id();
if expected_id == id {
true
} else {
tracing::error!("The front element's {} is not equal to the {}", expected_id, id);
false
}
},
};
if should_pop {
let _ = self.container.write().await.pop_front();
}
Ok(())
},
Source::Revision => {
let rev_id = id.parse::<i64>().map_err(|e| {
FlowyError::internal().context(format!("Parse {} rev_id from {} failed. {}", self.object_id, id, e))
})?;
let _ = self.rev_manager.ack_revision(rev_id).await?;
Ok::<(), FlowyError>(())
},
}
}
}
impl ResolverRevisionSink for Arc<CompositeWSSinkDataProvider> {
fn send(&self, revisions: Vec<Revision>) -> BoxResultFuture<(), FlowyError> {
let sink = self.clone();
Box::pin(async move {
sink.push_data(ClientRevisionWSData::from_revisions(&sink.object_id, revisions))
.await;
Ok(())
})
}
fn ack(&self, rev_id: String, ty: ServerRevisionWSDataType) -> BoxResultFuture<(), FlowyError> {
let sink = self.clone();
Box::pin(async move { sink.ack_data(rev_id, ty).await })
}
}

View File

@ -4,9 +4,8 @@ pub mod helper;
use crate::helper::*; use crate::helper::*;
use backend_service::configuration::{get_client_server_configuration, ClientServerConfiguration}; use backend_service::configuration::{get_client_server_configuration, ClientServerConfiguration};
use flowy_sdk::{FlowySDK, FlowySDKConfig}; use flowy_sdk::{FlowySDK, FlowySDKConfig};
use flowy_user::{entities::UserProfile, services::database::UserDB}; use flowy_user::entities::UserProfile;
use lib_infra::uuid_string; use lib_infra::uuid_string;
use std::sync::Arc;
pub mod prelude { pub mod prelude {
pub use crate::{event_builder::*, helper::*, *}; pub use crate::{event_builder::*, helper::*, *};
@ -52,15 +51,3 @@ impl FlowySDKTest {
context.user_profile context.user_profile
} }
} }
pub struct MigrationTest {
pub db: UserDB,
}
impl MigrationTest {
pub fn new() -> Self {
let dir = root_dir();
let db = UserDB::new(&dir);
Self { db }
}
}

View File

@ -0,0 +1,69 @@
use crate::{
entities::revision::Revision,
errors::{CollaborateError, CollaborateResult},
folder::{default_folder_delta, FolderPad},
};
use flowy_core_data_model::entities::{trash::Trash, workspace::Workspace};
use lib_ot::core::{OperationTransformable, PlainDelta, PlainDeltaBuilder};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
#[derive(Serialize, Deserialize)]
pub(crate) struct FolderPadBuilder {
workspaces: Vec<Arc<Workspace>>,
trash: Vec<Arc<Trash>>,
}
impl FolderPadBuilder {
pub(crate) fn new() -> Self {
Self {
workspaces: vec![],
trash: vec![],
}
}
pub(crate) fn with_workspace(mut self, workspaces: Vec<Workspace>) -> Self {
self.workspaces = workspaces.into_iter().map(Arc::new).collect::<Vec<_>>();
self
}
pub(crate) fn with_trash(mut self, trash: Vec<Trash>) -> Self {
self.trash = trash.into_iter().map(Arc::new).collect::<Vec<_>>();
self
}
pub(crate) fn build_with_delta(self, mut delta: PlainDelta) -> CollaborateResult<FolderPad> {
if delta.is_empty() {
delta = default_folder_delta();
}
let folder_json = delta.apply("").unwrap();
let mut folder: FolderPad = serde_json::from_str(&folder_json).map_err(|e| {
CollaborateError::internal().context(format!("Deserialize json to root folder failed: {}", e))
})?;
folder.root = delta;
Ok(folder)
}
pub(crate) fn build_with_revisions(self, revisions: Vec<Revision>) -> CollaborateResult<FolderPad> {
let mut folder_delta = PlainDelta::new();
for revision in revisions {
if revision.delta_data.is_empty() {
tracing::warn!("revision delta_data is empty");
}
let delta = PlainDelta::from_bytes(revision.delta_data)?;
folder_delta = folder_delta.compose(&delta)?;
}
self.build_with_delta(folder_delta)
}
pub(crate) fn build(self) -> CollaborateResult<FolderPad> {
let json = serde_json::to_string(&self)
.map_err(|e| CollaborateError::internal().context(format!("serial trash to json failed: {}", e)))?;
Ok(FolderPad {
workspaces: self.workspaces,
trash: self.trash,
root: PlainDeltaBuilder::new().insert(&json).build(),
})
}
}

View File

@ -1,6 +1,7 @@
use crate::{ use crate::{
entities::revision::{md5, Revision}, entities::revision::{md5, Revision},
errors::{CollaborateError, CollaborateResult}, errors::{CollaborateError, CollaborateResult},
folder::builder::FolderPadBuilder,
}; };
use dissimilar::*; use dissimilar::*;
use flowy_core_data_model::entities::{app::App, trash::Trash, view::View, workspace::Workspace}; use flowy_core_data_model::entities::{app::App, trash::Trash, view::View, workspace::Workspace};
@ -10,10 +11,10 @@ use std::sync::Arc;
#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)] #[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
pub struct FolderPad { pub struct FolderPad {
workspaces: Vec<Arc<Workspace>>, pub(crate) workspaces: Vec<Arc<Workspace>>,
trash: Vec<Arc<Trash>>, pub(crate) trash: Vec<Arc<Trash>>,
#[serde(skip)] #[serde(skip)]
root: PlainDelta, pub(crate) root: PlainDelta,
} }
pub fn default_folder_delta() -> PlainDelta { pub fn default_folder_delta() -> PlainDelta {
@ -40,39 +41,17 @@ pub struct FolderChange {
impl FolderPad { impl FolderPad {
pub fn new(workspaces: Vec<Workspace>, trash: Vec<Trash>) -> CollaborateResult<Self> { pub fn new(workspaces: Vec<Workspace>, trash: Vec<Trash>) -> CollaborateResult<Self> {
let mut pad = FolderPad::default(); FolderPadBuilder::new()
pad.workspaces = workspaces.into_iter().map(Arc::new).collect::<Vec<_>>(); .with_workspace(workspaces)
pad.trash = trash.into_iter().map(Arc::new).collect::<Vec<_>>(); .with_trash(trash)
let json = pad.to_json()?; .build()
pad.root = PlainDeltaBuilder::new().insert(&json).build();
Ok(pad)
} }
pub fn from_revisions(revisions: Vec<Revision>) -> CollaborateResult<Self> { pub fn from_revisions(revisions: Vec<Revision>) -> CollaborateResult<Self> {
let mut folder_delta = PlainDelta::new(); FolderPadBuilder::new().build_with_revisions(revisions)
for revision in revisions {
if revision.delta_data.is_empty() {
tracing::warn!("revision delta_data is empty");
} }
let delta = PlainDelta::from_bytes(revision.delta_data)?; pub fn from_delta(delta: PlainDelta) -> CollaborateResult<Self> { FolderPadBuilder::new().build_with_delta(delta) }
folder_delta = folder_delta.compose(&delta)?;
}
Self::from_delta(folder_delta)
}
pub fn from_delta(mut delta: PlainDelta) -> CollaborateResult<Self> {
if delta.is_empty() {
delta = default_folder_delta();
}
let folder_json = delta.apply("").unwrap();
let mut folder: FolderPad = serde_json::from_str(&folder_json).map_err(|e| {
CollaborateError::internal().context(format!("Deserialize json to root folder failed: {}", e))
})?;
folder.root = delta;
Ok(folder)
}
pub fn delta(&self) -> &PlainDelta { &self.root } pub fn delta(&self) -> &PlainDelta { &self.root }

View File

@ -1,3 +1,4 @@
mod builder;
mod folder_pad; mod folder_pad;
pub use folder_pad::*; pub use folder_pad::*;

View File

@ -4,16 +4,15 @@ use crate::{
protobuf::{DocumentInfo as DocumentInfoPB, RepeatedRevision as RepeatedRevisionPB, Revision as RevisionPB}, protobuf::{DocumentInfo as DocumentInfoPB, RepeatedRevision as RepeatedRevisionPB, Revision as RevisionPB},
}; };
use lib_ot::{ use lib_ot::{
core::{OperationTransformable, NEW_LINE, WHITESPACE}, core::{Attributes, Delta, OperationTransformable, NEW_LINE, WHITESPACE},
errors::OTError, errors::OTError,
rich_text::RichTextDelta, rich_text::RichTextDelta,
}; };
use serde::de::DeserializeOwned;
use std::{ use std::{
convert::TryInto, convert::TryInto,
sync::atomic::{AtomicI64, Ordering::SeqCst}, sync::atomic::{AtomicI64, Ordering::SeqCst},
}; };
use serde::de::DeserializeOwned;
use lib_ot::core::{Attributes, Delta};
#[inline] #[inline]
pub fn find_newline(s: &str) -> Option<usize> { s.find(NEW_LINE) } pub fn find_newline(s: &str) -> Option<usize> { s.find(NEW_LINE) }
@ -47,10 +46,13 @@ impl RevIdCounter {
pub fn set(&self, n: i64) { let _ = self.0.fetch_update(SeqCst, SeqCst, |_| Some(n)); } pub fn set(&self, n: i64) { let _ = self.0.fetch_update(SeqCst, SeqCst, |_| Some(n)); }
} }
pub fn make_delta_from_revisions(revisions: Vec<Revision>) -> CollaborateResult<RichTextDelta> { pub fn make_delta_from_revisions<T>(revisions: Vec<Revision>) -> CollaborateResult<Delta<T>>
let mut delta = RichTextDelta::new(); where
T: Attributes + DeserializeOwned,
{
let mut delta = Delta::<T>::new();
for revision in revisions { for revision in revisions {
let revision_delta = RichTextDelta::from_bytes(revision.delta_data).map_err(|e| { let revision_delta = Delta::<T>::from_bytes(revision.delta_data).map_err(|e| {
let err_msg = format!("Deserialize remote revision failed: {:?}", e); let err_msg = format!("Deserialize remote revision failed: {:?}", e);
CollaborateError::internal().context(err_msg) CollaborateError::internal().context(err_msg)
})?; })?;
@ -59,7 +61,10 @@ pub fn make_delta_from_revisions(revisions: Vec<Revision>) -> CollaborateResult<
Ok(delta) Ok(delta)
} }
pub fn make_delta_from_revision_pb<T>(revisions: Vec<RevisionPB>) -> CollaborateResult<Delta<T>> where T: Attributes + DeserializeOwned { pub fn make_delta_from_revision_pb<T>(revisions: Vec<RevisionPB>) -> CollaborateResult<Delta<T>>
where
T: Attributes + DeserializeOwned,
{
let mut new_delta = Delta::<T>::new(); let mut new_delta = Delta::<T>::new();
for revision in revisions { for revision in revisions {
let delta = Delta::<T>::from_bytes(revision.delta_data).map_err(|e| { let delta = Delta::<T>::from_bytes(revision.delta_data).map_err(|e| {