AppFlowy (mirror of https://github.com/AppFlowy-IO/AppFlowy.git)

Commit 8eaec5e58c (parent 4cdf3e3e3e): fix compact bugs & update unit tests
@ -35,7 +35,7 @@ pub async fn read_document(
}
}

#[tracing::instrument(level = "debug", skip(document_manager, params), fields(delta), err)]
#[tracing::instrument(level = "debug", skip(document_manager, params), err)]
pub async fn reset_document(
document_manager: &Arc<ServerDocumentManager>,
mut params: ResetDocumentParams,
@ -24,7 +24,7 @@ async fn delta_sync_while_editing() {
|
||||
DocScript::ClientInsertText(0, "abc"),
|
||||
DocScript::ClientInsertText(3, "123"),
|
||||
DocScript::AssertClient(r#"[{"insert":"abc123\n"}]"#),
|
||||
DocScript::AssertServer(r#"[{"insert":"abc123\n"}]"#, 2),
|
||||
DocScript::AssertServer(r#"[{"insert":"abc123\n"}]"#, 1),
|
||||
])
|
||||
.await;
|
||||
}
|
||||
@ -50,11 +50,11 @@ async fn delta_sync_while_editing_with_attribute() {
|
||||
DocScript::ClientInsertText(0, "abc"),
|
||||
DocScript::ClientFormatText(Interval::new(0, 3), RichTextAttribute::Bold(true)),
|
||||
DocScript::AssertClient(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"\n"}]"#),
|
||||
DocScript::AssertServer(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"\n"}]"#, 2),
|
||||
DocScript::AssertServer(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"\n"}]"#, 1),
|
||||
DocScript::ClientInsertText(3, "efg"),
|
||||
DocScript::ClientFormatText(Interval::new(3, 5), RichTextAttribute::Italic(true)),
|
||||
DocScript::AssertClient(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"ef","attributes":{"bold":true,"italic":true}},{"insert":"g","attributes":{"bold":true}},{"insert":"\n"}]"#),
|
||||
DocScript::AssertServer(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"ef","attributes":{"bold":true,"italic":true}},{"insert":"g","attributes":{"bold":true}},{"insert":"\n"}]"#, 4),
|
||||
DocScript::AssertServer(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"ef","attributes":{"bold":true,"italic":true}},{"insert":"g","attributes":{"bold":true}},{"insert":"\n"}]"#, 3),
|
||||
])
|
||||
.await;
|
||||
}
|
||||
|
@ -3,7 +3,6 @@ import 'package:dartz/dartz.dart';
|
||||
import 'package:flowy_log/flowy_log.dart';
|
||||
// ignore: unnecessary_import
|
||||
import 'package:flowy_sdk/protobuf/dart-ffi/ffi_response.pb.dart';
|
||||
import 'package:flowy_sdk/protobuf/dart-ffi/ffi_request.pb.dart';
|
||||
import 'package:flowy_sdk/protobuf/flowy-collaboration/document_info.pb.dart';
|
||||
import 'package:flowy_sdk/protobuf/flowy-error/errors.pb.dart';
|
||||
import 'package:flowy_sdk/protobuf/flowy-net/event.pb.dart';
|
||||
|
@ -12,7 +12,7 @@ use flowy_sync::{
RevisionWebSocketManager,
};
use lib_infra::future::FutureResult;
use lib_ot::core::PlainTextAttributes;
use lib_ot::core::PlainAttributes;
use lib_sqlite::ConnectionPool;
use parking_lot::RwLock;
use std::sync::Arc;
@ -91,6 +91,12 @@ impl FolderEditor {
})?;
Ok(())
}

#[allow(dead_code)]
pub fn folder_json(&self) -> FlowyResult<String> {
let json = self.folder.read().to_json()?;
Ok(json)
}
}

struct FolderPadBuilder();
@ -125,22 +131,22 @@ impl FolderEditor {

struct FolderRevisionCompact();
impl RevisionCompact for FolderRevisionCompact {
fn compact_revisions(user_id: &str, object_id: &str, revisions: Vec<Revision>) -> FlowyResult<Revision> {
match revisions.last() {
None => Err(FlowyError::internal().context("compact revisions is empty")),
Some(last_revision) => {
let (base_rev_id, rev_id) = last_revision.pair_rev_id();
let md5 = last_revision.md5.clone();
let delta = make_delta_from_revisions::<PlainTextAttributes>(revisions)?;
Ok(Revision::new(
object_id,
base_rev_id,
rev_id,
delta.to_bytes(),
user_id,
md5,
))
}
fn compact_revisions(user_id: &str, object_id: &str, mut revisions: Vec<Revision>) -> FlowyResult<Revision> {
if revisions.is_empty() {
return Err(FlowyError::internal().context("Can't compact the empty folder's revisions"));
}

if revisions.len() == 1 {
return Ok(revisions.pop().unwrap());
}

let first_revision = revisions.first().unwrap();
let last_revision = revisions.last().unwrap();

let (base_rev_id, rev_id) = first_revision.pair_rev_id();
let md5 = last_revision.md5.clone();
let delta = make_delta_from_revisions::<PlainAttributes>(revisions)?;
let delta_data = delta.to_bytes();
Ok(Revision::new(object_id, base_rev_id, rev_id, delta_data, user_id, md5))
}
}
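The rewritten `compact_revisions` above rejects an empty revision list, returns a single revision unchanged, and otherwise builds one merged revision whose `(base_rev_id, rev_id)` pair comes from the first revision, whose `md5` comes from the last, and whose delta is composed from all of them. Below is a minimal, self-contained sketch of that rule; the `Revision` struct and the byte-concatenation "delta" are simplified stand-ins for the real flowy types, not the actual implementation.

```rust
// Simplified stand-in for flowy's Revision.
#[derive(Debug, Clone, PartialEq)]
struct Revision {
    object_id: String,
    base_rev_id: i64,
    rev_id: i64,
    delta_data: Vec<u8>,
    user_id: String,
    md5: String,
}

fn compact_revisions(user_id: &str, object_id: &str, mut revisions: Vec<Revision>) -> Result<Revision, String> {
    if revisions.is_empty() {
        return Err("Can't compact an empty revision list".to_owned());
    }
    if revisions.len() == 1 {
        // A single revision is already "compact"; hand it back unchanged.
        return Ok(revisions.pop().unwrap());
    }

    // As in the change above: both ids come from the first revision's pair,
    // the md5 from the last revision.
    let first = revisions.first().unwrap();
    let last = revisions.last().unwrap();
    let (base_rev_id, rev_id) = (first.base_rev_id, first.rev_id);
    let md5 = last.md5.clone();

    // Stand-in for make_delta_from_revisions: here the "delta" is just the
    // concatenation of the individual payloads.
    let delta_data: Vec<u8> = revisions.iter().flat_map(|r| r.delta_data.clone()).collect();

    Ok(Revision {
        object_id: object_id.to_owned(),
        base_rev_id,
        rev_id,
        delta_data,
        user_id: user_id.to_owned(),
        md5,
    })
}

fn main() {
    let rev = |base: i64, id: i64, data: &str| Revision {
        object_id: "doc-1".into(),
        base_rev_id: base,
        rev_id: id,
        delta_data: data.as_bytes().to_vec(),
        user_id: "user-1".into(),
        md5: format!("md5-{}", id),
    };
    let compacted =
        compact_revisions("user-1", "doc-1", vec![rev(0, 1, "a"), rev(1, 2, "b"), rev(2, 3, "c")]).unwrap();
    assert_eq!((compacted.base_rev_id, compacted.rev_id), (0, 1));
    assert_eq!(compacted.md5, "md5-3");
    assert_eq!(compacted.delta_data, b"abc".to_vec());
    println!("compacted: {:?}", compacted);
}
```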
@ -122,6 +122,6 @@ impl FolderPersistence {

let conn = pool.get()?;
let disk_cache = mk_revision_disk_cache(user_id, pool);
disk_cache.write_revision_records(vec![record], &conn)
disk_cache.create_revision_records(vec![record], &conn)
}
}
@ -10,7 +10,7 @@ use flowy_collaboration::{
|
||||
use flowy_error::FlowyError;
|
||||
use flowy_sync::*;
|
||||
use lib_infra::future::{BoxResultFuture, FutureResult};
|
||||
use lib_ot::core::{Delta, OperationTransformable, PlainDelta, PlainTextAttributes};
|
||||
use lib_ot::core::{Delta, OperationTransformable, PlainAttributes, PlainDelta};
|
||||
use parking_lot::RwLock;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
@ -23,7 +23,7 @@ pub(crate) async fn make_folder_ws_manager(
|
||||
) -> Arc<RevisionWebSocketManager> {
|
||||
let composite_sink_provider = Arc::new(CompositeWSSinkDataProvider::new(folder_id, rev_manager.clone()));
|
||||
let resolve_target = Arc::new(FolderRevisionResolveTarget { folder_pad });
|
||||
let resolver = RevisionConflictResolver::<PlainTextAttributes>::new(
|
||||
let resolver = RevisionConflictResolver::<PlainAttributes>::new(
|
||||
user_id,
|
||||
resolve_target,
|
||||
Arc::new(composite_sink_provider.clone()),
|
||||
@ -58,8 +58,8 @@ struct FolderRevisionResolveTarget {
|
||||
folder_pad: Arc<RwLock<FolderPad>>,
|
||||
}
|
||||
|
||||
impl ResolverTarget<PlainTextAttributes> for FolderRevisionResolveTarget {
|
||||
fn compose_delta(&self, delta: Delta<PlainTextAttributes>) -> BoxResultFuture<DeltaMD5, FlowyError> {
|
||||
impl ResolverTarget<PlainAttributes> for FolderRevisionResolveTarget {
|
||||
fn compose_delta(&self, delta: Delta<PlainAttributes>) -> BoxResultFuture<DeltaMD5, FlowyError> {
|
||||
let folder_pad = self.folder_pad.clone();
|
||||
Box::pin(async move {
|
||||
let md5 = folder_pad.write().compose_remote_delta(delta)?;
|
||||
@ -69,8 +69,8 @@ impl ResolverTarget<PlainTextAttributes> for FolderRevisionResolveTarget {
|
||||
|
||||
fn transform_delta(
|
||||
&self,
|
||||
delta: Delta<PlainTextAttributes>,
|
||||
) -> BoxResultFuture<TransformDeltas<PlainTextAttributes>, FlowyError> {
|
||||
delta: Delta<PlainAttributes>,
|
||||
) -> BoxResultFuture<TransformDeltas<PlainAttributes>, FlowyError> {
|
||||
let folder_pad = self.folder_pad.clone();
|
||||
Box::pin(async move {
|
||||
let read_guard = folder_pad.read();
|
||||
@ -92,7 +92,7 @@ impl ResolverTarget<PlainTextAttributes> for FolderRevisionResolveTarget {
|
||||
})
|
||||
}
|
||||
|
||||
fn reset_delta(&self, delta: Delta<PlainTextAttributes>) -> BoxResultFuture<DeltaMD5, FlowyError> {
|
||||
fn reset_delta(&self, delta: Delta<PlainAttributes>) -> BoxResultFuture<DeltaMD5, FlowyError> {
|
||||
let folder_pad = self.folder_pad.clone();
|
||||
Box::pin(async move {
|
||||
let md5 = folder_pad.write().reset_folder(delta)?;
|
||||
@ -102,7 +102,7 @@ impl ResolverTarget<PlainTextAttributes> for FolderRevisionResolveTarget {
|
||||
}
|
||||
|
||||
struct FolderWSStreamConsumerAdapter {
|
||||
resolver: Arc<RevisionConflictResolver<PlainTextAttributes>>,
|
||||
resolver: Arc<RevisionConflictResolver<PlainAttributes>>,
|
||||
}
|
||||
|
||||
impl RevisionWSSteamConsumer for FolderWSStreamConsumerAdapter {
|
||||
|
@ -64,8 +64,8 @@ async fn workspace_read() {
|
||||
async fn workspace_create_with_apps() {
|
||||
let mut test = FolderTest::new().await;
|
||||
test.run_scripts(vec![CreateApp {
|
||||
name: "App",
|
||||
desc: "App description",
|
||||
name: "App".to_string(),
|
||||
desc: "App description".to_string(),
|
||||
}])
|
||||
.await;
|
||||
|
||||
@ -147,12 +147,12 @@ async fn app_create_with_view() {
|
||||
let mut app = test.app.clone();
|
||||
test.run_scripts(vec![
|
||||
CreateView {
|
||||
name: "View A",
|
||||
desc: "View A description",
|
||||
name: "View A".to_owned(),
|
||||
desc: "View A description".to_owned(),
|
||||
},
|
||||
CreateView {
|
||||
name: "View B",
|
||||
desc: "View B description",
|
||||
name: "View B".to_owned(),
|
||||
desc: "View B description".to_owned(),
|
||||
},
|
||||
ReadApp(app.id),
|
||||
])
|
||||
@ -219,12 +219,12 @@ async fn view_delete_all() {
|
||||
let app = test.app.clone();
|
||||
test.run_scripts(vec![
|
||||
CreateView {
|
||||
name: "View A",
|
||||
desc: "View A description",
|
||||
name: "View A".to_owned(),
|
||||
desc: "View A description".to_owned(),
|
||||
},
|
||||
CreateView {
|
||||
name: "View B",
|
||||
desc: "View B description",
|
||||
name: "View B".to_owned(),
|
||||
desc: "View B description".to_owned(),
|
||||
},
|
||||
ReadApp(app.id.clone()),
|
||||
])
|
||||
@ -250,8 +250,8 @@ async fn view_delete_all_permanent() {
|
||||
let app = test.app.clone();
|
||||
test.run_scripts(vec![
|
||||
CreateView {
|
||||
name: "View A",
|
||||
desc: "View A description",
|
||||
name: "View A".to_owned(),
|
||||
desc: "View A description".to_owned(),
|
||||
},
|
||||
ReadApp(app.id.clone()),
|
||||
])
|
||||
@ -299,13 +299,8 @@ async fn folder_sync_revision_seq() {
|
||||
rev_id: 2,
|
||||
state: RevisionState::Sync,
|
||||
},
|
||||
AssertRevisionState {
|
||||
rev_id: 3,
|
||||
state: RevisionState::Sync,
|
||||
},
|
||||
AssertNextSyncRevId(Some(1)),
|
||||
AssertNextSyncRevId(Some(2)),
|
||||
AssertNextSyncRevId(Some(3)),
|
||||
AssertRevisionState {
|
||||
rev_id: 1,
|
||||
state: RevisionState::Ack,
|
||||
@ -314,10 +309,6 @@ async fn folder_sync_revision_seq() {
|
||||
rev_id: 2,
|
||||
state: RevisionState::Ack,
|
||||
},
|
||||
AssertRevisionState {
|
||||
rev_id: 3,
|
||||
state: RevisionState::Ack,
|
||||
},
|
||||
])
|
||||
.await;
|
||||
}
|
||||
@ -325,35 +316,50 @@ async fn folder_sync_revision_seq() {
|
||||
#[tokio::test]
|
||||
async fn folder_sync_revision_with_new_app() {
|
||||
let mut test = FolderTest::new().await;
|
||||
let app_name = "AppFlowy contributors".to_owned();
|
||||
let app_desc = "Welcome to be a AppFlowy contributor".to_owned();
|
||||
|
||||
test.run_scripts(vec![
|
||||
AssertNextSyncRevId(Some(1)),
|
||||
AssertNextSyncRevId(Some(2)),
|
||||
AssertNextSyncRevId(Some(3)),
|
||||
CreateApp {
|
||||
name: "New App",
|
||||
desc: "",
|
||||
name: app_name.clone(),
|
||||
desc: app_desc.clone(),
|
||||
},
|
||||
AssertCurrentRevId(4),
|
||||
AssertNextSyncRevId(Some(4)),
|
||||
AssertCurrentRevId(3),
|
||||
AssertNextSyncRevId(Some(3)),
|
||||
AssertNextSyncRevId(None),
|
||||
])
|
||||
.await;
|
||||
|
||||
let app = test.app.clone();
|
||||
assert_eq!(app.name, app_name);
|
||||
assert_eq!(app.desc, app_desc);
|
||||
test.run_scripts(vec![ReadApp(app.id.clone()), AssertApp(app)]).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn folder_sync_revision_with_new_view() {
|
||||
let mut test = FolderTest::new().await;
|
||||
let view_name = "AppFlowy features".to_owned();
|
||||
let view_desc = "😁".to_owned();
|
||||
|
||||
test.run_scripts(vec![
|
||||
AssertNextSyncRevId(Some(1)),
|
||||
AssertNextSyncRevId(Some(2)),
|
||||
AssertNextSyncRevId(Some(3)),
|
||||
CreateView {
|
||||
name: "New App",
|
||||
desc: "",
|
||||
name: view_name.clone(),
|
||||
desc: view_desc.clone(),
|
||||
},
|
||||
AssertCurrentRevId(4),
|
||||
AssertNextSyncRevId(Some(4)),
|
||||
AssertCurrentRevId(3),
|
||||
AssertNextSyncRevId(Some(3)),
|
||||
AssertNextSyncRevId(None),
|
||||
])
|
||||
.await;
|
||||
|
||||
let view = test.view.clone();
|
||||
assert_eq!(view.name, view_name);
|
||||
assert_eq!(view.desc, view_desc);
|
||||
test.run_scripts(vec![ReadView(view.id.clone()), AssertView(view)])
|
||||
.await;
|
||||
}
|
||||
|
@ -21,7 +21,7 @@ pub enum FolderScript {
|
||||
ReadWorkspace(Option<String>),
|
||||
|
||||
// App
|
||||
CreateApp { name: &'static str, desc: &'static str },
|
||||
CreateApp { name: String, desc: String },
|
||||
AssertAppJson(String),
|
||||
AssertApp(App),
|
||||
ReadApp(String),
|
||||
@ -29,7 +29,7 @@ pub enum FolderScript {
|
||||
DeleteApp,
|
||||
|
||||
// View
|
||||
CreateView { name: &'static str, desc: &'static str },
|
||||
CreateView { name: String, desc: String },
|
||||
AssertView(View),
|
||||
ReadView(String),
|
||||
UpdateView { name: Option<String>, desc: Option<String> },
|
||||
@ -124,7 +124,7 @@ impl FolderTest {
|
||||
self.workspace = workspace;
|
||||
}
|
||||
FolderScript::CreateApp { name, desc } => {
|
||||
let app = create_app(sdk, &self.workspace.id, name, desc).await;
|
||||
let app = create_app(sdk, &self.workspace.id, &name, &desc).await;
|
||||
self.app = app;
|
||||
}
|
||||
FolderScript::AssertAppJson(expected_json) => {
|
||||
@ -146,7 +146,7 @@ impl FolderTest {
|
||||
}
|
||||
|
||||
FolderScript::CreateView { name, desc } => {
|
||||
let view = create_view(sdk, &self.app.id, name, desc, ViewType::Doc).await;
|
||||
let view = create_view(sdk, &self.app.id, &name, &desc, ViewType::Doc).await;
|
||||
self.view = view;
|
||||
}
|
||||
FolderScript::AssertView(view) => {
|
||||
@ -193,7 +193,7 @@ impl FolderTest {
|
||||
}
|
||||
}
|
||||
FolderScript::AssertCurrentRevId(rev_id) => {
|
||||
assert_eq!(rev_manager.rev_id(), rev_id);
|
||||
assert_eq!(rev_manager.rev_id(), rev_id, "Current rev_id is not match");
|
||||
}
|
||||
FolderScript::AssertNextSyncRevId(rev_id) => {
|
||||
let next_revision = rev_manager.next_sync_revision().await.unwrap();
|
||||
@ -201,7 +201,8 @@ impl FolderTest {
|
||||
assert!(next_revision.is_none(), "Next revision should be None");
|
||||
return;
|
||||
}
|
||||
let next_revision = next_revision.unwrap();
|
||||
let next_revision = next_revision
|
||||
.unwrap_or_else(|| panic!("Expected Next revision is {}, but receive None", rev_id.unwrap()));
|
||||
let mut receiver = rev_manager.revision_ack_receiver();
|
||||
let _ = receiver.recv().await;
|
||||
assert_eq!(next_revision.rev_id, rev_id.unwrap());
|
||||
|
@ -194,23 +194,23 @@ impl EditorCommandQueue {

pub(crate) struct DocumentRevisionCompact();
impl RevisionCompact for DocumentRevisionCompact {
fn compact_revisions(user_id: &str, object_id: &str, revisions: Vec<Revision>) -> FlowyResult<Revision> {
match revisions.last() {
None => Err(FlowyError::internal().context("compact revisions is empty")),
Some(last_revision) => {
let (base_rev_id, rev_id) = last_revision.pair_rev_id();
let md5 = last_revision.md5.clone();
let delta = make_delta_from_revisions::<RichTextAttributes>(revisions)?;
Ok(Revision::new(
object_id,
base_rev_id,
rev_id,
delta.to_bytes(),
user_id,
md5,
))
}
fn compact_revisions(user_id: &str, object_id: &str, mut revisions: Vec<Revision>) -> FlowyResult<Revision> {
if revisions.is_empty() {
return Err(FlowyError::internal().context("Can't compact the empty document's revisions"));
}

if revisions.len() == 1 {
return Ok(revisions.pop().unwrap());
}

let first_revision = revisions.first().unwrap();
let last_revision = revisions.last().unwrap();

let (base_rev_id, rev_id) = first_revision.pair_rev_id();
let md5 = last_revision.md5.clone();
let delta = make_delta_from_revisions::<RichTextAttributes>(revisions)?;
let delta_data = delta.to_bytes();
Ok(Revision::new(object_id, base_rev_id, rev_id, delta_data, user_id, md5))
}
}

@ -11,7 +11,7 @@ async fn document_sync_current_rev_id_check() {
|
||||
AssertCurrentRevId(2),
|
||||
InsertText("3", 2),
|
||||
AssertCurrentRevId(3),
|
||||
AssertNextRevId(None),
|
||||
AssertNextSyncRevId(None),
|
||||
AssertJson(r#"[{"insert":"123\n"}]"#),
|
||||
];
|
||||
EditorTest::new().await.run_scripts(scripts).await;
|
||||
@ -38,7 +38,7 @@ async fn document_sync_insert_test() {
|
||||
InsertText("2", 1),
|
||||
InsertText("3", 2),
|
||||
AssertJson(r#"[{"insert":"123\n"}]"#),
|
||||
AssertNextRevId(None),
|
||||
AssertNextSyncRevId(None),
|
||||
];
|
||||
EditorTest::new().await.run_scripts(scripts).await;
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ pub enum EditorScript {
|
||||
Replace(Interval, &'static str),
|
||||
|
||||
AssertRevisionState(i64, RevisionState),
|
||||
AssertNextRevId(Option<i64>),
|
||||
AssertNextSyncRevId(Option<i64>),
|
||||
AssertCurrentRevId(i64),
|
||||
AssertJson(&'static str),
|
||||
}
|
||||
@ -60,13 +60,15 @@ impl EditorTest {
|
||||
EditorScript::AssertCurrentRevId(rev_id) => {
|
||||
assert_eq!(self.editor.rev_manager().rev_id(), rev_id);
|
||||
}
|
||||
EditorScript::AssertNextRevId(rev_id) => {
|
||||
EditorScript::AssertNextSyncRevId(rev_id) => {
|
||||
let next_revision = rev_manager.next_sync_revision().await.unwrap();
|
||||
if rev_id.is_none() {
|
||||
assert!(next_revision.is_none(), "Next revision should be None");
|
||||
return;
|
||||
}
|
||||
let next_revision = next_revision.unwrap();
|
||||
let mut receiver = rev_manager.revision_ack_receiver();
|
||||
let _ = receiver.recv().await;
|
||||
assert_eq!(next_revision.rev_id, rev_id.unwrap());
|
||||
}
|
||||
EditorScript::AssertJson(expected) => {
|
||||
|
@ -9,7 +9,7 @@ use std::fmt::Debug;

pub trait RevisionDiskCache: Sync + Send {
type Error: Debug;
fn write_revision_records(
fn create_revision_records(
&self,
revision_records: Vec<RevisionRecord>,
conn: &SqliteConnection,
@ -39,5 +39,12 @@ pub trait RevisionDiskCache: Sync + Send {
conn: &SqliteConnection,
) -> Result<(), Self::Error>;

fn reset_object(&self, object_id: &str, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error>;
// Delete and insert will be executed in the same transaction.
// It deletes all the records if deleted_rev_ids is None, and then inserts the new records.
fn delete_and_insert_records(
&self,
object_id: &str,
deleted_rev_ids: Option<Vec<i64>>,
inserted_records: Vec<RevisionRecord>,
) -> Result<(), Self::Error>;
}
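`delete_and_insert_records` replaces `reset_object` and is also the primitive the compaction path uses to swap a range of persisted revisions for a single compacted one: delete the listed rev_ids (or every record for the object when `deleted_rev_ids` is `None`), then insert the new records as one logical step. The sketch below models that contract with a hypothetical in-memory store; it is not the real implementation, which is the SQLite-backed one in the next hunk, where both steps run inside `immediate_transaction`.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical in-memory test double for the trait's new contract.
#[derive(Debug, Clone)]
struct Record {
    object_id: String,
    rev_id: i64,
}

#[derive(Default)]
struct MemoryRevisionStore {
    // Keyed by (object_id, rev_id).
    records: Mutex<HashMap<(String, i64), Record>>,
}

impl MemoryRevisionStore {
    fn delete_and_insert_records(
        &self,
        object_id: &str,
        deleted_rev_ids: Option<Vec<i64>>,
        inserted_records: Vec<Record>,
    ) -> Result<(), String> {
        // Holding the lock for the whole operation plays the role of the
        // SQLite transaction in the real implementation.
        let mut map = self.records.lock().map_err(|e| e.to_string())?;
        match deleted_rev_ids {
            None => map.retain(|(oid, _), _| oid.as_str() != object_id),
            Some(rev_ids) => {
                for rev_id in rev_ids {
                    map.remove(&(object_id.to_owned(), rev_id));
                }
            }
        }
        for record in inserted_records {
            map.insert((record.object_id.clone(), record.rev_id), record);
        }
        Ok(())
    }
}

fn main() {
    let store = MemoryRevisionStore::default();
    let rec = |rev_id: i64| Record { object_id: "folder-1".into(), rev_id };
    // Reset: drop everything for the object, then insert the new records.
    store.delete_and_insert_records("folder-1", None, vec![rec(1), rec(2), rec(3)]).unwrap();
    // Compact: replace revisions 2..=3 with a single compacted revision 3.
    store.delete_and_insert_records("folder-1", Some(vec![2, 3]), vec![rec(3)]).unwrap();
    assert_eq!(store.records.lock().unwrap().len(), 2);
}
```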
@ -22,7 +22,7 @@ pub struct SQLitePersistence {
impl RevisionDiskCache for SQLitePersistence {
type Error = FlowyError;

fn write_revision_records(
fn create_revision_records(
&self,
revision_records: Vec<RevisionRecord>,
conn: &SqliteConnection,
@ -72,11 +72,16 @@ impl RevisionDiskCache for SQLitePersistence {
Ok(())
}

fn reset_object(&self, object_id: &str, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
fn delete_and_insert_records(
&self,
object_id: &str,
deleted_rev_ids: Option<Vec<i64>>,
inserted_records: Vec<RevisionRecord>,
) -> Result<(), Self::Error> {
let conn = self.pool.get().map_err(internal_error)?;
conn.immediate_transaction::<_, FlowyError, _>(|| {
let _ = self.delete_revision_records(object_id, None, &*conn)?;
let _ = self.write_revision_records(revision_records, &*conn)?;
let _ = self.delete_revision_records(object_id, deleted_rev_ids, &*conn)?;
let _ = self.create_revision_records(inserted_records, &*conn)?;
Ok(())
})
}
@ -96,6 +101,7 @@ pub struct RevisionTableSql {}
impl RevisionTableSql {
pub(crate) fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
// Batch insert: https://diesel.rs/guides/all-about-inserts.html

let records = revision_records
.into_iter()
.map(|record| {
@ -172,7 +178,8 @@ impl RevisionTableSql {
rev_ids: Option<Vec<i64>>,
conn: &SqliteConnection,
) -> Result<(), FlowyError> {
let mut sql = dsl::rev_table.filter(dsl::doc_id.eq(object_id)).into_boxed();
let filter = dsl::rev_table.filter(dsl::doc_id.eq(object_id));
let mut sql = diesel::delete(filter).into_boxed();
if let Some(rev_ids) = rev_ids {
sql = sql.filter(dsl::rev_id.eq_any(rev_ids));
}
frontend/rust-lib/flowy-sync/src/cache/memory.rs
@ -40,19 +40,12 @@ impl RevisionMemoryCache {
};

let rev_id = record.revision.rev_id;
if self.revs_map.contains_key(&rev_id) {
return;
}

if let Some(rev_id) = self.pending_write_revs.read().await.last() {
if *rev_id >= record.revision.rev_id {
tracing::error!("Duplicated revision added to memory_cache");
return;
}
}
self.revs_map.insert(rev_id, record);
self.pending_write_revs.write().await.push(rev_id);
self.make_checkpoint().await;

if !self.pending_write_revs.read().await.contains(&rev_id) {
self.pending_write_revs.write().await.push(rev_id);
self.make_checkpoint().await;
}
}

pub(crate) async fn ack(&self, rev_id: &i64) {
@ -61,12 +54,12 @@ impl RevisionMemoryCache {
Some(mut record) => record.ack(),
}

if !self.pending_write_revs.read().await.contains(rev_id) {
if self.pending_write_revs.read().await.contains(rev_id) {
self.make_checkpoint().await;
} else {
// The revision must be saved on disk if the pending_write_revs
// doesn't contain the rev_id.
self.delegate.receive_ack(&self.object_id, *rev_id);
} else {
self.make_checkpoint().await;
}
}

@ -74,6 +67,16 @@ impl RevisionMemoryCache {
self.revs_map.get(rev_id).map(|r| r.value().clone())
}

pub(crate) fn remove(&self, rev_id: &i64) {
let _ = self.revs_map.remove(rev_id);
}

pub(crate) fn remove_with_range(&self, range: &RevisionRange) {
for rev_id in range.iter() {
self.remove(&rev_id);
}
}

pub(crate) async fn get_with_range(&self, range: &RevisionRange) -> Result<Vec<RevisionRecord>, FlowyError> {
let revs = range
.iter()
@ -82,7 +85,7 @@ impl RevisionMemoryCache {
Ok(revs)
}

pub(crate) async fn reset_with_revisions(&self, revision_records: &[RevisionRecord]) -> FlowyResult<()> {
pub(crate) async fn reset_with_revisions(&self, revision_records: Vec<RevisionRecord>) {
self.revs_map.clear();
if let Some(handler) = self.defer_save.write().await.take() {
handler.abort();
@ -91,13 +94,12 @@ impl RevisionMemoryCache {
let mut write_guard = self.pending_write_revs.write().await;
write_guard.clear();
for record in revision_records {
self.revs_map.insert(record.revision.rev_id, record.clone());
write_guard.push(record.revision.rev_id);
self.revs_map.insert(record.revision.rev_id, record);
}
drop(write_guard);

self.make_checkpoint().await;
Ok(())
}

async fn make_checkpoint(&self) {
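The memory-cache hunks above encode two rules: `add` only queues a revision for the next disk checkpoint when its rev_id is not already pending, and `ack` triggers a checkpoint while the revision is still pending but otherwise forwards the ack to the delegate, since the record already lives on disk. Here is a simplified, synchronous model of those rules; the real `RevisionMemoryCache` is async, uses concurrent maps, and defers checkpoints to a background task.

```rust
use std::collections::{HashMap, VecDeque};

// Simplified, synchronous model of the memory-cache rules in the hunks above.
#[derive(Default)]
struct MemoryCacheModel {
    revs_map: HashMap<i64, String>,    // rev_id -> record payload stand-in
    pending_write_revs: VecDeque<i64>, // revisions not yet flushed to disk
    checkpoints: usize,                // how many times a checkpoint was requested
    acked_on_disk: Vec<i64>,           // acks forwarded to the delegate
}

impl MemoryCacheModel {
    fn add(&mut self, rev_id: i64, record: String) {
        if self.revs_map.contains_key(&rev_id) {
            return; // duplicate revision, ignore
        }
        self.revs_map.insert(rev_id, record);
        // Only queue a disk write (and checkpoint) if it is not already pending.
        if !self.pending_write_revs.contains(&rev_id) {
            self.pending_write_revs.push_back(rev_id);
            self.checkpoints += 1;
        }
    }

    fn ack(&mut self, rev_id: i64) {
        if self.pending_write_revs.contains(&rev_id) {
            // Still in memory only: flush it so the Ack state reaches disk.
            self.checkpoints += 1;
        } else {
            // Already written to disk: tell the delegate to mark it acked there.
            self.acked_on_disk.push(rev_id);
        }
    }
}

fn main() {
    let mut cache = MemoryCacheModel::default();
    cache.add(1, "rev-1".into());
    cache.add(1, "rev-1 again".into()); // ignored as a duplicate
    cache.ack(1);                       // still pending -> checkpoint
    cache.pending_write_revs.clear();   // pretend the checkpoint flushed it
    cache.ack(1);                       // no longer pending -> delegate ack
    assert_eq!(cache.checkpoints, 2);
    assert_eq!(cache.acked_on_disk, vec![1]);
}
```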
frontend/rust-lib/flowy-sync/src/cache/mod.rs
@ -5,10 +5,11 @@ use crate::cache::{
disk::{RevisionChangeset, RevisionDiskCache, RevisionTableState, SQLitePersistence},
memory::{RevisionMemoryCache, RevisionMemoryCacheDelegate},
};
use crate::RevisionCompact;

use flowy_collaboration::entities::revision::{Revision, RevisionRange, RevisionState};
use flowy_database::ConnectionPool;
use flowy_error::{internal_error, FlowyError, FlowyResult};

use std::{
borrow::Cow,
sync::{
@ -26,14 +27,6 @@ pub struct RevisionCache {
memory_cache: Arc<RevisionMemoryCache>,
latest_rev_id: AtomicI64,
}

pub fn mk_revision_disk_cache(
user_id: &str,
pool: Arc<ConnectionPool>,
) -> Arc<dyn RevisionDiskCache<Error = FlowyError>> {
Arc::new(SQLitePersistence::new(user_id, pool))
}

impl RevisionCache {
pub fn new(user_id: &str, object_id: &str, pool: Arc<ConnectionPool>) -> RevisionCache {
let disk_cache = Arc::new(SQLitePersistence::new(user_id, pool));
@ -51,7 +44,6 @@ impl RevisionCache {
if self.memory_cache.contains(&revision.rev_id) {
return Err(FlowyError::internal().context(format!("Duplicate revision: {} {:?}", revision.rev_id, state)));
}
let state = state.as_ref().clone();
let rev_id = revision.rev_id;
let record = RevisionRecord {
revision,
@ -64,6 +56,24 @@ impl RevisionCache {
Ok(())
}

pub async fn compact(&self, range: &RevisionRange, new_revision: Revision) -> FlowyResult<()> {
self.memory_cache.remove_with_range(range);
let rev_id = new_revision.rev_id;
let record = RevisionRecord {
revision: new_revision,
state: RevisionState::Sync,
write_to_disk: true,
};

let rev_ids = range.to_rev_ids();
let _ = self
.disk_cache
.delete_and_insert_records(&self.object_id, Some(rev_ids), vec![record.clone()])?;
self.memory_cache.add(Cow::Owned(record)).await;
self.set_latest_rev_id(rev_id);
Ok(())
}

pub async fn ack(&self, rev_id: i64) {
self.memory_cache.ack(&rev_id).await;
}
@ -93,7 +103,8 @@ impl RevisionCache {
}

// Read the revision which rev_id >= range.start && rev_id <= range.end
pub async fn revisions_in_range(&self, range: RevisionRange) -> FlowyResult<Vec<Revision>> {
pub async fn revisions_in_range(&self, range: &RevisionRange) -> FlowyResult<Vec<Revision>> {
let range = range.clone();
let mut records = self.memory_cache.get_with_range(&range).await?;
let range_len = range.len() as usize;
if records.len() != range_len {
@ -104,6 +115,11 @@ impl RevisionCache {
.map_err(internal_error)??;

if records.len() != range_len {
// #[cfg(debug_assertions)]
// records.iter().for_each(|record| {
// let delta = PlainDelta::from_bytes(&record.revision.delta_data).unwrap();
// tracing::trace!("{}", delta.to_string());
// });
tracing::error!("Revisions len is not equal to range required");
}
}
@ -113,9 +129,9 @@ impl RevisionCache {
.collect::<Vec<Revision>>())
}

#[tracing::instrument(level = "debug", skip(self, revisions))]
#[tracing::instrument(level = "debug", skip(self, revisions), err)]
pub async fn reset_with_revisions(&self, object_id: &str, revisions: Vec<Revision>) -> FlowyResult<()> {
let revision_records = revisions
let records = revisions
.to_vec()
.into_iter()
.map(|revision| RevisionRecord {
@ -125,8 +141,11 @@ impl RevisionCache {
})
.collect::<Vec<_>>();

let _ = self.memory_cache.reset_with_revisions(&revision_records).await?;
let _ = self.disk_cache.reset_object(object_id, revision_records)?;
let _ = self
.disk_cache
.delete_and_insert_records(object_id, None, records.clone())?;
let _ = self.memory_cache.reset_with_revisions(records).await;

Ok(())
}

@ -136,6 +155,13 @@ impl RevisionCache {
}
}

pub fn mk_revision_disk_cache(
user_id: &str,
pool: Arc<ConnectionPool>,
) -> Arc<dyn RevisionDiskCache<Error = FlowyError>> {
Arc::new(SQLitePersistence::new(user_id, pool))
}

impl RevisionMemoryCacheDelegate for Arc<SQLitePersistence> {
#[tracing::instrument(level = "trace", skip(self, records), fields(checkpoint_result), err)]
fn checkpoint_tick(&self, mut records: Vec<RevisionRecord>) -> FlowyResult<()> {
@ -146,7 +172,7 @@ impl RevisionMemoryCacheDelegate for Arc<SQLitePersistence> {
"checkpoint_result",
&format!("{} records were saved", records.len()).as_str(),
);
let _ = self.write_revision_records(records, conn)?;
let _ = self.create_revision_records(records, conn)?;
}
Ok(())
}
@ -164,7 +190,7 @@ impl RevisionMemoryCacheDelegate for Arc<SQLitePersistence> {
}
}

#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct RevisionRecord {
pub revision: Revision,
pub state: RevisionState,
@ -1,12 +1,11 @@
|
||||
use crate::RevisionCache;
|
||||
|
||||
use flowy_collaboration::{
|
||||
entities::revision::{RepeatedRevision, Revision, RevisionRange, RevisionState},
|
||||
util::{pair_rev_id_from_revisions, RevIdCounter},
|
||||
};
|
||||
use flowy_error::{FlowyError, FlowyResult};
|
||||
use lib_infra::future::FutureResult;
|
||||
use lib_ot::core::Attributes;
|
||||
|
||||
use std::{collections::VecDeque, sync::Arc};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
@ -36,7 +35,11 @@ pub struct RevisionManager {
|
||||
impl RevisionManager {
|
||||
pub fn new(user_id: &str, object_id: &str, revision_cache: Arc<RevisionCache>) -> Self {
|
||||
let rev_id_counter = RevIdCounter::new(0);
|
||||
let cache = Arc::new(RwLock::new(RevisionCacheCompact::new(object_id, revision_cache)));
|
||||
let cache = Arc::new(RwLock::new(RevisionCacheCompact::new(
|
||||
object_id,
|
||||
user_id,
|
||||
revision_cache,
|
||||
)));
|
||||
#[cfg(feature = "flowy_unit_test")]
|
||||
let (revision_ack_notifier, _) = tokio::sync::broadcast::channel(1);
|
||||
|
||||
@ -71,7 +74,9 @@ impl RevisionManager {
|
||||
#[tracing::instrument(level = "debug", skip(self, revisions), err)]
|
||||
pub async fn reset_object(&self, revisions: RepeatedRevision) -> FlowyResult<()> {
|
||||
let rev_id = pair_rev_id_from_revisions(&revisions).1;
|
||||
let _ = self.cache.write().await.reset(revisions.into_inner()).await?;
|
||||
|
||||
let write_guard = self.cache.write().await;
|
||||
let _ = write_guard.reset(revisions.into_inner()).await?;
|
||||
self.rev_id_counter.set(rev_id);
|
||||
Ok(())
|
||||
}
|
||||
@ -81,7 +86,9 @@ impl RevisionManager {
|
||||
if revision.delta_data.is_empty() {
|
||||
return Err(FlowyError::internal().context("Delta data should be empty"));
|
||||
}
|
||||
self.cache.read().await.add_ack_revision(revision).await?;
|
||||
|
||||
let write_guard = self.cache.write().await;
|
||||
let _ = write_guard.add_ack_revision(revision).await?;
|
||||
self.rev_id_counter.set(revision.rev_id);
|
||||
Ok(())
|
||||
}
|
||||
@ -94,7 +101,10 @@
if revision.delta_data.is_empty() {
return Err(FlowyError::internal().context("Delta data should be empty"));
}
self.cache.write().await.add_sync_revision::<C>(revision, true).await?;
let mut write_guard = self.cache.write().await;
let rev_id = write_guard.write_sync_revision::<C>(revision).await?;

self.rev_id_counter.set(rev_id);
Ok(())
}
|
||||
|
||||
@ -118,7 +128,7 @@ impl RevisionManager {
|
||||
}
|
||||
|
||||
pub async fn get_revisions_in_range(&self, range: RevisionRange) -> Result<Vec<Revision>, FlowyError> {
|
||||
let revisions = self.cache.read().await.revisions_in_range(range.clone()).await?;
|
||||
let revisions = self.cache.read().await.revisions_in_range(&range).await?;
|
||||
Ok(revisions)
|
||||
}
|
||||
|
||||
@ -143,16 +153,19 @@

struct RevisionCacheCompact {
object_id: String,
user_id: String,
inner: Arc<RevisionCache>,
sync_seq: RevisionSyncSequence,
}

impl RevisionCacheCompact {
fn new(object_id: &str, inner: Arc<RevisionCache>) -> Self {
fn new(object_id: &str, user_id: &str, inner: Arc<RevisionCache>) -> Self {
let sync_seq = RevisionSyncSequence::new();
let object_id = object_id.to_owned();
let user_id = user_id.to_owned();
Self {
object_id,
user_id,
inner,
sync_seq,
}
@ -162,23 +175,49 @@ impl RevisionCacheCompact {
self.inner.add(revision.clone(), RevisionState::Ack, true).await
}

async fn add_sync_revision<C>(&mut self, revision: &Revision, write_to_disk: bool) -> FlowyResult<()>
async fn add_sync_revision(&mut self, revision: &Revision) -> FlowyResult<()> {
self.inner.add(revision.clone(), RevisionState::Sync, false).await?;
self.sync_seq.add(revision.rev_id)?;
Ok(())
}

#[tracing::instrument(level = "trace", skip(self, revision), fields(rev_id, compact_range), err)]
async fn write_sync_revision<C>(&mut self, revision: &Revision) -> FlowyResult<i64>
where
C: RevisionCompact,
{
// match self.sync_seq.remaining_rev_ids() {
// None => {}
// Some(range) => {
// let revisions = self.inner.revisions_in_range(range).await?;
// let compact_revision = C::compact_revisions("", "", revisions)?;
// }
// }
match self.sync_seq.compact() {
None => {
tracing::Span::current().record("rev_id", &revision.rev_id);
self.inner.add(revision.clone(), RevisionState::Sync, true).await?;
self.sync_seq.add(revision.rev_id)?;
Ok(revision.rev_id)
}
Some((range, mut compact_seq)) => {
tracing::Span::current().record("compact_range", &format!("{}", range).as_str());
let mut revisions = self.inner.revisions_in_range(&range).await?;
if range.to_rev_ids().len() != revisions.len() {
debug_assert_eq!(range.to_rev_ids().len(), revisions.len());
}

self.inner
.add(revision.clone(), RevisionState::Sync, write_to_disk)
.await?;
self.sync_seq.add_record(revision.rev_id)?;
Ok(())
// append the new revision
revisions.push(revision.clone());

// compact multiple revisions into one
let compact_revision = C::compact_revisions(&self.user_id, &self.object_id, revisions)?;
let rev_id = compact_revision.rev_id;
tracing::Span::current().record("rev_id", &rev_id);

// insert new revision
compact_seq.push_back(rev_id);

// replace the revisions in range with compact revision
self.inner.compact(&range, compact_revision).await?;
debug_assert_eq!(self.sync_seq.len(), compact_seq.len());
self.sync_seq.reset(compact_seq);
Ok(rev_id)
}
}
}

async fn ack_revision(&mut self, rev_id: i64) -> FlowyResult<()> {
@ -189,9 +228,13 @@ impl RevisionCacheCompact {
}

async fn next_sync_revision(&self) -> FlowyResult<Option<Revision>> {
match self.sync_seq.next_rev_id() {
None => Ok(None),
Some(rev_id) => Ok(self.inner.get(rev_id).await.map(|record| record.revision)),
if cfg!(feature = "flowy_unit_test") {
match self.sync_seq.next_rev_id() {
None => Ok(None),
Some(rev_id) => Ok(self.inner.get(rev_id).await.map(|record| record.revision)),
}
} else {
Ok(None)
}
}

@ -216,7 +259,7 @@ impl RevisionSyncSequence {
RevisionSyncSequence::default()
}

fn add_record(&mut self, new_rev_id: i64) -> FlowyResult<()> {
fn add(&mut self, new_rev_id: i64) -> FlowyResult<()> {
// The new revision's rev_id must be greater than the last one.
if let Some(rev_id) = self.0.back() {
if *rev_id >= new_rev_id {
@ -248,16 +291,24 @@ impl RevisionSyncSequence {
self.0.front().cloned()
}

fn remaining_rev_ids(&self) -> Option<RevisionRange> {
if self.next_rev_id().is_some() {
let mut seq = self.0.clone();
let mut drained = seq.drain(1..).collect::<VecDeque<_>>();
let start = drained.pop_front()?;
let end = drained.pop_back().unwrap_or_else(|| start);
Some(RevisionRange { start, end })
} else {
None
}
fn reset(&mut self, new_seq: VecDeque<i64>) {
self.0 = new_seq;
}

fn len(&self) -> usize {
self.0.len()
}

// Compact the rev_ids into one except the current synchronizing rev_id.
fn compact(&self) -> Option<(RevisionRange, VecDeque<i64>)> {
self.next_rev_id()?;

let mut new_seq = self.0.clone();
let mut drained = new_seq.drain(1..).collect::<VecDeque<_>>();

let start = drained.pop_front()?;
let end = drained.pop_back().unwrap_or(start);
Some((RevisionRange { start, end }, new_seq))
}
}
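`RevisionSyncSequence::compact` keeps the head rev_id (the revision currently being synchronized) in the queue and drains everything behind it into an inclusive range; `write_sync_revision` then compacts those revisions together with the incoming one and pushes the compacted rev_id back. Below is a self-contained sketch of that queue bookkeeping, with a simplified stand-in for `RevisionRange` and a hypothetical compacted rev_id of 5.

```rust
use std::collections::VecDeque;

// Simplified stand-in for flowy's RevisionRange (inclusive).
#[derive(Debug, PartialEq)]
struct RevisionRange {
    start: i64,
    end: i64,
}

// Mirrors RevisionSyncSequence::compact() above: keep the head rev_id,
// drain the rest into an inclusive range.
fn compact(seq: &VecDeque<i64>) -> Option<(RevisionRange, VecDeque<i64>)> {
    seq.front()?; // nothing to do if the sequence is empty

    let mut new_seq = seq.clone();
    let mut drained = new_seq.drain(1..).collect::<VecDeque<_>>();

    let start = drained.pop_front()?; // needs at least two pending revisions
    let end = drained.pop_back().unwrap_or(start);
    Some((RevisionRange { start, end }, new_seq))
}

fn main() {
    let pending: VecDeque<i64> = vec![1, 2, 3, 4].into();
    let (range, mut kept) = compact(&pending).unwrap();
    assert_eq!(range, RevisionRange { start: 2, end: 4 });
    assert_eq!(kept, VecDeque::from(vec![1]));

    // The caller then compacts revisions 2..=4 together with the incoming
    // revision and pushes the compacted rev_id back (5 is hypothetical here).
    kept.push_back(5);
    assert_eq!(kept, VecDeque::from(vec![1, 5]));
}
```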
|
||||
|
||||
@ -288,12 +339,7 @@ impl RevisionLoader {
|
||||
rev_id = record.revision.rev_id;
|
||||
if record.state == RevisionState::Sync {
|
||||
// Sync the records if their state is RevisionState::Sync.
|
||||
let _ = self
|
||||
.cache
|
||||
.write()
|
||||
.await
|
||||
.add_sync_revision::<C>(&record.revision, false)
|
||||
.await?;
|
||||
let _ = self.cache.write().await.add_sync_revision(&record.revision).await?;
|
||||
}
|
||||
}
|
||||
revisions = records.into_iter().map(|record| record.revision).collect::<_>();
|
||||
|
@ -287,23 +287,26 @@ impl RevisionWSSink {
|
||||
}
|
||||
|
||||
async fn send_next_revision(&self) -> FlowyResult<()> {
|
||||
if cfg!(feature = "flowy_unit_test") {
|
||||
match self.provider.next().await? {
|
||||
None => {
|
||||
tracing::trace!("[{}]: Finish synchronizing revisions", self);
|
||||
Ok(())
|
||||
}
|
||||
Some(data) => {
|
||||
tracing::trace!("[{}]: send {}:{}-{:?}", self, data.object_id, data.id(), data.ty);
|
||||
self.ws_sender.send(data).await
|
||||
}
|
||||
match self.provider.next().await? {
|
||||
None => {
|
||||
tracing::trace!("[{}]: Finish synchronizing revisions", self);
|
||||
Ok(())
|
||||
}
|
||||
Some(data) => {
|
||||
tracing::trace!("[{}]: send {}:{}-{:?}", self, data.object_id, data.id(), data.ty);
|
||||
self.ws_sender.send(data).await
|
||||
}
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn tick(sender: mpsc::Sender<()>, duration: Duration) {
|
||||
let mut interval = interval(duration);
|
||||
while sender.send(()).await.is_ok() {
|
||||
interval.tick().await;
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for RevisionWSSink {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_fmt(format_args!("{}RevisionWSSink", self.object_name))
|
||||
@ -316,13 +319,6 @@ impl std::ops::Drop for RevisionWSSink {
|
||||
}
|
||||
}
|
||||
|
||||
async fn tick(sender: mpsc::Sender<()>, duration: Duration) {
|
||||
let mut interval = interval(duration);
|
||||
while sender.send(()).await.is_ok() {
|
||||
interval.tick().await;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
enum Source {
|
||||
Custom,
|
||||
|
@ -6,7 +6,7 @@ use crate::{
|
||||
errors::{CollaborateError, CollaborateResult},
|
||||
};
|
||||
use flowy_core_data_model::entities::{trash::Trash, workspace::Workspace};
|
||||
use lib_ot::core::{OperationTransformable, PlainDelta, PlainDeltaBuilder, PlainTextAttributes};
|
||||
use lib_ot::core::{PlainAttributes, PlainDelta, PlainDeltaBuilder};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
@ -47,7 +47,7 @@ impl FolderPadBuilder {
|
||||
}
|
||||
|
||||
pub(crate) fn build_with_revisions(self, revisions: Vec<Revision>) -> CollaborateResult<FolderPad> {
|
||||
let folder_delta: FolderDelta = make_delta_from_revisions::<PlainTextAttributes>(revisions)?;
|
||||
let folder_delta: FolderDelta = make_delta_from_revisions::<PlainAttributes>(revisions)?;
|
||||
self.build_with_delta(folder_delta)
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@ use crate::{
|
||||
};
|
||||
use dissimilar::*;
|
||||
use flowy_core_data_model::entities::{app::App, trash::Trash, view::View, workspace::Workspace};
|
||||
use lib_ot::core::{Delta, FlowyStr, OperationTransformable, PlainDeltaBuilder, PlainTextAttributes};
|
||||
use lib_ot::core::{Delta, FlowyStr, OperationTransformable, PlainAttributes, PlainDeltaBuilder};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
@ -285,6 +285,11 @@ impl FolderPad {
|
||||
pub fn md5(&self) -> String {
|
||||
md5(&self.root.to_bytes())
|
||||
}
|
||||
|
||||
pub fn to_json(&self) -> CollaborateResult<String> {
|
||||
serde_json::to_string(self)
|
||||
.map_err(|e| CollaborateError::internal().context(format!("serial trash to json failed: {}", e)))
|
||||
}
|
||||
}
|
||||
|
||||
impl FolderPad {
|
||||
@ -372,14 +377,9 @@ impl FolderPad {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn to_json(&self) -> CollaborateResult<String> {
|
||||
serde_json::to_string(self)
|
||||
.map_err(|e| CollaborateError::internal().context(format!("serial trash to json failed: {}", e)))
|
||||
}
|
||||
}
|
||||
|
||||
fn cal_diff(old: String, new: String) -> Delta<PlainTextAttributes> {
|
||||
fn cal_diff(old: String, new: String) -> Delta<PlainAttributes> {
|
||||
let chunks = dissimilar::diff(&old, &new);
|
||||
let mut delta_builder = PlainDeltaBuilder::new();
|
||||
for chunk in &chunks {
|
||||
|
@ -200,9 +200,13 @@ impl RevisionRange {
}

pub fn iter(&self) -> RangeInclusive<i64> {
debug_assert!(self.start != self.end);
// debug_assert!(self.start != self.end);
RangeInclusive::new(self.start, self.end)
}

pub fn to_rev_ids(&self) -> Vec<i64> {
self.iter().collect::<Vec<_>>()
}
}

#[inline]
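With the `debug_assert` commented out above, a `RevisionRange` whose `start` equals its `end` is now treated as a valid single-revision range, which the compaction path can produce. A small stand-alone illustration using a simplified stand-in type:

```rust
use std::ops::RangeInclusive;

// Minimal stand-in for RevisionRange, reflecting the change above.
struct RevisionRange {
    start: i64,
    end: i64,
}

impl RevisionRange {
    fn iter(&self) -> RangeInclusive<i64> {
        RangeInclusive::new(self.start, self.end)
    }

    fn to_rev_ids(&self) -> Vec<i64> {
        self.iter().collect::<Vec<_>>()
    }
}

fn main() {
    // A degenerate, single-revision range is now allowed.
    let single = RevisionRange { start: 3, end: 3 };
    assert_eq!(single.to_rev_ids(), vec![3]);

    let span = RevisionRange { start: 2, end: 4 };
    assert_eq!(span.to_rev_ids(), vec![2, 3, 4]);
}
```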
@ -12,7 +12,7 @@ use crate::{
|
||||
use async_stream::stream;
|
||||
use futures::stream::StreamExt;
|
||||
use lib_infra::future::BoxResultFuture;
|
||||
use lib_ot::core::PlainTextAttributes;
|
||||
use lib_ot::core::PlainAttributes;
|
||||
use std::{collections::HashMap, fmt::Debug, sync::Arc};
|
||||
use tokio::{
|
||||
sync::{mpsc, oneshot, RwLock},
|
||||
@ -187,7 +187,7 @@ impl ServerFolderManager {
|
||||
}
|
||||
}
|
||||
|
||||
type FolderRevisionSynchronizer = RevisionSynchronizer<PlainTextAttributes>;
|
||||
type FolderRevisionSynchronizer = RevisionSynchronizer<PlainAttributes>;
|
||||
|
||||
struct OpenFolderHandler {
|
||||
folder_id: String,
|
||||
|
@ -1,5 +1,5 @@
|
||||
use crate::{entities::folder_info::FolderDelta, errors::CollaborateError, synchronizer::RevisionSyncObject};
|
||||
use lib_ot::core::{Delta, OperationTransformable, PlainTextAttributes};
|
||||
use lib_ot::core::{Delta, OperationTransformable, PlainAttributes};
|
||||
|
||||
pub struct ServerFolder {
|
||||
folder_id: String,
|
||||
@ -15,12 +15,12 @@ impl ServerFolder {
|
||||
}
|
||||
}
|
||||
|
||||
impl RevisionSyncObject<PlainTextAttributes> for ServerFolder {
|
||||
impl RevisionSyncObject<PlainAttributes> for ServerFolder {
|
||||
fn id(&self) -> &str {
|
||||
&self.folder_id
|
||||
}
|
||||
|
||||
fn compose(&mut self, other: &Delta<PlainTextAttributes>) -> Result<(), CollaborateError> {
|
||||
fn compose(&mut self, other: &Delta<PlainAttributes>) -> Result<(), CollaborateError> {
|
||||
let new_delta = self.delta.compose(other)?;
|
||||
self.delta = new_delta;
|
||||
Ok(())
|
||||
@ -28,8 +28,8 @@ impl RevisionSyncObject<PlainTextAttributes> for ServerFolder {
|
||||
|
||||
fn transform(
|
||||
&self,
|
||||
other: &Delta<PlainTextAttributes>,
|
||||
) -> Result<(Delta<PlainTextAttributes>, Delta<PlainTextAttributes>), CollaborateError> {
|
||||
other: &Delta<PlainAttributes>,
|
||||
) -> Result<(Delta<PlainAttributes>, Delta<PlainAttributes>), CollaborateError> {
|
||||
let value = self.delta.transform(other)?;
|
||||
Ok(value)
|
||||
}
|
||||
@ -38,7 +38,7 @@ impl RevisionSyncObject<PlainTextAttributes> for ServerFolder {
|
||||
self.delta.to_json()
|
||||
}
|
||||
|
||||
fn set_delta(&mut self, new_delta: Delta<PlainTextAttributes>) {
|
||||
fn set_delta(&mut self, new_delta: Delta<PlainAttributes>) {
|
||||
self.delta = new_delta;
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
use crate::core::{trim, Attributes, Delta, PlainTextAttributes};
|
||||
use crate::core::{trim, Attributes, Delta, PlainAttributes};
|
||||
|
||||
pub type PlainDeltaBuilder = DeltaBuilder<PlainTextAttributes>;
|
||||
pub type PlainDeltaBuilder = DeltaBuilder<PlainAttributes>;
|
||||
|
||||
pub struct DeltaBuilder<T: Attributes> {
|
||||
delta: Delta<T>,
|
||||
|
@ -13,7 +13,7 @@ use std::{
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
pub type PlainDelta = Delta<PlainTextAttributes>;
|
||||
pub type PlainDelta = Delta<PlainAttributes>;
|
||||
|
||||
// TODO: optimize the memory usage with Arc::make_mut or Cow
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
|
@ -1,10 +1,10 @@
|
||||
use crate::{
|
||||
core::{Attributes, Operation, PlainTextAttributes},
|
||||
core::{Attributes, Operation, PlainAttributes},
|
||||
rich_text::RichTextAttributes,
|
||||
};
|
||||
|
||||
pub type RichTextOpBuilder = OpBuilder<RichTextAttributes>;
|
||||
pub type PlainTextOpBuilder = OpBuilder<PlainTextAttributes>;
|
||||
pub type PlainTextOpBuilder = OpBuilder<PlainAttributes>;
|
||||
|
||||
pub struct OpBuilder<T: Attributes> {
|
||||
ty: Operation<T>,
|
||||
|
@ -339,14 +339,14 @@ where
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize)]
|
||||
pub struct PlainTextAttributes();
|
||||
impl fmt::Display for PlainTextAttributes {
|
||||
pub struct PlainAttributes();
|
||||
impl fmt::Display for PlainAttributes {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("PlainTextAttributes")
|
||||
f.write_str("PlainAttributes")
|
||||
}
|
||||
}
|
||||
|
||||
impl Attributes for PlainTextAttributes {
|
||||
impl Attributes for PlainAttributes {
|
||||
fn is_empty(&self) -> bool {
|
||||
true
|
||||
}
|
||||
@ -356,7 +356,7 @@ impl Attributes for PlainTextAttributes {
|
||||
fn extend_other(&mut self, _other: Self) {}
|
||||
}
|
||||
|
||||
impl OperationTransformable for PlainTextAttributes {
|
||||
impl OperationTransformable for PlainAttributes {
|
||||
fn compose(&self, _other: &Self) -> Result<Self, OTError> {
|
||||
Ok(self.clone())
|
||||
}
|
||||
|