Feat/restore revision (#1549)

* chore: write snapshot

* chore: add tests

* chore: sync close

* chore: restore from snapshot

* chore: delete invalid revisions after restoring from snapshot

* chore: create a default view if deserializing the view's revisions fails and there is no snapshot

* chore: auto generate snapshot

Co-authored-by: nathan <nathan@appflowy.io>
Nathan.fooo 2022-12-09 09:19:47 +08:00 committed by GitHub
parent a507fb8ec6
commit 8c225fe547
67 changed files with 1140 additions and 582 deletions
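
The commit message above describes the recovery policy this PR wires through every editor: each editor writes a snapshot when it closes, and when an object's revisions no longer deserialize, the latest snapshot (if any) is restored and the now-invalid revisions are discarded; with no snapshot, a default object is created instead. A self-contained sketch of that decision, using stand-in types rather than AppFlowy's real API:

    // Stand-in types; this illustrates the recovery policy only.
    struct Snapshot {
        rev_id: i64,
        data: Vec<u8>,
    }

    enum Recovery {
        // Restore from the snapshot, then delete revisions newer than its
        // rev_id: they are the ones that failed to deserialize.
        FromSnapshot(Snapshot),
        // No snapshot on disk: fall back to a freshly created default object
        // (e.g. a default grid view).
        DefaultObject,
    }

    fn recover(last_snapshot: Option<Snapshot>) -> Recovery {
        match last_snapshot {
            Some(snapshot) => Recovery::FromSnapshot(snapshot),
            None => Recovery::DefaultObject,
        }
    }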

View File

@@ -18,8 +18,10 @@ class TextCellBloc extends Bloc<TextCellEvent, TextCellState> {
        _startListening();
      },
      updateText: (text) {
-        cellController.saveCellData(text);
-        emit(state.copyWith(content: text));
+        if (state.content != text) {
+          cellController.saveCellData(text);
+          emit(state.copyWith(content: text));
+        }
      },
      didReceiveCellUpdate: (content) {
        emit(state.copyWith(content: content));

View File

@@ -65,10 +65,6 @@ class _GridTextCellState extends GridFocusNodeCellState<GridTextCell> {
      child: TextField(
        controller: _controller,
        focusNode: focusNode,
-        onSubmitted: (text) => _cellBloc.add(
-          TextCellEvent.updateText(text),
-        ),
-        onEditingComplete: () => focusNode.unfocus(),
        maxLines: null,
        style: Theme.of(context).textTheme.bodyMedium,
        decoration: InputDecoration(
@@ -99,4 +95,12 @@ class _GridTextCellState extends GridFocusNodeCellState<GridTextCell> {
  void onInsert(String value) {
    _cellBloc.add(TextCellEvent.updateText(value));
  }
+
+  @override
+  Future<void> focusChanged() {
+    _cellBloc.add(
+      TextCellEvent.updateText(_controller.text),
+    );
+    return super.focusChanged();
+  }
}

View File

@@ -133,35 +133,35 @@ void main() {
    workspaceSetting.latestView.id == document1.id;
  });
-  test('open latest grid test', () async {
+  test('open latest document test', () async {
    final app = await testContext.createTestApp();
    final bloc = AppBloc(app: app)..add(const AppEvent.initial());
    await blocResponseFuture();
-    bloc.add(AppEvent.createView("grid 1", GridPluginBuilder()));
+    bloc.add(AppEvent.createView("document 1", DocumentPluginBuilder()));
    await blocResponseFuture();
-    final grid1 = bloc.state.latestCreatedView;
-    assert(grid1!.name == "grid 1");
+    final document = bloc.state.latestCreatedView;
+    assert(document!.name == "document 1");
    bloc.add(AppEvent.createView("grid 2", GridPluginBuilder()));
    await blocResponseFuture();
-    final grid2 = bloc.state.latestCreatedView;
-    assert(grid2!.name == "grid 2");
+    final grid = bloc.state.latestCreatedView;
+    assert(grid!.name == "grid 2");
    var workspaceSetting = await FolderEventReadCurrentWorkspace()
        .send()
        .then((result) => result.fold((l) => l, (r) => throw Exception()));
-    workspaceSetting.latestView.id == grid1!.id;
+    workspaceSetting.latestView.id == grid!.id;
    // Open grid 1
    // ignore: unused_local_variable
-    final documentBloc = DocumentBloc(view: grid1)
+    final documentBloc = DocumentBloc(view: document!)
      ..add(const DocumentEvent.initial());
    await blocResponseFuture();
    workspaceSetting = await FolderEventReadCurrentWorkspace()
        .send()
        .then((result) => result.fold((l) => l, (r) => throw Exception()));
-    workspaceSetting.latestView.id == grid1.id;
+    workspaceSetting.latestView.id == document.id;
  });
}

View File

@@ -84,6 +84,17 @@ dependencies = [
 "syn",
]

+[[package]]
+name = "async-trait"
+version = "0.1.59"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
[[package]]
name = "atomic"
version = "0.5.1"
@@ -1085,6 +1096,7 @@ dependencies = [
 "flowy-error",
 "flowy-http-model",
 "flowy-revision",
+ "futures",
 "futures-util",
 "lib-infra",
 "lib-ws",
@@ -1768,6 +1780,7 @@ dependencies = [
name = "lib-infra"
version = "0.1.0"
dependencies = [
+ "async-trait",
 "bytes",
 "chrono",
 "futures-core",
@@ -2469,11 +2482,11 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
-version = "1.0.36"
+version = "1.0.47"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
+checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
dependencies = [
- "unicode-xid",
+ "unicode-ident",
]
@@ -3155,13 +3168,13 @@ dependencies = [
[[package]]
name = "syn"
-version = "1.0.85"
+version = "1.0.105"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7"
+checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908"
dependencies = [
 "proc-macro2",
 "quote",
- "unicode-xid",
+ "unicode-ident",
]
@@ -3602,6 +3615,12 @@ version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f"

+[[package]]
+name = "unicode-ident"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
+
[[package]]
name = "unicode-normalization"
version = "0.1.19"
@@ -3623,12 +3642,6 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"

-[[package]]
-name = "unicode-xid"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
-
[[package]]
name = "url"
version = "2.2.2"

View File

@@ -0,0 +1,2 @@
+-- This file should undo anything in `up.sql`
+DROP TABLE grid_rev_snapshot;

View File

@@ -0,0 +1,9 @@
+-- Your SQL goes here
+CREATE TABLE grid_rev_snapshot (
+    snapshot_id TEXT NOT NULL PRIMARY KEY DEFAULT '',
+    object_id TEXT NOT NULL DEFAULT '',
+    rev_id BIGINT NOT NULL DEFAULT 0,
+    base_rev_id BIGINT NOT NULL DEFAULT 0,
+    timestamp BIGINT NOT NULL DEFAULT 0,
+    data BLOB NOT NULL DEFAULT (x'')
+);
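
The `snapshot_id` primary key is a composite string derived from the object id and revision id; this mirrors `gen_snapshot_id` in the grid snapshot persistence added later in this commit, where the object ids take forms like "grid:{grid_id}" and "grid_block:{block_id}". For illustration:

    // Mirrors gen_snapshot_id further down in this commit.
    fn gen_snapshot_id(object_id: &str, rev_id: i64) -> String {
        format!("{}:{}", object_id, rev_id)
    }

    fn main() {
        // e.g. the snapshot of revision 42 for grid `abc`:
        assert_eq!(gen_snapshot_id("grid:abc", 42), "grid:abc:42");
    }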

View File

@@ -37,7 +37,7 @@ macro_rules! diesel_insert_table {
    ) => {
        {
            let _ = diesel::insert_into($table_name::table)
-                .values($table.clone())
+                .values($table)
                // .on_conflict($table_name::dsl::id)
                // .do_update()
                // .set(WorkspaceTableChangeset::from_table(workspace_table))
@@ -61,6 +61,21 @@ macro_rules! diesel_record_count {
    };
}

+#[macro_export]
+macro_rules! diesel_revision_record_count {
+    (
+        $table_name:expr,
+        $filter:expr,
+        $connection:expr
+    ) => {
+        $table_name
+            .filter($filter)
+            .count()
+            .get_result($connection)
+            .unwrap_or(0)
+    };
+}
+
#[macro_export]
macro_rules! diesel_update_table {
    (
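
For reference, a call such as diesel_revision_record_count!(dsl::grid_rev_snapshot, dsl::object_id.eq(object_id), &*conn) would expand to roughly the following; the call site is assumed for illustration, not taken from this commit:

    // Approximate expansion of the macro above, with an assumed filter.
    dsl::grid_rev_snapshot
        .filter(dsl::object_id.eq(object_id))
        .count()
        .get_result::<i64>(&*conn)
        .unwrap_or(0)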

View File

@@ -1,4 +1,6 @@
-table! {
+// @generated automatically by Diesel CLI.
+
+diesel::table! {
    app_table (id) {
        id -> Text,
        workspace_id -> Text,
@@ -13,7 +15,7 @@ table! {
    }
}

-table! {
+diesel::table! {
    document_rev_table (id) {
        id -> Integer,
        document_id -> Text,
@@ -24,14 +26,14 @@ table! {
    }
}

-table! {
+diesel::table! {
    grid_block_index_table (row_id) {
        row_id -> Text,
        block_id -> Text,
    }
}

-table! {
+diesel::table! {
    grid_meta_rev_table (id) {
        id -> Integer,
        object_id -> Text,
@@ -42,7 +44,18 @@ table! {
    }
}

-table! {
+diesel::table! {
+    grid_rev_snapshot (snapshot_id) {
+        snapshot_id -> Text,
+        object_id -> Text,
+        rev_id -> BigInt,
+        base_rev_id -> BigInt,
+        timestamp -> BigInt,
+        data -> Binary,
+    }
+}
+
+diesel::table! {
    grid_rev_table (id) {
        id -> Integer,
        object_id -> Text,
@@ -53,7 +66,7 @@ table! {
    }
}

-table! {
+diesel::table! {
    grid_view_rev_table (id) {
        id -> Integer,
        object_id -> Text,
@@ -64,14 +77,14 @@ table! {
    }
}

-table! {
+diesel::table! {
    kv_table (key) {
        key -> Text,
        value -> Binary,
    }
}

-table! {
+diesel::table! {
    rev_snapshot (id) {
        id -> Integer,
        object_id -> Text,
@@ -80,7 +93,7 @@ table! {
    }
}

-table! {
+diesel::table! {
    rev_table (id) {
        id -> Integer,
        doc_id -> Text,
@@ -92,7 +105,7 @@ table! {
    }
}

-table! {
+diesel::table! {
    trash_table (id) {
        id -> Text,
        name -> Text,
@@ -103,7 +116,7 @@ table! {
    }
}

-table! {
+diesel::table! {
    user_table (id) {
        id -> Text,
        name -> Text,
@@ -114,7 +127,7 @@ table! {
    }
}

-table! {
+diesel::table! {
    view_table (id) {
        id -> Text,
        belong_to_id -> Text,
@@ -130,7 +143,7 @@ table! {
    }
}

-table! {
+diesel::table! {
    workspace_table (id) {
        id -> Text,
        name -> Text,
@@ -142,11 +155,12 @@ table! {
    }
}

-allow_tables_to_appear_in_same_query!(
+diesel::allow_tables_to_appear_in_same_query!(
    app_table,
    document_rev_table,
    grid_block_index_table,
    grid_meta_rev_table,
+    grid_rev_snapshot,
    grid_rev_table,
    grid_view_rev_table,
    kv_table,

View File

@@ -95,8 +95,8 @@ impl RevisionObjectSerializer for DocumentRevisionSerde {
    }
}

-pub(crate) struct DocumentRevisionCompress();
-impl RevisionMergeable for DocumentRevisionCompress {
+pub(crate) struct DocumentRevisionMergeable();
+impl RevisionMergeable for DocumentRevisionMergeable {
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
        DocumentRevisionSerde::combine_revisions(revisions)
    }

View File

@@ -8,6 +8,7 @@ use flowy_database::ConnectionPool;
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_http_model::ws_data::ServerRevisionWSData;
use flowy_revision::{RevisionCloudService, RevisionManager};
+use lib_infra::async_trait::async_trait;
use lib_infra::future::FutureResult;
use lib_ot::core::Transaction;
use lib_ws::WSConnectState;
@@ -64,11 +65,15 @@ impl AppFlowyDocumentEditor {
    }

    pub async fn duplicate_document(&self) -> FlowyResult<String> {
-        let revisions = self.rev_manager.load_revisions().await?;
-        let transaction = make_transaction_from_revisions(&revisions)?;
+        let transaction = self.document_transaction().await?;
        let json = transaction.to_json()?;
        Ok(json)
    }
+
+    pub async fn document_transaction(&self) -> FlowyResult<Transaction> {
+        let revisions = self.rev_manager.load_revisions().await?;
+        make_transaction_from_revisions(&revisions)
+    }
}

fn spawn_edit_queue(
@@ -82,13 +87,12 @@ fn spawn_edit_queue(
    sender
}

+#[async_trait]
impl DocumentEditor for Arc<AppFlowyDocumentEditor> {
    #[tracing::instrument(name = "close document editor", level = "trace", skip_all)]
-    fn close(&self) {
-        let rev_manager = self.rev_manager.clone();
-        tokio::spawn(async move {
-            rev_manager.close().await;
-        });
+    async fn close(&self) {
+        self.rev_manager.generate_snapshot().await;
+        self.rev_manager.close().await;
    }

    fn export(&self) -> FutureResult<String, FlowyError> {

View File

@@ -3,7 +3,7 @@ use crate::DocumentUser;
use async_stream::stream;
use bytes::Bytes;
use flowy_error::FlowyError;
-use flowy_http_model::revision::{RevId, Revision};
+use flowy_http_model::revision::RevId;
use flowy_revision::RevisionManager;
use futures::stream::StreamExt;
use lib_ot::core::Transaction;
@@ -78,9 +78,7 @@ impl DocumentQueue {
    #[tracing::instrument(level = "trace", skip(self, transaction, md5), err)]
    async fn save_local_operations(&self, transaction: Transaction, md5: String) -> Result<RevId, FlowyError> {
        let bytes = Bytes::from(transaction.to_bytes()?);
-        let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
-        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
-        let _ = self.rev_manager.add_local_revision(&revision).await?;
+        let rev_id = self.rev_manager.add_local_revision(bytes, md5).await?;
        Ok(rev_id.into())
    }
}
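
The call sites above show the reshaped API: callers now hand the serialized bytes and md5 to the revision manager, which constructs the `Revision` itself. A sketch of what that entails, inferred from the call sites in this commit rather than copied from flowy-revision:

    // Hypothetical sketch; the real method lives in flowy-revision's
    // RevisionManager. Signature inferred from the callers above.
    pub async fn add_local_revision(&self, bytes: Bytes, md5: String) -> FlowyResult<i64> {
        // Rev-id pairing is now owned by the manager instead of every caller.
        let (base_rev_id, rev_id) = self.next_rev_id_pair();
        let revision = Revision::new(&self.object_id, base_rev_id, rev_id, bytes, md5);
        self.rev_persistence.add_local_revision(&revision).await?; // assumed internal hook
        Ok(rev_id)
    }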

View File

@@ -1,20 +1,20 @@
-use crate::editor::{initial_document_content, AppFlowyDocumentEditor, DocumentRevisionCompress};
+use crate::editor::{initial_document_content, AppFlowyDocumentEditor, DocumentRevisionMergeable};
use crate::entities::{DocumentVersionPB, EditParams};
-use crate::old_editor::editor::{DeltaDocumentEditor, DeltaDocumentRevisionCompress};
+use crate::old_editor::editor::{DeltaDocumentEditor, DeltaDocumentRevisionMergeable};
use crate::services::rev_sqlite::{SQLiteDeltaDocumentRevisionPersistence, SQLiteDocumentRevisionPersistence};
use crate::services::DocumentPersistence;
use crate::{errors::FlowyError, DocumentCloudService};
use bytes::Bytes;
use flowy_database::ConnectionPool;
use flowy_error::FlowyResult;
use flowy_http_model::util::md5;
use flowy_http_model::{document::DocumentIdPB, revision::Revision, ws_data::ServerRevisionWSData};
use flowy_revision::{
-    RevisionCloudService, RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, RevisionWebSocket,
-    SQLiteRevisionSnapshotPersistence,
+    PhantomSnapshotPersistence, RevisionCloudService, RevisionManager, RevisionPersistence,
+    RevisionPersistenceConfiguration, RevisionWebSocket,
};
use flowy_sync::client_document::initial_delta_document_content;
+use lib_infra::async_trait::async_trait;
use lib_infra::future::FutureResult;
use lib_infra::ref_map::{RefCountHashMap, RefCountValue};
use lib_ws::WSConnectState;
@@ -32,9 +32,10 @@ pub trait DocumentDatabase: Send + Sync {
    fn db_pool(&self) -> Result<Arc<ConnectionPool>, FlowyError>;
}

+#[async_trait]
pub trait DocumentEditor: Send + Sync {
-    /// Called when the document gets closed
-    fn close(&self);
+    /// Called when the document gets closed
+    async fn close(&self);

    /// Exports the document content. The content is encoded in the corresponding
    /// editor data format.
@@ -129,7 +130,7 @@ impl DocumentManager {
    pub async fn close_document_editor<T: AsRef<str>>(&self, editor_id: T) -> Result<(), FlowyError> {
        let editor_id = editor_id.as_ref();
        tracing::Span::current().record("editor_id", &editor_id);
-        self.editor_map.write().await.remove(editor_id);
+        self.editor_map.write().await.remove(editor_id).await;
        Ok(())
    }
@@ -254,18 +255,15 @@ impl DocumentManager {
        pool: Arc<ConnectionPool>,
    ) -> Result<RevisionManager<Arc<ConnectionPool>>, FlowyError> {
        let user_id = self.user.user_id()?;
-        let disk_cache = SQLiteDocumentRevisionPersistence::new(&user_id, pool.clone());
+        let disk_cache = SQLiteDocumentRevisionPersistence::new(&user_id, pool);
        let configuration = RevisionPersistenceConfiguration::new(100, true);
        let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache, configuration);
-        // let history_persistence = SQLiteRevisionHistoryPersistence::new(doc_id, pool.clone());
-        let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(doc_id, pool);
        Ok(RevisionManager::new(
            &user_id,
            doc_id,
            rev_persistence,
-            DocumentRevisionCompress(),
-            // history_persistence,
-            snapshot_persistence,
+            DocumentRevisionMergeable(),
+            PhantomSnapshotPersistence(),
        ))
    }
@@ -275,18 +273,15 @@ impl DocumentManager {
        pool: Arc<ConnectionPool>,
    ) -> Result<RevisionManager<Arc<ConnectionPool>>, FlowyError> {
        let user_id = self.user.user_id()?;
-        let disk_cache = SQLiteDeltaDocumentRevisionPersistence::new(&user_id, pool.clone());
+        let disk_cache = SQLiteDeltaDocumentRevisionPersistence::new(&user_id, pool);
        let configuration = RevisionPersistenceConfiguration::new(100, true);
        let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache, configuration);
-        // let history_persistence = SQLiteRevisionHistoryPersistence::new(doc_id, pool.clone());
-        let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(doc_id, pool);
        Ok(RevisionManager::new(
            &user_id,
            doc_id,
            rev_persistence,
-            DeltaDocumentRevisionCompress(),
-            // history_persistence,
-            snapshot_persistence,
+            DeltaDocumentRevisionMergeable(),
+            PhantomSnapshotPersistence(),
        ))
    }
}
@@ -320,9 +315,10 @@ impl RevisionCloudService for DocumentRevisionCloudService {
#[derive(Clone)]
struct RefCountDocumentHandler(Arc<dyn DocumentEditor>);

+#[async_trait]
impl RefCountValue for RefCountDocumentHandler {
-    fn did_remove(&self) {
-        self.0.close();
+    async fn did_remove(&self) {
+        self.0.close().await;
    }
}
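
Closing an editor is now async end to end: `DocumentEditor::close` is awaited, so the ref-count map that owns the editors must await `did_remove` as well. A minimal sketch of what the async-ified `RefCountValue` trait in lib-infra presumably looks like after this change (an assumption, not the verbatim definition):

    use lib_infra::async_trait::async_trait;

    // Sketch of the trait; making did_remove async is what lets an editor
    // flush a snapshot before it is dropped from the RefCountHashMap.
    #[async_trait]
    pub trait RefCountValue {
        async fn did_remove(&self);
    }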

View File

@@ -13,6 +13,7 @@ use flowy_revision::{
    RevisionWebSocket,
};
use flowy_sync::{errors::CollaborateResult, util::make_operations_from_revisions};
+use lib_infra::async_trait::async_trait;
use lib_infra::future::FutureResult;
use lib_ot::core::{AttributeEntry, AttributeHashMap};
use lib_ot::{
@@ -145,8 +146,9 @@ impl DeltaDocumentEditor {
    }
}

+#[async_trait]
impl DocumentEditor for Arc<DeltaDocumentEditor> {
-    fn close(&self) {
+    async fn close(&self) {
        #[cfg(feature = "sync")]
        self.ws_manager.stop();
    }
@@ -267,8 +269,8 @@ impl RevisionObjectSerializer for DeltaDocumentRevisionSerde {
    }
}

-pub(crate) struct DeltaDocumentRevisionCompress();
-impl RevisionMergeable for DeltaDocumentRevisionCompress {
+pub(crate) struct DeltaDocumentRevisionMergeable();
+impl RevisionMergeable for DeltaDocumentRevisionMergeable {
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
        DeltaDocumentRevisionSerde::combine_revisions(revisions)
    }

View File

@@ -3,7 +3,7 @@ use crate::DocumentUser;
use async_stream::stream;
use flowy_database::ConnectionPool;
use flowy_error::FlowyError;
-use flowy_http_model::revision::{RevId, Revision};
+use flowy_http_model::revision::RevId;
use flowy_revision::{RevisionMD5, RevisionManager, TransformOperations};
use flowy_sync::{
    client_document::{history::UndoResult, ClientDocument},
@@ -178,9 +178,7 @@ impl EditDocumentQueue {
    async fn save_local_operations(&self, operations: DeltaTextOperations, md5: String) -> Result<RevId, FlowyError> {
        let bytes = operations.json_bytes();
-        let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
-        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
-        let _ = self.rev_manager.add_local_revision(&revision).await?;
+        let rev_id = self.rev_manager.add_local_revision(bytes, md5).await?;
        Ok(rev_id.into())
    }
}

View File

@@ -99,7 +99,6 @@ pub struct DeltaRevisionSql {}
impl DeltaRevisionSql {
    fn create(revision_records: Vec<SyncRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
        // Batch insert: https://diesel.rs/guides/all-about-inserts.html
-
        let records = revision_records
            .into_iter()
            .map(|record| {

View File

@@ -1,6 +1,6 @@
use crate::entities::view::ViewDataFormatPB;
use crate::entities::{ViewLayoutTypePB, ViewPB};
-use crate::services::folder_editor::FolderRevisionCompress;
+use crate::services::folder_editor::FolderRevisionMergeable;
use crate::{
    dart_notification::{send_dart_notification, FolderNotification},
    entities::workspace::RepeatedWorkspacePB,
@@ -15,8 +15,8 @@ use bytes::Bytes;
use flowy_document::editor::initial_read_me;
use flowy_error::FlowyError;
use flowy_revision::{
-    RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, RevisionWebSocket,
-    SQLiteRevisionSnapshotPersistence,
+    PhantomSnapshotPersistence, RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration,
+    RevisionWebSocket,
};
use folder_rev_model::user_default;
use lazy_static::lazy_static;
@@ -171,16 +171,13 @@ impl FolderManager {
        let disk_cache = SQLiteFolderRevisionPersistence::new(user_id, pool.clone());
        let configuration = RevisionPersistenceConfiguration::new(100, false);
        let rev_persistence = RevisionPersistence::new(user_id, object_id, disk_cache, configuration);
-        let rev_compactor = FolderRevisionCompress();
-        // let history_persistence = SQLiteRevisionHistoryPersistence::new(object_id, pool.clone());
-        let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(object_id, pool);
+        let rev_compactor = FolderRevisionMergeable();
        let rev_manager = RevisionManager::new(
            user_id,
            folder_id.as_ref(),
            rev_persistence,
            rev_compactor,
-            // history_persistence,
-            snapshot_persistence,
+            PhantomSnapshotPersistence(),
        );
        let folder_editor = FolderEditor::new(user_id, &folder_id, token, rev_manager, self.web_socket.clone()).await?;

View File

@@ -82,10 +82,11 @@ impl FolderEditor {
    pub(crate) fn apply_change(&self, change: FolderChangeset) -> FlowyResult<()> {
        let FolderChangeset { operations: delta, md5 } = change;
-        let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
        let delta_data = delta.json_bytes();
-        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
-        let _ = futures::executor::block_on(async { self.rev_manager.add_local_revision(&revision).await })?;
+        let rev_manager = self.rev_manager.clone();
+        tokio::spawn(async move {
+            let _ = rev_manager.add_local_revision(delta_data, md5).await;
+        });
        Ok(())
    }
@@ -113,8 +114,8 @@ impl RevisionObjectSerializer for FolderRevisionSerde {
    }
}

-pub struct FolderRevisionCompress();
-impl RevisionMergeable for FolderRevisionCompress {
+pub struct FolderRevisionMergeable();
+impl RevisionMergeable for FolderRevisionMergeable {
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
        FolderRevisionSerde::combine_revisions(revisions)
    }

View File

@@ -15,7 +15,7 @@ impl AppTableSql {
    pub(crate) fn create_app(app_rev: AppRevision, conn: &SqliteConnection) -> Result<(), FlowyError> {
        let app_table = AppTable::new(app_rev);
        match diesel_record_count!(app_table, &app_table.id, conn) {
-            0 => diesel_insert_table!(app_table, &app_table, conn),
+            0 => diesel_insert_table!(app_table, app_table.clone(), conn),
            _ => {
                let changeset = AppChangeset::from_table(app_table);
                diesel_update_table!(app_table, changeset, conn)

View File

@@ -13,7 +13,7 @@ impl TrashTableSql {
        for trash_rev in trashes {
            let trash_table: TrashTable = trash_rev.into();
            match diesel_record_count!(trash_table, &trash_table.id, conn) {
-                0 => diesel_insert_table!(trash_table, &trash_table, conn),
+                0 => diesel_insert_table!(trash_table, trash_table.clone(), conn),
                _ => {
                    let changeset = TrashChangeset::from(trash_table);
                    diesel_update_table!(trash_table, changeset, conn)

View File

@@ -21,7 +21,7 @@ impl ViewTableSql {
    pub(crate) fn create_view(view_rev: ViewRevision, conn: &SqliteConnection) -> Result<(), FlowyError> {
        let view_table = ViewTable::new(view_rev);
        match diesel_record_count!(view_table, &view_table.id, conn) {
-            0 => diesel_insert_table!(view_table, &view_table, conn),
+            0 => diesel_insert_table!(view_table, view_table.clone(), conn),
            _ => {
                let changeset = ViewChangeset::from_table(view_table);
                diesel_update_table!(view_table, changeset, conn)

View File

@@ -15,7 +15,7 @@ impl WorkspaceTableSql {
    ) -> Result<(), FlowyError> {
        let table = WorkspaceTable::new(workspace_rev, user_id);
        match diesel_record_count!(workspace_table, &table.id, conn) {
-            0 => diesel_insert_table!(workspace_table, &table, conn),
+            0 => diesel_insert_table!(workspace_table, table.clone(), conn),
            _ => {
                let changeset = WorkspaceChangeset::from_table(table);
                diesel_update_table!(workspace_table, changeset, conn);

View File

@@ -208,7 +208,7 @@ pub(crate) async fn create_field_type_option_data_handler(
    let params: CreateFieldParams = data.into_inner().try_into()?;
    let editor = manager.get_grid_editor(&params.grid_id).await?;
    let field_rev = editor
-        .create_new_field_rev(&params.field_type, params.type_option_data)
+        .create_new_field_rev_with_type_option(&params.field_type, params.type_option_data)
        .await?;
    let field_type: FieldType = field_rev.ty.into();
    let type_option_data = get_type_option_data(&field_rev, &field_type).await?;

View File

@@ -1,23 +1,19 @@
use crate::entities::GridLayout;
-use crate::services::grid_editor::{GridRevisionCompress, GridRevisionEditor};
+use crate::services::grid_editor::{GridRevisionEditor, GridRevisionMergeable};
use crate::services::persistence::block_index::BlockIndexCache;
use crate::services::persistence::kv::GridKVPersistence;
use crate::services::persistence::migration::GridMigration;
-use crate::services::persistence::rev_sqlite::SQLiteGridRevisionPersistence;
+use crate::services::persistence::rev_sqlite::{SQLiteGridRevisionPersistence, SQLiteGridRevisionSnapshotPersistence};
use crate::services::persistence::GridDatabase;
use crate::services::view_editor::make_grid_view_rev_manager;
use bytes::Bytes;
use flowy_database::ConnectionPool;
use flowy_error::{FlowyError, FlowyResult};
use flowy_http_model::revision::Revision;
-use flowy_revision::{
-    RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, RevisionWebSocket,
-    SQLiteRevisionSnapshotPersistence,
-};
+use flowy_revision::{RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, RevisionWebSocket};
use flowy_sync::client_grid::{make_grid_block_operations, make_grid_operations, make_grid_view_operations};
use grid_rev_model::{BuildGridContext, GridRevision, GridViewRevision};
+use lib_infra::async_trait::async_trait;
use lib_infra::ref_map::{RefCountHashMap, RefCountValue};

use crate::services::block_manager::make_grid_block_rev_manager;
@@ -108,7 +104,7 @@ impl GridManager {
        let grid_id = grid_id.as_ref();
        tracing::Span::current().record("grid_id", &grid_id);
-        self.grid_editors.write().await.remove(grid_id);
+        self.grid_editors.write().await.remove(grid_id).await;
        // self.task_scheduler.write().await.unregister_handler(grid_id);
        Ok(())
    }
@@ -152,18 +148,25 @@ impl GridManager {
        Ok(grid_editor)
    }

+    #[tracing::instrument(level = "trace", skip(self, pool), err)]
    pub fn make_grid_rev_manager(
        &self,
        grid_id: &str,
        pool: Arc<ConnectionPool>,
    ) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
        let user_id = self.grid_user.user_id()?;
+
+        // Create revision persistence
        let disk_cache = SQLiteGridRevisionPersistence::new(&user_id, pool.clone());
-        let configuration = RevisionPersistenceConfiguration::new(2, false);
+        let configuration = RevisionPersistenceConfiguration::new(4, false);
        let rev_persistence = RevisionPersistence::new(&user_id, grid_id, disk_cache, configuration);
-        let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(grid_id, pool);
-        let rev_compactor = GridRevisionCompress();
-        let rev_manager = RevisionManager::new(&user_id, grid_id, rev_persistence, rev_compactor, snapshot_persistence);
+
+        // Create snapshot persistence
+        let snapshot_object_id = format!("grid:{}", grid_id);
+        let snapshot_persistence = SQLiteGridRevisionSnapshotPersistence::new(&snapshot_object_id, pool);
+
+        let rev_compress = GridRevisionMergeable();
+        let rev_manager = RevisionManager::new(&user_id, grid_id, rev_persistence, rev_compress, snapshot_persistence);
        Ok(rev_manager)
    }
}
@@ -220,8 +223,9 @@ pub async fn make_grid_view_data(
    Ok(grid_rev_delta_bytes)
}

+#[async_trait]
impl RefCountValue for GridRevisionEditor {
-    fn did_remove(&self) {
-        self.close();
+    async fn did_remove(&self) {
+        self.close().await;
    }
}

View File

@@ -47,6 +47,11 @@ impl GridBlockRevisionEditor {
        })
    }

+    pub async fn close(&self) {
+        self.rev_manager.generate_snapshot().await;
+        self.rev_manager.close().await;
+    }
+
    pub async fn duplicate_block(&self, duplicated_block_id: &str) -> GridBlockRevision {
        self.pad.read().await.duplicate_data(duplicated_block_id).await
    }
@@ -167,10 +172,8 @@ impl GridBlockRevisionEditor {
    async fn apply_change(&self, change: GridBlockRevisionChangeset) -> FlowyResult<()> {
        let GridBlockRevisionChangeset { operations: delta, md5 } = change;
-        let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
-        let delta_data = delta.json_bytes();
-        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
-        let _ = self.rev_manager.add_local_revision(&revision).await?;
+        let data = delta.json_bytes();
+        let _ = self.rev_manager.add_local_revision(data, md5).await?;
        Ok(())
    }
}
@@ -203,8 +206,8 @@ impl RevisionObjectSerializer for GridBlockRevisionSerde {
    }
}

-pub struct GridBlockRevisionCompress();
-impl RevisionMergeable for GridBlockRevisionCompress {
+pub struct GridBlockRevisionMergeable();
+impl RevisionMergeable for GridBlockRevisionMergeable {
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
        GridBlockRevisionSerde::combine_revisions(revisions)
    }

View File

@@ -1,16 +1,16 @@
use crate::dart_notification::{send_dart_notification, GridDartNotification};
use crate::entities::{CellChangesetPB, GridBlockChangesetPB, InsertedRowPB, RowPB, UpdatedRowPB};
use crate::manager::GridUser;
-use crate::services::block_editor::{GridBlockRevisionCompress, GridBlockRevisionEditor};
+use crate::services::block_editor::{GridBlockRevisionEditor, GridBlockRevisionMergeable};
use crate::services::persistence::block_index::BlockIndexCache;
-use crate::services::persistence::rev_sqlite::SQLiteGridBlockRevisionPersistence;
+use crate::services::persistence::rev_sqlite::{
+    SQLiteGridBlockRevisionPersistence, SQLiteGridRevisionSnapshotPersistence,
+};
use crate::services::row::{block_from_row_orders, make_row_from_row_rev, GridBlock};
use dashmap::DashMap;
use flowy_database::ConnectionPool;
use flowy_error::FlowyResult;
-use flowy_revision::{
-    RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, SQLiteRevisionSnapshotPersistence,
-};
+use flowy_revision::{RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration};
use grid_rev_model::{GridBlockMetaRevision, GridBlockMetaRevisionChangeset, RowChangeset, RowRevision};
use std::borrow::Cow;
use std::collections::HashMap;
@@ -39,6 +39,12 @@ impl GridBlockManager {
        Ok(manager)
    }

+    pub async fn close(&self) {
+        for block_editor in self.block_editors.iter() {
+            block_editor.close().await;
+        }
+    }
+
    // #[tracing::instrument(level = "trace", skip(self))]
    pub(crate) async fn get_block_editor(&self, block_id: &str) -> FlowyResult<Arc<GridBlockRevisionEditor>> {
        debug_assert!(!block_id.is_empty());
@@ -275,12 +281,18 @@ pub fn make_grid_block_rev_manager(
    block_id: &str,
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
    let user_id = user.user_id()?;
+
+    // Create revision persistence
    let pool = user.db_pool()?;
    let disk_cache = SQLiteGridBlockRevisionPersistence::new(&user_id, pool.clone());
    let configuration = RevisionPersistenceConfiguration::new(4, false);
    let rev_persistence = RevisionPersistence::new(&user_id, block_id, disk_cache, configuration);
-    let rev_compactor = GridBlockRevisionCompress();
-    let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(block_id, pool);
-    let rev_manager = RevisionManager::new(&user_id, block_id, rev_persistence, rev_compactor, snapshot_persistence);
+
+    // Create snapshot persistence
+    let snapshot_object_id = format!("grid_block:{}", block_id);
+    let snapshot_persistence = SQLiteGridRevisionSnapshotPersistence::new(&snapshot_object_id, pool);
+
+    let rev_compress = GridBlockRevisionMergeable();
+    let rev_manager = RevisionManager::new(&user_id, block_id, rev_persistence, rev_compress, snapshot_persistence);

    Ok(rev_manager)
}

View File

@@ -61,11 +61,8 @@ impl CellDataOperation<SelectOptionIds, SelectOptionCellChangeset> for Checklist
            .filter(|insert_option_id| self.options.iter().any(|option| &option.id == insert_option_id))
            .collect::<Vec<String>>();

-        let new_cell_data: String;
        match cell_rev {
-            None => {
-                new_cell_data = SelectOptionIds::from(insert_option_ids).to_string();
-            }
+            None => Ok(SelectOptionIds::from(insert_option_ids).to_string()),
            Some(cell_rev) => {
                let cell_data = get_cell_data(&cell_rev);
                let mut select_ids: SelectOptionIds = cell_data.into();
@@ -79,12 +76,9 @@ impl CellDataOperation<SelectOptionIds, SelectOptionCellChangeset> for Checklist
                    select_ids.retain(|id| id != &delete_option_id);
                }

-                new_cell_data = select_ids.to_string();
+                Ok(select_ids.to_string())
            }
        }
-
-        tracing::trace!("checklist's cell data: {}", &new_cell_data);
-        Ok(new_cell_data)
    }
}

View File

@@ -60,7 +60,11 @@ impl FilterController {
    }

    pub async fn close(&self) {
-        self.task_scheduler.write().await.unregister_handler(&self.handler_id);
+        self.task_scheduler
+            .write()
+            .await
+            .unregister_handler(&self.handler_id)
+            .await;
    }

    #[tracing::instrument(name = "schedule_filter_task", level = "trace", skip(self))]

View File

@@ -26,7 +26,7 @@ use flowy_sync::errors::{CollaborateError, CollaborateResult};
use flowy_sync::util::make_operations_from_revisions;
use flowy_task::TaskDispatcher;
use grid_rev_model::*;
-use lib_infra::future::{to_future, FutureResult};
+use lib_infra::future::{to_fut, FutureResult};
use lib_ot::core::EmptyAttributes;
use std::collections::HashMap;
use std::sync::Arc;
@@ -87,14 +87,11 @@ impl GridRevisionEditor {
    }

    #[tracing::instrument(name = "close grid editor", level = "trace", skip_all)]
-    pub fn close(&self) {
-        let rev_manager = self.rev_manager.clone();
-        let view_manager = self.view_manager.clone();
-        let view_id = self.grid_id.clone();
-        tokio::spawn(async move {
-            rev_manager.close().await;
-            view_manager.close(&view_id).await;
-        });
+    pub async fn close(&self) {
+        self.block_manager.close().await;
+        self.rev_manager.generate_snapshot().await;
+        self.rev_manager.close().await;
+        self.view_manager.close(&self.grid_id).await;
    }

    /// Save the type-option data to disk and send a `GridDartNotification::DidUpdateField` notification
@@ -154,7 +151,15 @@ impl GridRevisionEditor {
        Ok(field_rev)
    }

-    pub async fn create_new_field_rev(
+    pub async fn create_new_field_rev(&self, field_rev: FieldRevision) -> FlowyResult<()> {
+        let field_id = field_rev.id.clone();
+        let _ = self.modify(|grid| Ok(grid.create_field_rev(field_rev, None)?)).await?;
+        let _ = self.notify_did_insert_grid_field(&field_id).await?;
+
+        Ok(())
+    }
+
+    pub async fn create_new_field_rev_with_type_option(
        &self,
        field_type: &FieldType,
        type_option_data: Option<Vec<u8>>,
@@ -627,7 +632,7 @@ impl GridRevisionEditor {
        let block_manager = self.block_manager.clone();
        self.view_manager
            .move_group_row(row_rev, to_group_id, to_row_id.clone(), |row_changeset| {
-                to_future(async move {
+                to_fut(async move {
                    tracing::trace!("Row data changed: {:?}", row_changeset);
                    let cell_changesets = row_changeset
                        .cell_by_field_id
@@ -752,10 +757,8 @@ impl GridRevisionEditor {
    async fn apply_change(&self, change: GridRevisionChangeset) -> FlowyResult<()> {
        let GridRevisionChangeset { operations: delta, md5 } = change;
-        let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
-        let delta_data = delta.json_bytes();
-        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
-        let _ = self.rev_manager.add_local_revision(&revision).await?;
+        let data = delta.json_bytes();
+        let _ = self.rev_manager.add_local_revision(data, md5).await?;
        Ok(())
    }
@@ -810,6 +813,10 @@ impl GridRevisionEditor {
    pub fn rev_manager(&self) -> Arc<RevisionManager<Arc<ConnectionPool>>> {
        self.rev_manager.clone()
    }
+
+    pub fn grid_pad(&self) -> Arc<RwLock<GridRevisionPad>> {
+        self.grid_pad.clone()
+    }
}

pub struct GridRevisionSerde();
@@ -839,9 +846,9 @@ impl RevisionCloudService for GridRevisionCloudService {
    }
}

-pub struct GridRevisionCompress();
-impl RevisionMergeable for GridRevisionCompress {
+pub struct GridRevisionMergeable();
+impl RevisionMergeable for GridRevisionMergeable {
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
        GridRevisionSerde::combine_revisions(revisions)
    }

View File

@@ -4,7 +4,7 @@ use crate::services::view_editor::GridViewEditorDelegate;
use flowy_sync::client_grid::GridRevisionPad;
use flowy_task::TaskDispatcher;
use grid_rev_model::{FieldRevision, RowRevision};
-use lib_infra::future::{to_future, Fut};
+use lib_infra::future::{to_fut, Fut};
use std::sync::Arc;
use tokio::sync::RwLock;
@@ -17,7 +17,7 @@ pub(crate) struct GridViewEditorDelegateImpl {
impl GridViewEditorDelegate for GridViewEditorDelegateImpl {
    fn get_field_revs(&self, field_ids: Option<Vec<String>>) -> Fut<Vec<Arc<FieldRevision>>> {
        let pad = self.pad.clone();
-        to_future(async move {
+        to_fut(async move {
            match pad.read().await.get_field_revs(field_ids) {
                Ok(field_revs) => field_revs,
                Err(e) => {
@@ -31,19 +31,19 @@ impl GridViewEditorDelegate for GridViewEditorDelegateImpl {
    fn get_field_rev(&self, field_id: &str) -> Fut<Option<Arc<FieldRevision>>> {
        let pad = self.pad.clone();
        let field_id = field_id.to_owned();
-        to_future(async move { Some(pad.read().await.get_field_rev(&field_id)?.1.clone()) })
+        to_fut(async move { Some(pad.read().await.get_field_rev(&field_id)?.1.clone()) })
    }

    fn index_of_row(&self, row_id: &str) -> Fut<Option<usize>> {
        let block_manager = self.block_manager.clone();
        let row_id = row_id.to_owned();
-        to_future(async move { block_manager.index_of_row(&row_id).await })
+        to_fut(async move { block_manager.index_of_row(&row_id).await })
    }

    fn get_row_rev(&self, row_id: &str) -> Fut<Option<(usize, Arc<RowRevision>)>> {
        let block_manager = self.block_manager.clone();
        let row_id = row_id.to_owned();
-        to_future(async move {
+        to_fut(async move {
            match block_manager.get_row_rev(&row_id).await {
                Ok(indexed_row) => indexed_row,
                Err(_) => None,
@@ -54,7 +54,7 @@ impl GridViewEditorDelegate for GridViewEditorDelegateImpl {

    fn get_row_revs(&self) -> Fut<Vec<Arc<RowRevision>>> {
        let block_manager = self.block_manager.clone();
-        to_future(async move {
+        to_fut(async move {
            let blocks = block_manager.get_blocks(None).await.unwrap();
            blocks
                .into_iter()
@@ -65,7 +65,7 @@ impl GridViewEditorDelegate for GridViewEditorDelegateImpl {
    fn get_blocks(&self) -> Fut<Vec<GridBlock>> {
        let block_manager = self.block_manager.clone();
-        to_future(async move { block_manager.get_blocks(None).await.unwrap_or_default() })
+        to_fut(async move { block_manager.get_blocks(None).await.unwrap_or_default() })
    }

    fn get_task_scheduler(&self) -> Arc<RwLock<TaskDispatcher>> {

View File

@@ -0,0 +1,148 @@
+use bytes::Bytes;
+use flowy_database::{
+    prelude::*,
+    schema::{grid_rev_snapshot, grid_rev_snapshot::dsl},
+    ConnectionPool,
+};
+use flowy_error::{internal_error, FlowyResult};
+use flowy_revision::{RevisionSnapshot, RevisionSnapshotDiskCache};
+use lib_infra::util::timestamp;
+use std::sync::Arc;
+
+pub struct SQLiteGridRevisionSnapshotPersistence {
+    object_id: String,
+    pool: Arc<ConnectionPool>,
+}
+
+impl SQLiteGridRevisionSnapshotPersistence {
+    pub fn new(object_id: &str, pool: Arc<ConnectionPool>) -> Self {
+        Self {
+            object_id: object_id.to_string(),
+            pool,
+        }
+    }
+
+    fn gen_snapshot_id(&self, rev_id: i64) -> String {
+        format!("{}:{}", self.object_id, rev_id)
+    }
+}
+
+impl RevisionSnapshotDiskCache for SQLiteGridRevisionSnapshotPersistence {
+    fn write_snapshot(&self, rev_id: i64, data: Vec<u8>) -> FlowyResult<()> {
+        let conn = self.pool.get().map_err(internal_error)?;
+        let snapshot_id = self.gen_snapshot_id(rev_id);
+        let timestamp = timestamp();
+        let record = (
+            dsl::snapshot_id.eq(&snapshot_id),
+            dsl::object_id.eq(&self.object_id),
+            dsl::rev_id.eq(rev_id),
+            dsl::base_rev_id.eq(rev_id),
+            dsl::timestamp.eq(timestamp),
+            dsl::data.eq(data),
+        );
+        let _ = insert_or_ignore_into(dsl::grid_rev_snapshot)
+            .values(record)
+            .execute(&*conn)?;
+        Ok(())
+
+        // conn.immediate_transaction::<_, FlowyError, _>(|| {
+        //     let filter = dsl::grid_rev_snapshot
+        //         .filter(dsl::object_id.eq(&self.object_id))
+        //         .filter(dsl::rev_id.eq(rev_id));
+        //
+        //     let is_exist: bool = select(exists(filter)).get_result(&*conn)?;
+        //     match is_exist {
+        //         false => {
+        //             let record = (
+        //                 dsl::object_id.eq(&self.object_id),
+        //                 dsl::rev_id.eq(rev_id),
+        //                 dsl::data.eq(data),
+        //             );
+        //             insert_or_ignore_into(dsl::grid_rev_snapshot)
+        //                 .values(record)
+        //                 .execute(&*conn)?;
+        //         }
+        //         true => {
+        //             let affected_row = update(filter).set(dsl::data.eq(data)).execute(&*conn)?;
+        //             debug_assert_eq!(affected_row, 1);
+        //         }
+        //     }
+        //     Ok(())
+        // })
+    }
+
+    fn read_snapshot(&self, rev_id: i64) -> FlowyResult<Option<RevisionSnapshot>> {
+        let conn = self.pool.get().map_err(internal_error)?;
+        let snapshot_id = self.gen_snapshot_id(rev_id);
+        let record = dsl::grid_rev_snapshot
+            .filter(dsl::snapshot_id.eq(&snapshot_id))
+            .first::<GridSnapshotRecord>(&*conn)?;
+
+        Ok(Some(record.into()))
+    }
+
+    fn read_last_snapshot(&self) -> FlowyResult<Option<RevisionSnapshot>> {
+        let conn = self.pool.get().map_err(internal_error)?;
+        let latest_record = dsl::grid_rev_snapshot
+            .filter(dsl::object_id.eq(&self.object_id))
+            .order(dsl::rev_id.desc())
+            // .select(max(dsl::rev_id))
+            // .select((dsl::id, dsl::object_id, dsl::rev_id, dsl::data))
+            .first::<GridSnapshotRecord>(&*conn)?;
+        Ok(Some(latest_record.into()))
+    }
+}
+
+#[derive(PartialEq, Clone, Debug, Queryable, Identifiable, Insertable, Associations)]
+#[table_name = "grid_rev_snapshot"]
+#[primary_key("snapshot_id")]
+struct GridSnapshotRecord {
+    snapshot_id: String,
+    object_id: String,
+    rev_id: i64,
+    base_rev_id: i64,
+    timestamp: i64,
+    data: Vec<u8>,
+}
+
+impl std::convert::From<GridSnapshotRecord> for RevisionSnapshot {
+    fn from(record: GridSnapshotRecord) -> Self {
+        RevisionSnapshot {
+            rev_id: record.rev_id,
+            base_rev_id: record.base_rev_id,
+            timestamp: record.timestamp,
+            data: Bytes::from(record.data),
+        }
+    }
+}
+
+// pub(crate) fn get_latest_rev_id_from(rev_ids: Vec<i64>, anchor: i64) -> Option<i64> {
+//     let mut target_rev_id = None;
+//     let mut old_step: Option<i64> = None;
+//     for rev_id in rev_ids {
+//         let step = (rev_id - anchor).abs();
+//         if let Some(old_step) = &mut old_step {
+//             if *old_step > step {
+//                 *old_step = step;
+//                 target_rev_id = Some(rev_id);
+//             }
+//         } else {
+//             old_step = Some(step);
+//             target_rev_id = Some(rev_id);
+//         }
+//     }
+//     target_rev_id
+// }
+
+// #[cfg(test)]
+// mod tests {
+//     use crate::services::persistence::rev_sqlite::get_latest_rev_id_from;
+//
+//     #[test]
+//     fn test_latest_rev_id() {
+//         let ids = vec![1, 2, 3, 4, 5, 6];
+//         for (anchor, expected_value) in vec![(3, 3), (7, 6), (1, 1)] {
+//             let value = get_latest_rev_id_from(ids.clone(), anchor).unwrap();
+//             assert_eq!(value, expected_value);
+//         }
+//     }
+// }
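
A usage sketch of the new persistence, mirroring how `make_grid_rev_manager` wires it up earlier in this commit (the surrounding variables are assumed):

    // Assumed call site; `pool` is the user's Arc<ConnectionPool>.
    let snapshot_object_id = format!("grid:{}", grid_id);
    let snapshot_persistence = SQLiteGridRevisionSnapshotPersistence::new(&snapshot_object_id, pool);

    // write_snapshot keys the row by "{object_id}:{rev_id}", so rewriting the
    // same rev_id is a no-op thanks to insert_or_ignore_into.
    snapshot_persistence.write_snapshot(rev_id, data.to_vec())?;
    let latest = snapshot_persistence.read_last_snapshot()?; // highest rev_id first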

View File

@@ -1,7 +1,9 @@
-mod grid_block_impl;
-mod grid_impl;
-mod grid_view_impl;
+mod grid_block_sqlite_impl;
+mod grid_snapshot_sqlite_impl;
+mod grid_sqlite_impl;
+mod grid_view_sqlite_impl;

-pub use grid_block_impl::*;
-pub use grid_impl::*;
-pub use grid_view_impl::*;
+pub use grid_block_sqlite_impl::*;
+pub use grid_snapshot_sqlite_impl::*;
+pub use grid_sqlite_impl::*;
+pub use grid_view_sqlite_impl::*;

View File

@ -11,10 +11,14 @@ use crate::services::view_editor::trait_impl::*;
use crate::services::view_editor::GridViewChangedReceiverRunner; use crate::services::view_editor::GridViewChangedReceiverRunner;
use flowy_database::ConnectionPool; use flowy_database::ConnectionPool;
use flowy_error::FlowyResult; use flowy_error::FlowyResult;
use flowy_http_model::revision::Revision;
use flowy_revision::RevisionManager; use flowy_revision::RevisionManager;
use flowy_sync::client_grid::{GridViewRevisionChangeset, GridViewRevisionPad}; use flowy_sync::client_grid::{make_grid_view_operations, GridViewRevisionChangeset, GridViewRevisionPad};
use flowy_task::TaskDispatcher; use flowy_task::TaskDispatcher;
use grid_rev_model::{gen_grid_filter_id, FieldRevision, FieldTypeRevision, FilterRevision, RowChangeset, RowRevision}; use grid_rev_model::{
gen_grid_filter_id, FieldRevision, FieldTypeRevision, FilterRevision, LayoutRevision, RowChangeset, RowRevision,
};
use lib_infra::async_trait::async_trait;
use lib_infra::future::Fut; use lib_infra::future::Fut;
use lib_infra::ref_map::RefCountValue; use lib_infra::ref_map::RefCountValue;
use nanoid::nanoid; use nanoid::nanoid;
@ -60,13 +64,26 @@ impl GridViewRevisionEditor {
let cloud = Arc::new(GridViewRevisionCloudService { let cloud = Arc::new(GridViewRevisionCloudService {
token: token.to_owned(), token: token.to_owned(),
}); });
let view_revision_pad = rev_manager.initialize::<GridViewRevisionSerde>(Some(cloud)).await?;
let pad = Arc::new(RwLock::new(view_revision_pad)); let view_rev_pad = match rev_manager.initialize::<GridViewRevisionSerde>(Some(cloud)).await {
Ok(pad) => pad,
Err(err) => {
// We shouldn't reach here, because the snapshot should come to the rescue.
tracing::error!("Deserialize grid view revisions failed: {}", err);
let view = GridViewRevisionPad::new(view_id.to_owned(), view_id.to_owned(), LayoutRevision::Table);
let bytes = make_grid_view_operations(&view).json_bytes();
let reset_revision = Revision::initial_revision(&view_id, bytes);
let _ = rev_manager.reset_object(vec![reset_revision]).await;
view
}
};
let view_rev_pad = Arc::new(RwLock::new(view_rev_pad));
let rev_manager = Arc::new(rev_manager); let rev_manager = Arc::new(rev_manager);
let group_controller = new_group_controller( let group_controller = new_group_controller(
user_id.to_owned(), user_id.to_owned(),
view_id.clone(), view_id.clone(),
pad.clone(), view_rev_pad.clone(),
rev_manager.clone(), rev_manager.clone(),
delegate.clone(), delegate.clone(),
) )
@ -74,9 +91,10 @@ impl GridViewRevisionEditor {
let user_id = user_id.to_owned(); let user_id = user_id.to_owned();
let group_controller = Arc::new(RwLock::new(group_controller)); let group_controller = Arc::new(RwLock::new(group_controller));
let filter_controller = make_filter_controller(&view_id, delegate.clone(), notifier.clone(), pad.clone()).await; let filter_controller =
make_filter_controller(&view_id, delegate.clone(), notifier.clone(), view_rev_pad.clone()).await;
Ok(Self { Ok(Self {
pad, pad: view_rev_pad,
user_id, user_id,
view_id, view_id,
rev_manager, rev_manager,
@ -88,11 +106,10 @@ impl GridViewRevisionEditor {
} }
#[tracing::instrument(name = "close grid view editor", level = "trace", skip_all)] #[tracing::instrument(name = "close grid view editor", level = "trace", skip_all)]
pub fn close(&self) { pub async fn close(&self) {
let filter_controller = self.filter_controller.clone(); self.rev_manager.generate_snapshot().await;
tokio::spawn(async move { self.rev_manager.close().await;
filter_controller.read().await.close().await; self.filter_controller.read().await.close().await;
});
} }
pub async fn filter_rows(&self, _block_id: &str, mut rows: Vec<Arc<RowRevision>>) -> Vec<Arc<RowRevision>> { pub async fn filter_rows(&self, _block_id: &str, mut rows: Vec<Arc<RowRevision>>) -> Vec<Arc<RowRevision>> {
@ -510,9 +527,10 @@ impl GridViewRevisionEditor {
} }
} }
#[async_trait]
impl RefCountValue for GridViewRevisionEditor { impl RefCountValue for GridViewRevisionEditor {
fn did_remove(&self) { async fn did_remove(&self) {
self.close(); self.close().await;
} }
} }
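
Because did_remove is awaited here, the RefCountValue trait in lib_infra::ref_map must itself have become an async trait in this commit. A sketch of its assumed shape (the Send + Sync bounds and the empty default body are assumptions):

use lib_infra::async_trait::async_trait;

#[async_trait]
pub trait RefCountValue: Send + Sync {
    async fn did_remove(&self) {}
}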

View File

@ -4,15 +4,15 @@ use crate::entities::{
}; };
use crate::manager::GridUser; use crate::manager::GridUser;
use crate::services::filter::FilterType; use crate::services::filter::FilterType;
use crate::services::persistence::rev_sqlite::SQLiteGridViewRevisionPersistence; use crate::services::persistence::rev_sqlite::{
SQLiteGridRevisionSnapshotPersistence, SQLiteGridViewRevisionPersistence,
};
use crate::services::view_editor::changed_notifier::*; use crate::services::view_editor::changed_notifier::*;
use crate::services::view_editor::trait_impl::GridViewRevisionCompress; use crate::services::view_editor::trait_impl::GridViewRevisionMergeable;
use crate::services::view_editor::{GridViewEditorDelegate, GridViewRevisionEditor}; use crate::services::view_editor::{GridViewEditorDelegate, GridViewRevisionEditor};
use flowy_database::ConnectionPool; use flowy_database::ConnectionPool;
use flowy_error::FlowyResult; use flowy_error::FlowyResult;
use flowy_revision::{ use flowy_revision::{RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration};
RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, SQLiteRevisionSnapshotPersistence,
};
use grid_rev_model::{FieldRevision, FilterRevision, RowChangeset, RowRevision}; use grid_rev_model::{FieldRevision, FilterRevision, RowChangeset, RowRevision};
use lib_infra::future::Fut; use lib_infra::future::Fut;
use lib_infra::ref_map::RefCountHashMap; use lib_infra::ref_map::RefCountHashMap;
@ -42,7 +42,7 @@ impl GridViewManager {
} }
pub async fn close(&self, view_id: &str) { pub async fn close(&self, view_id: &str) {
self.view_editors.write().await.remove(view_id); self.view_editors.write().await.remove(view_id).await;
} }
pub async fn subscribe_view_changed(&self, view_id: &str) -> FlowyResult<broadcast::Receiver<GridViewChanged>> { pub async fn subscribe_view_changed(&self, view_id: &str) -> FlowyResult<broadcast::Receiver<GridViewChanged>> {
@ -230,19 +230,23 @@ pub async fn make_grid_view_rev_manager(
view_id: &str, view_id: &str,
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> { ) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
let user_id = user.user_id()?; let user_id = user.user_id()?;
let pool = user.db_pool()?;
// Create revision persistence
let pool = user.db_pool()?;
let disk_cache = SQLiteGridViewRevisionPersistence::new(&user_id, pool.clone()); let disk_cache = SQLiteGridViewRevisionPersistence::new(&user_id, pool.clone());
let configuration = RevisionPersistenceConfiguration::new(2, false); let configuration = RevisionPersistenceConfiguration::new(2, false);
let rev_persistence = RevisionPersistence::new(&user_id, view_id, disk_cache, configuration); let rev_persistence = RevisionPersistence::new(&user_id, view_id, disk_cache, configuration);
let rev_compactor = GridViewRevisionCompress();
let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(view_id, pool); // Create snapshot persistence
let snapshot_object_id = format!("grid_view:{}", view_id);
let snapshot_persistence = SQLiteGridRevisionSnapshotPersistence::new(&snapshot_object_id, pool);
let rev_compress = GridViewRevisionMergeable();
Ok(RevisionManager::new( Ok(RevisionManager::new(
&user_id, &user_id,
view_id, view_id,
rev_persistence, rev_persistence,
rev_compactor, rev_compress,
snapshot_persistence, snapshot_persistence,
)) ))
} }
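
The snapshot object id is deliberately prefixed with grid_view: before it reaches the snapshot persistence. A grid and its default view appear to share the same id (see GridViewRevisionPad::new(view_id, view_id, ...) earlier in this diff), and both kinds of snapshot appear to live in the same grid_rev_snapshot table, so the prefix keeps their keys apart:

// Illustrative keys in the shared grid_rev_snapshot table (hypothetical ids):
//   grid snapshot key:      "<view_id>:<rev_id>"
//   grid view snapshot key: "grid_view:<view_id>:<rev_id>"
let snapshot_object_id = format!("grid_view:{}", view_id);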

View File

@ -13,7 +13,7 @@ use flowy_revision::{
use flowy_sync::client_grid::{GridViewRevisionChangeset, GridViewRevisionPad}; use flowy_sync::client_grid::{GridViewRevisionChangeset, GridViewRevisionPad};
use flowy_sync::util::make_operations_from_revisions; use flowy_sync::util::make_operations_from_revisions;
use grid_rev_model::{FieldRevision, FieldTypeRevision, FilterRevision, GroupConfigurationRevision, RowRevision}; use grid_rev_model::{FieldRevision, FieldTypeRevision, FilterRevision, GroupConfigurationRevision, RowRevision};
use lib_infra::future::{to_future, Fut, FutureResult}; use lib_infra::future::{to_fut, Fut, FutureResult};
use lib_ot::core::EmptyAttributes; use lib_ot::core::EmptyAttributes;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::RwLock; use tokio::sync::RwLock;
@ -46,8 +46,8 @@ impl RevisionObjectSerializer for GridViewRevisionSerde {
} }
} }
pub(crate) struct GridViewRevisionCompress(); pub(crate) struct GridViewRevisionMergeable();
impl RevisionMergeable for GridViewRevisionCompress { impl RevisionMergeable for GridViewRevisionMergeable {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> { fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
GridViewRevisionSerde::combine_revisions(revisions) GridViewRevisionSerde::combine_revisions(revisions)
} }
@ -58,7 +58,7 @@ pub(crate) struct GroupConfigurationReaderImpl(pub(crate) Arc<RwLock<GridViewRev
impl GroupConfigurationReader for GroupConfigurationReaderImpl { impl GroupConfigurationReader for GroupConfigurationReaderImpl {
fn get_configuration(&self) -> Fut<Option<Arc<GroupConfigurationRevision>>> { fn get_configuration(&self) -> Fut<Option<Arc<GroupConfigurationRevision>>> {
let view_pad = self.0.clone(); let view_pad = self.0.clone();
to_future(async move { to_fut(async move {
let mut groups = view_pad.read().await.get_all_groups(); let mut groups = view_pad.read().await.get_all_groups();
if groups.is_empty() { if groups.is_empty() {
None None
@ -88,7 +88,7 @@ impl GroupConfigurationWriter for GroupConfigurationWriterImpl {
let view_pad = self.view_pad.clone(); let view_pad = self.view_pad.clone();
let field_id = field_id.to_owned(); let field_id = field_id.to_owned();
to_future(async move { to_fut(async move {
let changeset = view_pad.write().await.insert_or_update_group_configuration( let changeset = view_pad.write().await.insert_or_update_group_configuration(
&field_id, &field_id,
&field_type, &field_type,
@ -109,10 +109,8 @@ pub(crate) async fn apply_change(
change: GridViewRevisionChangeset, change: GridViewRevisionChangeset,
) -> FlowyResult<()> { ) -> FlowyResult<()> {
let GridViewRevisionChangeset { operations: delta, md5 } = change; let GridViewRevisionChangeset { operations: delta, md5 } = change;
let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair(); let data = delta.json_bytes();
let delta_data = delta.json_bytes(); let _ = rev_manager.add_local_revision(data, md5).await?;
let revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
let _ = rev_manager.add_local_revision(&revision).await?;
Ok(()) Ok(())
} }
@ -136,7 +134,7 @@ pub(crate) struct GridViewFilterDelegateImpl {
impl FilterDelegate for GridViewFilterDelegateImpl { impl FilterDelegate for GridViewFilterDelegateImpl {
fn get_filter_rev(&self, filter_id: FilterType) -> Fut<Option<Arc<FilterRevision>>> { fn get_filter_rev(&self, filter_id: FilterType) -> Fut<Option<Arc<FilterRevision>>> {
let pad = self.view_revision_pad.clone(); let pad = self.view_revision_pad.clone();
to_future(async move { to_fut(async move {
let field_type_rev: FieldTypeRevision = filter_id.field_type.into(); let field_type_rev: FieldTypeRevision = filter_id.field_type.into();
let mut filters = pad.read().await.get_filters(&filter_id.field_id, &field_type_rev); let mut filters = pad.read().await.get_filters(&filter_id.field_id, &field_type_rev);
if filters.is_empty() { if filters.is_empty() {

View File

@ -60,7 +60,7 @@ impl GridFieldTest {
FieldScript::CreateField { params } => { FieldScript::CreateField { params } => {
self.field_count += 1; self.field_count += 1;
self.editor self.editor
.create_new_field_rev(&params.field_type, params.type_option_data) .create_new_field_rev_with_type_option(&params.field_type, params.type_option_data)
.await .await
.unwrap(); .unwrap();
self.field_revs = self.editor.get_field_revs(None).await.unwrap(); self.field_revs = self.editor.get_field_revs(None).await.unwrap();

View File

@ -462,7 +462,7 @@ async fn group_insert_single_select_option_test() {
AssertGroupCount(5), AssertGroupCount(5),
]; ];
test.run_scripts(scripts).await; test.run_scripts(scripts).await;
let new_group = test.group_at_index(1).await; let new_group = test.group_at_index(4).await;
assert_eq!(new_group.desc, new_option_name); assert_eq!(new_group.desc, new_option_name);
} }

View File

@ -4,3 +4,4 @@ mod field_test;
mod filter_test; mod filter_test;
mod grid_editor; mod grid_editor;
mod group_test; mod group_test;
mod snapshot_test;

View File

@ -0,0 +1,2 @@
mod script;
mod test;

View File

@ -0,0 +1,105 @@
use crate::grid::grid_editor::GridEditorTest;
use flowy_http_model::revision::Revision;
use flowy_revision::{RevisionSnapshot, REVISION_WRITE_INTERVAL_IN_MILLIS};
use flowy_sync::client_grid::{GridOperations, GridRevisionPad};
use grid_rev_model::FieldRevision;
use std::time::Duration;
use tokio::time::sleep;
pub enum SnapshotScript {
WriteSnapshot,
#[allow(dead_code)]
AssertSnapshot {
rev_id: i64,
expected: Option<RevisionSnapshot>,
},
AssertSnapshotContent {
snapshot: RevisionSnapshot,
expected: String,
},
CreateField {
field_rev: FieldRevision,
},
DeleteField {
field_rev: FieldRevision,
},
}
pub struct GridSnapshotTest {
inner: GridEditorTest,
pub current_snapshot: Option<RevisionSnapshot>,
pub current_revision: Option<Revision>,
}
impl GridSnapshotTest {
pub async fn new() -> Self {
let editor_test = GridEditorTest::new_table().await;
Self {
inner: editor_test,
current_snapshot: None,
current_revision: None,
}
}
pub fn grid_id(&self) -> String {
self.grid_id.clone()
}
pub async fn grid_pad(&self) -> GridRevisionPad {
let pad = self.editor.grid_pad();
let pad = (*pad.read().await).clone();
pad
}
pub async fn run_scripts(&mut self, scripts: Vec<SnapshotScript>) {
for script in scripts {
self.run_script(script).await;
}
}
pub async fn get_latest_snapshot(&self) -> Option<RevisionSnapshot> {
self.editor.rev_manager().read_snapshot(None).await.unwrap()
}
pub async fn run_script(&mut self, script: SnapshotScript) {
let rev_manager = self.editor.rev_manager();
match script {
SnapshotScript::WriteSnapshot => {
sleep(Duration::from_millis(2 * REVISION_WRITE_INTERVAL_IN_MILLIS)).await;
rev_manager.generate_snapshot().await;
self.current_snapshot = rev_manager.read_snapshot(None).await.unwrap();
}
SnapshotScript::AssertSnapshot { rev_id, expected } => {
let snapshot = rev_manager.read_snapshot(Some(rev_id)).await.unwrap();
assert_eq!(snapshot, expected);
}
SnapshotScript::AssertSnapshotContent { snapshot, expected } => {
let operations = GridOperations::from_bytes(snapshot.data).unwrap();
let pad = GridRevisionPad::from_operations(operations).unwrap();
assert_eq!(pad.json_str().unwrap(), expected);
}
SnapshotScript::CreateField { field_rev } => {
self.editor.create_new_field_rev(field_rev).await.unwrap();
let current_rev_id = rev_manager.rev_id();
self.current_revision = rev_manager.get_revision(current_rev_id).await;
}
SnapshotScript::DeleteField { field_rev } => {
self.editor.delete_field(&field_rev.id).await.unwrap();
}
}
}
}
impl std::ops::Deref for GridSnapshotTest {
type Target = GridEditorTest;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl std::ops::DerefMut for GridSnapshotTest {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}

View File

@ -0,0 +1,45 @@
use crate::grid::field_test::util::create_text_field;
use crate::grid::snapshot_test::script::{GridSnapshotTest, SnapshotScript::*};
#[tokio::test]
async fn snapshot_create_test() {
let mut test = GridSnapshotTest::new().await;
let (_, field_rev) = create_text_field(&test.grid_id());
let scripts = vec![CreateField { field_rev }, WriteSnapshot];
test.run_scripts(scripts).await;
let snapshot = test.current_snapshot.clone().unwrap();
let content = test.grid_pad().await.json_str().unwrap();
test.run_scripts(vec![AssertSnapshotContent {
snapshot,
expected: content,
}])
.await;
}
#[tokio::test]
async fn snapshot_multi_version_test() {
let mut test = GridSnapshotTest::new().await;
let original_content = test.grid_pad().await.json_str().unwrap();
// Create a field
let (_, field_rev) = create_text_field(&test.grid_id());
let scripts = vec![
CreateField {
field_rev: field_rev.clone(),
},
WriteSnapshot,
];
test.run_scripts(scripts).await;
// Delete a field
let scripts = vec![DeleteField { field_rev }, WriteSnapshot];
test.run_scripts(scripts).await;
// The latest snapshot will be the same as the original content.
test.run_scripts(vec![AssertSnapshotContent {
snapshot: test.get_latest_snapshot().await.unwrap(),
expected: original_content,
}])
.await;
}

View File

@ -18,6 +18,7 @@ strum_macros = "0.21"
dashmap = "5" dashmap = "5"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
futures-util = "0.3.15" futures-util = "0.3.15"
futures = "0.3.15"
async-stream = "0.3.2" async-stream = "0.3.2"
serde_json = {version = "1.0"} serde_json = {version = "1.0"}

View File

@ -67,13 +67,13 @@ where
self.disk_cache.clone(), self.disk_cache.clone(),
configuration, configuration,
)); ));
let (revisions, _) = RevisionLoader { let revisions = RevisionLoader {
object_id: self.target.target_id().to_owned(), object_id: self.target.target_id().to_owned(),
user_id: self.user_id.clone(), user_id: self.user_id.clone(),
cloud: None, cloud: None,
rev_persistence, rev_persistence,
} }
.load() .load_revisions()
.await?; .await?;
let bytes = self.target.reset_data(revisions)?; let bytes = self.target.reset_data(revisions)?;

View File

@ -1,15 +1,14 @@
mod cache; mod cache;
mod conflict_resolve; mod conflict_resolve;
// mod history;
mod rev_manager; mod rev_manager;
mod rev_persistence; mod rev_persistence;
mod snapshot; mod rev_queue;
mod rev_snapshot;
mod ws_manager; mod ws_manager;
pub use cache::*; pub use cache::*;
pub use conflict_resolve::*; pub use conflict_resolve::*;
// pub use history::*;
pub use rev_manager::*; pub use rev_manager::*;
pub use rev_persistence::*; pub use rev_persistence::*;
pub use snapshot::*; pub use rev_snapshot::*;
pub use ws_manager::*; pub use ws_manager::*;

View File

@ -1,13 +1,17 @@
use crate::disk::RevisionState; use crate::rev_queue::{RevCommand, RevCommandSender, RevQueue};
use crate::{RevisionPersistence, RevisionSnapshotDiskCache, RevisionSnapshotManager, WSDataProviderDataSource}; use crate::{
RevisionPersistence, RevisionSnapshot, RevisionSnapshotController, RevisionSnapshotDiskCache,
WSDataProviderDataSource,
};
use bytes::Bytes; use bytes::Bytes;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_http_model::revision::{Revision, RevisionRange}; use flowy_http_model::revision::{Revision, RevisionRange};
use flowy_http_model::util::md5; use flowy_http_model::util::md5;
use lib_infra::future::FutureResult; use lib_infra::future::FutureResult;
use std::sync::atomic::AtomicI64; use std::sync::atomic::AtomicI64;
use std::sync::atomic::Ordering::SeqCst; use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
pub trait RevisionCloudService: Send + Sync { pub trait RevisionCloudService: Send + Sync {
/// Read the object's revision from remote /// Read the object's revision from remote
@ -67,13 +71,13 @@ pub trait RevisionMergeable: Send + Sync {
pub struct RevisionManager<Connection> { pub struct RevisionManager<Connection> {
pub object_id: String, pub object_id: String,
user_id: String, user_id: String,
rev_id_counter: RevIdCounter, rev_id_counter: Arc<RevIdCounter>,
rev_persistence: Arc<RevisionPersistence<Connection>>, rev_persistence: Arc<RevisionPersistence<Connection>>,
#[allow(dead_code)] rev_snapshot: Arc<RevisionSnapshotController<Connection>>,
rev_snapshot: Arc<RevisionSnapshotManager>,
rev_compress: Arc<dyn RevisionMergeable>, rev_compress: Arc<dyn RevisionMergeable>,
#[cfg(feature = "flowy_unit_test")] #[cfg(feature = "flowy_unit_test")]
rev_ack_notifier: tokio::sync::broadcast::Sender<i64>, rev_ack_notifier: tokio::sync::broadcast::Sender<i64>,
rev_queue: RevCommandSender,
} }
impl<Connection: 'static> RevisionManager<Connection> { impl<Connection: 'static> RevisionManager<Connection> {
@ -88,45 +92,84 @@ impl<Connection: 'static> RevisionManager<Connection> {
SP: 'static + RevisionSnapshotDiskCache, SP: 'static + RevisionSnapshotDiskCache,
C: 'static + RevisionMergeable, C: 'static + RevisionMergeable,
{ {
let rev_id_counter = RevIdCounter::new(0); let rev_id_counter = Arc::new(RevIdCounter::new(0));
let rev_compress = Arc::new(rev_compress); let rev_compress = Arc::new(rev_compress);
let rev_persistence = Arc::new(rev_persistence); let rev_persistence = Arc::new(rev_persistence);
let rev_snapshot = Arc::new(RevisionSnapshotManager::new(user_id, object_id, snapshot_persistence)); let rev_snapshot = RevisionSnapshotController::new(
user_id,
object_id,
snapshot_persistence,
rev_id_counter.clone(),
rev_persistence.clone(),
rev_compress.clone(),
);
let (rev_queue, receiver) = mpsc::channel(1000);
let queue = RevQueue::new(
object_id.to_owned(),
rev_id_counter.clone(),
rev_persistence.clone(),
rev_compress.clone(),
receiver,
);
tokio::spawn(queue.run());
Self { Self {
object_id: object_id.to_string(), object_id: object_id.to_string(),
user_id: user_id.to_owned(), user_id: user_id.to_owned(),
rev_id_counter, rev_id_counter,
rev_persistence, rev_persistence,
rev_snapshot, rev_snapshot: Arc::new(rev_snapshot),
rev_compress, rev_compress,
#[cfg(feature = "flowy_unit_test")] #[cfg(feature = "flowy_unit_test")]
rev_ack_notifier: tokio::sync::broadcast::channel(1).0, rev_ack_notifier: tokio::sync::broadcast::channel(1).0,
rev_queue,
} }
} }
#[tracing::instrument(level = "debug", skip_all, fields(object_id) err)] #[tracing::instrument(level = "debug", skip_all, fields(deserializer, object) err)]
pub async fn initialize<B>(&mut self, cloud: Option<Arc<dyn RevisionCloudService>>) -> FlowyResult<B::Output> pub async fn initialize<B>(&mut self, _cloud: Option<Arc<dyn RevisionCloudService>>) -> FlowyResult<B::Output>
where where
B: RevisionObjectDeserializer, B: RevisionObjectDeserializer,
{ {
let (revisions, rev_id) = RevisionLoader { let revision_records = self.rev_persistence.load_all_records(&self.object_id)?;
object_id: self.object_id.clone(), tracing::Span::current().record("object", &self.object_id.as_str());
user_id: self.user_id.clone(), tracing::Span::current().record("deserializer", &std::any::type_name::<B>());
cloud, let revisions: Vec<Revision> = revision_records.iter().map(|record| record.revision.clone()).collect();
rev_persistence: self.rev_persistence.clone(), let current_rev_id = revisions.last().as_ref().map(|revision| revision.rev_id).unwrap_or(0);
match B::deserialize_revisions(&self.object_id, revisions) {
Ok(object) => {
let _ = self.rev_persistence.sync_revision_records(&revision_records).await?;
self.rev_id_counter.set(current_rev_id);
Ok(object)
}
Err(err) => match self.rev_snapshot.restore_from_snapshot::<B>(current_rev_id) {
None => Err(err),
Some((object, snapshot_rev)) => {
let snapshot_rev_id = snapshot_rev.rev_id;
let _ = self.rev_persistence.reset(vec![snapshot_rev]).await;
// revision_records.retain(|record| record.revision.rev_id <= snapshot_rev_id);
// let _ = self.rev_persistence.sync_revision_records(&revision_records).await?;
self.rev_id_counter.set(snapshot_rev_id);
Ok(object)
}
},
} }
.load()
.await?;
self.rev_id_counter.set(rev_id);
tracing::Span::current().record("object_id", &self.object_id.as_str());
B::deserialize_revisions(&self.object_id, revisions)
} }
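
The fused diff above is dense, so here is a condensed sketch of the new initialization flow, using the same names as the diff (tracing and some error plumbing elided):

// 1. Load all revision records from disk.
// 2. Try to deserialize the object from those revisions.
// 3. On success: re-queue the records that still need syncing and adopt the
//    latest rev_id.
// 4. On failure: restore from the latest snapshot, reset the persisted
//    revisions to the snapshot's revision, and adopt the snapshot's rev_id.
let records = self.rev_persistence.load_all_records(&self.object_id)?;
let revisions: Vec<Revision> = records.iter().map(|r| r.revision.clone()).collect();
let current_rev_id = revisions.last().map(|rev| rev.rev_id).unwrap_or(0);
match B::deserialize_revisions(&self.object_id, revisions) {
    Ok(object) => {
        self.rev_persistence.sync_revision_records(&records).await?;
        self.rev_id_counter.set(current_rev_id);
        Ok(object)
    }
    Err(err) => match self.rev_snapshot.restore_from_snapshot::<B>(current_rev_id) {
        None => Err(err),
        Some((object, snapshot_rev)) => {
            self.rev_id_counter.set(snapshot_rev.rev_id);
            let _ = self.rev_persistence.reset(vec![snapshot_rev]).await;
            Ok(object)
        }
    },
}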
pub async fn close(&self) { pub async fn close(&self) {
let _ = self.rev_persistence.compact_lagging_revisions(&self.rev_compress).await; let _ = self.rev_persistence.compact_lagging_revisions(&self.rev_compress).await;
} }
pub async fn generate_snapshot(&self) {
self.rev_snapshot.generate_snapshot().await;
}
pub async fn read_snapshot(&self, rev_id: Option<i64>) -> FlowyResult<Option<RevisionSnapshot>> {
match rev_id {
None => self.rev_snapshot.read_last_snapshot(),
Some(rev_id) => self.rev_snapshot.read_snapshot(rev_id),
}
}
pub async fn load_revisions(&self) -> FlowyResult<Vec<Revision>> { pub async fn load_revisions(&self) -> FlowyResult<Vec<Revision>> {
let revisions = RevisionLoader { let revisions = RevisionLoader {
object_id: self.object_id.clone(), object_id: self.object_id.clone(),
@ -154,23 +197,23 @@ impl<Connection: 'static> RevisionManager<Connection> {
} }
let _ = self.rev_persistence.add_ack_revision(revision).await?; let _ = self.rev_persistence.add_ack_revision(revision).await?;
// self.rev_history.add_revision(revision).await;
self.rev_id_counter.set(revision.rev_id); self.rev_id_counter.set(revision.rev_id);
Ok(()) Ok(())
} }
/// Adds the revision generated by the user's editing
// #[tracing::instrument(level = "trace", skip_all, err)] // #[tracing::instrument(level = "trace", skip_all, err)]
pub async fn add_local_revision(&self, revision: &Revision) -> Result<(), FlowyError> { pub async fn add_local_revision(&self, data: Bytes, object_md5: String) -> Result<i64, FlowyError> {
if revision.bytes.is_empty() { if data.is_empty() {
return Err(FlowyError::internal().context("Local revisions is empty")); return Err(FlowyError::internal().context("The data of the revisions is empty"));
} }
let rev_id = self self.rev_snapshot.generate_snapshot_if_need();
.rev_persistence let (ret, rx) = oneshot::channel();
.add_sync_revision(revision, &self.rev_compress) self.rev_queue
.await?; .send(RevCommand::RevisionData { data, object_md5, ret })
// self.rev_history.add_revision(revision).await; .await
self.rev_id_counter.set(rev_id); .map_err(internal_error)?;
Ok(()) rx.await.map_err(internal_error)?
} }
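
Callers no longer construct a Revision (or a base/rev id pair) themselves: they hand the serialized bytes and the object's md5 to the revision manager and get back the id that was actually persisted, which may belong to a merged revision. A sketch of the new calling convention (cf. apply_change earlier in this diff):

let data = delta.json_bytes();
let rev_id = rev_manager.add_local_revision(data, md5).await?;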
#[tracing::instrument(level = "debug", skip(self), err)] #[tracing::instrument(level = "debug", skip(self), err)]
@ -256,40 +299,6 @@ pub struct RevisionLoader<Connection> {
} }
impl<Connection: 'static> RevisionLoader<Connection> { impl<Connection: 'static> RevisionLoader<Connection> {
pub async fn load(&self) -> Result<(Vec<Revision>, i64), FlowyError> {
let records = self.rev_persistence.load_all_records(&self.object_id)?;
let revisions: Vec<Revision>;
let mut rev_id = 0;
if records.is_empty() && self.cloud.is_some() {
let remote_revisions = self
.cloud
.as_ref()
.unwrap()
.fetch_object(&self.user_id, &self.object_id)
.await?;
for revision in &remote_revisions {
rev_id = revision.rev_id;
let _ = self.rev_persistence.add_ack_revision(revision).await?;
}
revisions = remote_revisions;
} else {
for record in &records {
rev_id = record.revision.rev_id;
if record.state == RevisionState::Sync {
// Sync the records if their state is RevisionState::Sync.
let _ = self.rev_persistence.sync_revision(&record.revision).await?;
}
}
revisions = records.into_iter().map(|record| record.revision).collect::<_>();
}
if let Some(revision) = revisions.last() {
debug_assert_eq!(rev_id, revision.rev_id);
}
Ok((revisions, rev_id))
}
pub async fn load_revisions(&self) -> Result<Vec<Revision>, FlowyError> { pub async fn load_revisions(&self) -> Result<Vec<Revision>, FlowyError> {
let records = self.rev_persistence.load_all_records(&self.object_id)?; let records = self.rev_persistence.load_all_records(&self.object_id)?;
let revisions = records.into_iter().map(|record| record.revision).collect::<_>(); let revisions = records.into_iter().map(|record| record.revision).collect::<_>();
@ -377,6 +386,7 @@ impl RevIdCounter {
let _ = self.0.fetch_add(1, SeqCst); let _ = self.0.fetch_add(1, SeqCst);
self.value() self.value()
} }
pub fn value(&self) -> i64 { pub fn value(&self) -> i64 {
self.0.load(SeqCst) self.0.load(SeqCst)
} }

View File

@ -7,7 +7,8 @@ use crate::memory::RevisionMemoryCache;
use crate::RevisionMergeable; use crate::RevisionMergeable;
use flowy_error::{internal_error, FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_http_model::revision::{Revision, RevisionRange}; use flowy_http_model::revision::{Revision, RevisionRange};
use std::collections::VecDeque; use std::collections::{HashMap, VecDeque};
use std::{borrow::Cow, sync::Arc}; use std::{borrow::Cow, sync::Arc};
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tokio::task::spawn_blocking; use tokio::task::spawn_blocking;
@ -16,7 +17,12 @@ pub const REVISION_WRITE_INTERVAL_IN_MILLIS: u64 = 600;
#[derive(Clone)] #[derive(Clone)]
pub struct RevisionPersistenceConfiguration { pub struct RevisionPersistenceConfiguration {
// If the number of revisions that didn't sync to the server is greater than the merge_threshold,
// then these revisions will be merged into one revision.
merge_threshold: usize, merge_threshold: usize,
/// Indicates that the revisions that didn't sync to the server can be merged into one when
/// `compact_lagging_revisions` gets called.
merge_lagging: bool, merge_lagging: bool,
} }
@ -46,6 +52,9 @@ impl std::default::Default for RevisionPersistenceConfiguration {
} }
} }
/// Represents the persistence of revisions, including the memory cache and the disk cache.
/// The generic parameter `Connection` represents the disk backend's connection.
/// If the backend is SQLite, then `Connection` will be a SQLite connection.
pub struct RevisionPersistence<Connection> { pub struct RevisionPersistence<Connection> {
user_id: String, user_id: String,
object_id: String, object_id: String,
@ -99,15 +108,6 @@ where
self.add(revision.clone(), RevisionState::Ack, true).await self.add(revision.clone(), RevisionState::Ack, true).await
} }
/// Append the revision that already existed in the local DB state to sync sequence
#[tracing::instrument(level = "trace", skip(self), fields(rev_id, object_id=%self.object_id), err)]
pub(crate) async fn sync_revision(&self, revision: &Revision) -> FlowyResult<()> {
tracing::Span::current().record("rev_id", &revision.rev_id);
self.add(revision.clone(), RevisionState::Sync, false).await?;
self.sync_seq.write().await.recv(revision.rev_id)?;
Ok(())
}
#[tracing::instrument(level = "trace", skip_all, err)] #[tracing::instrument(level = "trace", skip_all, err)]
pub async fn compact_lagging_revisions<'a>( pub async fn compact_lagging_revisions<'a>(
&'a self, &'a self,
@ -144,22 +144,36 @@ where
Ok(()) Ok(())
} }
/// Syncs each record's revision to the remote if its state is `RevisionState::Sync`.
///
pub(crate) async fn sync_revision_records(&self, records: &[SyncRecord]) -> FlowyResult<()> {
let mut sync_seq = self.sync_seq.write().await;
for record in records {
if record.state == RevisionState::Sync {
self.add(record.revision.clone(), RevisionState::Sync, false).await?;
sync_seq.recv(record.revision.rev_id)?; // Sync the records if their state is RevisionState::Sync.
}
}
Ok(())
}
/// Save the revision to disk and append it to the end of the sync sequence. /// Save the revision to disk and append it to the end of the sync sequence.
/// The returned value, `rev_id`, may differ from the passed-in revision's rev_id if
/// multiple revisions are merged into one.
#[tracing::instrument(level = "trace", skip_all, fields(rev_id, compact_range, object_id=%self.object_id), err)] #[tracing::instrument(level = "trace", skip_all, fields(rev_id, compact_range, object_id=%self.object_id), err)]
pub(crate) async fn add_sync_revision<'a>( pub(crate) async fn add_local_revision<'a>(
&'a self, &'a self,
new_revision: &'a Revision, new_revision: Revision,
rev_compress: &Arc<dyn RevisionMergeable + 'a>, rev_compress: &Arc<dyn RevisionMergeable + 'a>,
) -> FlowyResult<i64> { ) -> FlowyResult<i64> {
let mut sync_seq = self.sync_seq.write().await; let mut sync_seq = self.sync_seq.write().await;
let compact_length = sync_seq.compact_length;
// Before the new_revision is pushed into the sync_seq, we check if the current `compact_length` of the // Before the new_revision is pushed into the sync_seq, we check if the current `compact_length` of the
// sync_seq is greater than or equal to the merge threshold. If so, they need to be merged // sync_seq is greater than or equal to the merge threshold. If so, they need to be merged
// with the new_revision into one revision. // with the new_revision into one revision.
let mut compact_seq = VecDeque::default(); let mut compact_seq = VecDeque::default();
// tracing::info!("{}", compact_seq) // tracing::info!("{}", compact_seq)
if compact_length >= self.configuration.merge_threshold - 1 { if sync_seq.compact_length >= self.configuration.merge_threshold - 1 {
compact_seq.extend(sync_seq.compact()); compact_seq.extend(sync_seq.compact());
} }
if !compact_seq.is_empty() { if !compact_seq.is_empty() {
@ -172,7 +186,7 @@ where
let mut revisions = self.revisions_in_range(&range).await?; let mut revisions = self.revisions_in_range(&range).await?;
debug_assert_eq!(range.len() as usize, revisions.len()); debug_assert_eq!(range.len() as usize, revisions.len());
// append the new revision // append the new revision
revisions.push(new_revision.clone()); revisions.push(new_revision);
// compact multiple revisions into one // compact multiple revisions into one
let merged_revision = rev_compress.merge_revisions(&self.user_id, &self.object_id, revisions)?; let merged_revision = rev_compress.merge_revisions(&self.user_id, &self.object_id, revisions)?;
@ -184,10 +198,11 @@ where
self.compact(&range, merged_revision).await?; self.compact(&range, merged_revision).await?;
Ok(rev_id) Ok(rev_id)
} else { } else {
tracing::Span::current().record("rev_id", &new_revision.rev_id); let rev_id = new_revision.rev_id;
self.add(new_revision.clone(), RevisionState::Sync, true).await?; tracing::Span::current().record("rev_id", &rev_id);
sync_seq.merge_recv(new_revision.rev_id)?; self.add(new_revision, RevisionState::Sync, true).await?;
Ok(new_revision.rev_id) sync_seq.merge_recv(rev_id)?;
Ok(rev_id)
} }
} }
@ -290,7 +305,16 @@ where
} }
pub fn load_all_records(&self, object_id: &str) -> FlowyResult<Vec<SyncRecord>> { pub fn load_all_records(&self, object_id: &str) -> FlowyResult<Vec<SyncRecord>> {
self.disk_cache.read_revision_records(object_id, None) let mut record_ids = HashMap::new();
let mut records = vec![];
for record in self.disk_cache.read_revision_records(object_id, None)? {
let rev_id = record.revision.rev_id;
if record_ids.get(&rev_id).is_none() {
records.push(record);
}
record_ids.insert(rev_id, rev_id);
}
Ok(records)
} }
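
load_all_records now drops duplicate records, keeping the first occurrence of each rev_id. The HashMap above is used purely for membership, so a HashSet would state that intent more directly; an equivalent sketch:

use std::collections::HashSet;

let mut seen: HashSet<i64> = HashSet::new();
let mut records = vec![];
for record in self.disk_cache.read_revision_records(object_id, None)? {
    // insert() returns false when this rev_id has been seen before
    if seen.insert(record.revision.rev_id) {
        records.push(record);
    }
}
Ok(records)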
// Read the revision which rev_id >= range.start && rev_id <= range.end // Read the revision which rev_id >= range.start && rev_id <= range.end
@ -306,11 +330,6 @@ where
.map_err(internal_error)??; .map_err(internal_error)??;
if records.len() != range_len { if records.len() != range_len {
// #[cfg(debug_assertions)]
// records.iter().for_each(|record| {
// let delta = PlainDelta::from_bytes(&record.revision.delta_data).unwrap();
// tracing::trace!("{}", delta.to_string());
// });
tracing::error!("Expect revision len {},but receive {}", range_len, records.len()); tracing::error!("Expect revision len {},but receive {}", range_len, records.len());
} }
} }
@ -319,6 +338,14 @@ where
.map(|record| record.revision) .map(|record| record.revision)
.collect::<Vec<Revision>>()) .collect::<Vec<Revision>>())
} }
#[allow(dead_code)]
pub fn delete_revisions_from_range(&self, range: RevisionRange) -> FlowyResult<()> {
let _ = self
.disk_cache
.delete_revision_records(&self.object_id, Some(range.to_rev_ids()))?;
Ok(())
}
} }
impl<C> RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<C, Error = FlowyError>> { impl<C> RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<C, Error = FlowyError>> {
@ -378,9 +405,8 @@ impl DeferSyncSequence {
// The last revision's rev_id must be greater than the new one. // The last revision's rev_id must be greater than the new one.
if let Some(rev_id) = self.rev_ids.back() { if let Some(rev_id) = self.rev_ids.back() {
if *rev_id >= new_rev_id { if *rev_id >= new_rev_id {
return Err( tracing::error!("The new revision's id must be greater than {}", rev_id);
FlowyError::internal().context(format!("The new revision's id must be greater than {}", rev_id)) return Ok(());
);
} }
} }
self.rev_ids.push_back(new_rev_id); self.rev_ids.push_back(new_rev_id);

View File

@ -0,0 +1,98 @@
use crate::{RevIdCounter, RevisionMergeable, RevisionPersistence};
use async_stream::stream;
use bytes::Bytes;
use flowy_error::FlowyError;
use flowy_http_model::revision::Revision;
use futures::stream::StreamExt;
use std::sync::Arc;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::sync::oneshot;
#[derive(Debug)]
pub(crate) enum RevCommand {
RevisionData {
data: Bytes,
object_md5: String,
ret: Ret<i64>,
},
}
pub(crate) struct RevQueue<Connection> {
object_id: String,
rev_id_counter: Arc<RevIdCounter>,
rev_persistence: Arc<RevisionPersistence<Connection>>,
rev_compress: Arc<dyn RevisionMergeable>,
receiver: Option<RevCommandReceiver>,
}
impl<Connection> RevQueue<Connection>
where
Connection: 'static,
{
pub fn new(
object_id: String,
rev_id_counter: Arc<RevIdCounter>,
rev_persistence: Arc<RevisionPersistence<Connection>>,
rev_compress: Arc<dyn RevisionMergeable>,
receiver: RevCommandReceiver,
) -> Self {
Self {
object_id,
rev_id_counter,
rev_persistence,
rev_compress,
receiver: Some(receiver),
}
}
pub async fn run(mut self) {
let mut receiver = self.receiver.take().expect("Only take once");
let object_id = self.object_id.clone();
let stream = stream! {
loop {
match receiver.recv().await {
Some(msg) => yield msg,
None => {
tracing::trace!("{}'s RevQueue exist", &object_id);
break
},
}
}
};
stream
.for_each(|command| async {
match self.handle_command(command).await {
Ok(_) => {}
Err(e) => tracing::debug!("[RevQueue]: {}", e),
}
})
.await;
}
async fn handle_command(&self, command: RevCommand) -> Result<(), FlowyError> {
match command {
RevCommand::RevisionData {
data,
object_md5: data_md5,
ret,
} => {
let base_rev_id = self.rev_id_counter.value();
let rev_id = self.rev_id_counter.next_id();
let revision = Revision::new(&self.object_id, base_rev_id, rev_id, data, data_md5);
let new_rev_id = self
.rev_persistence
.add_local_revision(revision, &self.rev_compress)
.await?;
self.rev_id_counter.set(new_rev_id);
let _ = ret.send(Ok(new_rev_id));
}
}
Ok(())
}
}
pub(crate) type RevCommandSender = Sender<RevCommand>;
pub(crate) type RevCommandReceiver = Receiver<RevCommand>;
pub(crate) type Ret<T> = oneshot::Sender<Result<T, FlowyError>>;
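
Each command carries a oneshot sender, so the caller that enqueued the data can await the rev id that the queue eventually assigns (which, after merging, may not be a simple increment). The send side, as used by RevisionManager::add_local_revision earlier in this diff (inside a function returning a FlowyError result):

let (ret, rx) = oneshot::channel();
rev_queue
    .send(RevCommand::RevisionData { data, object_md5, ret })
    .await
    .map_err(internal_error)?;
let rev_id = rx.await.map_err(internal_error)??;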

View File

@ -0,0 +1,174 @@
#![allow(clippy::all)]
#![allow(dead_code)]
#![allow(unused_variables)]
use crate::{RevIdCounter, RevisionMergeable, RevisionObjectDeserializer, RevisionPersistence};
use bytes::Bytes;
use flowy_error::FlowyResult;
use flowy_http_model::revision::Revision;
use std::sync::atomic::AtomicI64;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
pub trait RevisionSnapshotDiskCache: Send + Sync {
fn write_snapshot(&self, rev_id: i64, data: Vec<u8>) -> FlowyResult<()>;
fn read_snapshot(&self, rev_id: i64) -> FlowyResult<Option<RevisionSnapshot>>;
fn read_last_snapshot(&self) -> FlowyResult<Option<RevisionSnapshot>>;
}
/// Does nothing; it only exists to satisfy the Rust compiler about the generic parameter `SP` of `RevisionManager`.
///
pub struct PhantomSnapshotPersistence();
impl RevisionSnapshotDiskCache for PhantomSnapshotPersistence {
fn write_snapshot(&self, rev_id: i64, data: Vec<u8>) -> FlowyResult<()> {
Ok(())
}
fn read_snapshot(&self, rev_id: i64) -> FlowyResult<Option<RevisionSnapshot>> {
Ok(None)
}
fn read_last_snapshot(&self) -> FlowyResult<Option<RevisionSnapshot>> {
Ok(None)
}
}
const AUTO_GEN_SNAPSHOT_PER_10_REVISION: i64 = 10;
pub struct RevisionSnapshotController<Connection> {
user_id: String,
object_id: String,
disk_cache: Arc<dyn RevisionSnapshotDiskCache>,
rev_id_counter: Arc<RevIdCounter>,
rev_persistence: Arc<RevisionPersistence<Connection>>,
rev_compress: Arc<dyn RevisionMergeable>,
start_rev_id: AtomicI64,
}
impl<Connection> RevisionSnapshotController<Connection>
where
Connection: 'static,
{
pub fn new<D>(
user_id: &str,
object_id: &str,
disk_cache: D,
rev_id_counter: Arc<RevIdCounter>,
revision_persistence: Arc<RevisionPersistence<Connection>>,
revision_compress: Arc<dyn RevisionMergeable>,
) -> Self
where
D: RevisionSnapshotDiskCache + 'static,
{
let disk_cache = Arc::new(disk_cache);
Self {
user_id: user_id.to_string(),
object_id: object_id.to_string(),
disk_cache,
rev_id_counter,
start_rev_id: AtomicI64::new(0),
rev_persistence: revision_persistence,
rev_compress: revision_compress,
}
}
pub async fn generate_snapshot(&self) {
if let Some((rev_id, bytes)) = self.generate_snapshot_data() {
if let Err(e) = self.disk_cache.write_snapshot(rev_id, bytes.to_vec()) {
tracing::error!("Save snapshot failed: {}", e);
}
}
}
/// Find the nearest revision based on the passed-in rev_id
pub fn restore_from_snapshot<B>(&self, rev_id: i64) -> Option<(B::Output, Revision)>
where
B: RevisionObjectDeserializer,
{
tracing::trace!("Try to find if {} has snapshot", self.object_id);
let snapshot = self.disk_cache.read_last_snapshot().ok()??;
let snapshot_rev_id = snapshot.rev_id;
let revision = Revision::new(
&self.object_id,
snapshot.base_rev_id,
snapshot.rev_id,
snapshot.data,
"".to_owned(),
);
tracing::trace!(
"Try to restore from snapshot: {}, {}",
snapshot.base_rev_id,
snapshot.rev_id
);
let object = B::deserialize_revisions(&self.object_id, vec![revision.clone()]).ok()?;
tracing::trace!(
"Restore {} from snapshot with rev_id: {}",
self.object_id,
snapshot_rev_id
);
Some((object, revision))
}
pub fn generate_snapshot_if_need(&self) {
let current_rev_id = self.rev_id_counter.value();
let start_rev_id = self.get_start_rev_id();
if current_rev_id <= start_rev_id {
return;
}
if (current_rev_id - start_rev_id) >= AUTO_GEN_SNAPSHOT_PER_10_REVISION {
if let Some((rev_id, bytes)) = self.generate_snapshot_data() {
let disk_cache = self.disk_cache.clone();
tokio::spawn(async move {
let _ = disk_cache.write_snapshot(rev_id, bytes.to_vec());
});
}
self.set_start_rev_id(current_rev_id);
}
}
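
generate_snapshot_if_need throttles automatic snapshots to roughly one per AUTO_GEN_SNAPSHOT_PER_10_REVISION revisions, writing them on a background task so the caller is never blocked. A worked example of the bookkeeping:

// With AUTO_GEN_SNAPSHOT_PER_10_REVISION = 10 and start_rev_id = 0:
//   current_rev_id = 9  -> 9 - 0 < 10, nothing happens
//   current_rev_id = 10 -> 10 - 0 >= 10, a snapshot write is spawned and
//                          start_rev_id becomes 10
//   current_rev_id = 19 -> 19 - 10 < 10, nothing happens again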
fn generate_snapshot_data(&self) -> Option<(i64, Bytes)> {
let revisions = self
.rev_persistence
.load_all_records(&self.object_id)
.map(|records| {
records
.into_iter()
.map(|record| record.revision)
.collect::<Vec<Revision>>()
})
.ok()?;
if revisions.is_empty() {
return None;
}
let data = self.rev_compress.combine_revisions(revisions).ok()?;
let rev_id = self.rev_id_counter.value();
Some((rev_id, data))
}
fn get_start_rev_id(&self) -> i64 {
self.start_rev_id.load(SeqCst)
}
fn set_start_rev_id(&self, rev_id: i64) {
let _ = self.start_rev_id.fetch_update(SeqCst, SeqCst, |_| Some(rev_id));
}
}
impl<Connection> std::ops::Deref for RevisionSnapshotController<Connection> {
type Target = Arc<dyn RevisionSnapshotDiskCache>;
fn deref(&self) -> &Self::Target {
&self.disk_cache
}
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct RevisionSnapshot {
pub rev_id: i64,
pub base_rev_id: i64,
pub timestamp: i64,
pub data: Bytes,
}
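
The revision tests further down refer to a RevisionSnapshotMock that is not shown in this diff; it presumably mirrors the no-op shape of PhantomSnapshotPersistence above. A minimal sketch under that assumption:

struct RevisionSnapshotMock {}

impl RevisionSnapshotDiskCache for RevisionSnapshotMock {
    fn write_snapshot(&self, _rev_id: i64, _data: Vec<u8>) -> FlowyResult<()> {
        Ok(())
    }

    fn read_snapshot(&self, _rev_id: i64) -> FlowyResult<Option<RevisionSnapshot>> {
        Ok(None)
    }

    fn read_last_snapshot(&self) -> FlowyResult<Option<RevisionSnapshot>> {
        Ok(None)
    }
}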

View File

@ -1,5 +0,0 @@
mod persistence;
mod rev_snapshot;
pub use persistence::*;
pub use rev_snapshot::*;

View File

@ -1,32 +0,0 @@
#![allow(clippy::all)]
#![allow(dead_code)]
#![allow(unused_variables)]
use crate::{RevisionSnapshotDiskCache, RevisionSnapshotInfo};
use flowy_error::FlowyResult;
pub struct SQLiteRevisionSnapshotPersistence<Connection> {
object_id: String,
pool: Connection,
}
impl<Connection: 'static> SQLiteRevisionSnapshotPersistence<Connection> {
pub fn new(object_id: &str, pool: Connection) -> Self {
Self {
object_id: object_id.to_string(),
pool,
}
}
}
impl<Connection> RevisionSnapshotDiskCache for SQLiteRevisionSnapshotPersistence<Connection>
where
Connection: Send + Sync + 'static,
{
fn write_snapshot(&self, object_id: &str, rev_id: i64, data: Vec<u8>) -> FlowyResult<()> {
todo!()
}
fn read_snapshot(&self, object_id: &str, rev_id: i64) -> FlowyResult<RevisionSnapshotInfo> {
todo!()
}
}

View File

@ -1,32 +0,0 @@
#![allow(clippy::all)]
#![allow(dead_code)]
#![allow(unused_variables)]
use flowy_error::FlowyResult;
use std::sync::Arc;
pub trait RevisionSnapshotDiskCache: Send + Sync {
fn write_snapshot(&self, object_id: &str, rev_id: i64, data: Vec<u8>) -> FlowyResult<()>;
fn read_snapshot(&self, object_id: &str, rev_id: i64) -> FlowyResult<RevisionSnapshotInfo>;
}
pub struct RevisionSnapshotManager {
user_id: String,
object_id: String,
disk_cache: Arc<dyn RevisionSnapshotDiskCache>,
}
impl RevisionSnapshotManager {
pub fn new<D>(user_id: &str, object_id: &str, disk_cache: D) -> Self
where
D: RevisionSnapshotDiskCache + 'static,
{
let disk_cache = Arc::new(disk_cache);
Self {
user_id: user_id.to_string(),
object_id: object_id.to_string(),
disk_cache,
}
}
}
pub struct RevisionSnapshotInfo {}

View File

@ -3,12 +3,9 @@ use crate::revision_test::script::{RevisionScript::*, RevisionTest};
#[tokio::test] #[tokio::test]
async fn revision_sync_test() { async fn revision_sync_test() {
let test = RevisionTest::new().await; let test = RevisionTest::new().await;
let (base_rev_id, rev_id) = test.next_rev_id_pair(); let rev_id = 1;
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "123".to_string(), content: "123".to_string(),
base_rev_id,
rev_id,
}) })
.await; .await;
@ -20,21 +17,21 @@ async fn revision_sync_test() {
#[tokio::test] #[tokio::test]
async fn revision_compress_2_revisions_with_2_threshold_test() { async fn revision_compress_2_revisions_with_2_threshold_test() {
let test = RevisionTest::new_with_configuration(2).await; let test = RevisionTest::new_with_configuration(2).await;
test.run_script(AddLocalRevision2 { test.run_script(AddLocalRevision2 {
content: "123".to_string(), content: "123".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}) })
.await; .await;
test.run_script(AddLocalRevision2 { test.run_script(AddLocalRevision2 {
content: "456".to_string(), content: "456".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}) })
.await; .await;
test.run_scripts(vec![ test.run_scripts(vec![
AssertNextSyncRevisionId { rev_id: Some(1) }, AssertNextSyncRevisionId { rev_id: Some(1) },
AssertNextSyncRevisionContent {
expected: "123456".to_string(),
},
AckRevision { rev_id: 1 }, AckRevision { rev_id: 1 },
AssertNextSyncRevisionId { rev_id: None }, AssertNextSyncRevisionId { rev_id: None },
]) ])
@ -44,36 +41,25 @@ async fn revision_compress_2_revisions_with_2_threshold_test() {
#[tokio::test] #[tokio::test]
async fn revision_compress_4_revisions_with_threshold_2_test() { async fn revision_compress_4_revisions_with_threshold_2_test() {
let test = RevisionTest::new_with_configuration(2).await; let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id_1) = test.next_rev_id_pair(); let rev_id_1 = 1;
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "1".to_string(), content: "1".to_string(),
base_rev_id,
rev_id: rev_id_1,
}) })
.await; .await;
let (base_rev_id, rev_id_2) = test.next_rev_id_pair(); let rev_id_2 = 2;
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "2".to_string(), content: "2".to_string(),
base_rev_id,
rev_id: rev_id_2,
}) })
.await; .await;
let (base_rev_id, rev_id_3) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "3".to_string(), content: "3".to_string(),
base_rev_id,
rev_id: rev_id_3,
}) })
.await; .await;
let (base_rev_id, rev_id_4) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "4".to_string(), content: "4".to_string(),
base_rev_id,
rev_id: rev_id_4,
}) })
.await; .await;
@ -96,68 +82,45 @@ async fn revision_compress_4_revisions_with_threshold_2_test() {
#[tokio::test] #[tokio::test]
async fn revision_compress_8_revisions_with_threshold_4_test() { async fn revision_compress_8_revisions_with_threshold_4_test() {
let test = RevisionTest::new_with_configuration(4).await; let test = RevisionTest::new_with_configuration(4).await;
let (base_rev_id, rev_id_1) = test.next_rev_id_pair(); let rev_id_1 = 1;
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "1".to_string(), content: "1".to_string(),
base_rev_id,
rev_id: rev_id_1,
}) })
.await; .await;
let (base_rev_id, rev_id_2) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "2".to_string(), content: "2".to_string(),
base_rev_id,
rev_id: rev_id_2,
}) })
.await; .await;
let (base_rev_id, rev_id_3) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "3".to_string(), content: "3".to_string(),
base_rev_id,
rev_id: rev_id_3,
}) })
.await; .await;
let (base_rev_id, rev_id_4) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "4".to_string(), content: "4".to_string(),
base_rev_id,
rev_id: rev_id_4,
}) })
.await; .await;
let (base_rev_id, rev_id_a) = test.next_rev_id_pair(); let rev_id_a = 2;
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "a".to_string(), content: "a".to_string(),
base_rev_id,
rev_id: rev_id_a,
}) })
.await; .await;
let (base_rev_id, rev_id_b) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "b".to_string(), content: "b".to_string(),
base_rev_id,
rev_id: rev_id_b,
}) })
.await; .await;
let (base_rev_id, rev_id_c) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "c".to_string(), content: "c".to_string(),
base_rev_id,
rev_id: rev_id_c,
}) })
.await; .await;
let (base_rev_id, rev_id_d) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "d".to_string(), content: "d".to_string(),
base_rev_id,
rev_id: rev_id_d,
}) })
.await; .await;
@ -183,13 +146,7 @@ async fn revision_merge_per_5_revision_test() {
let test = RevisionTest::new_with_configuration(5).await; let test = RevisionTest::new_with_configuration(5).await;
for i in 0..20 { for i in 0..20 {
let content = format!("{}", i); let content = format!("{}", i);
let (base_rev_id, rev_id) = test.next_rev_id_pair(); test.run_script(AddLocalRevision { content }).await;
test.run_script(AddLocalRevision {
content,
base_rev_id,
rev_id,
})
.await;
} }
test.run_scripts(vec![ test.run_scripts(vec![
@ -220,13 +177,7 @@ async fn revision_merge_per_100_revision_test() {
let test = RevisionTest::new_with_configuration(100).await; let test = RevisionTest::new_with_configuration(100).await;
for i in 0..1000 { for i in 0..1000 {
let content = format!("{}", i); let content = format!("{}", i);
let (base_rev_id, rev_id) = test.next_rev_id_pair(); test.run_script(AddLocalRevision { content }).await;
test.run_script(AddLocalRevision {
content,
base_rev_id,
rev_id,
})
.await;
} }
test.run_scripts(vec![AssertNumberOfSyncRevisions { num: 10 }]).await; test.run_scripts(vec![AssertNumberOfSyncRevisions { num: 10 }]).await;
@ -236,11 +187,8 @@ async fn revision_merge_per_100_revision_test() {
async fn revision_merge_per_100_revision_test2() { async fn revision_merge_per_100_revision_test2() {
let test = RevisionTest::new_with_configuration(100).await; let test = RevisionTest::new_with_configuration(100).await;
for i in 0..50 { for i in 0..50 {
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: format!("{}", i), content: format!("{}", i),
base_rev_id,
rev_id,
}) })
.await; .await;
} }
@ -252,11 +200,8 @@ async fn revision_merge_per_100_revision_test2() {
async fn revision_merge_per_1000_revision_test() { async fn revision_merge_per_1000_revision_test() {
let test = RevisionTest::new_with_configuration(1000).await; let test = RevisionTest::new_with_configuration(1000).await;
for i in 0..100000 { for i in 0..100000 {
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: format!("{}", i), content: format!("{}", i),
base_rev_id,
rev_id,
}) })
.await; .await;
} }
@ -267,50 +212,42 @@ async fn revision_merge_per_1000_revision_test() {
#[tokio::test] #[tokio::test]
async fn revision_compress_revision_test() { async fn revision_compress_revision_test() {
let test = RevisionTest::new_with_configuration(2).await; let test = RevisionTest::new_with_configuration(2).await;
test.run_scripts(vec![ test.run_scripts(vec![
AddLocalRevision2 { AddLocalRevision2 {
content: "1".to_string(), content: "1".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AddLocalRevision2 { AddLocalRevision2 {
content: "2".to_string(), content: "2".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AddLocalRevision2 { AddLocalRevision2 {
content: "3".to_string(), content: "3".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AddLocalRevision2 { AddLocalRevision2 {
content: "4".to_string(), content: "4".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AssertNumberOfSyncRevisions { num: 2 }, AssertNumberOfSyncRevisions { num: 2 },
]) ])
.await; .await;
} }
#[tokio::test] #[tokio::test]
async fn revision_compress_revision_while_recv_ack_test() { async fn revision_compress_revision_while_recv_ack_test() {
let test = RevisionTest::new_with_configuration(2).await; let test = RevisionTest::new_with_configuration(2).await;
test.run_scripts(vec![ test.run_scripts(vec![
AddLocalRevision2 { AddLocalRevision2 {
content: "1".to_string(), content: "1".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AckRevision { rev_id: 1 }, AckRevision { rev_id: 1 },
AddLocalRevision2 { AddLocalRevision2 {
content: "2".to_string(), content: "2".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AckRevision { rev_id: 2 }, AckRevision { rev_id: 2 },
AddLocalRevision2 { AddLocalRevision2 {
content: "3".to_string(), content: "3".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AckRevision { rev_id: 3 }, AckRevision { rev_id: 3 },
AddLocalRevision2 { AddLocalRevision2 {
content: "4".to_string(), content: "4".to_string(),
pair_rev_id: test.next_rev_id_pair(),
}, },
AssertNumberOfSyncRevisions { num: 4 }, AssertNumberOfSyncRevisions { num: 4 },
]) ])

View File

@ -4,12 +4,8 @@ use crate::revision_test::script::{InvalidRevisionObject, RevisionTest};
#[tokio::test] #[tokio::test]
async fn revision_write_to_disk_test() { async fn revision_write_to_disk_test() {
let test = RevisionTest::new_with_configuration(2).await; let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: "123".to_string(), content: "123".to_string(),
base_rev_id,
rev_id,
}) })
.await; .await;
@ -25,11 +21,8 @@ async fn revision_write_to_disk_test() {
async fn revision_write_to_disk_with_merge_test() { async fn revision_write_to_disk_with_merge_test() {
let test = RevisionTest::new_with_configuration(100).await; let test = RevisionTest::new_with_configuration(100).await;
for i in 0..1000 { for i in 0..1000 {
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision { test.run_script(AddLocalRevision {
content: format!("{}", i), content: format!("{}", i),
base_rev_id,
rev_id,
}) })
.await; .await;
} }
@ -46,12 +39,9 @@ async fn revision_write_to_disk_with_merge_test() {
#[tokio::test] #[tokio::test]
async fn revision_read_from_disk_test() { async fn revision_read_from_disk_test() {
let test = RevisionTest::new_with_configuration(2).await; let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![ test.run_scripts(vec![
AddLocalRevision { AddLocalRevision {
content: "123".to_string(), content: "123".to_string(),
base_rev_id,
rev_id,
}, },
AssertNumberOfRevisionsInDisk { num: 0 }, AssertNumberOfRevisionsInDisk { num: 0 },
WaitWhenWriteToDisk, WaitWhenWriteToDisk,
@ -60,16 +50,13 @@ async fn revision_read_from_disk_test() {
.await; .await;
let test = RevisionTest::new_with_other(test).await; let test = RevisionTest::new_with_other(test).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![ test.run_scripts(vec![
AssertNextSyncRevisionId { rev_id: Some(1) }, AssertNextSyncRevisionId { rev_id: Some(1) },
AddLocalRevision { AddLocalRevision {
content: "456".to_string(), content: "456".to_string(),
base_rev_id,
rev_id,
}, },
AckRevision { rev_id: 1 }, AckRevision { rev_id: 1 },
AssertNextSyncRevisionId { rev_id: Some(rev_id) }, AssertNextSyncRevisionId { rev_id: Some(2) },
]) ])
.await; .await;
} }
@ -77,20 +64,14 @@ async fn revision_read_from_disk_test() {
#[tokio::test] #[tokio::test]
async fn revision_read_from_disk_with_invalid_record_test() { async fn revision_read_from_disk_with_invalid_record_test() {
let test = RevisionTest::new_with_configuration(2).await; let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![AddLocalRevision { test.run_scripts(vec![AddLocalRevision {
content: "123".to_string(), content: "123".to_string(),
base_rev_id,
rev_id,
}]) }])
.await; .await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![ test.run_scripts(vec![
AddInvalidLocalRevision { AddInvalidLocalRevision {
bytes: InvalidRevisionObject::new().to_bytes(), bytes: InvalidRevisionObject::new().to_bytes(),
base_rev_id,
rev_id,
}, },
WaitWhenWriteToDisk, WaitWhenWriteToDisk,
]) ])
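A pattern runs through all of these test changes: callers no longer fetch a (base_rev_id, rev_id) pair up front and thread it through every script. A minimal before/after sketch of driving one script, using only names visible in this diff:

    // Before: ids were generated by the caller and passed explicitly.
    let (base_rev_id, rev_id) = test.next_rev_id_pair();
    test.run_script(AddLocalRevision {
        content: "123".to_string(),
        base_rev_id,
        rev_id,
    })
    .await;

    // After: the RevisionManager assigns ids internally, so a script
    // carries only its payload.
    test.run_script(AddLocalRevision {
        content: "123".to_string(),
    })
    .await;

Since ids are now assigned in arrival order inside the manager, assertions can use literals, which is why AssertNextSyncRevisionId { rev_id: Some(rev_id) } above becomes AssertNextSyncRevisionId { rev_id: Some(2) }.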

View File

@ -3,8 +3,7 @@ use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, SyncRecord}; use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, SyncRecord};
use flowy_revision::{ use flowy_revision::{
RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionPersistence, RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionPersistence,
RevisionPersistenceConfiguration, RevisionSnapshotDiskCache, RevisionSnapshotInfo, RevisionPersistenceConfiguration, RevisionSnapshot, RevisionSnapshotDiskCache, REVISION_WRITE_INTERVAL_IN_MILLIS,
REVISION_WRITE_INTERVAL_IN_MILLIS,
}; };
use flowy_http_model::revision::{Revision, RevisionRange}; use flowy_http_model::revision::{Revision, RevisionRange};
@ -16,35 +15,14 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
pub enum RevisionScript { pub enum RevisionScript {
AddLocalRevision { AddLocalRevision { content: String },
content: String, AddLocalRevision2 { content: String },
base_rev_id: i64, AddInvalidLocalRevision { bytes: Vec<u8> },
rev_id: i64, AckRevision { rev_id: i64 },
}, AssertNextSyncRevisionId { rev_id: Option<i64> },
AddLocalRevision2 { AssertNumberOfSyncRevisions { num: usize },
content: String, AssertNumberOfRevisionsInDisk { num: usize },
pair_rev_id: (i64, i64), AssertNextSyncRevisionContent { expected: String },
},
AddInvalidLocalRevision {
bytes: Vec<u8>,
base_rev_id: i64,
rev_id: i64,
},
AckRevision {
rev_id: i64,
},
AssertNextSyncRevisionId {
rev_id: Option<i64>,
},
AssertNumberOfSyncRevisions {
num: usize,
},
AssertNumberOfRevisionsInDisk {
num: usize,
},
AssertNextSyncRevisionContent {
expected: String,
},
WaitWhenWriteToDisk, WaitWhenWriteToDisk,
} }
@ -66,7 +44,7 @@ impl RevisionTest {
let configuration = RevisionPersistenceConfiguration::new(merge_threshold as usize, false); let configuration = RevisionPersistenceConfiguration::new(merge_threshold as usize, false);
let disk_cache = RevisionDiskCacheMock::new(vec![]); let disk_cache = RevisionDiskCacheMock::new(vec![]);
let persistence = RevisionPersistence::new(&user_id, &object_id, disk_cache, configuration.clone()); let persistence = RevisionPersistence::new(&user_id, &object_id, disk_cache, configuration.clone());
let compress = RevisionCompressMock {}; let compress = RevisionMergeableMock {};
let snapshot = RevisionSnapshotMock {}; let snapshot = RevisionSnapshotMock {};
let mut rev_manager = RevisionManager::new(&user_id, &object_id, persistence, compress, snapshot); let mut rev_manager = RevisionManager::new(&user_id, &object_id, persistence, compress, snapshot);
rev_manager.initialize::<RevisionObjectMockSerde>(None).await.unwrap(); rev_manager.initialize::<RevisionObjectMockSerde>(None).await.unwrap();
@ -89,7 +67,7 @@ impl RevisionTest {
configuration.clone(), configuration.clone(),
); );
let compress = RevisionCompressMock {}; let compress = RevisionMergeableMock {};
let snapshot = RevisionSnapshotMock {}; let snapshot = RevisionSnapshotMock {};
let mut rev_manager = let mut rev_manager =
RevisionManager::new(&old_test.user_id, &old_test.object_id, persistence, compress, snapshot); RevisionManager::new(&old_test.user_id, &old_test.object_id, persistence, compress, snapshot);
@ -107,56 +85,32 @@ impl RevisionTest {
} }
} }
pub fn next_rev_id_pair(&self) -> (i64, i64) {
self.rev_manager.next_rev_id_pair()
}
pub async fn run_script(&self, script: RevisionScript) { pub async fn run_script(&self, script: RevisionScript) {
match script { match script {
RevisionScript::AddLocalRevision { RevisionScript::AddLocalRevision { content } => {
content,
base_rev_id,
rev_id,
} => {
let object = RevisionObjectMock::new(&content); let object = RevisionObjectMock::new(&content);
let bytes = object.to_bytes(); let bytes = object.to_bytes();
let md5 = md5(&bytes); let md5 = md5(&bytes);
let revision = Revision::new( self.rev_manager
&self.rev_manager.object_id, .add_local_revision(Bytes::from(bytes), md5)
base_rev_id, .await
rev_id, .unwrap();
Bytes::from(bytes),
md5,
);
self.rev_manager.add_local_revision(&revision).await.unwrap();
} }
RevisionScript::AddLocalRevision2 { content, pair_rev_id } => { RevisionScript::AddLocalRevision2 { content } => {
let object = RevisionObjectMock::new(&content); let object = RevisionObjectMock::new(&content);
let bytes = object.to_bytes(); let bytes = object.to_bytes();
let md5 = md5(&bytes); let md5 = md5(&bytes);
let revision = Revision::new( self.rev_manager
&self.rev_manager.object_id, .add_local_revision(Bytes::from(bytes), md5)
pair_rev_id.0, .await
pair_rev_id.1, .unwrap();
Bytes::from(bytes),
md5,
);
self.rev_manager.add_local_revision(&revision).await.unwrap();
} }
RevisionScript::AddInvalidLocalRevision { RevisionScript::AddInvalidLocalRevision { bytes } => {
bytes,
base_rev_id,
rev_id,
} => {
let md5 = md5(&bytes); let md5 = md5(&bytes);
let revision = Revision::new( self.rev_manager
&self.rev_manager.object_id, .add_local_revision(Bytes::from(bytes), md5)
base_rev_id, .await
rev_id, .unwrap();
Bytes::from(bytes),
md5,
);
self.rev_manager.add_local_revision(&revision).await.unwrap();
} }
RevisionScript::AckRevision { rev_id } => { RevisionScript::AckRevision { rev_id } => {
// //
@ -291,18 +245,22 @@ impl RevisionDiskCache<RevisionConnectionMock> for RevisionDiskCacheMock {
pub struct RevisionConnectionMock {} pub struct RevisionConnectionMock {}
pub struct RevisionSnapshotMock {} pub struct RevisionSnapshotMock {}
impl RevisionSnapshotDiskCache for RevisionSnapshotMock { impl RevisionSnapshotDiskCache for RevisionSnapshotMock {
fn write_snapshot(&self, _object_id: &str, _rev_id: i64, _data: Vec<u8>) -> FlowyResult<()> { fn write_snapshot(&self, _rev_id: i64, _data: Vec<u8>) -> FlowyResult<()> {
todo!() todo!()
} }
fn read_snapshot(&self, _object_id: &str, _rev_id: i64) -> FlowyResult<RevisionSnapshotInfo> { fn read_snapshot(&self, _rev_id: i64) -> FlowyResult<Option<RevisionSnapshot>> {
todo!() todo!()
} }
fn read_last_snapshot(&self) -> FlowyResult<Option<RevisionSnapshot>> {
Ok(None)
}
} }
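Two changes land in this trait at once: the object_id parameter disappears (each cache is now scoped to a single object) and read_last_snapshot is added. A minimal in-memory sketch of the new shape; RevisionSnapshot's constructor is not part of this diff, so the read paths below return None rather than invent fields:

    use flowy_error::FlowyResult;
    use flowy_revision::{RevisionSnapshot, RevisionSnapshotDiskCache};
    use std::sync::RwLock;

    pub struct InMemorySnapshotCache {
        snapshots: RwLock<Vec<(i64, Vec<u8>)>>,
    }

    impl RevisionSnapshotDiskCache for InMemorySnapshotCache {
        fn write_snapshot(&self, rev_id: i64, data: Vec<u8>) -> FlowyResult<()> {
            self.snapshots.write().unwrap().push((rev_id, data));
            Ok(())
        }

        fn read_snapshot(&self, _rev_id: i64) -> FlowyResult<Option<RevisionSnapshot>> {
            // Building a RevisionSnapshot is elided: its fields are not shown here.
            Ok(None)
        }

        fn read_last_snapshot(&self) -> FlowyResult<Option<RevisionSnapshot>> {
            Ok(None)
        }
    }

read_last_snapshot is presumably the hook that lets a manager fall back to the most recent snapshot without knowing a rev_id in advance.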
pub struct RevisionCompressMock {} pub struct RevisionMergeableMock {}
impl RevisionMergeable for RevisionCompressMock { impl RevisionMergeable for RevisionMergeableMock {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> { fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
let mut object = RevisionObjectMock::new(""); let mut object = RevisionObjectMock::new("");
for revision in revisions { for revision in revisions {

View File

@ -2,6 +2,7 @@ use crate::queue::TaskQueue;
use crate::store::TaskStore; use crate::store::TaskStore;
use crate::{Task, TaskContent, TaskId, TaskState}; use crate::{Task, TaskContent, TaskId, TaskState};
use anyhow::Error; use anyhow::Error;
use lib_infra::async_trait::async_trait;
use lib_infra::future::BoxResultFuture; use lib_infra::future::BoxResultFuture;
use lib_infra::ref_map::{RefCountHashMap, RefCountValue}; use lib_infra::ref_map::{RefCountHashMap, RefCountValue};
use std::sync::Arc; use std::sync::Arc;
@ -41,8 +42,8 @@ impl TaskDispatcher {
self.handlers.insert(handler_id, RefCountTaskHandler(Arc::new(handler))); self.handlers.insert(handler_id, RefCountTaskHandler(Arc::new(handler)));
} }
pub fn unregister_handler<T: AsRef<str>>(&mut self, handler_id: T) { pub async fn unregister_handler<T: AsRef<str>>(&mut self, handler_id: T) {
self.handlers.remove(handler_id.as_ref()); self.handlers.remove(handler_id.as_ref()).await;
} }
pub fn stop(&mut self) { pub fn stop(&mut self) {
@ -174,8 +175,9 @@ where
#[derive(Clone)] #[derive(Clone)]
struct RefCountTaskHandler(Arc<dyn TaskHandler>); struct RefCountTaskHandler(Arc<dyn TaskHandler>);
#[async_trait]
impl RefCountValue for RefCountTaskHandler { impl RefCountValue for RefCountTaskHandler {
fn did_remove(&self) {} async fn did_remove(&self) {}
} }
impl std::ops::Deref for RefCountTaskHandler { impl std::ops::Deref for RefCountTaskHandler {
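unregister_handler turns async because dropping the last reference to a handler can now trigger the handler's async did_remove. A call-site sketch (the handler id is illustrative; any id previously registered with the dispatcher works):

    // inside an async context, with mutable access to the dispatcher:
    dispatcher.unregister_handler("my_handler").await;

The matching call-site update in the search test below shows the same pattern through a write lock.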

View File

@ -2,6 +2,7 @@ use anyhow::Error;
use flowy_task::{Task, TaskContent, TaskDispatcher, TaskHandler, TaskId, TaskResult, TaskRunner, TaskState}; use flowy_task::{Task, TaskContent, TaskDispatcher, TaskHandler, TaskId, TaskResult, TaskRunner, TaskState};
use futures::stream::FuturesUnordered; use futures::stream::FuturesUnordered;
use futures::StreamExt; use futures::StreamExt;
use lib_infra::async_trait::async_trait;
use lib_infra::future::BoxResultFuture; use lib_infra::future::BoxResultFuture;
use lib_infra::ref_map::RefCountValue; use lib_infra::ref_map::RefCountValue;
use rand::Rng; use rand::Rng;
@ -83,7 +84,7 @@ impl SearchTest {
tokio::time::sleep(Duration::from_millis(millisecond)).await; tokio::time::sleep(Duration::from_millis(millisecond)).await;
} }
SearchScript::UnregisterHandler { handler_id } => { SearchScript::UnregisterHandler { handler_id } => {
self.scheduler.write().await.unregister_handler(handler_id); self.scheduler.write().await.unregister_handler(handler_id).await;
} }
SearchScript::AssertTaskStatus { SearchScript::AssertTaskStatus {
task_id, task_id,
@ -109,8 +110,9 @@ impl SearchTest {
} }
pub struct MockTextTaskHandler(); pub struct MockTextTaskHandler();
#[async_trait]
impl RefCountValue for MockTextTaskHandler { impl RefCountValue for MockTextTaskHandler {
fn did_remove(&self) {} async fn did_remove(&self) {}
} }
impl TaskHandler for MockTextTaskHandler { impl TaskHandler for MockTextTaskHandler {
@ -146,8 +148,9 @@ pub fn make_text_user_interactive_task(task_id: TaskId, s: &str) -> (Task, Recei
} }
pub struct MockBlobTaskHandler(); pub struct MockBlobTaskHandler();
#[async_trait]
impl RefCountValue for MockBlobTaskHandler { impl RefCountValue for MockBlobTaskHandler {
fn did_remove(&self) {} async fn did_remove(&self) {}
} }
impl TaskHandler for MockBlobTaskHandler { impl TaskHandler for MockBlobTaskHandler {

View File

@ -1,12 +1,9 @@
mod layer; mod layer;
use log::LevelFilter;
use tracing::subscriber::set_global_default;
use crate::layer::*; use crate::layer::*;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use log::LevelFilter;
use std::sync::RwLock; use std::sync::RwLock;
use tracing::subscriber::set_global_default;
use tracing_appender::{non_blocking::WorkerGuard, rolling::RollingFileAppender}; use tracing_appender::{non_blocking::WorkerGuard, rolling::RollingFileAppender};
use tracing_bunyan_formatter::JsonStorageLayer; use tracing_bunyan_formatter::JsonStorageLayer;
use tracing_log::LogTracer; use tracing_log::LogTracer;

shared-lib/Cargo.lock (generated)
View File

@ -38,6 +38,17 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "async-trait"
version = "0.1.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]] [[package]]
name = "atty" name = "atty"
version = "0.2.14" version = "0.2.14"
@ -808,6 +819,7 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
name = "lib-infra" name = "lib-infra"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"async-trait",
"bytes", "bytes",
"chrono", "chrono",
"futures-core", "futures-core",
@ -1283,11 +1295,11 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086"
[[package]] [[package]]
name = "proc-macro2" name = "proc-macro2"
version = "1.0.32" version = "1.0.47"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
dependencies = [ dependencies = [
"unicode-xid", "unicode-ident",
] ]
[[package]] [[package]]
@ -1679,13 +1691,13 @@ dependencies = [
[[package]] [[package]]
name = "syn" name = "syn"
version = "1.0.81" version = "1.0.105"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
"unicode-xid", "unicode-ident",
] ]
[[package]] [[package]]
@ -1994,6 +2006,12 @@ version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f"
[[package]]
name = "unicode-ident"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]] [[package]]
name = "unicode-normalization" name = "unicode-normalization"
version = "0.1.19" version = "0.1.19"
@ -2015,12 +2033,6 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]] [[package]]
name = "url" name = "url"
version = "2.2.2" version = "2.2.2"

View File

@ -36,7 +36,7 @@ impl Revision {
let rev_id = rev_id; let rev_id = rev_id;
if base_rev_id != 0 { if base_rev_id != 0 {
debug_assert!(base_rev_id != rev_id); debug_assert!(base_rev_id <= rev_id);
} }
Self { Self {
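The assertion is deliberately relaxed: base_rev_id != rev_id becomes base_rev_id <= rev_id, which admits revisions whose base and id coincide (plausible once the manager merges revisions) while still catching, in debug builds, a base that runs ahead of its id. A sketch with placeholder values:

    // Accepted after this change, rejected before:
    let rev = Revision::new(&object_id, 3, 3, Bytes::from(bytes), md5);
    // Still trips the debug assertion: base_rev_id > rev_id.
    // let bad = Revision::new(&object_id, 4, 3, Bytes::from(bytes), md5);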

View File

@ -14,6 +14,7 @@ use std::sync::Arc;
pub type GridOperations = DeltaOperations<EmptyAttributes>; pub type GridOperations = DeltaOperations<EmptyAttributes>;
pub type GridOperationsBuilder = DeltaOperationBuilder<EmptyAttributes>; pub type GridOperationsBuilder = DeltaOperationBuilder<EmptyAttributes>;
#[derive(Clone)]
pub struct GridRevisionPad { pub struct GridRevisionPad {
grid_rev: Arc<GridRevision>, grid_rev: Arc<GridRevision>,
operations: GridOperations, operations: GridOperations,
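Deriving Clone here is inexpensive: grid_rev sits behind an Arc, so a clone bumps a reference count and copies the operations. Presumably this is what lets snapshot generation take an owned copy of the pad, e.g.:

    // hypothetical snapshot path: serialize an owned copy off to the side
    let pad_copy = grid_pad.clone(); // Arc bump + operations clone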

View File

@ -35,7 +35,7 @@ pub fn contain_newline(s: &str) -> bool {
#[tracing::instrument(level = "trace", skip(revisions), err)] #[tracing::instrument(level = "trace", skip(revisions), err)]
pub fn make_operations_from_revisions<T>(revisions: Vec<Revision>) -> CollaborateResult<DeltaOperations<T>> pub fn make_operations_from_revisions<T>(revisions: Vec<Revision>) -> CollaborateResult<DeltaOperations<T>>
where where
T: OperationAttributes + DeserializeOwned, T: OperationAttributes + DeserializeOwned + serde::Serialize,
{ {
let mut new_operations = DeltaOperations::<T>::new(); let mut new_operations = DeltaOperations::<T>::new();
for revision in revisions { for revision in revisions {
@ -45,10 +45,30 @@ where
} }
let operations = DeltaOperations::<T>::from_bytes(revision.bytes).map_err(|e| { let operations = DeltaOperations::<T>::from_bytes(revision.bytes).map_err(|e| {
let err_msg = format!("Deserialize remote revision failed: {:?}", e); let err_msg = format!("Deserialize revision failed: {:?}", e);
CollaborateError::internal().context(err_msg) CollaborateError::internal().context(err_msg)
})?; })?;
new_operations = new_operations.compose(&operations)?;
match new_operations.compose(&operations) {
Ok(composed_operations) => {
new_operations = composed_operations;
// if composed_operations.content().is_ok() {
// new_operations = composed_operations;
// } else {
// tracing::error!(
// "Compose operation failed: rev_id: {}, object_id: {} {:?}",
// revision.rev_id,
// revision.object_id,
// operations
// );
// return Ok(new_operations);
// }
}
Err(e) => {
tracing::error!("Compose operation failed: {}, {:?}", e, operations);
return Ok(new_operations);
}
}
} }
Ok(new_operations) Ok(new_operations)
} }
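This is the behavioral core of the change in this file: a compose failure no longer aborts the whole deserialization. The error is logged and the function returns the prefix that composed cleanly; every revision after the first failure is dropped. A usage sketch, reusing EmptyAttributes from the grid types above:

    // revisions: Vec<Revision> loaded from disk, possibly containing a bad record
    let operations = make_operations_from_revisions::<EmptyAttributes>(revisions)?;
    // `operations` is the composition of all revisions up to, but not
    // including, the first one that failed to compose.

Returning the partial result instead of an Err gives the caller something usable to rebuild from rather than a hard failure.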

View File

@ -12,4 +12,4 @@ pin-project = "1.0"
futures-core = { version = "0.3" } futures-core = { version = "0.3" }
tokio = { version = "1.0", features = ["time", "rt"] } tokio = { version = "1.0", features = ["time", "rt"] }
rand = "0.8.5" rand = "0.8.5"
async-trait = "0.1.59"

View File

@ -8,7 +8,7 @@ use std::{
task::{Context, Poll}, task::{Context, Poll},
}; };
pub fn to_future<T, O>(f: T) -> Fut<O> pub fn to_fut<T, O>(f: T) -> Fut<O>
where where
T: Future<Output = O> + Send + Sync + 'static, T: Future<Output = O> + Send + Sync + 'static,
{ {
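The rename lines the function name up with its Fut<O> return type. A usage sketch, assuming Fut<O> implements Future<Output = O>:

    let fut = to_fut(async move { 1 + 1 });
    // in an async context:
    // assert_eq!(fut.await, 2);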

View File

@ -2,3 +2,5 @@ pub mod future;
pub mod ref_map; pub mod ref_map;
pub mod retry; pub mod retry;
pub mod util; pub mod util;
pub use async_trait;
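Re-exporting the crate means downstream crates get the macro without declaring their own async-trait dependency; that is what makes the imports seen earlier in this commit possible:

    use lib_infra::async_trait::async_trait;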

View File

@ -1,8 +1,10 @@
use async_trait::async_trait;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
#[async_trait]
pub trait RefCountValue { pub trait RefCountValue {
fn did_remove(&self) {} async fn did_remove(&self) {}
} }
struct RefCountHandler<T> { struct RefCountHandler<T> {
@ -30,7 +32,7 @@ impl<T> std::default::Default for RefCountHashMap<T> {
impl<T> RefCountHashMap<T> impl<T> RefCountHashMap<T>
where where
T: Clone + Send + Sync + RefCountValue, T: Clone + Send + Sync + RefCountValue + 'static,
{ {
pub fn new() -> Self { pub fn new() -> Self {
Self::default() Self::default()
@ -53,7 +55,7 @@ where
} }
} }
pub fn remove(&mut self, key: &str) { pub async fn remove(&mut self, key: &str) {
let mut should_remove = false; let mut should_remove = false;
if let Some(value) = self.0.get_mut(key) { if let Some(value) = self.0.get_mut(key) {
if value.ref_count > 0 { if value.ref_count > 0 {
@ -64,17 +66,20 @@ where
if should_remove { if should_remove {
if let Some(handler) = self.0.remove(key) { if let Some(handler) = self.0.remove(key) {
handler.inner.did_remove(); tokio::spawn(async move {
handler.inner.did_remove().await;
});
} }
} }
} }
} }
#[async_trait]
impl<T> RefCountValue for Arc<T> impl<T> RefCountValue for Arc<T>
where where
T: RefCountValue, T: RefCountValue + Sync + Send,
{ {
fn did_remove(&self) { async fn did_remove(&self) {
(**self).did_remove() (**self).did_remove().await
} }
} }
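Pulling the ref_map changes together: did_remove is now async, so remove cannot invoke it inline; instead the evicted handler is moved onto tokio::spawn, and cleanup runs after remove(key).await returns. A minimal sketch of a value under the new contract; the insert call is an assumption, since only remove appears in this hunk:

    use lib_infra::async_trait::async_trait;
    use lib_infra::ref_map::{RefCountHashMap, RefCountValue};

    #[derive(Clone)]
    struct CachedObject;

    #[async_trait]
    impl RefCountValue for CachedObject {
        async fn did_remove(&self) {
            // async teardown: flush buffers, notify peers, ...
        }
    }

    async fn demo(map: &mut RefCountHashMap<CachedObject>) {
        // hypothetical insert pairing with the remove shown above:
        // map.insert("obj".to_string(), CachedObject);
        map.remove("obj").await; // did_remove fires on a spawned task
    }

Note the fire-and-forget spawn: a caller that needs teardown to complete before proceeding has to coordinate separately, since remove does not await the spawned task.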