Refactor/revision compose (#1410)

This commit is contained in:
Nathan.fooo 2022-11-01 18:59:53 +08:00 committed by GitHub
parent b0d2cdf55a
commit e9ad705ea3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
45 changed files with 525 additions and 183 deletions

View File

@ -1071,9 +1071,6 @@ dependencies = [
"async-stream",
"bytes",
"dashmap",
"diesel",
"diesel_derives",
"flowy-database",
"flowy-error",
"flowy-sync",
"futures-util",

View File

@ -1,6 +1,6 @@
#[rustfmt::skip]
/*
diesel master support on_conflict on sqlite but not 1.4.7 version. Workaround for this
diesel master support on_conflict on sqlite but not 1.4.7 version. Workaround for this
match dsl::workspace_table
.filter(workspace_table::id.eq(table.id.clone()))
@ -177,20 +177,20 @@ macro_rules! impl_rev_state_map {
}
}
impl std::convert::From<$target> for crate::disk::RevisionState {
impl std::convert::From<$target> for RevisionState {
fn from(s: $target) -> Self {
match s {
$target::Sync => crate::disk::RevisionState::Sync,
$target::Ack => crate::disk::RevisionState::Ack,
$target::Sync => RevisionState::Sync,
$target::Ack => RevisionState::Ack,
}
}
}
impl std::convert::From<crate::disk::RevisionState> for $target {
fn from(s: crate::disk::RevisionState) -> Self {
impl std::convert::From<RevisionState> for $target {
fn from(s: RevisionState) -> Self {
match s {
crate::disk::RevisionState::Sync => $target::Sync,
crate::disk::RevisionState::Ack => $target::Ack,
RevisionState::Sync => $target::Sync,
RevisionState::Ack => $target::Ack,
}
}
}

View File

@ -4,6 +4,7 @@ use crate::editor::make_transaction_from_revisions;
use crate::editor::queue::{Command, CommandSender, DocumentQueue};
use crate::{DocumentEditor, DocumentUser};
use bytes::Bytes;
use flowy_database::ConnectionPool;
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::{RevisionCloudService, RevisionManager};
use flowy_sync::entities::ws_data::ServerRevisionWSData;
@ -18,14 +19,14 @@ pub struct AppFlowyDocumentEditor {
#[allow(dead_code)]
doc_id: String,
command_sender: CommandSender,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
}
impl AppFlowyDocumentEditor {
pub async fn new(
doc_id: &str,
user: Arc<dyn DocumentUser>,
mut rev_manager: RevisionManager,
mut rev_manager: RevisionManager<Arc<ConnectionPool>>,
cloud_service: Arc<dyn RevisionCloudService>,
) -> FlowyResult<Arc<Self>> {
let document = rev_manager.load::<DocumentRevisionSerde>(Some(cloud_service)).await?;
@ -70,7 +71,7 @@ impl AppFlowyDocumentEditor {
fn spawn_edit_queue(
user: Arc<dyn DocumentUser>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
document: Document,
) -> CommandSender {
let (sender, receiver) = mpsc::channel(1000);

View File

@ -1,3 +0,0 @@
mod delta_migration;
pub use delta_migration::*;

View File

@ -2,13 +2,11 @@
mod document;
mod document_serde;
mod editor;
mod migration;
mod queue;
pub use document::*;
pub use document_serde::*;
pub use editor::*;
pub use migration::*;
#[inline]
pub fn initial_read_me() -> String {

View File

@ -8,6 +8,7 @@ use flowy_sync::entities::revision::{RevId, Revision};
use futures::stream::StreamExt;
use lib_ot::core::Transaction;
use flowy_database::ConnectionPool;
use std::sync::Arc;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::sync::{oneshot, RwLock};
@ -17,14 +18,14 @@ pub struct DocumentQueue {
user: Arc<dyn DocumentUser>,
document: Arc<RwLock<Document>>,
#[allow(dead_code)]
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
receiver: Option<CommandReceiver>,
}
impl DocumentQueue {
pub fn new(
user: Arc<dyn DocumentUser>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
document: Document,
receiver: CommandReceiver,
) -> Self {

View File

@ -1,13 +1,13 @@
use crate::editor::{initial_document_content, AppFlowyDocumentEditor, DocumentRevisionCompress};
use crate::entities::{DocumentVersionPB, EditParams};
use crate::old_editor::editor::{DeltaDocumentEditor, DeltaDocumentRevisionCompress};
use crate::services::rev_sqlite::{SQLiteDeltaDocumentRevisionPersistence, SQLiteDocumentRevisionPersistence};
use crate::services::DocumentPersistence;
use crate::{errors::FlowyError, DocumentCloudService};
use bytes::Bytes;
use dashmap::DashMap;
use flowy_database::ConnectionPool;
use flowy_error::FlowyResult;
use flowy_revision::disk::{SQLiteDeltaDocumentRevisionPersistence, SQLiteDocumentRevisionPersistence};
use flowy_revision::{
RevisionCloudService, RevisionManager, RevisionPersistence, RevisionWebSocket, SQLiteRevisionSnapshotPersistence,
};
@ -197,7 +197,7 @@ impl DocumentManager {
/// # Arguments
///
/// * `doc_id`: the id of the document
/// * `pool`: sqlite connection pool
/// * `pool`: sqlite connection pool
///
/// returns: Result<Arc<DocumentEditor>, FlowyError>
///
@ -231,7 +231,11 @@ impl DocumentManager {
}
}
fn make_rev_manager(&self, doc_id: &str, pool: Arc<ConnectionPool>) -> Result<RevisionManager, FlowyError> {
fn make_rev_manager(
&self,
doc_id: &str,
pool: Arc<ConnectionPool>,
) -> Result<RevisionManager<Arc<ConnectionPool>>, FlowyError> {
match self.config.version {
DocumentVersionPB::V0 => self.make_delta_document_rev_manager(doc_id, pool),
DocumentVersionPB::V1 => self.make_document_rev_manager(doc_id, pool),
@ -242,7 +246,7 @@ impl DocumentManager {
&self,
doc_id: &str,
pool: Arc<ConnectionPool>,
) -> Result<RevisionManager, FlowyError> {
) -> Result<RevisionManager<Arc<ConnectionPool>>, FlowyError> {
let user_id = self.user.user_id()?;
let disk_cache = SQLiteDocumentRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache);
@ -262,7 +266,7 @@ impl DocumentManager {
&self,
doc_id: &str,
pool: Arc<ConnectionPool>,
) -> Result<RevisionManager, FlowyError> {
) -> Result<RevisionManager<Arc<ConnectionPool>>, FlowyError> {
let user_id = self.user.user_id()?;
let disk_cache = SQLiteDeltaDocumentRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache);

View File

@ -3,6 +3,7 @@
use crate::old_editor::queue::{EditDocumentQueue, EditorCommand, EditorCommandSender};
use crate::{errors::FlowyError, DocumentEditor, DocumentUser};
use bytes::Bytes;
use flowy_database::ConnectionPool;
use flowy_error::{internal_error, FlowyResult};
use flowy_revision::{
RevisionCloudService, RevisionCompress, RevisionManager, RevisionObjectDeserializer, RevisionObjectSerializer,
@ -28,7 +29,7 @@ use tokio::sync::{mpsc, oneshot};
pub struct DeltaDocumentEditor {
pub doc_id: String,
#[allow(dead_code)]
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
#[cfg(feature = "sync")]
ws_manager: Arc<flowy_revision::RevisionWebSocketManager>,
edit_cmd_tx: EditorCommandSender,
@ -39,7 +40,7 @@ impl DeltaDocumentEditor {
pub(crate) async fn new(
doc_id: &str,
user: Arc<dyn DocumentUser>,
mut rev_manager: RevisionManager,
mut rev_manager: RevisionManager<Arc<ConnectionPool>>,
rev_web_socket: Arc<dyn RevisionWebSocket>,
cloud_service: Arc<dyn RevisionCloudService>,
) -> FlowyResult<Arc<Self>> {
@ -210,7 +211,7 @@ impl std::ops::Drop for DeltaDocumentEditor {
// The edit queue will exit after the EditorCommandSender was dropped.
fn spawn_edit_queue(
user: Arc<dyn DocumentUser>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
delta: DeltaTextOperations,
) -> EditorCommandSender {
let (sender, receiver) = mpsc::channel(1000);
@ -238,7 +239,7 @@ impl DeltaDocumentEditor {
Ok(delta)
}
pub fn rev_manager(&self) -> Arc<RevisionManager> {
pub fn rev_manager(&self) -> Arc<RevisionManager<Arc<ConnectionPool>>> {
self.rev_manager.clone()
}
}

View File

@ -1,6 +1,7 @@
use crate::old_editor::web_socket::DeltaDocumentResolveOperations;
use crate::DocumentUser;
use async_stream::stream;
use flowy_database::ConnectionPool;
use flowy_error::FlowyError;
use flowy_revision::{OperationsMD5, RevisionManager, TransformOperations};
use flowy_sync::{
@ -23,14 +24,14 @@ use tokio::sync::{oneshot, RwLock};
pub(crate) struct EditDocumentQueue {
document: Arc<RwLock<ClientDocument>>,
user: Arc<dyn DocumentUser>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
receiver: Option<EditorCommandReceiver>,
}
impl EditDocumentQueue {
pub(crate) fn new(
user: Arc<dyn DocumentUser>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
operations: DeltaTextOperations,
receiver: EditorCommandReceiver,
) -> Self {

View File

@ -1,6 +1,7 @@
use crate::old_editor::queue::{EditorCommand, EditorCommandSender, TextTransformOperations};
use crate::TEXT_BLOCK_SYNC_INTERVAL_IN_MILLIS;
use bytes::Bytes;
use flowy_database::ConnectionPool;
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::*;
use flowy_sync::entities::revision::Revision;
@ -41,14 +42,14 @@ impl DeltaDocumentResolveOperations {
}
}
pub type DocumentConflictController = ConflictController<DeltaDocumentResolveOperations>;
pub type DocumentConflictController = ConflictController<DeltaDocumentResolveOperations, Arc<ConnectionPool>>;
#[allow(dead_code)]
pub(crate) async fn make_document_ws_manager(
doc_id: String,
user_id: String,
edit_cmd_tx: EditorCommandSender,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
rev_web_socket: Arc<dyn RevisionWebSocket>,
) -> Arc<RevisionWebSocketManager> {
let ws_data_provider = Arc::new(WSDataProvider::new(&doc_id, Arc::new(rev_manager.clone())));

View File

@ -1,9 +1,10 @@
use crate::editor::DeltaRevisionMigration;
use crate::services::delta_migration::DeltaRevisionMigration;
use crate::services::rev_sqlite::{DeltaRevisionSql, SQLiteDocumentRevisionPersistence};
use crate::DocumentDatabase;
use bytes::Bytes;
use flowy_database::kv::KV;
use flowy_error::FlowyResult;
use flowy_revision::disk::{DeltaRevisionSql, RevisionDiskCache, RevisionRecord, SQLiteDocumentRevisionPersistence};
use flowy_revision::disk::{RevisionDiskCache, RevisionRecord};
use flowy_sync::entities::revision::{md5, Revision};
use flowy_sync::util::make_operations_from_revisions;
use std::sync::Arc;

View File

@ -170,8 +170,8 @@ impl DeltaRevisionMigration {
#[cfg(test)]
mod tests {
use crate::editor::migration::delta_migration::DeltaRevisionMigration;
use crate::editor::Document;
use crate::services::delta_migration::DeltaRevisionMigration;
use lib_ot::text_delta::DeltaTextOperations;
#[test]

View File

@ -1,3 +1,6 @@
pub mod delta_migration;
pub mod rev_sqlite;
use crate::services::migration::DocumentMigration;
use crate::DocumentDatabase;
use flowy_error::FlowyResult;

View File

@ -1,5 +1,3 @@
use crate::cache::disk::RevisionDiskCache;
use crate::disk::{RevisionChangeset, RevisionRecord};
use bytes::Bytes;
use diesel::{sql_types::Integer, update, SqliteConnection};
use flowy_database::{
@ -9,6 +7,7 @@ use flowy_database::{
ConnectionPool,
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
use flowy_sync::{
entities::revision::{RevType, Revision, RevisionRange},
util::md5,
@ -21,7 +20,7 @@ pub struct SQLiteDeltaDocumentRevisionPersistence {
pub(crate) pool: Arc<ConnectionPool>,
}
impl RevisionDiskCache for SQLiteDeltaDocumentRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDeltaDocumentRevisionPersistence {
type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
@ -30,6 +29,10 @@ impl RevisionDiskCache for SQLiteDeltaDocumentRevisionPersistence {
Ok(())
}
fn get_connection(&self) -> Result<Arc<ConnectionPool>, Self::Error> {
Ok(self.pool.clone())
}
fn read_revision_records(
&self,
object_id: &str,

View File

@ -1,5 +1,3 @@
use crate::cache::disk::RevisionDiskCache;
use crate::disk::{RevisionChangeset, RevisionRecord};
use bytes::Bytes;
use diesel::{sql_types::Integer, update, SqliteConnection};
use flowy_database::{
@ -9,6 +7,7 @@ use flowy_database::{
ConnectionPool,
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
use flowy_sync::{
entities::revision::{Revision, RevisionRange},
util::md5,
@ -20,7 +19,7 @@ pub struct SQLiteDocumentRevisionPersistence {
pub(crate) pool: Arc<ConnectionPool>,
}
impl RevisionDiskCache for SQLiteDocumentRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDocumentRevisionPersistence {
type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
@ -29,6 +28,10 @@ impl RevisionDiskCache for SQLiteDocumentRevisionPersistence {
Ok(())
}
fn get_connection(&self) -> Result<Arc<ConnectionPool>, Self::Error> {
Ok(self.pool.clone())
}
fn read_revision_records(
&self,
object_id: &str,
@ -103,7 +106,7 @@ impl DocumentRevisionSql {
record.revision.object_id,
record.revision.rev_id
);
let rev_state: RevisionState = record.state.into();
let rev_state: DocumentRevisionState = record.state.into();
(
dsl::document_id.eq(record.revision.object_id),
dsl::base_rev_id.eq(record.revision.base_rev_id),
@ -121,7 +124,7 @@ impl DocumentRevisionSql {
}
fn update(changeset: RevisionChangeset, conn: &SqliteConnection) -> Result<(), FlowyError> {
let state: RevisionState = changeset.state.clone().into();
let state: DocumentRevisionState = changeset.state.clone().into();
let filter = dsl::document_rev_table
.filter(dsl::rev_id.eq(changeset.rev_id.as_ref()))
.filter(dsl::document_id.eq(changeset.object_id));
@ -198,22 +201,22 @@ struct DocumentRevisionTable {
base_rev_id: i64,
rev_id: i64,
data: Vec<u8>,
state: RevisionState,
state: DocumentRevisionState,
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, FromSqlRow, AsExpression)]
#[repr(i32)]
#[sql_type = "Integer"]
enum RevisionState {
enum DocumentRevisionState {
Sync = 0,
Ack = 1,
}
impl_sql_integer_expression!(RevisionState);
impl_rev_state_map!(RevisionState);
impl_sql_integer_expression!(DocumentRevisionState);
impl_rev_state_map!(DocumentRevisionState);
impl std::default::Default for RevisionState {
impl std::default::Default for DocumentRevisionState {
fn default() -> Self {
RevisionState::Sync
DocumentRevisionState::Sync
}
}

View File

@ -0,0 +1,5 @@
mod document_rev_sqlite_v0;
mod document_rev_sqlite_v1;
pub use document_rev_sqlite_v0::*;
pub use document_rev_sqlite_v1::*;

View File

@ -12,16 +12,15 @@ use crate::{
},
};
use bytes::Bytes;
use flowy_document::editor::initial_read_me;
use flowy_error::FlowyError;
use flowy_folder_data_model::user_default;
use flowy_revision::disk::SQLiteDeltaDocumentRevisionPersistence;
use flowy_revision::{RevisionManager, RevisionPersistence, RevisionWebSocket, SQLiteRevisionSnapshotPersistence};
use flowy_document::editor::initial_read_me;
use flowy_sync::{client_folder::FolderPad, entities::ws_data::ServerRevisionWSData};
use lazy_static::lazy_static;
use lib_infra::future::FutureResult;
use crate::services::persistence::rev_sqlite::SQLiteFolderRevisionPersistence;
use std::{collections::HashMap, convert::TryInto, fmt::Formatter, sync::Arc};
use tokio::sync::RwLock as TokioRwLock;
lazy_static! {
@ -165,7 +164,7 @@ impl FolderManager {
let pool = self.persistence.db_pool()?;
let object_id = folder_id.as_ref();
let disk_cache = SQLiteDeltaDocumentRevisionPersistence::new(user_id, pool.clone());
let disk_cache = SQLiteFolderRevisionPersistence::new(user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(user_id, object_id, disk_cache);
let rev_compactor = FolderRevisionCompress();
// let history_persistence = SQLiteRevisionHistoryPersistence::new(object_id, pool.clone());

View File

@ -12,6 +12,7 @@ use flowy_sync::{
};
use lib_infra::future::FutureResult;
use flowy_database::ConnectionPool;
use lib_ot::core::EmptyAttributes;
use parking_lot::RwLock;
use std::sync::Arc;
@ -21,7 +22,7 @@ pub struct FolderEditor {
#[allow(dead_code)]
pub(crate) folder_id: FolderId,
pub(crate) folder: Arc<RwLock<FolderPad>>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
#[cfg(feature = "sync")]
ws_manager: Arc<flowy_revision::RevisionWebSocketManager>,
}
@ -32,7 +33,7 @@ impl FolderEditor {
user_id: &str,
folder_id: &FolderId,
token: &str,
mut rev_manager: RevisionManager,
mut rev_manager: RevisionManager<Arc<ConnectionPool>>,
web_socket: Arc<dyn RevisionWebSocket>,
) -> FlowyResult<Self> {
let cloud = Arc::new(FolderRevisionCloudService {
@ -139,7 +140,7 @@ impl RevisionCloudService for FolderRevisionCloudService {
#[cfg(feature = "flowy_unit_test")]
impl FolderEditor {
pub fn rev_manager(&self) -> Arc<RevisionManager> {
pub fn rev_manager(&self) -> Arc<RevisionManager<Arc<ConnectionPool>>> {
self.rev_manager.clone()
}
}

View File

@ -7,13 +7,13 @@ use bytes::Bytes;
use flowy_database::kv::KV;
use flowy_error::{FlowyError, FlowyResult};
use flowy_folder_data_model::revision::{AppRevision, FolderRevision, ViewRevision, WorkspaceRevision};
use flowy_revision::disk::SQLiteDeltaDocumentRevisionPersistence;
use flowy_revision::reset::{RevisionResettable, RevisionStructReset};
use flowy_sync::client_folder::make_folder_rev_json_str;
use flowy_sync::entities::revision::Revision;
use flowy_sync::server_folder::FolderOperationsBuilder;
use flowy_sync::{client_folder::FolderPad, entities::revision::md5};
use crate::services::persistence::rev_sqlite::SQLiteFolderRevisionPersistence;
use std::sync::Arc;
const V1_MIGRATION: &str = "FOLDER_V1_MIGRATION";
@ -113,7 +113,7 @@ impl FolderMigration {
};
let pool = self.database.db_pool()?;
let disk_cache = SQLiteDeltaDocumentRevisionPersistence::new(&self.user_id, pool);
let disk_cache = SQLiteFolderRevisionPersistence::new(&self.user_id, pool);
let reset = RevisionStructReset::new(&self.user_id, object, Arc::new(disk_cache));
reset.run().await
}
@ -144,4 +144,12 @@ impl RevisionResettable for FolderRevisionResettable {
let json = make_folder_rev_json_str(&folder)?;
Ok(json)
}
/// Reads the previously stored migration record for this target from the
/// key-value store; `None` when no record has been set yet.
fn read_record(&self) -> Option<String> {
    KV::get_str(self.target_id())
}
/// Persists the migration record for this target in the key-value store,
/// overwriting any existing value under the same key.
fn set_record(&self, record: String) {
    KV::set_str(self.target_id(), record);
}
}

View File

@ -1,4 +1,5 @@
mod migration;
pub mod rev_sqlite;
pub mod version_1;
mod version_2;
@ -10,10 +11,10 @@ use crate::{
use flowy_database::ConnectionPool;
use flowy_error::{FlowyError, FlowyResult};
use flowy_folder_data_model::revision::{AppRevision, TrashRevision, ViewRevision, WorkspaceRevision};
use flowy_revision::disk::{RevisionRecord, RevisionState};
use flowy_revision::mk_text_block_revision_disk_cache;
use flowy_revision::disk::{RevisionDiskCache, RevisionRecord, RevisionState};
use flowy_sync::{client_folder::FolderPad, entities::revision::Revision};
use crate::services::persistence::rev_sqlite::SQLiteFolderRevisionPersistence;
use flowy_sync::server_folder::FolderOperationsBuilder;
use std::sync::Arc;
use tokio::sync::RwLock;
@ -121,3 +122,10 @@ impl FolderPersistence {
disk_cache.delete_and_insert_records(folder_id.as_ref(), None, vec![record])
}
}
/// Builds the SQLite-backed folder revision disk cache for `user_id`.
///
/// The concrete `SQLiteFolderRevisionPersistence` is returned behind the
/// `RevisionDiskCache` trait object so callers depend only on the interface.
pub fn mk_text_block_revision_disk_cache(
    user_id: &str,
    pool: Arc<ConnectionPool>,
) -> Arc<dyn RevisionDiskCache<Arc<ConnectionPool>, Error = FlowyError>> {
    let disk_cache = SQLiteFolderRevisionPersistence::new(user_id, pool);
    Arc::new(disk_cache)
}

View File

@ -0,0 +1,284 @@
use bytes::Bytes;
use diesel::{sql_types::Integer, update, SqliteConnection};
use flowy_database::{
impl_sql_integer_expression, insert_or_ignore_into,
prelude::*,
schema::{rev_table, rev_table::dsl},
ConnectionPool,
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
use flowy_sync::{
entities::revision::{RevType, Revision, RevisionRange},
util::md5,
};
use std::sync::Arc;
/// SQLite-backed persistence for folder revisions, stored in `rev_table`.
pub struct SQLiteFolderRevisionPersistence {
    // Owner of the revisions; used to tag records read back from disk.
    user_id: String,
    // Shared sqlite connection pool; each operation checks out a connection.
    pub(crate) pool: Arc<ConnectionPool>,
}
/// Disk-cache implementation that delegates all SQL work to
/// `FolderRevisionSql` and maps pool errors into `FlowyError`.
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteFolderRevisionPersistence {
    type Error = FlowyError;

    // Batch-inserts the records; see FolderRevisionSql::create for details.
    fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
        let conn = self.pool.get().map_err(internal_error)?;
        let _ = FolderRevisionSql::create(revision_records, &*conn)?;
        Ok(())
    }

    // Exposes the underlying pool so callers can run their own queries.
    fn get_connection(&self) -> Result<Arc<ConnectionPool>, Self::Error> {
        Ok(self.pool.clone())
    }

    // Reads records for `object_id`; `rev_ids = None` means "all revisions".
    fn read_revision_records(
        &self,
        object_id: &str,
        rev_ids: Option<Vec<i64>>,
    ) -> Result<Vec<RevisionRecord>, Self::Error> {
        let conn = self.pool.get().map_err(internal_error)?;
        let records = FolderRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?;
        Ok(records)
    }

    // Reads the records whose rev_id falls inside `range` (inclusive).
    fn read_revision_records_with_range(
        &self,
        object_id: &str,
        range: &RevisionRange,
    ) -> Result<Vec<RevisionRecord>, Self::Error> {
        let conn = &*self.pool.get().map_err(internal_error)?;
        let revisions = FolderRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?;
        Ok(revisions)
    }

    // Applies all state changesets inside one immediate transaction so a
    // failure rolls back every update, not just the failing one.
    fn update_revision_record(&self, changesets: Vec<RevisionChangeset>) -> FlowyResult<()> {
        let conn = &*self.pool.get().map_err(internal_error)?;
        let _ = conn.immediate_transaction::<_, FlowyError, _>(|| {
            for changeset in changesets {
                let _ = FolderRevisionSql::update(changeset, conn)?;
            }
            Ok(())
        })?;
        Ok(())
    }

    // Deletes records for `object_id`; `rev_ids = None` deletes all of them.
    fn delete_revision_records(&self, object_id: &str, rev_ids: Option<Vec<i64>>) -> Result<(), Self::Error> {
        let conn = &*self.pool.get().map_err(internal_error)?;
        let _ = FolderRevisionSql::delete(object_id, rev_ids, conn)?;
        Ok(())
    }

    // Delete + insert as a single transaction so the object's revision set
    // is swapped atomically (used by the revision reset/migration path).
    fn delete_and_insert_records(
        &self,
        object_id: &str,
        deleted_rev_ids: Option<Vec<i64>>,
        inserted_records: Vec<RevisionRecord>,
    ) -> Result<(), Self::Error> {
        let conn = self.pool.get().map_err(internal_error)?;
        conn.immediate_transaction::<_, FlowyError, _>(|| {
            let _ = FolderRevisionSql::delete(object_id, deleted_rev_ids, &*conn)?;
            let _ = FolderRevisionSql::create(inserted_records, &*conn)?;
            Ok(())
        })
    }
}
impl SQLiteFolderRevisionPersistence {
    /// Creates a persistence instance for `user_id` backed by `pool`.
    pub fn new(user_id: &str, pool: Arc<ConnectionPool>) -> Self {
        let user_id = user_id.to_owned();
        Self { user_id, pool }
    }
}
/// Stateless namespace for the raw diesel queries against `rev_table`.
struct FolderRevisionSql {}

impl FolderRevisionSql {
    /// Batch-inserts revision records, silently ignoring duplicate rows.
    fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
        // Batch insert: https://diesel.rs/guides/all-about-inserts.html
        let records = revision_records
            .into_iter()
            .map(|record| {
                tracing::trace!(
                    "[TextRevisionSql] create revision: {}:{:?}",
                    record.revision.object_id,
                    record.revision.rev_id
                );
                // Convert the shared RevisionState into the table's enum column type.
                let rev_state: TextRevisionState = record.state.into();
                (
                    dsl::doc_id.eq(record.revision.object_id),
                    dsl::base_rev_id.eq(record.revision.base_rev_id),
                    dsl::rev_id.eq(record.revision.rev_id),
                    dsl::data.eq(record.revision.bytes),
                    dsl::state.eq(rev_state),
                    // `ty` column is deprecated; new rows are always written as Local.
                    dsl::ty.eq(RevTableType::Local),
                )
            })
            .collect::<Vec<_>>();
        let _ = insert_or_ignore_into(dsl::rev_table).values(&records).execute(conn)?;
        Ok(())
    }

    /// Updates the `state` column of the row matching the changeset's
    /// (doc_id, rev_id) pair.
    fn update(changeset: RevisionChangeset, conn: &SqliteConnection) -> Result<(), FlowyError> {
        let state: TextRevisionState = changeset.state.clone().into();
        let filter = dsl::rev_table
            .filter(dsl::rev_id.eq(changeset.rev_id.as_ref()))
            .filter(dsl::doc_id.eq(changeset.object_id));
        let _ = update(filter).set(dsl::state.eq(state)).execute(conn)?;
        tracing::debug!(
            "[TextRevisionSql] update revision:{} state:to {:?}",
            changeset.rev_id,
            changeset.state
        );
        Ok(())
    }

    /// Reads records for `object_id`, ordered by ascending rev_id.
    /// When `rev_ids` is Some, only those revision ids are returned.
    fn read(
        user_id: &str,
        object_id: &str,
        rev_ids: Option<Vec<i64>>,
        conn: &SqliteConnection,
    ) -> Result<Vec<RevisionRecord>, FlowyError> {
        // `into_boxed` lets the rev_ids filter be added conditionally.
        let mut sql = dsl::rev_table.filter(dsl::doc_id.eq(object_id)).into_boxed();
        if let Some(rev_ids) = rev_ids {
            sql = sql.filter(dsl::rev_id.eq_any(rev_ids));
        }
        let rows = sql.order(dsl::rev_id.asc()).load::<RevisionTable>(conn)?;
        let records = rows
            .into_iter()
            .map(|row| mk_revision_record_from_table(user_id, row))
            .collect::<Vec<_>>();
        Ok(records)
    }

    /// Reads the records whose rev_id lies within `[range.start, range.end]`,
    /// ordered by ascending rev_id.
    fn read_with_range(
        user_id: &str,
        object_id: &str,
        range: RevisionRange,
        conn: &SqliteConnection,
    ) -> Result<Vec<RevisionRecord>, FlowyError> {
        let rev_tables = dsl::rev_table
            .filter(dsl::rev_id.ge(range.start))
            .filter(dsl::rev_id.le(range.end))
            .filter(dsl::doc_id.eq(object_id))
            .order(dsl::rev_id.asc())
            .load::<RevisionTable>(conn)?;
        let revisions = rev_tables
            .into_iter()
            .map(|table| mk_revision_record_from_table(user_id, table))
            .collect::<Vec<_>>();
        Ok(revisions)
    }

    /// Deletes records for `object_id`; when `rev_ids` is None every
    /// revision of the object is removed.
    fn delete(object_id: &str, rev_ids: Option<Vec<i64>>, conn: &SqliteConnection) -> Result<(), FlowyError> {
        let mut sql = diesel::delete(dsl::rev_table).into_boxed();
        sql = sql.filter(dsl::doc_id.eq(object_id));
        if let Some(rev_ids) = rev_ids {
            tracing::trace!("[TextRevisionSql] Delete revision: {}:{:?}", object_id, rev_ids);
            sql = sql.filter(dsl::rev_id.eq_any(rev_ids));
        }
        let affected_row = sql.execute(conn)?;
        tracing::trace!("[TextRevisionSql] Delete {} rows", affected_row);
        Ok(())
    }
}
/// Diesel row mapping for the `rev_table` schema.
#[derive(PartialEq, Clone, Debug, Queryable, Identifiable, Insertable, Associations)]
#[table_name = "rev_table"]
struct RevisionTable {
    // Auto-increment primary key of the row itself.
    id: i32,
    // Id of the object (document/folder) the revision belongs to.
    doc_id: String,
    base_rev_id: i64,
    rev_id: i64,
    // Serialized revision payload.
    data: Vec<u8>,
    state: TextRevisionState,
    ty: RevTableType, // Deprecated
}
/// Sync state of a revision row, stored as an integer column.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, FromSqlRow, AsExpression)]
#[repr(i32)]
#[sql_type = "Integer"]
enum TextRevisionState {
    // Revision still needs to be synced to the server.
    Sync = 0,
    // Revision has been acknowledged by the server.
    Ack = 1,
}
// Generates the diesel to/from-Integer conversions for the enum.
impl_sql_integer_expression!(TextRevisionState);
// Generates From conversions to/from the shared crate::disk RevisionState.
impl_rev_state_map!(TextRevisionState);

impl std::default::Default for TextRevisionState {
    fn default() -> Self {
        TextRevisionState::Sync
    }
}
fn mk_revision_record_from_table(user_id: &str, table: RevisionTable) -> RevisionRecord {
let md5 = md5(&table.data);
let revision = Revision::new(
&table.doc_id,
table.base_rev_id,
table.rev_id,
Bytes::from(table.data),
user_id,
md5,
);
RevisionRecord {
revision,
state: table.state.into(),
write_to_disk: false,
}
}
/// Deprecated origin marker for a revision row (local edit vs remote),
/// kept only for schema compatibility; stored as an integer column.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, FromSqlRow, AsExpression)]
#[repr(i32)]
#[sql_type = "Integer"]
pub enum RevTableType {
    Local = 0,
    Remote = 1,
}
// Generates the diesel to/from-Integer conversions for the enum.
impl_sql_integer_expression!(RevTableType);

impl std::default::Default for RevTableType {
    fn default() -> Self {
        RevTableType::Local
    }
}
impl std::convert::From<i32> for RevTableType {
fn from(value: i32) -> Self {
match value {
0 => RevTableType::Local,
1 => RevTableType::Remote,
o => {
tracing::error!("Unsupported rev type {}, fallback to RevTableType::Local", o);
RevTableType::Local
}
}
}
}
impl std::convert::From<RevType> for RevTableType {
    /// Translates the shared (deprecated) `RevType` into the table enum.
    fn from(ty: RevType) -> Self {
        match ty {
            RevType::DeprecatedRemote => RevTableType::Remote,
            RevType::DeprecatedLocal => RevTableType::Local,
        }
    }
}
impl std::convert::From<RevTableType> for RevType {
    /// Inverse mapping of `From<RevType>`: table enum back to the shared type.
    fn from(ty: RevTableType) -> Self {
        match ty {
            RevTableType::Remote => RevType::DeprecatedRemote,
            RevTableType::Local => RevType::DeprecatedLocal,
        }
    }
}

View File

@ -0,0 +1,2 @@
mod folder_rev_sqlite;
pub use folder_rev_sqlite::*;

View File

@ -1,5 +1,6 @@
use crate::services::FOLDER_SYNC_INTERVAL_IN_MILLIS;
use bytes::Bytes;
use flowy_database::ConnectionPool;
use flowy_error::{FlowyError, FlowyResult};
use flowy_revision::*;
use flowy_sync::entities::revision::Revision;
@ -37,13 +38,13 @@ impl FolderResolveOperations {
}
}
pub type FolderConflictController = ConflictController<FolderResolveOperations>;
pub type FolderConflictController = ConflictController<FolderResolveOperations, Arc<ConnectionPool>>;
#[allow(dead_code)]
pub(crate) async fn make_folder_ws_manager(
user_id: &str,
folder_id: &str,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
web_socket: Arc<dyn RevisionWebSocket>,
folder_pad: Arc<RwLock<FolderPad>>,
) -> Arc<RevisionWebSocketManager> {

View File

@ -5,6 +5,7 @@ use crate::services::grid_view_manager::make_grid_view_rev_manager;
use crate::services::persistence::block_index::BlockIndexCache;
use crate::services::persistence::kv::GridKVPersistence;
use crate::services::persistence::migration::GridMigration;
use crate::services::persistence::rev_sqlite::{SQLiteGridBlockRevisionPersistence, SQLiteGridRevisionPersistence};
use crate::services::persistence::GridDatabase;
use crate::services::tasks::GridTaskScheduler;
use bytes::Bytes;
@ -12,7 +13,6 @@ use dashmap::DashMap;
use flowy_database::ConnectionPool;
use flowy_error::{FlowyError, FlowyResult};
use flowy_grid_data_model::revision::{BuildGridContext, GridRevision, GridViewRevision};
use flowy_revision::disk::{SQLiteGridBlockRevisionPersistence, SQLiteGridRevisionPersistence};
use flowy_revision::{RevisionManager, RevisionPersistence, RevisionWebSocket, SQLiteRevisionSnapshotPersistence};
use flowy_sync::client_grid::{make_grid_block_operations, make_grid_operations, make_grid_view_operations};
use flowy_sync::entities::revision::{RepeatedRevision, Revision};
@ -154,7 +154,11 @@ impl GridManager {
Ok(grid_editor)
}
pub fn make_grid_rev_manager(&self, grid_id: &str, pool: Arc<ConnectionPool>) -> FlowyResult<RevisionManager> {
pub fn make_grid_rev_manager(
&self,
grid_id: &str,
pool: Arc<ConnectionPool>,
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
let user_id = self.grid_user.user_id()?;
let disk_cache = SQLiteGridRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, grid_id, disk_cache);
@ -164,7 +168,11 @@ impl GridManager {
Ok(rev_manager)
}
fn make_grid_block_rev_manager(&self, block_id: &str, pool: Arc<ConnectionPool>) -> FlowyResult<RevisionManager> {
fn make_grid_block_rev_manager(
&self,
block_id: &str,
pool: Arc<ConnectionPool>,
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
let user_id = self.grid_user.user_id()?;
let disk_cache = SQLiteGridBlockRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, block_id, disk_cache);

View File

@ -10,6 +10,7 @@ use flowy_sync::entities::revision::Revision;
use flowy_sync::util::make_operations_from_revisions;
use lib_infra::future::FutureResult;
use flowy_database::ConnectionPool;
use lib_ot::core::EmptyAttributes;
use std::borrow::Cow;
use std::sync::Arc;
@ -19,7 +20,7 @@ pub struct GridBlockRevisionEditor {
user_id: String,
pub block_id: String,
pad: Arc<RwLock<GridBlockRevisionPad>>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
}
impl GridBlockRevisionEditor {
@ -27,7 +28,7 @@ impl GridBlockRevisionEditor {
user_id: &str,
token: &str,
block_id: &str,
mut rev_manager: RevisionManager,
mut rev_manager: RevisionManager<Arc<ConnectionPool>>,
) -> FlowyResult<Self> {
let cloud = Arc::new(GridBlockRevisionCloudService {
token: token.to_owned(),

View File

@ -3,13 +3,13 @@ use crate::entities::{CellChangesetPB, GridBlockChangesetPB, InsertedRowPB, RowP
use crate::manager::GridUser;
use crate::services::block_editor::{GridBlockRevisionCompress, GridBlockRevisionEditor};
use crate::services::persistence::block_index::BlockIndexCache;
use crate::services::persistence::rev_sqlite::SQLiteGridBlockRevisionPersistence;
use crate::services::row::{block_from_row_orders, make_row_from_row_rev, GridBlockSnapshot};
use dashmap::DashMap;
use flowy_error::FlowyResult;
use flowy_grid_data_model::revision::{
GridBlockMetaRevision, GridBlockMetaRevisionChangeset, RowChangeset, RowRevision,
};
use flowy_revision::disk::SQLiteGridBlockRevisionPersistence;
use flowy_revision::{RevisionManager, RevisionPersistence, SQLiteRevisionSnapshotPersistence};
use std::borrow::Cow;
use std::collections::HashMap;

View File

@ -25,6 +25,7 @@ use flowy_sync::errors::{CollaborateError, CollaborateResult};
use flowy_sync::util::make_operations_from_revisions;
use lib_infra::future::{wrap_future, FutureResult};
use flowy_database::ConnectionPool;
use lib_ot::core::EmptyAttributes;
use std::collections::HashMap;
use std::sync::Arc;
@ -35,7 +36,7 @@ pub struct GridRevisionEditor {
user: Arc<dyn GridUser>,
grid_pad: Arc<RwLock<GridRevisionPad>>,
view_manager: Arc<GridViewManager>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
block_manager: Arc<GridBlockManager>,
#[allow(dead_code)]
@ -52,7 +53,7 @@ impl GridRevisionEditor {
pub async fn new(
grid_id: &str,
user: Arc<dyn GridUser>,
mut rev_manager: RevisionManager,
mut rev_manager: RevisionManager<Arc<ConnectionPool>>,
persistence: Arc<BlockIndexCache>,
task_scheduler: GridTaskSchedulerRwLock,
) -> FlowyResult<Arc<Self>> {
@ -819,7 +820,7 @@ impl GridRevisionEditor {
#[cfg(feature = "flowy_unit_test")]
impl GridRevisionEditor {
pub fn rev_manager(&self) -> Arc<RevisionManager> {
pub fn rev_manager(&self) -> Arc<RevisionManager<Arc<ConnectionPool>>> {
self.rev_manager.clone()
}
}

View File

@ -12,6 +12,7 @@ use crate::services::group::{
GroupConfigurationWriter, GroupController, MoveGroupRowContext,
};
use bytes::Bytes;
use flowy_database::ConnectionPool;
use flowy_error::{FlowyError, FlowyResult};
use flowy_grid_data_model::revision::{
gen_grid_filter_id, FieldRevision, FieldTypeRevision, FilterConfigurationRevision, GroupConfigurationRevision,
@ -34,7 +35,7 @@ pub struct GridViewRevisionEditor {
user_id: String,
view_id: String,
pad: Arc<RwLock<GridViewRevisionPad>>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
field_delegate: Arc<dyn GridViewFieldDelegate>,
row_delegate: Arc<dyn GridViewRowDelegate>,
group_controller: Arc<RwLock<Box<dyn GroupController>>>,
@ -49,7 +50,7 @@ impl GridViewRevisionEditor {
field_delegate: Arc<dyn GridViewFieldDelegate>,
row_delegate: Arc<dyn GridViewRowDelegate>,
scheduler: Arc<dyn GridServiceTaskScheduler>,
mut rev_manager: RevisionManager,
mut rev_manager: RevisionManager<Arc<ConnectionPool>>,
) -> FlowyResult<Self> {
let cloud = Arc::new(GridViewRevisionCloudService {
token: token.to_owned(),
@ -401,7 +402,7 @@ async fn new_group_controller(
user_id: String,
view_id: String,
view_rev_pad: Arc<RwLock<GridViewRevisionPad>>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
field_delegate: Arc<dyn GridViewFieldDelegate>,
row_delegate: Arc<dyn GridViewRowDelegate>,
) -> FlowyResult<Box<dyn GroupController>> {
@ -438,7 +439,7 @@ async fn new_group_controller_with_field_rev(
user_id: String,
view_id: String,
view_rev_pad: Arc<RwLock<GridViewRevisionPad>>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
field_rev: Arc<FieldRevision>,
row_delegate: Arc<dyn GridViewRowDelegate>,
) -> FlowyResult<Box<dyn GroupController>> {
@ -454,7 +455,7 @@ async fn new_group_controller_with_field_rev(
async fn apply_change(
user_id: &str,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
change: GridViewRevisionChangeset,
) -> FlowyResult<()> {
let GridViewRevisionChangeset { operations: delta, md5 } = change;
@ -520,7 +521,7 @@ impl GroupConfigurationReader for GroupConfigurationReaderImpl {
struct GroupConfigurationWriterImpl {
user_id: String,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
view_pad: Arc<RwLock<GridViewRevisionPad>>,
}

View File

@ -6,10 +6,11 @@ use crate::manager::GridUser;
use crate::services::grid_editor_task::GridServiceTaskScheduler;
use crate::services::grid_view_editor::{GridViewRevisionCompress, GridViewRevisionEditor};
use crate::services::persistence::rev_sqlite::SQLiteGridViewRevisionPersistence;
use dashmap::DashMap;
use flowy_database::ConnectionPool;
use flowy_error::FlowyResult;
use flowy_grid_data_model::revision::{FieldRevision, RowChangeset, RowRevision};
use flowy_revision::disk::SQLiteGridViewRevisionPersistence;
use flowy_revision::{RevisionManager, RevisionPersistence, SQLiteRevisionSnapshotPersistence};
use lib_infra::future::AFFuture;
use std::sync::Arc;
@ -244,7 +245,10 @@ async fn make_view_editor(
.await
}
pub async fn make_grid_view_rev_manager(user: &Arc<dyn GridUser>, view_id: &str) -> FlowyResult<RevisionManager> {
pub async fn make_grid_view_rev_manager(
user: &Arc<dyn GridUser>,
view_id: &str,
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
let user_id = user.user_id()?;
let pool = user.db_pool()?;

View File

@ -4,12 +4,12 @@ use bytes::Bytes;
use flowy_database::kv::KV;
use flowy_error::FlowyResult;
use flowy_grid_data_model::revision::GridRevision;
use flowy_revision::disk::SQLiteGridRevisionPersistence;
use flowy_revision::reset::{RevisionResettable, RevisionStructReset};
use flowy_sync::client_grid::{make_grid_rev_json_str, GridOperationsBuilder, GridRevisionPad};
use flowy_sync::entities::revision::Revision;
use flowy_sync::util::md5;
use crate::services::persistence::rev_sqlite::SQLiteGridRevisionPersistence;
use std::sync::Arc;
const V1_MIGRATION: &str = "GRID_V1_MIGRATION";
@ -73,4 +73,12 @@ impl RevisionResettable for GridRevisionResettable {
let json = make_grid_rev_json_str(&grid_rev)?;
Ok(json)
}
fn read_record(&self) -> Option<String> {
KV::get_str(self.target_id())
}
fn set_record(&self, record: String) {
KV::set_str(self.target_id(), record);
}
}

View File

@ -5,6 +5,7 @@ use std::sync::Arc;
pub mod block_index;
pub mod kv;
pub mod migration;
pub mod rev_sqlite;
pub trait GridDatabase: Send + Sync {
fn db_pool(&self) -> Result<Arc<ConnectionPool>, FlowyError>;

View File

@ -1,5 +1,3 @@
use crate::cache::disk::RevisionDiskCache;
use crate::disk::{RevisionChangeset, RevisionRecord};
use bytes::Bytes;
use diesel::{sql_types::Integer, update, SqliteConnection};
use flowy_database::{
@ -9,6 +7,7 @@ use flowy_database::{
ConnectionPool,
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
use flowy_sync::{
entities::revision::{Revision, RevisionRange},
util::md5,
@ -20,7 +19,7 @@ pub struct SQLiteGridBlockRevisionPersistence {
pub(crate) pool: Arc<ConnectionPool>,
}
impl RevisionDiskCache for SQLiteGridBlockRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridBlockRevisionPersistence {
type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
@ -29,6 +28,10 @@ impl RevisionDiskCache for SQLiteGridBlockRevisionPersistence {
Ok(())
}
fn get_connection(&self) -> Result<Arc<ConnectionPool>, Self::Error> {
Ok(self.pool.clone())
}
fn read_revision_records(
&self,
object_id: &str,

View File

@ -1,5 +1,3 @@
use crate::cache::disk::RevisionDiskCache;
use crate::disk::{RevisionChangeset, RevisionRecord};
use bytes::Bytes;
use diesel::{sql_types::Integer, update, SqliteConnection};
use flowy_database::{
@ -9,6 +7,7 @@ use flowy_database::{
ConnectionPool,
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
use flowy_sync::{
entities::revision::{Revision, RevisionRange},
util::md5,
@ -20,7 +19,7 @@ pub struct SQLiteGridRevisionPersistence {
pub(crate) pool: Arc<ConnectionPool>,
}
impl RevisionDiskCache for SQLiteGridRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridRevisionPersistence {
type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
@ -29,6 +28,10 @@ impl RevisionDiskCache for SQLiteGridRevisionPersistence {
Ok(())
}
fn get_connection(&self) -> Result<Arc<ConnectionPool>, Self::Error> {
Ok(self.pool.clone())
}
fn read_revision_records(
&self,
object_id: &str,

View File

@ -1,4 +1,3 @@
use crate::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord};
use bytes::Bytes;
use diesel::{sql_types::Integer, update, SqliteConnection};
use flowy_database::{
@ -8,6 +7,7 @@ use flowy_database::{
ConnectionPool,
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
use flowy_sync::{
entities::revision::{Revision, RevisionRange},
util::md5,
@ -28,7 +28,7 @@ impl SQLiteGridViewRevisionPersistence {
}
}
impl RevisionDiskCache for SQLiteGridViewRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridViewRevisionPersistence {
type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
@ -37,6 +37,10 @@ impl RevisionDiskCache for SQLiteGridViewRevisionPersistence {
Ok(())
}
fn get_connection(&self) -> Result<Arc<ConnectionPool>, Self::Error> {
Ok(self.pool.clone())
}
fn read_revision_records(
&self,
object_id: &str,

View File

@ -0,0 +1,7 @@
mod grid_block_impl;
mod grid_impl;
mod grid_view_impl;
pub use grid_block_impl::*;
pub use grid_impl::*;
pub use grid_view_impl::*;

View File

@ -9,10 +9,7 @@ edition = "2018"
flowy-sync = { path = "../../../shared-lib/flowy-sync" }
lib-ws = { path = "../../../shared-lib/lib-ws" }
lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-database = { path = "../flowy-database" }
flowy-error = { path = "../flowy-error", features = ["collaboration", "ot", "http_server", "serde", "db"] }
diesel = {version = "1.4.8", features = ["sqlite"]}
diesel_derives = {version = "1.4.1", features = ["sqlite"]}
flowy-error = { path = "../flowy-error" }
tracing = { version = "0.1", features = ["log"] }
tokio = {version = "1", features = ["sync"]}
bytes = { version = "1.1" }

View File

@ -1,24 +1,14 @@
mod delta_document_impl;
mod document_impl;
mod grid_block_impl;
mod grid_impl;
mod grid_view_impl;
pub use delta_document_impl::*;
pub use document_impl::*;
pub use grid_block_impl::*;
pub use grid_impl::*;
pub use grid_view_impl::*;
use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::entities::revision::{RevId, Revision, RevisionRange};
use std::fmt::Debug;
use std::sync::Arc;
pub trait RevisionDiskCache: Sync + Send {
pub trait RevisionDiskCache<Connection>: Sync + Send {
type Error: Debug;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error>;
fn get_connection(&self) -> Result<Connection, Self::Error>;
// Read all the records if the rev_ids is None
fn read_revision_records(
&self,
@ -48,9 +38,9 @@ pub trait RevisionDiskCache: Sync + Send {
) -> Result<(), Self::Error>;
}
impl<T> RevisionDiskCache for Arc<T>
impl<T, Connection> RevisionDiskCache<Connection> for Arc<T>
where
T: RevisionDiskCache<Error = FlowyError>,
T: RevisionDiskCache<Connection, Error = FlowyError>,
{
type Error = FlowyError;
@ -58,6 +48,10 @@ where
(**self).create_revision_records(revision_records)
}
fn get_connection(&self) -> Result<Connection, Self::Error> {
(**self).get_connection()
}
fn read_revision_records(
&self,
object_id: &str,
@ -114,9 +108,9 @@ impl RevisionRecord {
}
pub struct RevisionChangeset {
pub(crate) object_id: String,
pub(crate) rev_id: RevId,
pub(crate) state: RevisionState,
pub object_id: String,
pub rev_id: RevId,
pub state: RevisionState,
}
/// Sync: revision is not synced to the server

View File

@ -1,7 +1,6 @@
use crate::disk::{RevisionDiskCache, RevisionRecord};
use crate::{RevisionLoader, RevisionPersistence};
use bytes::Bytes;
use flowy_database::kv::KV;
use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::entities::revision::Revision;
use serde::{Deserialize, Serialize};
@ -16,19 +15,24 @@ pub trait RevisionResettable {
// String in json format
fn default_target_rev_str(&self) -> FlowyResult<String>;
fn read_record(&self) -> Option<String>;
fn set_record(&self, record: String);
}
pub struct RevisionStructReset<T> {
pub struct RevisionStructReset<T, C> {
user_id: String,
target: T,
disk_cache: Arc<dyn RevisionDiskCache<Error = FlowyError>>,
disk_cache: Arc<dyn RevisionDiskCache<C, Error = FlowyError>>,
}
impl<T> RevisionStructReset<T>
impl<T, C> RevisionStructReset<T, C>
where
T: RevisionResettable,
C: 'static,
{
pub fn new(user_id: &str, object: T, disk_cache: Arc<dyn RevisionDiskCache<Error = FlowyError>>) -> Self {
pub fn new(user_id: &str, object: T, disk_cache: Arc<dyn RevisionDiskCache<C, Error = FlowyError>>) -> Self {
Self {
user_id: user_id.to_owned(),
target: object,
@ -37,18 +41,18 @@ where
}
pub async fn run(&self) -> FlowyResult<()> {
match KV::get_str(self.target.target_id()) {
match self.target.read_record() {
None => {
let _ = self.reset_object().await?;
let _ = self.save_migrate_record()?;
}
Some(s) => {
let mut record = MigrationGridRecord::from_str(&s)?;
let mut record = MigrationObjectRecord::from_str(&s)?;
let rev_str = self.target.default_target_rev_str()?;
if record.len < rev_str.len() {
let _ = self.reset_object().await?;
record.len = rev_str.len();
KV::set_str(self.target.target_id(), record.to_string());
self.target.set_record(record.to_string());
}
}
}
@ -84,30 +88,30 @@ where
fn save_migrate_record(&self) -> FlowyResult<()> {
let rev_str = self.target.default_target_rev_str()?;
let record = MigrationGridRecord {
let record = MigrationObjectRecord {
object_id: self.target.target_id().to_owned(),
len: rev_str.len(),
};
KV::set_str(self.target.target_id(), record.to_string());
self.target.set_record(record.to_string());
Ok(())
}
}
#[derive(Serialize, Deserialize)]
struct MigrationGridRecord {
struct MigrationObjectRecord {
object_id: String,
len: usize,
}
impl FromStr for MigrationGridRecord {
impl FromStr for MigrationObjectRecord {
type Err = serde_json::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
serde_json::from_str::<MigrationGridRecord>(s)
serde_json::from_str::<MigrationObjectRecord>(s)
}
}
impl ToString for MigrationGridRecord {
impl ToString for MigrationObjectRecord {
fn to_string(&self) -> String {
serde_json::to_string(self).unwrap_or_else(|_| "".to_string())
}

View File

@ -6,8 +6,8 @@ use flowy_sync::entities::{
ws_data::ServerRevisionWSDataType,
};
use lib_infra::future::BoxResultFuture;
use std::{convert::TryFrom, sync::Arc};
pub type OperationsMD5 = String;
pub struct TransformOperations<Operations> {
@ -41,25 +41,26 @@ pub trait ConflictRevisionSink: Send + Sync + 'static {
fn ack(&self, rev_id: String, ty: ServerRevisionWSDataType) -> BoxResultFuture<(), FlowyError>;
}
pub struct ConflictController<Operations>
pub struct ConflictController<Operations, Connection>
where
Operations: Send + Sync,
{
user_id: String,
resolver: Arc<dyn ConflictResolver<Operations> + Send + Sync>,
rev_sink: Arc<dyn ConflictRevisionSink>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Connection>>,
}
impl<Operations> ConflictController<Operations>
impl<Operations, Connection> ConflictController<Operations, Connection>
where
Operations: Clone + Send + Sync,
Connection: 'static,
{
pub fn new(
user_id: &str,
resolver: Arc<dyn ConflictResolver<Operations> + Send + Sync>,
rev_sink: Arc<dyn ConflictRevisionSink>,
rev_manager: Arc<RevisionManager>,
rev_manager: Arc<RevisionManager<Connection>>,
) -> Self {
let user_id = user_id.to_owned();
Self {
@ -71,9 +72,10 @@ where
}
}
impl<Operations> ConflictController<Operations>
impl<Operations, Connection> ConflictController<Operations, Connection>
where
Operations: OperationsSerializer + OperationsDeserializer<Operations> + Clone + Send + Sync,
Connection: Send + Sync + 'static,
{
pub async fn receive_bytes(&self, bytes: Bytes) -> FlowyResult<()> {
let repeated_revision = RepeatedRevision::try_from(bytes)?;
@ -151,15 +153,16 @@ where
}
}
fn make_client_and_server_revision<Operations>(
fn make_client_and_server_revision<Operations, Connection>(
user_id: &str,
rev_manager: &Arc<RevisionManager>,
rev_manager: &Arc<RevisionManager<Connection>>,
client_operations: Operations,
server_operations: Option<Operations>,
md5: String,
) -> (Revision, Option<Revision>)
where
Operations: OperationsSerializer,
Connection: 'static,
{
let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair();
let bytes = client_operations.serialize_operations();

View File

@ -13,6 +13,3 @@ pub use rev_manager::*;
pub use rev_persistence::*;
pub use snapshot::*;
pub use ws_manager::*;
#[macro_use]
extern crate flowy_database;

View File

@ -69,11 +69,11 @@ pub trait RevisionCompress: Send + Sync {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes>;
}
pub struct RevisionManager {
pub struct RevisionManager<Connection> {
pub object_id: String,
user_id: String,
rev_id_counter: RevIdCounter,
rev_persistence: Arc<RevisionPersistence>,
rev_persistence: Arc<RevisionPersistence<Connection>>,
#[allow(dead_code)]
rev_snapshot: Arc<RevisionSnapshotManager>,
rev_compress: Arc<dyn RevisionCompress>,
@ -81,11 +81,11 @@ pub struct RevisionManager {
rev_ack_notifier: tokio::sync::broadcast::Sender<i64>,
}
impl RevisionManager {
impl<Connection: 'static> RevisionManager<Connection> {
pub fn new<SP, C>(
user_id: &str,
object_id: &str,
rev_persistence: RevisionPersistence,
rev_persistence: RevisionPersistence<Connection>,
rev_compress: C,
snapshot_persistence: SP,
) -> Self
@ -209,7 +209,7 @@ impl RevisionManager {
}
}
impl WSDataProviderDataSource for Arc<RevisionManager> {
impl<Connection: 'static> WSDataProviderDataSource for Arc<RevisionManager<Connection>> {
fn next_revision(&self) -> FutureResult<Option<Revision>, FlowyError> {
let rev_manager = self.clone();
FutureResult::new(async move { rev_manager.next_sync_revision().await })
@ -226,8 +226,8 @@ impl WSDataProviderDataSource for Arc<RevisionManager> {
}
#[cfg(feature = "flowy_unit_test")]
impl RevisionManager {
pub async fn revision_cache(&self) -> Arc<RevisionPersistence> {
impl<Connection> RevisionManager<Connection> {
pub async fn revision_cache(&self) -> Arc<RevisionPersistence<Connection>> {
self.rev_persistence.clone()
}
pub fn ack_notify(&self) -> tokio::sync::broadcast::Receiver<i64> {
@ -235,14 +235,14 @@ impl RevisionManager {
}
}
pub struct RevisionLoader {
pub struct RevisionLoader<Connection> {
pub object_id: String,
pub user_id: String,
pub cloud: Option<Arc<dyn RevisionCloudService>>,
pub rev_persistence: Arc<RevisionPersistence>,
pub rev_persistence: Arc<RevisionPersistence<Connection>>,
}
impl RevisionLoader {
impl<Connection: 'static> RevisionLoader<Connection> {
pub async fn load(&self) -> Result<(Vec<Revision>, i64), FlowyError> {
let records = self.rev_persistence.batch_get(&self.object_id)?;
let revisions: Vec<Revision>;

View File

@ -1,11 +1,11 @@
use crate::cache::{
disk::{RevisionChangeset, RevisionDiskCache, SQLiteDeltaDocumentRevisionPersistence},
disk::{RevisionChangeset, RevisionDiskCache},
memory::RevisionMemoryCacheDelegate,
};
use crate::disk::{RevisionRecord, RevisionState, SQLiteGridBlockRevisionPersistence};
use crate::disk::{RevisionRecord, RevisionState};
use crate::memory::RevisionMemoryCache;
use crate::RevisionCompress;
use flowy_database::ConnectionPool;
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_sync::entities::revision::{Revision, RevisionRange};
use std::collections::VecDeque;
@ -15,28 +15,28 @@ use tokio::task::spawn_blocking;
pub const REVISION_WRITE_INTERVAL_IN_MILLIS: u64 = 600;
pub struct RevisionPersistence {
pub struct RevisionPersistence<Connection> {
user_id: String,
object_id: String,
disk_cache: Arc<dyn RevisionDiskCache<Error = FlowyError>>,
disk_cache: Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>,
memory_cache: Arc<RevisionMemoryCache>,
sync_seq: RwLock<RevisionSyncSequence>,
}
impl RevisionPersistence {
pub fn new<C>(user_id: &str, object_id: &str, disk_cache: C) -> RevisionPersistence
impl<Connection: 'static> RevisionPersistence<Connection> {
pub fn new<C>(user_id: &str, object_id: &str, disk_cache: C) -> RevisionPersistence<Connection>
where
C: 'static + RevisionDiskCache<Error = FlowyError>,
C: 'static + RevisionDiskCache<Connection, Error = FlowyError>,
{
let disk_cache = Arc::new(disk_cache) as Arc<dyn RevisionDiskCache<Error = FlowyError>>;
let disk_cache = Arc::new(disk_cache) as Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>;
Self::from_disk_cache(user_id, object_id, disk_cache)
}
pub fn from_disk_cache(
user_id: &str,
object_id: &str,
disk_cache: Arc<dyn RevisionDiskCache<Error = FlowyError>>,
) -> RevisionPersistence {
disk_cache: Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>,
) -> RevisionPersistence<Connection> {
let object_id = object_id.to_owned();
let user_id = user_id.to_owned();
let sync_seq = RwLock::new(RevisionSyncSequence::new());
@ -224,21 +224,7 @@ impl RevisionPersistence {
}
}
pub fn mk_text_block_revision_disk_cache(
user_id: &str,
pool: Arc<ConnectionPool>,
) -> Arc<dyn RevisionDiskCache<Error = FlowyError>> {
Arc::new(SQLiteDeltaDocumentRevisionPersistence::new(user_id, pool))
}
pub fn mk_grid_block_revision_disk_cache(
user_id: &str,
pool: Arc<ConnectionPool>,
) -> Arc<dyn RevisionDiskCache<Error = FlowyError>> {
Arc::new(SQLiteGridBlockRevisionPersistence::new(user_id, pool))
}
impl RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<Error = FlowyError>> {
impl<C> RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<C, Error = FlowyError>> {
fn checkpoint_tick(&self, mut records: Vec<RevisionRecord>) -> FlowyResult<()> {
records.retain(|record| record.write_to_disk);
if !records.is_empty() {

View File

@ -2,17 +2,15 @@
#![allow(dead_code)]
#![allow(unused_variables)]
use crate::{RevisionSnapshotDiskCache, RevisionSnapshotInfo};
use flowy_database::ConnectionPool;
use flowy_error::FlowyResult;
use std::sync::Arc;
pub struct SQLiteRevisionSnapshotPersistence {
pub struct SQLiteRevisionSnapshotPersistence<Connection> {
object_id: String,
pool: Arc<ConnectionPool>,
pool: Connection,
}
impl SQLiteRevisionSnapshotPersistence {
pub fn new(object_id: &str, pool: Arc<ConnectionPool>) -> Self {
impl<Connection: 'static> SQLiteRevisionSnapshotPersistence<Connection> {
pub fn new(object_id: &str, pool: Connection) -> Self {
Self {
object_id: object_id.to_string(),
pool,
@ -20,7 +18,10 @@ impl SQLiteRevisionSnapshotPersistence {
}
}
impl RevisionSnapshotDiskCache for SQLiteRevisionSnapshotPersistence {
impl<Connection> RevisionSnapshotDiskCache for SQLiteRevisionSnapshotPersistence<Connection>
where
Connection: Send + Sync + 'static,
{
fn write_snapshot(&self, object_id: &str, rev_id: i64, data: Vec<u8>) -> FlowyResult<()> {
todo!()
}

View File

@ -17,7 +17,7 @@ RUN git clone https://aur.archlinux.org/yay.git \
&& cd yay \
&& makepkg -sri --needed --noconfirm
RUN yay -S --noconfirm curl base-devel sqlite openssl clang cmake ninja pkg-config gtk3 unzip
RUN yay -S --noconfirm curl base-devel rev_sqlite openssl clang cmake ninja pkg-config gtk3 unzip
RUN xdg-user-dirs-update
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
RUN source $HOME/.cargo/env && rustup toolchain install stable && rustup default stable

View File

@ -19,7 +19,7 @@ impl CrateProtoInfo {
pub fn create_crate_mod_file(&self) {
// mod model;
// pub use model::*;
let mod_file_path = format!("{}/mod.rs", self.inner.protobuf_crate_name());
let mod_file_path = format!("{}/rev_sqlite", self.inner.protobuf_crate_name());
let mut content = "#![cfg_attr(rustfmt, rustfmt::skip)]\n".to_owned();
content.push_str("// Auto-generated, do not edit\n");
content.push_str("mod model;\npub use model::*;");
@ -84,7 +84,7 @@ impl ProtobufCrate {
}
pub fn proto_model_mod_file(&self) -> String {
format!("{}/mod.rs", self.proto_struct_output_dir())
format!("{}/rev_sqlite", self.proto_struct_output_dir())
}
}