chore: add revision tests and remove the deprecated user_id parameter from Revision

nathan 2022-11-02 17:15:27 +08:00
parent f5dc9ed975
commit 2c71e4f885
30 changed files with 294 additions and 150 deletions

View File

@ -1077,6 +1077,7 @@ dependencies = [
"lib-infra",
"lib-ws",
"nanoid",
"parking_lot 0.11.2",
"serde",
"serde_json",
"strum",

View File

@ -79,8 +79,7 @@ impl DocumentQueue {
async fn save_local_operations(&self, transaction: Transaction, md5: String) -> Result<RevId, FlowyError> {
let bytes = Bytes::from(transaction.to_bytes()?);
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let user_id = self.user.user_id()?;
let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, &user_id, md5);
let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
let _ = self.rev_manager.add_local_revision(&revision).await?;
Ok(rev_id.into())
}

View File

@ -291,7 +291,6 @@ impl RevisionCloudService for DocumentRevisionCloudService {
let params: DocumentIdPB = object_id.to_string().into();
let server = self.server.clone();
let token = self.token.clone();
let user_id = user_id.to_string();
FutureResult::new(async move {
match server.fetch_document(&token, params).await? {
@ -299,14 +298,7 @@ impl RevisionCloudService for DocumentRevisionCloudService {
Some(payload) => {
let bytes = Bytes::from(payload.content.clone());
let doc_md5 = md5(&bytes);
let revision = Revision::new(
&payload.doc_id,
payload.base_rev_id,
payload.rev_id,
bytes,
&user_id,
doc_md5,
);
let revision = Revision::new(&payload.doc_id, payload.base_rev_id, payload.rev_id, bytes, doc_md5);
Ok(vec![revision])
}
}

View File

@ -178,8 +178,7 @@ impl EditDocumentQueue {
async fn save_local_operations(&self, operations: DeltaTextOperations, md5: String) -> Result<RevId, FlowyError> {
let bytes = operations.json_bytes();
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let user_id = self.user.user_id()?;
let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, &user_id, md5);
let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
let _ = self.rev_manager.add_local_revision(&revision).await?;
Ok(rev_id.into())
}

View File

@ -43,7 +43,7 @@ impl DocumentMigration {
Ok(transaction) => {
let bytes = Bytes::from(transaction.to_bytes()?);
let md5 = format!("{:x}", md5::compute(&bytes));
let revision = Revision::new(&document_id, 0, 1, bytes, &self.user_id, md5);
let revision = Revision::new(&document_id, 0, 1, bytes, md5);
let record = SyncRecord::new(revision);
match disk_cache.create_revision_records(vec![record]) {
Ok(_) => {}

View File

@ -9,7 +9,7 @@ use flowy_database::{
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
use flowy_sync::{
entities::revision::{RevType, Revision, RevisionRange},
entities::revision::{Revision, RevisionRange},
util::md5,
};
use std::collections::HashMap;
@ -251,7 +251,6 @@ fn mk_revision_record_from_table(user_id: &str, table: RevisionTable) -> SyncRec
table.base_rev_id,
table.rev_id,
Bytes::from(table.data),
user_id,
md5,
);
SyncRecord {
@ -288,21 +287,3 @@ impl std::convert::From<i32> for RevTableType {
}
}
}
impl std::convert::From<RevType> for RevTableType {
fn from(ty: RevType) -> Self {
match ty {
RevType::DeprecatedLocal => RevTableType::Local,
RevType::DeprecatedRemote => RevTableType::Remote,
}
}
}
impl std::convert::From<RevTableType> for RevType {
fn from(ty: RevTableType) -> Self {
match ty {
RevTableType::Local => RevType::DeprecatedLocal,
RevTableType::Remote => RevType::DeprecatedRemote,
}
}
}

View File

@ -227,7 +227,6 @@ fn mk_revision_record_from_table(user_id: &str, table: DocumentRevisionTable) ->
table.base_rev_id,
table.rev_id,
Bytes::from(table.data),
user_id,
md5,
);
SyncRecord {

View File

@ -83,14 +83,7 @@ impl FolderEditor {
let FolderChangeset { operations: delta, md5 } = change;
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let delta_data = delta.json_bytes();
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
delta_data,
&self.user_id,
md5,
);
let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
let _ = futures::executor::block_on(async { self.rev_manager.add_local_revision(&revision).await })?;
Ok(())
}

View File

@ -111,7 +111,7 @@ impl FolderPersistence {
let pool = self.database.db_pool()?;
let json = folder.to_json()?;
let delta_data = FolderOperationsBuilder::new().insert(&json).build().json_bytes();
let revision = Revision::initial_revision(user_id, folder_id.as_ref(), delta_data);
let revision = Revision::initial_revision(folder_id.as_ref(), delta_data);
let record = SyncRecord {
revision,
state: RevisionState::Sync,

View File

@ -9,7 +9,7 @@ use flowy_database::{
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
use flowy_sync::{
entities::revision::{RevType, Revision, RevisionRange},
entities::revision::{Revision, RevisionRange},
util::md5,
};
@ -227,7 +227,6 @@ fn mk_revision_record_from_table(user_id: &str, table: RevisionTable) -> SyncRec
table.base_rev_id,
table.rev_id,
Bytes::from(table.data),
user_id,
md5,
);
SyncRecord {
@ -264,21 +263,3 @@ impl std::convert::From<i32> for RevTableType {
}
}
}
impl std::convert::From<RevType> for RevTableType {
fn from(ty: RevType) -> Self {
match ty {
RevType::DeprecatedLocal => RevTableType::Local,
RevType::DeprecatedRemote => RevTableType::Remote,
}
}
}
impl std::convert::From<RevTableType> for RevType {
fn from(ty: RevTableType) -> Self {
match ty {
RevTableType::Local => RevType::DeprecatedLocal,
RevTableType::Remote => RevType::DeprecatedRemote,
}
}
}

View File

@ -208,7 +208,7 @@ pub async fn make_grid_view_data(
// Create grid's block
let grid_block_delta = make_grid_block_operations(block_meta_data);
let block_delta_data = grid_block_delta.json_bytes();
let revision = Revision::initial_revision(user_id, block_id, block_delta_data);
let revision = Revision::initial_revision(block_id, block_delta_data);
let _ = grid_manager.create_grid_block(&block_id, vec![revision]).await?;
}
@ -219,7 +219,7 @@ pub async fn make_grid_view_data(
// Create grid
let grid_rev_delta = make_grid_operations(&grid_rev);
let grid_rev_delta_bytes = grid_rev_delta.json_bytes();
let revision = Revision::initial_revision(user_id, &grid_id, grid_rev_delta_bytes.clone());
let revision = Revision::initial_revision(&grid_id, grid_rev_delta_bytes.clone());
let _ = grid_manager.create_grid(&grid_id, vec![revision]).await?;
// Create grid view
@ -230,7 +230,7 @@ pub async fn make_grid_view_data(
};
let grid_view_delta = make_grid_view_operations(&grid_view);
let grid_view_delta_bytes = grid_view_delta.json_bytes();
let revision = Revision::initial_revision(user_id, view_id, grid_view_delta_bytes);
let revision = Revision::initial_revision(view_id, grid_view_delta_bytes);
let _ = grid_manager.create_grid_view(view_id, vec![revision]).await?;
Ok(grid_rev_delta_bytes)

View File

@ -167,17 +167,9 @@ impl GridBlockRevisionEditor {
async fn apply_change(&self, change: GridBlockRevisionChangeset) -> FlowyResult<()> {
let GridBlockRevisionChangeset { operations: delta, md5 } = change;
let user_id = self.user_id.clone();
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let delta_data = delta.json_bytes();
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
delta_data,
&user_id,
md5,
);
let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
let _ = self.rev_manager.add_local_revision(&revision).await?;
Ok(())
}

View File

@ -757,17 +757,9 @@ impl GridRevisionEditor {
async fn apply_change(&self, change: GridRevisionChangeset) -> FlowyResult<()> {
let GridRevisionChangeset { operations: delta, md5 } = change;
let user_id = self.user.user_id()?;
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let delta_data = delta.json_bytes();
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
delta_data,
&user_id,
md5,
);
let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
let _ = self.rev_manager.add_local_revision(&revision).await?;
Ok(())
}

View File

@ -461,7 +461,7 @@ async fn apply_change(
let GridViewRevisionChangeset { operations: delta, md5 } = change;
let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair();
let delta_data = delta.json_bytes();
let revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, delta_data, user_id, md5);
let revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
let _ = rev_manager.add_local_revision(&revision).await?;
Ok(())
}

View File

@ -226,7 +226,6 @@ fn mk_revision_record_from_table(user_id: &str, table: GridBlockRevisionTable) -
table.base_rev_id,
table.rev_id,
Bytes::from(table.data),
user_id,
md5,
);
SyncRecord {

View File

@ -224,7 +224,6 @@ fn mk_revision_record_from_table(user_id: &str, table: GridRevisionTable) -> Syn
table.base_rev_id,
table.rev_id,
Bytes::from(table.data),
user_id,
md5,
);
SyncRecord {

View File

@ -226,7 +226,6 @@ fn mk_revision_record_from_table(user_id: &str, table: GridViewRevisionTable) ->
table.base_rev_id,
table.rev_id,
Bytes::from(table.data),
user_id,
md5,
);
SyncRecord {

View File

@ -23,6 +23,9 @@ serde_json = {version = "1.0"}
[dev-dependencies]
nanoid = "0.4.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0" }
parking_lot = "0.11"
[features]
flowy_unit_test = []

View File

@ -75,7 +75,7 @@ where
.await?;
let bytes = self.target.reset_data(revisions)?;
let revision = Revision::initial_revision(&self.user_id, self.target.target_id(), bytes);
let revision = Revision::initial_revision(self.target.target_id(), bytes);
let record = SyncRecord::new(revision);
tracing::trace!("Reset {} revision record object", self.target.target_id());

View File

@ -163,13 +163,13 @@ where
{
let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair();
let bytes = client_operations.serialize_operations();
let client_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, user_id, md5.clone());
let client_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, md5.clone());
match server_operations {
None => (client_revision, None),
Some(operations) => {
let bytes = operations.serialize_operations();
let server_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, user_id, md5);
let server_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
(client_revision, Some(server_revision))
}
}

View File

@ -63,12 +63,24 @@ pub trait RevisionCompress: Send + Sync {
let (base_rev_id, rev_id) = first_revision.pair_rev_id();
let md5 = last_revision.md5.clone();
let bytes = self.combine_revisions(revisions)?;
Ok(Revision::new(object_id, base_rev_id, rev_id, bytes, user_id, md5))
Ok(Revision::new(object_id, base_rev_id, rev_id, bytes, md5))
}
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes>;
}
pub struct RevisionConfiguration {
merge_when_excess_number_of_version: i64,
}
impl std::default::Default for RevisionConfiguration {
fn default() -> Self {
Self {
merge_when_excess_number_of_version: 100,
}
}
}
pub struct RevisionManager<Connection> {
pub object_id: String,
user_id: String,
@ -79,6 +91,7 @@ pub struct RevisionManager<Connection> {
rev_compress: Arc<dyn RevisionCompress>,
#[cfg(feature = "flowy_unit_test")]
rev_ack_notifier: tokio::sync::broadcast::Sender<i64>,
// configuration: RevisionConfiguration,
}
impl<Connection: 'static> RevisionManager<Connection> {
@ -190,6 +203,10 @@ impl<Connection: 'static> RevisionManager<Connection> {
self.rev_id_counter.value()
}
pub async fn next_sync_rev_id(&self) -> Option<i64> {
self.rev_persistence.next_sync_rev_id().await
}
pub fn next_rev_id_pair(&self) -> (i64, i64) {
let cur = self.rev_id_counter.value();
let next = self.rev_id_counter.next_id();

View File

@ -128,6 +128,10 @@ where
}
}
pub(crate) async fn next_sync_rev_id(&self) -> Option<i64> {
self.sync_seq.read().await.next_rev_id()
}
/// The cache gets reset while it conflicts with the remote revisions.
#[tracing::instrument(level = "trace", skip(self, revisions), err)]
pub(crate) async fn reset(&self, revisions: Vec<Revision>) -> FlowyResult<()> {

View File

@ -0,0 +1,123 @@
use crate::revision_test::script::{RevisionScript::*, RevisionTest};
use flowy_revision::REVISION_WRITE_INTERVAL_IN_MILLIS;
#[tokio::test]
async fn revision_sync_test() {
let test = RevisionTest::new().await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "123".to_string(),
base_rev_id,
rev_id,
})
.await;
test.run_script(AssertNextSyncRevisionId { rev_id: Some(rev_id) }).await;
test.run_script(AckRevision { rev_id }).await;
test.run_script(AssertNextSyncRevisionId { rev_id: None }).await;
}
#[tokio::test]
async fn revision_sync_multiple_revisions() {
let test = RevisionTest::new().await;
let (base_rev_id, rev_id_1) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "123".to_string(),
base_rev_id,
rev_id: rev_id_1,
})
.await;
let (base_rev_id, rev_id_2) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "456".to_string(),
base_rev_id,
rev_id: rev_id_2,
})
.await;
test.run_scripts(vec![
AssertNextSyncRevisionId { rev_id: Some(rev_id_1) },
AckRevision { rev_id: rev_id_1 },
AssertNextSyncRevisionId { rev_id: Some(rev_id_2) },
AckRevision { rev_id: rev_id_2 },
AssertNextSyncRevisionId { rev_id: None },
])
.await;
}
#[tokio::test]
async fn revision_compress_two_revisions_test() {
let test = RevisionTest::new().await;
let (base_rev_id, rev_id_1) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "123".to_string(),
base_rev_id,
rev_id: rev_id_1,
})
.await;
// rev_id_2 will be merged with rev_id_3
let (base_rev_id, rev_id_2) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "456".to_string(),
base_rev_id,
rev_id: rev_id_2,
})
.await;
let (base_rev_id, rev_id_3) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "789".to_string(),
base_rev_id,
rev_id: rev_id_3,
})
.await;
test.run_scripts(vec![
Wait {
milliseconds: REVISION_WRITE_INTERVAL_IN_MILLIS,
},
AssertNextSyncRevisionId { rev_id: Some(rev_id_1) },
AckRevision { rev_id: rev_id_1 },
AssertNextSyncRevisionId { rev_id: Some(rev_id_2) },
AssertNextSyncRevisionContent {
expected: "456789".to_string(),
},
])
.await;
}
#[tokio::test]
async fn revision_compress_multiple_revisions_test() {
let test = RevisionTest::new().await;
let mut expected = "".to_owned();
for i in 0..100 {
let content = format!("{}", i);
if i != 0 {
expected.push_str(&content);
}
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content,
base_rev_id,
rev_id,
})
.await;
}
test.run_scripts(vec![
Wait {
milliseconds: REVISION_WRITE_INTERVAL_IN_MILLIS,
},
AssertNextSyncRevisionId { rev_id: Some(1) },
AckRevision { rev_id: 1 },
AssertNextSyncRevisionId { rev_id: Some(2) },
AssertNextSyncRevisionContent { expected },
])
.await;
}

View File

@ -1,2 +1,2 @@
mod revision_order_test;
mod local_revision_test;
mod script;

View File

@ -1,8 +0,0 @@
use crate::revision_test::script::{RevisionScript::*, RevisionTest};
#[tokio::test]
async fn test() {
let test = RevisionTest::new().await;
let scripts = vec![];
test.run_scripts(scripts).await;
}

View File

@ -5,13 +5,33 @@ use flowy_revision::{
RevisionCompress, RevisionManager, RevisionPersistence, RevisionSnapshotDiskCache, RevisionSnapshotInfo,
};
use flowy_sync::entities::revision::{Revision, RevisionRange};
use flowy_sync::util::md5;
use nanoid::nanoid;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::Duration;
use tokio::time::interval;
pub enum RevisionScript {
AddLocalRevision(Revision),
AckRevision { rev_id: i64 },
AssertNextSyncRevisionId { rev_id: i64 },
AddLocalRevision {
content: String,
base_rev_id: i64,
rev_id: i64,
},
AckRevision {
rev_id: i64,
},
AssertNextSyncRevisionId {
rev_id: Option<i64>,
},
AssertNextSyncRevisionContent {
expected: String,
},
Wait {
milliseconds: u64,
},
AssertNextSyncRevision(Option<Revision>),
}
@ -36,9 +56,28 @@ impl RevisionTest {
self.run_script(script).await;
}
}
pub fn next_rev_id_pair(&self) -> (i64, i64) {
self.rev_manager.next_rev_id_pair()
}
pub async fn run_script(&self, script: RevisionScript) {
match script {
RevisionScript::AddLocalRevision(revision) => {
RevisionScript::AddLocalRevision {
content,
base_rev_id,
rev_id,
} => {
let object = RevisionObjectMock::new(&content);
let bytes = object.to_bytes();
let md5 = md5(&bytes);
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
Bytes::from(bytes),
md5,
);
self.rev_manager.add_local_revision(&revision).await.unwrap();
}
RevisionScript::AckRevision { rev_id } => {
@ -46,8 +85,19 @@ impl RevisionTest {
self.rev_manager.ack_revision(rev_id).await.unwrap()
}
RevisionScript::AssertNextSyncRevisionId { rev_id } => {
assert_eq!(self.rev_manager.next_sync_rev_id().await, rev_id)
}
RevisionScript::AssertNextSyncRevisionContent { expected } => {
//
assert_eq!(self.rev_manager.rev_id(), rev_id)
let rev_id = self.rev_manager.next_sync_rev_id().await.unwrap();
let revision = self.rev_manager.get_revision(rev_id).await.unwrap();
let object = RevisionObjectMock::from_bytes(&revision.bytes);
assert_eq!(object.content, expected);
}
RevisionScript::Wait { milliseconds } => {
// let mut interval = interval(Duration::from_millis(milliseconds));
// interval.tick().await;
tokio::time::sleep(Duration::from_millis(milliseconds)).await;
}
RevisionScript::AssertNextSyncRevision(expected) => {
let next_revision = self.rev_manager.next_sync_revision().await.unwrap();
@ -57,11 +107,15 @@ impl RevisionTest {
}
}
pub struct RevisionDiskCacheMock {}
pub struct RevisionDiskCacheMock {
records: RwLock<Vec<SyncRecord>>,
}
impl RevisionDiskCacheMock {
pub fn new() -> Self {
Self {}
Self {
records: RwLock::new(vec![]),
}
}
}
@ -69,7 +123,8 @@ impl RevisionDiskCache<RevisionConnectionMock> for RevisionDiskCacheMock {
type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
todo!()
self.records.write().extend(revision_records);
Ok(())
}
fn get_connection(&self) -> Result<RevisionConnectionMock, Self::Error> {
@ -93,11 +148,36 @@ impl RevisionDiskCache<RevisionConnectionMock> for RevisionDiskCacheMock {
}
fn update_revision_record(&self, changesets: Vec<RevisionChangeset>) -> FlowyResult<()> {
todo!()
for changeset in changesets {
if let Some(record) = self
.records
.write()
.iter_mut()
.find(|record| record.revision.rev_id == *changeset.rev_id.as_ref())
{
record.state = changeset.state;
}
}
Ok(())
}
fn delete_revision_records(&self, object_id: &str, rev_ids: Option<Vec<i64>>) -> Result<(), Self::Error> {
todo!()
match rev_ids {
None => {}
Some(rev_ids) => {
for rev_id in rev_ids {
if let Some(index) = self
.records
.read()
.iter()
.position(|record| record.revision.rev_id == rev_id)
{
self.records.write().remove(index);
}
}
}
}
Ok(())
}
fn delete_and_insert_records(
@ -128,12 +208,34 @@ pub struct RevisionCompressMock {}
impl RevisionCompress for RevisionCompressMock {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
todo!()
let mut object = RevisionObjectMock::new("");
for revision in revisions {
let other = RevisionObjectMock::from_bytes(&revision.bytes);
object.compose(other);
}
Ok(Bytes::from(object.to_bytes()))
}
}
pub struct RevisionMock {}
#[derive(Serialize, Deserialize)]
pub struct RevisionObjectMock {
content: String,
}
// impl std::convert::From<RevisionMock> for Revision {
// fn from(_: RevisionMock) -> Self {}
// }
impl RevisionObjectMock {
pub fn new(s: &str) -> Self {
Self { content: s.to_owned() }
}
pub fn compose(&mut self, other: RevisionObjectMock) {
self.content.push_str(other.content.as_str());
}
pub fn to_bytes(&self) -> Vec<u8> {
serde_json::to_vec(self).unwrap()
}
pub fn from_bytes(bytes: &[u8]) -> Self {
serde_json::from_slice(bytes).unwrap()
}
}
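As a quick illustration of how `RevisionObjectMock::compose` concatenates content, and hence why the compaction tests above expect `"456789"`, a small test sketch that assumes it lives next to the mock:

```rust
#[test]
fn revision_object_mock_compose_concatenates() {
    // Composing "456" with "789" serializes identically to a fresh "456789" object.
    let mut merged = RevisionObjectMock::new("456");
    merged.compose(RevisionObjectMock::new("789"));
    assert_eq!(merged.to_bytes(), RevisionObjectMock::new("456789").to_bytes());
}
```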

View File

@ -151,7 +151,7 @@ impl ViewDataProcessor for DocumentViewDataProcessor {
) -> FutureResult<(), FlowyError> {
// Only accept Document type
debug_assert_eq!(layout, ViewLayoutTypePB::Document);
let revision = Revision::initial_revision(user_id, view_id, view_data);
let revision = Revision::initial_revision(view_id, view_data);
let view_id = view_id.to_string();
let manager = self.0.clone();
@ -194,7 +194,7 @@ impl ViewDataProcessor for DocumentViewDataProcessor {
let document_content = self.0.initial_document_content();
FutureResult::new(async move {
let delta_data = Bytes::from(document_content);
let revision = Revision::initial_revision(&user_id, &view_id, delta_data.clone());
let revision = Revision::initial_revision(&view_id, delta_data.clone());
let _ = manager.create_document(view_id, vec![revision]).await?;
Ok(delta_data)
})
@ -225,7 +225,7 @@ impl ViewDataProcessor for GridViewDataProcessor {
_layout: ViewLayoutTypePB,
delta_data: Bytes,
) -> FutureResult<(), FlowyError> {
let revision = Revision::initial_revision(user_id, view_id, delta_data);
let revision = Revision::initial_revision(view_id, delta_data);
let view_id = view_id.to_string();
let grid_manager = self.0.clone();
FutureResult::new(async move {

View File

@ -259,7 +259,7 @@ pub fn make_grid_block_operations(block_rev: &GridBlockRevision) -> GridBlockOpe
pub fn make_grid_block_revisions(user_id: &str, grid_block_meta_data: &GridBlockRevision) -> RepeatedRevision {
let operations = make_grid_block_operations(grid_block_meta_data);
let bytes = operations.json_bytes();
let revision = Revision::initial_revision(user_id, &grid_block_meta_data.block_id, bytes);
let revision = Revision::initial_revision(&grid_block_meta_data.block_id, bytes);
revision.into()
}

View File

@ -412,7 +412,7 @@ pub fn make_grid_operations(grid_rev: &GridRevision) -> GridOperations {
pub fn make_grid_revisions(user_id: &str, grid_rev: &GridRevision) -> RepeatedRevision {
let operations = make_grid_operations(grid_rev);
let bytes = operations.json_bytes();
let revision = Revision::initial_revision(user_id, &grid_rev.grid_id, bytes);
let revision = Revision::initial_revision(&grid_rev.grid_id, bytes);
revision.into()
}

View File

@ -1,6 +1,6 @@
use crate::util::md5;
use bytes::Bytes;
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
use flowy_derive::ProtoBuf;
use std::{convert::TryFrom, fmt::Formatter, ops::RangeInclusive};
pub type RevisionObject = lib_ot::text_delta::DeltaTextOperations;
@ -21,11 +21,6 @@ pub struct Revision {
#[pb(index = 5)]
pub object_id: String,
// #[pb(index = 6)]
// ty: RevType, // Deprecated
//
// #[pb(index = 7)]
// pub user_id: String,
}
impl std::convert::From<Vec<u8>> for Revision {
@ -36,14 +31,7 @@ impl std::convert::From<Vec<u8>> for Revision {
}
impl Revision {
pub fn new<T: Into<String>>(
object_id: &str,
base_rev_id: i64,
rev_id: i64,
bytes: Bytes,
_user_id: &str,
md5: T,
) -> Revision {
pub fn new<T: Into<String>>(object_id: &str, base_rev_id: i64, rev_id: i64, bytes: Bytes, md5: T) -> Revision {
let object_id = object_id.to_owned();
let bytes = bytes.to_vec();
let base_rev_id = base_rev_id;
@ -61,6 +49,7 @@ impl Revision {
object_id,
}
}
pub fn is_empty(&self) -> bool {
self.base_rev_id == self.rev_id
}
@ -73,9 +62,9 @@ impl Revision {
self.rev_id == 0
}
pub fn initial_revision(user_id: &str, object_id: &str, bytes: Bytes) -> Self {
pub fn initial_revision(object_id: &str, bytes: Bytes) -> Self {
let md5 = md5(&bytes);
Self::new(object_id, 0, 0, bytes, user_id, md5)
Self::new(object_id, 0, 0, bytes, md5)
}
}
@ -211,15 +200,3 @@ impl RevisionRange {
self.iter().collect::<Vec<_>>()
}
}
#[derive(Debug, ProtoBuf_Enum, Clone, Eq, PartialEq)]
pub enum RevType {
DeprecatedLocal = 0,
DeprecatedRemote = 1,
}
impl std::default::Default for RevType {
fn default() -> Self {
RevType::DeprecatedLocal
}
}
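For quick reference, a minimal sketch of the `Revision` constructors after this change; the object id and payload below are made up for illustration:

```rust
use bytes::Bytes;
use flowy_sync::entities::revision::Revision;
use flowy_sync::util::md5;

fn build_revisions() -> (Revision, Revision) {
    // Hypothetical object id and JSON payload.
    let bytes = Bytes::from(r#"{"content":"123"}"#);
    let checksum = md5(&bytes);

    // Neither constructor takes a user_id any longer.
    let initial = Revision::initial_revision("object-1", bytes.clone()); // base_rev_id = rev_id = 0
    let next = Revision::new("object-1", 0, 1, bytes, checksum);
    (initial, next)
}
```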