Mirror of https://github.com/AppFlowy-IO/AppFlowy.git (synced 2024-08-30 18:12:39 +00:00)
chore: rename some struct
This commit is contained in:
parent 6593855f8f
commit dd832b7482
@@ -1,9 +0,0 @@
-mod editor;
-mod queue;
-mod web_socket;
-
-pub use editor::*;
-pub(crate) use queue::*;
-pub(crate) use web_socket::*;
-
-pub const DOCUMENT_SYNC_INTERVAL_IN_MILLIS: u64 = 1000;
@@ -1,7 +1,8 @@
-use crate::core::DocumentRevisionCompact;
+use crate::queue::DocumentRevisionCompact;
+use crate::web_socket::{make_document_ws_manager, EditorCommandSender};
 use crate::{
-    core::{make_document_ws_manager, EditorCommand, EditorCommandQueue, EditorCommandSender},
     errors::FlowyError,
+    queue::{EditorCommand, EditorCommandQueue},
     DocumentUser, DocumentWSReceiver,
 };
 use bytes::Bytes;
@@ -34,11 +35,11 @@ impl ClientDocumentEditor {
         doc_id: &str,
         user: Arc<dyn DocumentUser>,
         mut rev_manager: RevisionManager,
-        web_socket: Arc<dyn RevisionWebSocket>,
-        server: Arc<dyn RevisionCloudService>,
+        rev_web_socket: Arc<dyn RevisionWebSocket>,
+        cloud_service: Arc<dyn RevisionCloudService>,
     ) -> FlowyResult<Arc<Self>> {
         let document_info = rev_manager
-            .load::<DocumentInfoBuilder, DocumentRevisionCompact>(server)
+            .load::<DocumentInfoBuilder, DocumentRevisionCompact>(cloud_service)
             .await?;
         let delta = document_info.delta()?;
         let rev_manager = Arc::new(rev_manager);
@@ -51,7 +52,7 @@ impl ClientDocumentEditor {
             user_id.clone(),
             edit_cmd_tx.clone(),
             rev_manager.clone(),
-            web_socket,
+            rev_web_socket,
         )
         .await;
         let editor = Arc::new(Self {
@@ -1,10 +1,15 @@
-pub mod controller;
-pub mod core;
-pub use controller::*;
+pub mod editor;
+pub mod manager;
+mod queue;
+mod web_socket;
+
+pub use manager::*;
 pub mod errors {
     pub use flowy_error::{internal_error, ErrorCode, FlowyError};
 }
 
+pub const DOCUMENT_SYNC_INTERVAL_IN_MILLIS: u64 = 1000;
+
 use crate::errors::FlowyError;
 use flowy_collaboration::entities::document_info::{CreateDocParams, DocumentId, DocumentInfo, ResetDocumentParams};
 use lib_infra::future::FutureResult;
@@ -1,4 +1,4 @@
-use crate::{core::ClientDocumentEditor, errors::FlowyError, DocumentCloudService};
+use crate::{editor::ClientDocumentEditor, errors::FlowyError, DocumentCloudService};
 use async_trait::async_trait;
 use bytes::Bytes;
 use dashmap::DashMap;
@@ -29,31 +29,31 @@ pub(crate) trait DocumentWSReceiver: Send + Sync {
 type WebSocketDataReceivers = Arc<DashMap<String, Arc<dyn DocumentWSReceiver>>>;
 pub struct FlowyDocumentManager {
     cloud_service: Arc<dyn DocumentCloudService>,
-    ws_receivers: WebSocketDataReceivers,
-    web_socket: Arc<dyn RevisionWebSocket>,
-    open_cache: Arc<OpenDocCache>,
-    user: Arc<dyn DocumentUser>,
+    ws_data_receivers: WebSocketDataReceivers,
+    rev_web_socket: Arc<dyn RevisionWebSocket>,
+    document_handlers: Arc<DocumentEditorHandlers>,
+    document_user: Arc<dyn DocumentUser>,
 }
 
 impl FlowyDocumentManager {
     pub fn new(
         cloud_service: Arc<dyn DocumentCloudService>,
-        user: Arc<dyn DocumentUser>,
-        web_socket: Arc<dyn RevisionWebSocket>,
+        document_user: Arc<dyn DocumentUser>,
+        rev_web_socket: Arc<dyn RevisionWebSocket>,
     ) -> Self {
-        let ws_receivers = Arc::new(DashMap::new());
-        let open_cache = Arc::new(OpenDocCache::new());
+        let ws_data_receivers = Arc::new(DashMap::new());
+        let document_handlers = Arc::new(DocumentEditorHandlers::new());
         Self {
             cloud_service,
-            ws_receivers,
-            web_socket,
-            open_cache,
-            user,
+            ws_data_receivers,
+            rev_web_socket,
+            document_handlers,
+            document_user,
         }
     }
 
     pub fn init(&self) -> FlowyResult<()> {
-        listen_ws_state_changed(self.web_socket.clone(), self.ws_receivers.clone());
+        listen_ws_state_changed(self.rev_web_socket.clone(), self.ws_data_receivers.clone());
 
         Ok(())
     }
@@ -69,8 +69,8 @@ impl FlowyDocumentManager {
     pub fn close_document<T: AsRef<str>>(&self, doc_id: T) -> Result<(), FlowyError> {
         let doc_id = doc_id.as_ref();
         tracing::Span::current().record("doc_id", &doc_id);
-        self.open_cache.remove(doc_id);
-        self.remove_ws_receiver(doc_id);
+        self.document_handlers.remove(doc_id);
+        self.ws_data_receivers.remove(doc_id);
         Ok(())
     }
 
@@ -78,8 +78,8 @@ impl FlowyDocumentManager {
     pub fn delete<T: AsRef<str>>(&self, doc_id: T) -> Result<(), FlowyError> {
         let doc_id = doc_id.as_ref();
         tracing::Span::current().record("doc_id", &doc_id);
-        self.open_cache.remove(doc_id);
-        self.remove_ws_receiver(doc_id);
+        self.document_handlers.remove(doc_id);
+        self.ws_data_receivers.remove(doc_id);
         Ok(())
     }
 
@@ -94,18 +94,18 @@ impl FlowyDocumentManager {
         })
     }
 
-    pub async fn save_document<T: AsRef<str>>(&self, doc_id: T, revisions: RepeatedRevision) -> FlowyResult<()> {
+    pub async fn receive_revisions<T: AsRef<str>>(&self, doc_id: T, revisions: RepeatedRevision) -> FlowyResult<()> {
         let doc_id = doc_id.as_ref().to_owned();
-        let db_pool = self.user.db_pool()?;
+        let db_pool = self.document_user.db_pool()?;
         let rev_manager = self.make_rev_manager(&doc_id, db_pool)?;
         let _ = rev_manager.reset_object(revisions).await?;
         Ok(())
     }
 
-    pub async fn did_receive_ws_data(&self, data: Bytes) {
+    pub async fn receive_ws_data(&self, data: Bytes) {
         let result: Result<ServerRevisionWSData, protobuf::ProtobufError> = data.try_into();
         match result {
-            Ok(data) => match self.ws_receivers.get(&data.object_id) {
+            Ok(data) => match self.ws_data_receivers.get(&data.object_id) {
                 None => tracing::error!("Can't find any source handler for {:?}-{:?}", data.object_id, data.ty),
                 Some(handler) => match handler.receive_ws_data(data).await {
                     Ok(_) => {}
@@ -117,19 +117,13 @@ impl FlowyDocumentManager {
             }
         }
     }
-
-    pub async fn ws_connect_state_changed(&self, state: &WSConnectState) {
-        for receiver in self.ws_receivers.iter() {
-            receiver.value().connect_state_changed(state.clone());
-        }
-    }
 }
 
 impl FlowyDocumentManager {
     async fn get_editor(&self, doc_id: &str) -> FlowyResult<Arc<ClientDocumentEditor>> {
-        match self.open_cache.get(doc_id) {
+        match self.document_handlers.get(doc_id) {
             None => {
-                let db_pool = self.user.db_pool()?;
+                let db_pool = self.document_user.db_pool()?;
                 self.make_editor(doc_id, db_pool).await
             }
             Some(editor) => Ok(editor),
@@ -141,35 +135,26 @@ impl FlowyDocumentManager {
         doc_id: &str,
         pool: Arc<ConnectionPool>,
     ) -> Result<Arc<ClientDocumentEditor>, FlowyError> {
-        let user = self.user.clone();
-        let token = self.user.token()?;
+        let user = self.document_user.clone();
+        let token = self.document_user.token()?;
         let rev_manager = self.make_rev_manager(doc_id, pool.clone())?;
-        let server = Arc::new(DocumentRevisionCloudServiceImpl {
+        let cloud_service = Arc::new(DocumentRevisionCloudServiceImpl {
             token,
             server: self.cloud_service.clone(),
         });
-        let doc_editor = ClientDocumentEditor::new(doc_id, user, rev_manager, self.web_socket.clone(), server).await?;
-        self.add_ws_receiver(doc_id, doc_editor.ws_handler());
-        self.open_cache.insert(doc_id, &doc_editor);
+        let doc_editor =
+            ClientDocumentEditor::new(doc_id, user, rev_manager, self.rev_web_socket.clone(), cloud_service).await?;
+        self.ws_data_receivers
+            .insert(doc_id.to_string(), doc_editor.ws_handler());
+        self.document_handlers.insert(doc_id, &doc_editor);
         Ok(doc_editor)
     }
 
     fn make_rev_manager(&self, doc_id: &str, pool: Arc<ConnectionPool>) -> Result<RevisionManager, FlowyError> {
-        let user_id = self.user.user_id()?;
+        let user_id = self.document_user.user_id()?;
         let cache = Arc::new(RevisionCache::new(&user_id, doc_id, pool));
         Ok(RevisionManager::new(&user_id, doc_id, cache))
     }
-
-    fn add_ws_receiver(&self, object_id: &str, receiver: Arc<dyn DocumentWSReceiver>) {
-        if self.ws_receivers.contains_key(object_id) {
-            log::error!("Duplicate handler registered for {:?}", object_id);
-        }
-        self.ws_receivers.insert(object_id.to_string(), receiver);
-    }
-
-    fn remove_ws_receiver(&self, id: &str) {
-        self.ws_receivers.remove(id);
-    }
 }
 
 struct DocumentRevisionCloudServiceImpl {
@@ -202,11 +187,11 @@ impl RevisionCloudService for DocumentRevisionCloudServiceImpl {
     }
 }
 
-pub struct OpenDocCache {
+pub struct DocumentEditorHandlers {
     inner: DashMap<String, Arc<ClientDocumentEditor>>,
 }
 
-impl OpenDocCache {
+impl DocumentEditorHandlers {
     fn new() -> Self {
         Self { inner: DashMap::new() }
     }
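For context: DocumentEditorHandlers, as renamed above, is a concurrent registry of open editors keyed by document id and backed by DashMap. A minimal, self-contained sketch of that pattern (placeholder Editor type and hypothetical names, not the actual flowy-document code; requires the dashmap crate):

use dashmap::DashMap;
use std::sync::Arc;

// Placeholder for ClientDocumentEditor; the real type lives in flowy-document.
struct Editor {
    doc_id: String,
}

// Sketch of a DocumentEditorHandlers-style registry: DashMap allows concurrent
// insert/get/remove without wrapping the whole map in a Mutex.
struct EditorHandlers {
    inner: DashMap<String, Arc<Editor>>,
}

impl EditorHandlers {
    fn new() -> Self {
        Self { inner: DashMap::new() }
    }

    fn insert(&self, doc_id: &str, editor: &Arc<Editor>) {
        self.inner.insert(doc_id.to_string(), editor.clone());
    }

    fn get(&self, doc_id: &str) -> Option<Arc<Editor>> {
        // Clone the Arc out of the map entry so the guard is released immediately.
        self.inner.get(doc_id).map(|entry| entry.value().clone())
    }

    fn remove(&self, doc_id: &str) {
        self.inner.remove(doc_id);
    }
}

fn main() {
    let handlers = EditorHandlers::new();
    let editor = Arc::new(Editor { doc_id: "doc-1".to_owned() });
    handlers.insert(&editor.doc_id, &editor);
    assert!(handlers.get("doc-1").is_some());
    handlers.remove("doc-1");
    assert!(handlers.get("doc-1").is_none());
}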
@ -1,4 +1,5 @@
|
||||
use crate::{core::web_socket::EditorCommandReceiver, DocumentUser};
|
||||
use crate::web_socket::EditorCommandReceiver;
|
||||
use crate::DocumentUser;
|
||||
use async_stream::stream;
|
||||
use flowy_collaboration::util::make_delta_from_revisions;
|
||||
use flowy_collaboration::{
|
@@ -1,7 +1,4 @@
-use crate::{
-    core::{EditorCommand, DOCUMENT_SYNC_INTERVAL_IN_MILLIS},
-    DocumentWSReceiver,
-};
+use crate::{queue::EditorCommand, DocumentWSReceiver, DOCUMENT_SYNC_INTERVAL_IN_MILLIS};
 use async_trait::async_trait;
 use bytes::Bytes;
 use flowy_collaboration::{
@@ -31,7 +28,7 @@ pub(crate) async fn make_document_ws_manager(
     user_id: String,
     edit_cmd_tx: EditorCommandSender,
     rev_manager: Arc<RevisionManager>,
-    web_socket: Arc<dyn RevisionWebSocket>,
+    rev_web_socket: Arc<dyn RevisionWebSocket>,
 ) -> Arc<RevisionWebSocketManager> {
     let composite_sink_provider = Arc::new(CompositeWSSinkDataProvider::new(&doc_id, rev_manager.clone()));
     let resolve_target = Arc::new(DocumentRevisionResolveTarget { edit_cmd_tx });
@@ -50,7 +47,7 @@ pub(crate) async fn make_document_ws_manager(
     let ws_manager = Arc::new(RevisionWebSocketManager::new(
         "Document",
         &doc_id,
-        web_socket,
+        rev_web_socket,
         sink_provider,
         ws_stream_consumer,
         ping_duration,
@@ -1,5 +1,6 @@
 use flowy_collaboration::entities::revision::RevisionState;
-use flowy_document::core::{ClientDocumentEditor, DOCUMENT_SYNC_INTERVAL_IN_MILLIS};
+use flowy_document::editor::ClientDocumentEditor;
+use flowy_document::DOCUMENT_SYNC_INTERVAL_IN_MILLIS;
 use flowy_test::{helper::ViewTest, FlowySDKTest};
 use lib_ot::{core::Interval, rich_text::RichTextDelta};
 use std::sync::Arc;
@@ -67,8 +68,8 @@ impl EditorTest {
                     return;
                 }
                 let next_revision = next_revision.unwrap();
-                let mut receiver = rev_manager.revision_ack_receiver();
-                let _ = receiver.recv().await;
+                let mut notify = rev_manager.ack_notify();
+                let _ = notify.recv().await;
                 assert_eq!(next_revision.rev_id, rev_id.unwrap());
             }
             EditorScript::AssertJson(expected) => {
@@ -73,7 +73,7 @@ impl ViewController {
             Revision::initial_revision(&user_id, &params.view_id, delta_data).into();
         let _ = self
             .document_manager
-            .save_document(&params.view_id, repeated_revision)
+            .receive_revisions(&params.view_id, repeated_revision)
             .await?;
         let view = self.create_view_on_server(params).await?;
         let _ = self.create_view_on_local(view.clone()).await?;
@@ -94,7 +94,10 @@ impl ViewController {
         let delta_data = Bytes::from(view_data);
         let user_id = self.user.user_id()?;
         let repeated_revision: RepeatedRevision = Revision::initial_revision(&user_id, view_id, delta_data).into();
-        let _ = self.document_manager.save_document(view_id, repeated_revision).await?;
+        let _ = self
+            .document_manager
+            .receive_revisions(view_id, repeated_revision)
+            .await?;
         Ok(())
     }
 
@@ -203,8 +203,8 @@ impl FolderTest {
                 }
                 let next_revision = next_revision
                     .unwrap_or_else(|| panic!("Expected Next revision is {}, but receive None", rev_id.unwrap()));
-                let mut receiver = rev_manager.revision_ack_receiver();
-                let _ = receiver.recv().await;
+                let mut notify = rev_manager.ack_notify();
+                let _ = notify.recv().await;
                 assert_eq!(next_revision.rev_id, rev_id.unwrap());
             }
         }
@@ -98,7 +98,7 @@ impl WSMessageReceiver for DocumentWSMessageReceiverImpl {
     fn receive_message(&self, msg: WebSocketRawMessage) {
         let handler = self.0.clone();
        tokio::spawn(async move {
-            handler.did_receive_ws_data(Bytes::from(msg.data)).await;
+            handler.receive_ws_data(Bytes::from(msg.data)).await;
         });
     }
 }
@@ -26,20 +26,16 @@ pub struct RevisionManager {
     pub object_id: String,
     user_id: String,
     rev_id_counter: RevIdCounter,
-    cache: Arc<RwLock<RevisionCacheCompact>>,
+    rev_compressor: Arc<RwLock<RevisionCompressor>>,
 
     #[cfg(feature = "flowy_unit_test")]
-    revision_ack_notifier: tokio::sync::broadcast::Sender<i64>,
+    rev_ack_notifier: tokio::sync::broadcast::Sender<i64>,
 }
 
 impl RevisionManager {
     pub fn new(user_id: &str, object_id: &str, revision_cache: Arc<RevisionCache>) -> Self {
         let rev_id_counter = RevIdCounter::new(0);
-        let cache = Arc::new(RwLock::new(RevisionCacheCompact::new(
-            object_id,
-            user_id,
-            revision_cache,
-        )));
+        let rev_compressor = Arc::new(RwLock::new(RevisionCompressor::new(object_id, user_id, revision_cache)));
         #[cfg(feature = "flowy_unit_test")]
         let (revision_ack_notifier, _) = tokio::sync::broadcast::channel(1);
 
@@ -47,10 +43,10 @@ impl RevisionManager {
             object_id: object_id.to_string(),
             user_id: user_id.to_owned(),
             rev_id_counter,
-            cache,
+            rev_compressor,
 
             #[cfg(feature = "flowy_unit_test")]
-            revision_ack_notifier,
+            rev_ack_notifier: revision_ack_notifier,
         }
     }
 
@@ -63,7 +59,7 @@ impl RevisionManager {
             object_id: self.object_id.clone(),
             user_id: self.user_id.clone(),
             cloud,
-            cache: self.cache.clone(),
+            rev_compressor: self.rev_compressor.clone(),
         }
         .load::<C>()
         .await?;
@@ -75,7 +71,7 @@ impl RevisionManager {
     pub async fn reset_object(&self, revisions: RepeatedRevision) -> FlowyResult<()> {
         let rev_id = pair_rev_id_from_revisions(&revisions).1;
 
-        let write_guard = self.cache.write().await;
+        let write_guard = self.rev_compressor.write().await;
         let _ = write_guard.reset(revisions.into_inner()).await?;
         self.rev_id_counter.set(rev_id);
         Ok(())
@@ -87,7 +83,7 @@ impl RevisionManager {
             return Err(FlowyError::internal().context("Delta data should be empty"));
         }
 
-        let write_guard = self.cache.write().await;
+        let write_guard = self.rev_compressor.write().await;
         let _ = write_guard.add_ack_revision(revision).await?;
         self.rev_id_counter.set(revision.rev_id);
         Ok(())
@@ -101,7 +97,7 @@ impl RevisionManager {
         if revision.delta_data.is_empty() {
             return Err(FlowyError::internal().context("Delta data should be empty"));
         }
-        let mut write_guard = self.cache.write().await;
+        let mut write_guard = self.rev_compressor.write().await;
         let rev_id = write_guard.write_sync_revision::<C>(revision).await?;
 
         self.rev_id_counter.set(rev_id);
@@ -110,9 +106,9 @@ impl RevisionManager {
 
     #[tracing::instrument(level = "debug", skip(self), err)]
     pub async fn ack_revision(&self, rev_id: i64) -> Result<(), FlowyError> {
-        if self.cache.write().await.ack_revision(rev_id).await.is_ok() {
+        if self.rev_compressor.write().await.ack_revision(rev_id).await.is_ok() {
             #[cfg(feature = "flowy_unit_test")]
-            let _ = self.revision_ack_notifier.send(rev_id);
+            let _ = self.rev_ack_notifier.send(rev_id);
         }
         Ok(())
     }
@@ -128,37 +124,42 @@ impl RevisionManager {
     }
 
     pub async fn get_revisions_in_range(&self, range: RevisionRange) -> Result<Vec<Revision>, FlowyError> {
-        let revisions = self.cache.read().await.revisions_in_range(&range).await?;
+        let revisions = self.rev_compressor.read().await.revisions_in_range(&range).await?;
         Ok(revisions)
     }
 
     pub async fn next_sync_revision(&self) -> FlowyResult<Option<Revision>> {
-        Ok(self.cache.read().await.next_sync_revision().await?)
+        Ok(self.rev_compressor.read().await.next_sync_revision().await?)
     }
 
     pub async fn get_revision(&self, rev_id: i64) -> Option<Revision> {
-        self.cache.read().await.get(rev_id).await.map(|record| record.revision)
+        self.rev_compressor
+            .read()
+            .await
+            .get(rev_id)
+            .await
+            .map(|record| record.revision)
     }
 }
 
 #[cfg(feature = "flowy_unit_test")]
 impl RevisionManager {
     pub async fn revision_cache(&self) -> Arc<RevisionCache> {
-        self.cache.read().await.inner.clone()
+        self.rev_compressor.read().await.inner.clone()
     }
-    pub fn revision_ack_receiver(&self) -> tokio::sync::broadcast::Receiver<i64> {
-        self.revision_ack_notifier.subscribe()
+    pub fn ack_notify(&self) -> tokio::sync::broadcast::Receiver<i64> {
+        self.rev_ack_notifier.subscribe()
     }
 }
 
-struct RevisionCacheCompact {
+struct RevisionCompressor {
     object_id: String,
     user_id: String,
     inner: Arc<RevisionCache>,
     sync_seq: RevisionSyncSequence,
 }
 
-impl RevisionCacheCompact {
+impl RevisionCompressor {
     fn new(object_id: &str, user_id: &str, inner: Arc<RevisionCache>) -> Self {
         let sync_seq = RevisionSyncSequence::new();
         let object_id = object_id.to_owned();
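For context: the rev_ack_notifier / ack_notify pair renamed in the hunks above is the usual tokio broadcast pattern, where a test subscribes before an ack is sent. A minimal, self-contained sketch of that pattern (hypothetical AckNotifier type, not the real RevisionManager; requires tokio with the rt, macros, and sync features):

use tokio::sync::broadcast;

// Hypothetical stand-in for RevisionManager's ack plumbing: the sender lives on
// the struct, tests call ack_notify() to subscribe, and ack() broadcasts the id.
struct AckNotifier {
    rev_ack_notifier: broadcast::Sender<i64>,
}

impl AckNotifier {
    fn new() -> Self {
        let (rev_ack_notifier, _) = broadcast::channel(1);
        Self { rev_ack_notifier }
    }

    fn ack_notify(&self) -> broadcast::Receiver<i64> {
        self.rev_ack_notifier.subscribe()
    }

    fn ack(&self, rev_id: i64) {
        // send() only errors when there are no subscribers; ignore that case.
        let _ = self.rev_ack_notifier.send(rev_id);
    }
}

#[tokio::main]
async fn main() {
    let notifier = AckNotifier::new();
    let mut notify = notifier.ack_notify();
    notifier.ack(3);
    assert_eq!(notify.recv().await.unwrap(), 3);
}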
@@ -251,7 +252,7 @@ impl RevisionCacheCompact {
     }
 }
 
-impl std::ops::Deref for RevisionCacheCompact {
+impl std::ops::Deref for RevisionCompressor {
     type Target = Arc<RevisionCache>;
 
     fn deref(&self) -> &Self::Target {
@@ -323,7 +324,7 @@ struct RevisionLoader {
     object_id: String,
     user_id: String,
     cloud: Arc<dyn RevisionCloudService>,
-    cache: Arc<RwLock<RevisionCacheCompact>>,
+    rev_compressor: Arc<RwLock<RevisionCompressor>>,
 }
 
 impl RevisionLoader {
@@ -331,14 +332,14 @@ impl RevisionLoader {
     where
         C: RevisionCompact,
     {
-        let records = self.cache.read().await.batch_get(&self.object_id)?;
+        let records = self.rev_compressor.read().await.batch_get(&self.object_id)?;
         let revisions: Vec<Revision>;
         let mut rev_id = 0;
         if records.is_empty() {
             let remote_revisions = self.cloud.fetch_object(&self.user_id, &self.object_id).await?;
             for revision in &remote_revisions {
                 rev_id = revision.rev_id;
-                let _ = self.cache.read().await.add_ack_revision(revision).await?;
+                let _ = self.rev_compressor.read().await.add_ack_revision(revision).await?;
             }
             revisions = remote_revisions;
         } else {
@@ -346,7 +347,12 @@ impl RevisionLoader {
                 rev_id = record.revision.rev_id;
                 if record.state == RevisionState::Sync {
                     // Sync the records if their state is RevisionState::Sync.
-                    let _ = self.cache.write().await.add_sync_revision(&record.revision).await?;
+                    let _ = self
+                        .rev_compressor
+                        .write()
+                        .await
+                        .add_sync_revision(&record.revision)
+                        .await?;
                 }
             }
             revisions = records.into_iter().map(|record| record.revision).collect::<_>();
@@ -44,7 +44,7 @@ pub struct RevisionWebSocketManager {
     pub object_id: String,
     sink_provider: Arc<dyn RevisionWSSinkDataProvider>,
     stream_consumer: Arc<dyn RevisionWSSteamConsumer>,
-    web_socket: Arc<dyn RevisionWebSocket>,
+    rev_web_socket: Arc<dyn RevisionWebSocket>,
     pub ws_passthrough_tx: Sender<ServerRevisionWSData>,
     ws_passthrough_rx: Option<Receiver<ServerRevisionWSData>>,
     pub state_passthrough_tx: broadcast::Sender<WSConnectState>,
@@ -60,7 +60,7 @@ impl RevisionWebSocketManager {
     pub fn new(
         object_name: &str,
         object_id: &str,
-        web_socket: Arc<dyn RevisionWebSocket>,
+        rev_web_socket: Arc<dyn RevisionWebSocket>,
         sink_provider: Arc<dyn RevisionWSSinkDataProvider>,
         stream_consumer: Arc<dyn RevisionWSSteamConsumer>,
         ping_duration: Duration,
@@ -75,7 +75,7 @@ impl RevisionWebSocketManager {
             object_name,
             sink_provider,
             stream_consumer,
-            web_socket,
+            rev_web_socket,
             ws_passthrough_tx,
             ws_passthrough_rx: Some(ws_passthrough_rx),
             state_passthrough_tx,
@@ -91,7 +91,7 @@ impl RevisionWebSocketManager {
             &self.object_id,
             &self.object_name,
             self.sink_provider.clone(),
-            self.web_socket.clone(),
+            self.rev_web_socket.clone(),
             self.stop_sync_tx.subscribe(),
             ping_duration,
         );
@@ -28,7 +28,7 @@ impl CrateConfig {
             .flowy_config
             .proto_crates
             .iter()
-            .map(|name| path_buf_with_component(&self.crate_path, vec![&name]))
+            .map(|name| path_buf_with_component(&self.crate_path, vec![name]))
             .collect::<Vec<PathBuf>>();
         proto_paths
     }
@@ -31,7 +31,7 @@ pub fn parse_crate_protobuf(crate_paths: Vec<String>) -> Vec<ProtobufCrateContex
         .collect::<Vec<ProtobufCrateContext>>()
 }
 
-fn parse_files_protobuf(proto_crate_path: &PathBuf, proto_output_dir: &PathBuf) -> Vec<ProtoFile> {
+fn parse_files_protobuf(proto_crate_path: &Path, proto_output_dir: &Path) -> Vec<ProtoFile> {
     let mut gen_proto_vec: Vec<ProtoFile> = vec![];
     // file_stem https://doc.rust-lang.org/std/path/struct.Path.html#method.file_stem
     for (path, file_name) in WalkDir::new(proto_crate_path)
@@ -54,7 +54,7 @@ fn parse_files_protobuf(proto_crate_path: &PathBuf, proto_output_dir: &PathBuf)
             .unwrap_or_else(|_| panic!("Unable to parse file at {}", path));
         let structs = get_ast_structs(&ast);
         let proto_file = format!("{}.proto", &file_name);
-        let proto_file_path = path_string_with_component(&proto_output_dir, vec![&proto_file]);
+        let proto_file_path = path_string_with_component(proto_output_dir, vec![&proto_file]);
         let mut proto_file_content = parse_or_init_proto_file(proto_file_path.as_ref());
 
         structs.iter().for_each(|s| {
@@ -12,7 +12,7 @@ pub use proto_gen::*;
 pub use proto_info::*;
 use std::fs::File;
 use std::io::Write;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::process::Command;
 use walkdir::WalkDir;
 
@@ -56,7 +56,7 @@ pub fn gen(crate_name: &str, proto_file_dir: &str) {
     generate_rust_protobuf_files(&protoc_bin_path, &proto_file_paths, proto_file_dir);
 }
 
-fn generate_rust_protobuf_files(protoc_bin_path: &PathBuf, proto_file_paths: &Vec<String>, proto_file_dir: &str) {
+fn generate_rust_protobuf_files(protoc_bin_path: &Path, proto_file_paths: &[String], proto_file_dir: &str) {
     protoc_rust::Codegen::new()
         .out_dir("./src/protobuf/model")
         .protoc_path(protoc_bin_path)
@@ -94,18 +94,18 @@ pub fn is_hidden(entry: &walkdir::DirEntry) -> bool {
     entry.file_name().to_str().map(|s| s.starts_with('.')).unwrap_or(false)
 }
 
-pub fn create_dir_if_not_exist(dir: &PathBuf) {
-    if !dir.as_path().exists() {
+pub fn create_dir_if_not_exist(dir: &Path) {
+    if !dir.exists() {
         std::fs::create_dir_all(dir).unwrap();
     }
 }
 
-pub fn path_string_with_component(path: &PathBuf, components: Vec<&str>) -> String {
+pub fn path_string_with_component(path: &Path, components: Vec<&str>) -> String {
     path_buf_with_component(path, components).to_str().unwrap().to_string()
 }
 
-pub fn path_buf_with_component(path: &PathBuf, components: Vec<&str>) -> PathBuf {
-    let mut path_buf = path.clone();
+pub fn path_buf_with_component(path: &Path, components: Vec<&str>) -> PathBuf {
+    let mut path_buf = path.to_path_buf();
     for component in components {
         path_buf.push(component);
     }
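For context: the signature changes above follow the common Rust guideline of taking &Path (and &[T]) instead of &PathBuf (and &Vec<T>) in parameters, since &PathBuf auto-derefs to &Path and the borrowed form accepts more callers without extra allocation. A small standalone illustration (hypothetical helper name, not the actual flowy build-tool code):

use std::path::{Path, PathBuf};

// Taking &Path rather than &PathBuf: callers can pass a &PathBuf, a Path::new(..)
// value, or anything else that derefs to Path.
fn join_components(path: &Path, components: &[&str]) -> PathBuf {
    let mut path_buf = path.to_path_buf();
    for component in components {
        path_buf.push(component);
    }
    path_buf
}

fn main() {
    let base = PathBuf::from("shared-lib");
    // A &PathBuf and a plain &Path both satisfy the same parameter type.
    let a = join_components(&base, &["flowy-ast", "src"]);
    let b = join_components(Path::new("rust-lib"), &["flowy-document"]);
    println!("{}", a.display());
    println!("{}", b.display());
}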