generic cloud storage

appflowy
2022-01-11 22:23:19 +08:00
parent e7aad4045b
commit 4bdd9df54c
30 changed files with 879 additions and 918 deletions

View File

@@ -2,7 +2,13 @@ use crate::{
context::DocumentUser,
core::{
web_socket::{make_document_ws_manager, DocumentWebSocketManager},
*,
DocumentMD5,
DocumentRevisionManager,
DocumentWSReceiver,
DocumentWebSocket,
EditorCommand,
EditorCommandQueue,
RevisionServer,
},
errors::FlowyError,
};
@@ -20,7 +26,7 @@ pub struct ClientDocumentEditor {
pub doc_id: String,
#[allow(dead_code)]
rev_manager: Arc<DocumentRevisionManager>,
ws_manager: Arc<dyn DocumentWebSocketManager>,
ws_manager: Arc<DocumentWebSocketManager>,
edit_queue: UnboundedSender<EditorCommand>,
}
@@ -153,7 +159,7 @@ impl ClientDocumentEditor {
#[tracing::instrument(level = "debug", skip(self))]
pub fn stop(&self) { self.ws_manager.stop(); }
pub(crate) fn ws_handler(&self) -> Arc<dyn DocumentWSReceiver> { self.ws_manager.receiver() }
pub(crate) fn ws_handler(&self) -> Arc<dyn DocumentWSReceiver> { self.ws_manager.clone() }
}
fn spawn_edit_queue(

View File

@@ -1,288 +0,0 @@
use crate::{
core::{web_socket::ws_manager::DocumentWebSocketManager, SYNC_INTERVAL_IN_MILLIS},
ws_receivers::{DocumentWSReceiver, DocumentWebSocket},
};
use async_stream::stream;
use bytes::Bytes;
use flowy_collaboration::entities::{
revision::{RevId, RevisionRange},
ws::{DocumentClientWSData, DocumentServerWSData, DocumentServerWSDataType, NewDocumentUser},
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use futures::stream::StreamExt;
use lib_infra::future::FutureResult;
use lib_ws::WSConnectState;
use std::{convert::TryFrom, sync::Arc};
use tokio::{
sync::{
broadcast,
mpsc,
mpsc::{UnboundedReceiver, UnboundedSender},
},
task::spawn_blocking,
time::{interval, Duration},
};
pub(crate) struct HttpWebSocketManager {
doc_id: String,
data_provider: Arc<dyn DocumentWSSinkDataProvider>,
stream_consumer: Arc<dyn DocumentWSSteamConsumer>,
ws_conn: Arc<dyn DocumentWebSocket>,
ws_msg_tx: UnboundedSender<DocumentServerWSData>,
ws_msg_rx: Option<UnboundedReceiver<DocumentServerWSData>>,
stop_sync_tx: SinkStopTx,
state: broadcast::Sender<WSConnectState>,
}
impl HttpWebSocketManager {
pub(crate) fn new(
doc_id: &str,
ws_conn: Arc<dyn DocumentWebSocket>,
data_provider: Arc<dyn DocumentWSSinkDataProvider>,
stream_consumer: Arc<dyn DocumentWSSteamConsumer>,
) -> Self {
let (ws_msg_tx, ws_msg_rx) = mpsc::unbounded_channel();
let (stop_sync_tx, _) = tokio::sync::broadcast::channel(2);
let doc_id = doc_id.to_string();
let (state, _) = broadcast::channel(2);
let mut manager = HttpWebSocketManager {
doc_id,
data_provider,
stream_consumer,
ws_conn,
ws_msg_tx,
ws_msg_rx: Some(ws_msg_rx),
stop_sync_tx,
state,
};
manager.run();
manager
}
fn run(&mut self) {
let ws_msg_rx = self.ws_msg_rx.take().expect("Only take once");
let sink = DocumentWSSink::new(
&self.doc_id,
self.data_provider.clone(),
self.ws_conn.clone(),
self.stop_sync_tx.subscribe(),
);
let stream = DocumentWSStream::new(
&self.doc_id,
self.stream_consumer.clone(),
ws_msg_rx,
self.stop_sync_tx.subscribe(),
);
tokio::spawn(sink.run());
tokio::spawn(stream.run());
}
pub fn scribe_state(&self) -> broadcast::Receiver<WSConnectState> { self.state.subscribe() }
}
impl DocumentWebSocketManager for Arc<HttpWebSocketManager> {
fn stop(&self) {
if self.stop_sync_tx.send(()).is_ok() {
tracing::debug!("{} stop sync", self.doc_id)
}
}
fn receiver(&self) -> Arc<dyn DocumentWSReceiver> { self.clone() }
}
impl DocumentWSReceiver for HttpWebSocketManager {
fn receive_ws_data(&self, doc_data: DocumentServerWSData) {
match self.ws_msg_tx.send(doc_data) {
Ok(_) => {},
Err(e) => tracing::error!("❌ Propagate ws message failed. {}", e),
}
}
fn connect_state_changed(&self, state: &WSConnectState) {
match self.state.send(state.clone()) {
Ok(_) => {},
Err(e) => tracing::error!("{}", e),
}
}
}
impl std::ops::Drop for HttpWebSocketManager {
fn drop(&mut self) { tracing::debug!("{} HttpWebSocketManager was drop", self.doc_id) }
}
pub trait DocumentWSSteamConsumer: Send + Sync {
fn receive_push_revision(&self, bytes: Bytes) -> FutureResult<(), FlowyError>;
fn receive_ack(&self, id: String, ty: DocumentServerWSDataType) -> FutureResult<(), FlowyError>;
fn receive_new_user_connect(&self, new_user: NewDocumentUser) -> FutureResult<(), FlowyError>;
fn pull_revisions_in_range(&self, range: RevisionRange) -> FutureResult<(), FlowyError>;
}
pub struct DocumentWSStream {
doc_id: String,
consumer: Arc<dyn DocumentWSSteamConsumer>,
ws_msg_rx: Option<mpsc::UnboundedReceiver<DocumentServerWSData>>,
stop_rx: Option<SinkStopRx>,
}
impl DocumentWSStream {
pub fn new(
doc_id: &str,
consumer: Arc<dyn DocumentWSSteamConsumer>,
ws_msg_rx: mpsc::UnboundedReceiver<DocumentServerWSData>,
stop_rx: SinkStopRx,
) -> Self {
DocumentWSStream {
doc_id: doc_id.to_owned(),
consumer,
ws_msg_rx: Some(ws_msg_rx),
stop_rx: Some(stop_rx),
}
}
pub async fn run(mut self) {
let mut receiver = self.ws_msg_rx.take().expect("Only take once");
let mut stop_rx = self.stop_rx.take().expect("Only take once");
let doc_id = self.doc_id.clone();
let stream = stream! {
loop {
tokio::select! {
result = receiver.recv() => {
match result {
Some(msg) => {
yield msg
},
None => {
tracing::debug!("[DocumentStream:{}] loop exit", doc_id);
break;
},
}
},
_ = stop_rx.recv() => {
tracing::debug!("[DocumentStream:{}] loop exit", doc_id);
break
},
};
}
};
stream
.for_each(|msg| async {
match self.handle_message(msg).await {
Ok(_) => {},
Err(e) => log::error!("[DocumentStream:{}] error: {}", self.doc_id, e),
}
})
.await;
}
async fn handle_message(&self, msg: DocumentServerWSData) -> FlowyResult<()> {
let DocumentServerWSData { doc_id: _, ty, data } = msg;
let bytes = spawn_blocking(move || Bytes::from(data))
.await
.map_err(internal_error)?;
tracing::trace!("[DocumentStream]: new message: {:?}", ty);
match ty {
DocumentServerWSDataType::ServerPushRev => {
let _ = self.consumer.receive_push_revision(bytes).await?;
},
DocumentServerWSDataType::ServerPullRev => {
let range = RevisionRange::try_from(bytes)?;
let _ = self.consumer.pull_revisions_in_range(range).await?;
},
DocumentServerWSDataType::ServerAck => {
let rev_id = RevId::try_from(bytes).unwrap().value;
let _ = self.consumer.receive_ack(rev_id.to_string(), ty).await;
},
DocumentServerWSDataType::UserConnect => {
let new_user = NewDocumentUser::try_from(bytes)?;
let _ = self.consumer.receive_new_user_connect(new_user).await;
// Notify the user that someone has connected to this document
},
}
Ok(())
}
}
pub type Tick = ();
pub type SinkStopRx = broadcast::Receiver<()>;
pub type SinkStopTx = broadcast::Sender<()>;
pub trait DocumentWSSinkDataProvider: Send + Sync {
fn next(&self) -> FutureResult<Option<DocumentClientWSData>, FlowyError>;
}
pub struct DocumentWSSink {
provider: Arc<dyn DocumentWSSinkDataProvider>,
ws_sender: Arc<dyn DocumentWebSocket>,
stop_rx: Option<SinkStopRx>,
doc_id: String,
}
impl DocumentWSSink {
pub fn new(
doc_id: &str,
provider: Arc<dyn DocumentWSSinkDataProvider>,
ws_sender: Arc<dyn DocumentWebSocket>,
stop_rx: SinkStopRx,
) -> Self {
Self {
provider,
ws_sender,
stop_rx: Some(stop_rx),
doc_id: doc_id.to_owned(),
}
}
pub async fn run(mut self) {
let (tx, mut rx) = mpsc::unbounded_channel();
let mut stop_rx = self.stop_rx.take().expect("Only take once");
let doc_id = self.doc_id.clone();
tokio::spawn(tick(tx));
let stream = stream! {
loop {
tokio::select! {
result = rx.recv() => {
match result {
Some(msg) => yield msg,
None => break,
}
},
_ = stop_rx.recv() => {
tracing::debug!("[DocumentSink:{}] loop exit", doc_id);
break
},
};
}
};
stream
.for_each(|_| async {
match self.send_next_revision().await {
Ok(_) => {},
Err(e) => log::error!("[DocumentSink]: Send failed, {:?}", e),
}
})
.await;
}
async fn send_next_revision(&self) -> FlowyResult<()> {
match self.provider.next().await? {
None => {
tracing::trace!("Finish synchronizing revisions");
Ok(())
},
Some(data) => {
tracing::trace!("[DocumentSink]: send: {}:{}-{:?}", data.doc_id, data.id(), data.ty);
self.ws_sender.send(data)
// let _ = tokio::time::timeout(Duration::from_millis(2000),
},
}
}
}
async fn tick(sender: mpsc::UnboundedSender<Tick>) {
let mut interval = interval(Duration::from_millis(SYNC_INTERVAL_IN_MILLIS));
while sender.send(()).is_ok() {
interval.tick().await;
}
}

View File

@@ -1,18 +0,0 @@
use crate::core::{web_socket::DocumentWebSocketManager, DocumentWSReceiver};
use flowy_collaboration::entities::ws::DocumentServerWSData;
use lib_ws::WSConnectState;
use std::sync::Arc;
pub(crate) struct LocalWebSocketManager {}
impl DocumentWebSocketManager for Arc<LocalWebSocketManager> {
fn stop(&self) {}
fn receiver(&self) -> Arc<dyn DocumentWSReceiver> { self.clone() }
}
impl DocumentWSReceiver for LocalWebSocketManager {
fn receive_ws_data(&self, _doc_data: DocumentServerWSData) {}
fn connect_state_changed(&self, _state: &WSConnectState) {}
}

View File

@@ -1,7 +1,283 @@
#![allow(clippy::module_inception)]
mod http_ws_impl;
mod local_ws_impl;
mod ws_manager;
pub use ws_manager::*;
pub(crate) use http_ws_impl::*;
pub(crate) use ws_manager::*;
use crate::core::{
web_socket::{DocumentWSSinkDataProvider, DocumentWSSteamConsumer},
DocumentRevisionManager,
DocumentWSReceiver,
DocumentWebSocket,
EditorCommand,
TransformDeltas,
};
use bytes::Bytes;
use flowy_collaboration::{
entities::{
revision::{RepeatedRevision, Revision, RevisionRange},
ws::{DocumentClientWSData, DocumentServerWSDataType, NewDocumentUser},
},
errors::CollaborateResult,
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use lib_infra::future::FutureResult;
use lib_ws::WSConnectState;
use std::{collections::VecDeque, convert::TryFrom, sync::Arc};
use tokio::sync::{broadcast, mpsc::UnboundedSender, oneshot, RwLock};
pub(crate) async fn make_document_ws_manager(
doc_id: String,
user_id: String,
edit_cmd_tx: UnboundedSender<EditorCommand>,
rev_manager: Arc<DocumentRevisionManager>,
ws_conn: Arc<dyn DocumentWebSocket>,
) -> Arc<DocumentWebSocketManager> {
let shared_sink = Arc::new(SharedWSSinkDataProvider::new(rev_manager.clone()));
let ws_stream_consumer = Arc::new(DocumentWebSocketSteamConsumerAdapter {
doc_id: doc_id.clone(),
edit_cmd_tx,
rev_manager: rev_manager.clone(),
shared_sink: shared_sink.clone(),
});
let data_provider = Arc::new(DocumentWSSinkDataProviderAdapter(shared_sink));
let ws_manager = Arc::new(DocumentWebSocketManager::new(
&doc_id,
ws_conn,
data_provider,
ws_stream_consumer,
));
listen_document_ws_state(&user_id, &doc_id, ws_manager.scribe_state(), rev_manager);
ws_manager
}
fn listen_document_ws_state(
_user_id: &str,
_doc_id: &str,
mut subscriber: broadcast::Receiver<WSConnectState>,
_rev_manager: Arc<DocumentRevisionManager>,
) {
tokio::spawn(async move {
while let Ok(state) = subscriber.recv().await {
match state {
WSConnectState::Init => {},
WSConnectState::Connecting => {},
WSConnectState::Connected => {},
WSConnectState::Disconnected => {},
}
}
});
}
pub(crate) struct DocumentWebSocketSteamConsumerAdapter {
pub(crate) doc_id: String,
pub(crate) edit_cmd_tx: UnboundedSender<EditorCommand>,
pub(crate) rev_manager: Arc<DocumentRevisionManager>,
pub(crate) shared_sink: Arc<SharedWSSinkDataProvider>,
}
impl DocumentWSSteamConsumer for DocumentWebSocketSteamConsumerAdapter {
fn receive_push_revision(&self, bytes: Bytes) -> FutureResult<(), FlowyError> {
let rev_manager = self.rev_manager.clone();
let edit_cmd_tx = self.edit_cmd_tx.clone();
let shared_sink = self.shared_sink.clone();
let doc_id = self.doc_id.clone();
FutureResult::new(async move {
if let Some(server_composed_revision) = handle_remote_revision(edit_cmd_tx, rev_manager, bytes).await? {
let data = DocumentClientWSData::from_revisions(&doc_id, vec![server_composed_revision]);
shared_sink.push_back(data).await;
}
Ok(())
})
}
fn receive_ack(&self, id: String, ty: DocumentServerWSDataType) -> FutureResult<(), FlowyError> {
let shared_sink = self.shared_sink.clone();
FutureResult::new(async move { shared_sink.ack(id, ty).await })
}
fn receive_new_user_connect(&self, _new_user: NewDocumentUser) -> FutureResult<(), FlowyError> {
// the _new_user will be used later
FutureResult::new(async move { Ok(()) })
}
fn pull_revisions_in_range(&self, range: RevisionRange) -> FutureResult<(), FlowyError> {
let rev_manager = self.rev_manager.clone();
let shared_sink = self.shared_sink.clone();
let doc_id = self.doc_id.clone();
FutureResult::new(async move {
let revisions = rev_manager.get_revisions_in_range(range).await?;
let data = DocumentClientWSData::from_revisions(&doc_id, revisions);
shared_sink.push_back(data).await;
Ok(())
})
}
}
pub(crate) struct DocumentWSSinkDataProviderAdapter(pub(crate) Arc<SharedWSSinkDataProvider>);
impl DocumentWSSinkDataProvider for DocumentWSSinkDataProviderAdapter {
fn next(&self) -> FutureResult<Option<DocumentClientWSData>, FlowyError> {
let shared_sink = self.0.clone();
FutureResult::new(async move { shared_sink.next().await })
}
}
async fn transform_pushed_revisions(
revisions: Vec<Revision>,
edit_cmd: &UnboundedSender<EditorCommand>,
) -> FlowyResult<TransformDeltas> {
let (ret, rx) = oneshot::channel::<CollaborateResult<TransformDeltas>>();
let _ = edit_cmd.send(EditorCommand::TransformRevision { revisions, ret });
Ok(rx.await.map_err(internal_error)??)
}
#[tracing::instrument(level = "debug", skip(edit_cmd_tx, rev_manager, bytes))]
pub(crate) async fn handle_remote_revision(
edit_cmd_tx: UnboundedSender<EditorCommand>,
rev_manager: Arc<DocumentRevisionManager>,
bytes: Bytes,
) -> FlowyResult<Option<Revision>> {
let mut revisions = RepeatedRevision::try_from(bytes)?.into_inner();
if revisions.is_empty() {
return Ok(None);
}
let first_revision = revisions.first().unwrap();
if let Some(local_revision) = rev_manager.get_revision(first_revision.rev_id).await {
if local_revision.md5 == first_revision.md5 {
// The local revision is equal to the pushed revision. Just ignore it.
revisions = revisions.split_off(1);
if revisions.is_empty() {
return Ok(None);
}
} else {
return Ok(None);
}
}
let TransformDeltas {
client_prime,
server_prime,
} = transform_pushed_revisions(revisions.clone(), &edit_cmd_tx).await?;
match server_prime {
None => {
// A None server_prime means the client's local revisions conflict with the
// server's, so the client delta needs to be overridden.
let (ret, rx) = oneshot::channel();
let _ = edit_cmd_tx.send(EditorCommand::OverrideDelta {
revisions,
delta: client_prime,
ret,
});
let _ = rx.await.map_err(internal_error)??;
Ok(None)
},
Some(server_prime) => {
let (ret, rx) = oneshot::channel();
let _ = edit_cmd_tx.send(EditorCommand::ComposeRemoteDelta {
revisions,
client_delta: client_prime,
server_delta: server_prime,
ret,
});
Ok(rx.await.map_err(internal_error)??)
},
}
}
#[derive(Clone)]
enum SourceType {
Shared,
Revision,
}
#[derive(Clone)]
pub(crate) struct SharedWSSinkDataProvider {
shared: Arc<RwLock<VecDeque<DocumentClientWSData>>>,
rev_manager: Arc<DocumentRevisionManager>,
source_ty: Arc<RwLock<SourceType>>,
}
impl SharedWSSinkDataProvider {
pub(crate) fn new(rev_manager: Arc<DocumentRevisionManager>) -> Self {
SharedWSSinkDataProvider {
shared: Arc::new(RwLock::new(VecDeque::new())),
rev_manager,
source_ty: Arc::new(RwLock::new(SourceType::Shared)),
}
}
#[allow(dead_code)]
pub(crate) async fn push_front(&self, data: DocumentClientWSData) { self.shared.write().await.push_front(data); }
async fn push_back(&self, data: DocumentClientWSData) { self.shared.write().await.push_back(data); }
async fn next(&self) -> FlowyResult<Option<DocumentClientWSData>> {
let source_ty = self.source_ty.read().await.clone();
match source_ty {
SourceType::Shared => match self.shared.read().await.front() {
None => {
*self.source_ty.write().await = SourceType::Revision;
Ok(None)
},
Some(data) => {
tracing::debug!("[SharedWSSinkDataProvider]: {}:{:?}", data.doc_id, data.ty);
Ok(Some(data.clone()))
},
},
SourceType::Revision => {
if !self.shared.read().await.is_empty() {
*self.source_ty.write().await = SourceType::Shared;
return Ok(None);
}
match self.rev_manager.next_sync_revision().await? {
Some(rev) => {
let doc_id = rev.doc_id.clone();
Ok(Some(DocumentClientWSData::from_revisions(&doc_id, vec![rev])))
},
None => {
//
let doc_id = self.rev_manager.doc_id.clone();
let latest_rev_id = self.rev_manager.rev_id();
Ok(Some(DocumentClientWSData::ping(&doc_id, latest_rev_id)))
},
}
},
}
}
async fn ack(&self, id: String, _ty: DocumentServerWSDataType) -> FlowyResult<()> {
// let _ = self.rev_manager.ack_revision(id).await?;
let source_ty = self.source_ty.read().await.clone();
match source_ty {
SourceType::Shared => {
let should_pop = match self.shared.read().await.front() {
None => false,
Some(val) => {
let expected_id = val.id();
if expected_id == id {
true
} else {
tracing::error!("The front element's {} is not equal to the {}", expected_id, id);
false
}
},
};
if should_pop {
let _ = self.shared.write().await.pop_front();
}
},
SourceType::Revision => {
match id.parse::<i64>() {
Ok(rev_id) => {
let _ = self.rev_manager.ack_revision(rev_id).await?;
},
Err(e) => {
tracing::error!("Parse rev_id from {} failed. {}", id, e);
},
};
},
}
Ok(())
}
}

View File

@@ -1,310 +1,283 @@
use crate::core::{
web_socket::{DocumentWSSinkDataProvider, DocumentWSSteamConsumer, HttpWebSocketManager},
DocumentRevisionManager,
DocumentWSReceiver,
DocumentWebSocket,
EditorCommand,
TransformDeltas,
use crate::{
core::SYNC_INTERVAL_IN_MILLIS,
ws_receivers::{DocumentWSReceiver, DocumentWebSocket},
};
use async_stream::stream;
use bytes::Bytes;
use flowy_collaboration::{
entities::{
revision::{RepeatedRevision, Revision, RevisionRange},
ws::{DocumentClientWSData, NewDocumentUser},
},
errors::CollaborateResult,
use flowy_collaboration::entities::{
revision::{RevId, RevisionRange},
ws::{DocumentClientWSData, DocumentServerWSData, DocumentServerWSDataType, NewDocumentUser},
};
use flowy_error::{internal_error, FlowyError, FlowyResult};
use futures::stream::StreamExt;
use lib_infra::future::FutureResult;
use flowy_collaboration::entities::ws::DocumentServerWSDataType;
use lib_ws::WSConnectState;
use std::{collections::VecDeque, convert::TryFrom, sync::Arc};
use tokio::sync::{broadcast, mpsc::UnboundedSender, oneshot, RwLock};
use std::{convert::TryFrom, sync::Arc};
use tokio::{
sync::{
broadcast,
mpsc,
mpsc::{UnboundedReceiver, UnboundedSender},
},
task::spawn_blocking,
time::{interval, Duration},
};
pub(crate) trait DocumentWebSocketManager: Send + Sync {
fn stop(&self);
fn receiver(&self) -> Arc<dyn DocumentWSReceiver>;
}
pub(crate) async fn make_document_ws_manager(
pub struct DocumentWebSocketManager {
doc_id: String,
user_id: String,
edit_cmd_tx: UnboundedSender<EditorCommand>,
rev_manager: Arc<DocumentRevisionManager>,
data_provider: Arc<dyn DocumentWSSinkDataProvider>,
stream_consumer: Arc<dyn DocumentWSSteamConsumer>,
ws_conn: Arc<dyn DocumentWebSocket>,
) -> Arc<dyn DocumentWebSocketManager> {
// if cfg!(feature = "http_server") {
// let shared_sink =
// Arc::new(SharedWSSinkDataProvider::new(rev_manager.clone()));
// let ws_stream_consumer = Arc::new(DocumentWebSocketSteamConsumerAdapter {
// doc_id: doc_id.clone(),
// edit_cmd_tx,
// rev_manager: rev_manager.clone(),
// shared_sink: shared_sink.clone(),
// });
// let data_provider =
// Arc::new(DocumentWSSinkDataProviderAdapter(shared_sink));
// let ws_manager = Arc::new(HttpWebSocketManager::new(
// &doc_id,
// ws_conn,
// data_provider,
// ws_stream_consumer,
// ));
// listen_document_ws_state(&user_id, &doc_id, ws_manager.scribe_state(),
// rev_manager); Arc::new(ws_manager)
// } else {
// Arc::new(Arc::new(LocalWebSocketManager {}))
// }
let shared_sink = Arc::new(SharedWSSinkDataProvider::new(rev_manager.clone()));
let ws_stream_consumer = Arc::new(DocumentWebSocketSteamConsumerAdapter {
doc_id: doc_id.clone(),
edit_cmd_tx,
rev_manager: rev_manager.clone(),
shared_sink: shared_sink.clone(),
});
let data_provider = Arc::new(DocumentWSSinkDataProviderAdapter(shared_sink));
let ws_manager = Arc::new(HttpWebSocketManager::new(
&doc_id,
ws_conn,
data_provider,
ws_stream_consumer,
));
listen_document_ws_state(&user_id, &doc_id, ws_manager.scribe_state(), rev_manager);
Arc::new(ws_manager)
ws_msg_tx: UnboundedSender<DocumentServerWSData>,
ws_msg_rx: Option<UnboundedReceiver<DocumentServerWSData>>,
stop_sync_tx: SinkStopTx,
state: broadcast::Sender<WSConnectState>,
}
fn listen_document_ws_state(
_user_id: &str,
_doc_id: &str,
mut subscriber: broadcast::Receiver<WSConnectState>,
_rev_manager: Arc<DocumentRevisionManager>,
) {
tokio::spawn(async move {
while let Ok(state) = subscriber.recv().await {
match state {
WSConnectState::Init => {},
WSConnectState::Connecting => {},
WSConnectState::Connected => {},
WSConnectState::Disconnected => {},
}
impl DocumentWebSocketManager {
pub(crate) fn new(
doc_id: &str,
ws_conn: Arc<dyn DocumentWebSocket>,
data_provider: Arc<dyn DocumentWSSinkDataProvider>,
stream_consumer: Arc<dyn DocumentWSSteamConsumer>,
) -> Self {
let (ws_msg_tx, ws_msg_rx) = mpsc::unbounded_channel();
let (stop_sync_tx, _) = tokio::sync::broadcast::channel(2);
let doc_id = doc_id.to_string();
let (state, _) = broadcast::channel(2);
let mut manager = DocumentWebSocketManager {
doc_id,
data_provider,
stream_consumer,
ws_conn,
ws_msg_tx,
ws_msg_rx: Some(ws_msg_rx),
stop_sync_tx,
state,
};
manager.run();
manager
}
fn run(&mut self) {
let ws_msg_rx = self.ws_msg_rx.take().expect("Only take once");
let sink = DocumentWSSink::new(
&self.doc_id,
self.data_provider.clone(),
self.ws_conn.clone(),
self.stop_sync_tx.subscribe(),
);
let stream = DocumentWSStream::new(
&self.doc_id,
self.stream_consumer.clone(),
ws_msg_rx,
self.stop_sync_tx.subscribe(),
);
tokio::spawn(sink.run());
tokio::spawn(stream.run());
}
pub fn scribe_state(&self) -> broadcast::Receiver<WSConnectState> { self.state.subscribe() }
pub(crate) fn stop(&self) {
if self.stop_sync_tx.send(()).is_ok() {
tracing::debug!("{} stop sync", self.doc_id)
}
});
}
}
pub(crate) struct DocumentWebSocketSteamConsumerAdapter {
pub(crate) doc_id: String,
pub(crate) edit_cmd_tx: UnboundedSender<EditorCommand>,
pub(crate) rev_manager: Arc<DocumentRevisionManager>,
pub(crate) shared_sink: Arc<SharedWSSinkDataProvider>,
impl DocumentWSReceiver for DocumentWebSocketManager {
fn receive_ws_data(&self, doc_data: DocumentServerWSData) {
match self.ws_msg_tx.send(doc_data) {
Ok(_) => {},
Err(e) => tracing::error!("❌ Propagate ws message failed. {}", e),
}
}
fn connect_state_changed(&self, state: &WSConnectState) {
match self.state.send(state.clone()) {
Ok(_) => {},
Err(e) => tracing::error!("{}", e),
}
}
}
impl DocumentWSSteamConsumer for DocumentWebSocketSteamConsumerAdapter {
fn receive_push_revision(&self, bytes: Bytes) -> FutureResult<(), FlowyError> {
let rev_manager = self.rev_manager.clone();
let edit_cmd_tx = self.edit_cmd_tx.clone();
let shared_sink = self.shared_sink.clone();
impl std::ops::Drop for DocumentWebSocketManager {
fn drop(&mut self) { tracing::debug!("{} DocumentWebSocketManager was dropped", self.doc_id) }
}
pub trait DocumentWSSteamConsumer: Send + Sync {
fn receive_push_revision(&self, bytes: Bytes) -> FutureResult<(), FlowyError>;
fn receive_ack(&self, id: String, ty: DocumentServerWSDataType) -> FutureResult<(), FlowyError>;
fn receive_new_user_connect(&self, new_user: NewDocumentUser) -> FutureResult<(), FlowyError>;
fn pull_revisions_in_range(&self, range: RevisionRange) -> FutureResult<(), FlowyError>;
}
pub struct DocumentWSStream {
doc_id: String,
consumer: Arc<dyn DocumentWSSteamConsumer>,
ws_msg_rx: Option<mpsc::UnboundedReceiver<DocumentServerWSData>>,
stop_rx: Option<SinkStopRx>,
}
impl DocumentWSStream {
pub fn new(
doc_id: &str,
consumer: Arc<dyn DocumentWSSteamConsumer>,
ws_msg_rx: mpsc::UnboundedReceiver<DocumentServerWSData>,
stop_rx: SinkStopRx,
) -> Self {
DocumentWSStream {
doc_id: doc_id.to_owned(),
consumer,
ws_msg_rx: Some(ws_msg_rx),
stop_rx: Some(stop_rx),
}
}
pub async fn run(mut self) {
let mut receiver = self.ws_msg_rx.take().expect("Only take once");
let mut stop_rx = self.stop_rx.take().expect("Only take once");
let doc_id = self.doc_id.clone();
FutureResult::new(async move {
if let Some(server_composed_revision) = handle_remote_revision(edit_cmd_tx, rev_manager, bytes).await? {
let data = DocumentClientWSData::from_revisions(&doc_id, vec![server_composed_revision]);
shared_sink.push_back(data).await;
}
Ok(())
})
}
fn receive_ack(&self, id: String, ty: DocumentServerWSDataType) -> FutureResult<(), FlowyError> {
let shared_sink = self.shared_sink.clone();
FutureResult::new(async move { shared_sink.ack(id, ty).await })
}
fn receive_new_user_connect(&self, _new_user: NewDocumentUser) -> FutureResult<(), FlowyError> {
// the _new_user will be used later
FutureResult::new(async move { Ok(()) })
}
fn pull_revisions_in_range(&self, range: RevisionRange) -> FutureResult<(), FlowyError> {
let rev_manager = self.rev_manager.clone();
let shared_sink = self.shared_sink.clone();
let doc_id = self.doc_id.clone();
FutureResult::new(async move {
let revisions = rev_manager.get_revisions_in_range(range).await?;
let data = DocumentClientWSData::from_revisions(&doc_id, revisions);
shared_sink.push_back(data).await;
Ok(())
})
}
}
pub(crate) struct DocumentWSSinkDataProviderAdapter(pub(crate) Arc<SharedWSSinkDataProvider>);
impl DocumentWSSinkDataProvider for DocumentWSSinkDataProviderAdapter {
fn next(&self) -> FutureResult<Option<DocumentClientWSData>, FlowyError> {
let shared_sink = self.0.clone();
FutureResult::new(async move { shared_sink.next().await })
}
}
async fn transform_pushed_revisions(
revisions: Vec<Revision>,
edit_cmd: &UnboundedSender<EditorCommand>,
) -> FlowyResult<TransformDeltas> {
let (ret, rx) = oneshot::channel::<CollaborateResult<TransformDeltas>>();
let _ = edit_cmd.send(EditorCommand::TransformRevision { revisions, ret });
Ok(rx.await.map_err(internal_error)??)
}
#[tracing::instrument(level = "debug", skip(edit_cmd_tx, rev_manager, bytes))]
pub(crate) async fn handle_remote_revision(
edit_cmd_tx: UnboundedSender<EditorCommand>,
rev_manager: Arc<DocumentRevisionManager>,
bytes: Bytes,
) -> FlowyResult<Option<Revision>> {
let mut revisions = RepeatedRevision::try_from(bytes)?.into_inner();
if revisions.is_empty() {
return Ok(None);
}
let first_revision = revisions.first().unwrap();
if let Some(local_revision) = rev_manager.get_revision(first_revision.rev_id).await {
if local_revision.md5 == first_revision.md5 {
// The local revision is equal to the pushed revision. Just ignore it.
revisions = revisions.split_off(1);
if revisions.is_empty() {
return Ok(None);
}
} else {
return Ok(None);
}
}
let TransformDeltas {
client_prime,
server_prime,
} = transform_pushed_revisions(revisions.clone(), &edit_cmd_tx).await?;
match server_prime {
None => {
// The server_prime is None means the client local revisions conflict with the
// server, and it needs to override the client delta.
let (ret, rx) = oneshot::channel();
let _ = edit_cmd_tx.send(EditorCommand::OverrideDelta {
revisions,
delta: client_prime,
ret,
});
let _ = rx.await.map_err(internal_error)??;
Ok(None)
},
Some(server_prime) => {
let (ret, rx) = oneshot::channel();
let _ = edit_cmd_tx.send(EditorCommand::ComposeRemoteDelta {
revisions,
client_delta: client_prime,
server_delta: server_prime,
ret,
});
Ok(rx.await.map_err(internal_error)??)
},
}
}
#[derive(Clone)]
enum SourceType {
Shared,
Revision,
}
#[derive(Clone)]
pub(crate) struct SharedWSSinkDataProvider {
shared: Arc<RwLock<VecDeque<DocumentClientWSData>>>,
rev_manager: Arc<DocumentRevisionManager>,
source_ty: Arc<RwLock<SourceType>>,
}
impl SharedWSSinkDataProvider {
pub(crate) fn new(rev_manager: Arc<DocumentRevisionManager>) -> Self {
SharedWSSinkDataProvider {
shared: Arc::new(RwLock::new(VecDeque::new())),
rev_manager,
source_ty: Arc::new(RwLock::new(SourceType::Shared)),
}
}
#[allow(dead_code)]
pub(crate) async fn push_front(&self, data: DocumentClientWSData) { self.shared.write().await.push_front(data); }
async fn push_back(&self, data: DocumentClientWSData) { self.shared.write().await.push_back(data); }
async fn next(&self) -> FlowyResult<Option<DocumentClientWSData>> {
let source_ty = self.source_ty.read().await.clone();
match source_ty {
SourceType::Shared => match self.shared.read().await.front() {
None => {
*self.source_ty.write().await = SourceType::Revision;
Ok(None)
},
Some(data) => {
tracing::debug!("[SharedWSSinkDataProvider]: {}:{:?}", data.doc_id, data.ty);
Ok(Some(data.clone()))
},
},
SourceType::Revision => {
if !self.shared.read().await.is_empty() {
*self.source_ty.write().await = SourceType::Shared;
return Ok(None);
}
match self.rev_manager.next_sync_revision().await? {
Some(rev) => {
let doc_id = rev.doc_id.clone();
Ok(Some(DocumentClientWSData::from_revisions(&doc_id, vec![rev])))
},
None => {
//
let doc_id = self.rev_manager.doc_id.clone();
let latest_rev_id = self.rev_manager.rev_id();
Ok(Some(DocumentClientWSData::ping(&doc_id, latest_rev_id)))
},
}
},
}
}
async fn ack(&self, id: String, _ty: DocumentServerWSDataType) -> FlowyResult<()> {
// let _ = self.rev_manager.ack_revision(id).await?;
let source_ty = self.source_ty.read().await.clone();
match source_ty {
SourceType::Shared => {
let should_pop = match self.shared.read().await.front() {
None => false,
Some(val) => {
let expected_id = val.id();
if expected_id == id {
true
} else {
tracing::error!("The front element's {} is not equal to the {}", expected_id, id);
false
let stream = stream! {
loop {
tokio::select! {
result = receiver.recv() => {
match result {
Some(msg) => {
yield msg
},
None => {
tracing::debug!("[DocumentStream:{}] loop exit", doc_id);
break;
},
}
},
_ = stop_rx.recv() => {
tracing::debug!("[DocumentStream:{}] loop exit", doc_id);
break
},
};
if should_pop {
let _ = self.shared.write().await.pop_front();
}
};
stream
.for_each(|msg| async {
match self.handle_message(msg).await {
Ok(_) => {},
Err(e) => log::error!("[DocumentStream:{}] error: {}", self.doc_id, e),
}
})
.await;
}
async fn handle_message(&self, msg: DocumentServerWSData) -> FlowyResult<()> {
let DocumentServerWSData { doc_id: _, ty, data } = msg;
let bytes = spawn_blocking(move || Bytes::from(data))
.await
.map_err(internal_error)?;
tracing::trace!("[DocumentStream]: new message: {:?}", ty);
match ty {
DocumentServerWSDataType::ServerPushRev => {
let _ = self.consumer.receive_push_revision(bytes).await?;
},
SourceType::Revision => {
match id.parse::<i64>() {
Ok(rev_id) => {
let _ = self.rev_manager.ack_revision(rev_id).await?;
},
Err(e) => {
tracing::error!("Parse rev_id from {} failed. {}", id, e);
},
};
DocumentServerWSDataType::ServerPullRev => {
let range = RevisionRange::try_from(bytes)?;
let _ = self.consumer.pull_revisions_in_range(range).await?;
},
DocumentServerWSDataType::ServerAck => {
let rev_id = RevId::try_from(bytes).unwrap().value;
let _ = self.consumer.receive_ack(rev_id.to_string(), ty).await;
},
DocumentServerWSDataType::UserConnect => {
let new_user = NewDocumentUser::try_from(bytes)?;
let _ = self.consumer.receive_new_user_connect(new_user).await;
// Notify the user that someone has connected to this document
},
}
Ok(())
}
}
pub type Tick = ();
pub type SinkStopRx = broadcast::Receiver<()>;
pub type SinkStopTx = broadcast::Sender<()>;
pub trait DocumentWSSinkDataProvider: Send + Sync {
fn next(&self) -> FutureResult<Option<DocumentClientWSData>, FlowyError>;
}
pub struct DocumentWSSink {
provider: Arc<dyn DocumentWSSinkDataProvider>,
ws_sender: Arc<dyn DocumentWebSocket>,
stop_rx: Option<SinkStopRx>,
doc_id: String,
}
impl DocumentWSSink {
pub fn new(
doc_id: &str,
provider: Arc<dyn DocumentWSSinkDataProvider>,
ws_sender: Arc<dyn DocumentWebSocket>,
stop_rx: SinkStopRx,
) -> Self {
Self {
provider,
ws_sender,
stop_rx: Some(stop_rx),
doc_id: doc_id.to_owned(),
}
}
pub async fn run(mut self) {
let (tx, mut rx) = mpsc::unbounded_channel();
let mut stop_rx = self.stop_rx.take().expect("Only take once");
let doc_id = self.doc_id.clone();
tokio::spawn(tick(tx));
let stream = stream! {
loop {
tokio::select! {
result = rx.recv() => {
match result {
Some(msg) => yield msg,
None => break,
}
},
_ = stop_rx.recv() => {
tracing::debug!("[DocumentSink:{}] loop exit", doc_id);
break
},
};
}
};
stream
.for_each(|_| async {
match self.send_next_revision().await {
Ok(_) => {},
Err(e) => log::error!("[DocumentSink] send failed, {:?}", e),
}
})
.await;
}
async fn send_next_revision(&self) -> FlowyResult<()> {
match self.provider.next().await? {
None => {
tracing::trace!("Finish synchronizing revisions");
Ok(())
},
Some(data) => {
tracing::trace!("[DocumentSink] send: {}:{}-{:?}", data.doc_id, data.id(), data.ty);
self.ws_sender.send(data)
},
}
}
}
async fn tick(sender: mpsc::UnboundedSender<Tick>) {
let mut interval = interval(Duration::from_millis(SYNC_INTERVAL_IN_MILLIS));
while sender.send(()).is_ok() {
interval.tick().await;
}
}

View File

@@ -7,7 +7,7 @@ edition = "2018"
[dependencies]
lib-dispatch = { path = "../lib-dispatch" }
flowy-error = { path = "../flowy-error" }
flowy-error = { path = "../flowy-error", features = ["collaboration"] }
flowy-derive = { path = "../../../shared-lib/flowy-derive" }
flowy-collaboration = { path = "../../../shared-lib/flowy-collaboration"}
backend-service = { path = "../../../shared-lib/backend-service" }

View File

@@ -1,4 +1,4 @@
use crate::{entities::NetworkState, services::ws_conn::FlowyWebSocketConnect};
use crate::{entities::NetworkState, ws::connection::FlowyWebSocketConnect};
use flowy_error::FlowyError;
use lib_dispatch::prelude::{Data, Unit};
use std::sync::Arc;

View File

@@ -4,4 +4,4 @@ mod event;
mod handlers;
pub mod module;
pub mod protobuf;
pub mod services;
pub mod ws;

View File

@@ -1,4 +1,4 @@
use crate::{event::NetworkEvent, handlers::*, services::ws_conn::FlowyWebSocketConnect};
use crate::{event::NetworkEvent, handlers::*, ws::connection::FlowyWebSocketConnect};
use lib_dispatch::prelude::*;
use std::sync::Arc;

View File

@@ -1,5 +0,0 @@
mod local_server;
mod local_ws;
mod persistence;
pub use local_ws::*;

View File

@@ -1,74 +0,0 @@
use dashmap::DashMap;
use flowy_collaboration::{
entities::doc::DocumentInfo,
errors::CollaborateError,
protobuf::{RepeatedRevision as RepeatedRevisionPB, Revision as RevisionPB},
sync::*,
util::repeated_revision_from_repeated_revision_pb,
};
use lib_infra::future::BoxResultFuture;
use std::{
fmt::{Debug, Formatter},
sync::Arc,
};
pub(crate) struct LocalServerDocumentPersistence {
// For the moment, we use memory to cache the data, it will be implemented with other storage.
// Like the Firestore,Dropbox.etc.
inner: Arc<DashMap<String, DocumentInfo>>,
}
impl Debug for LocalServerDocumentPersistence {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_str("LocalDocServerPersistence") }
}
impl std::default::Default for LocalServerDocumentPersistence {
fn default() -> Self {
LocalServerDocumentPersistence {
inner: Arc::new(DashMap::new()),
}
}
}
impl ServerDocumentPersistence for LocalServerDocumentPersistence {
fn read_document(&self, doc_id: &str) -> BoxResultFuture<DocumentInfo, CollaborateError> {
let inner = self.inner.clone();
let doc_id = doc_id.to_owned();
Box::pin(async move {
match inner.get(&doc_id) {
None => Err(CollaborateError::record_not_found()),
Some(val) => {
//
Ok(val.value().clone())
},
}
})
}
fn create_document(
&self,
doc_id: &str,
repeated_revision: RepeatedRevisionPB,
) -> BoxResultFuture<DocumentInfo, CollaborateError> {
let doc_id = doc_id.to_owned();
let inner = self.inner.clone();
Box::pin(async move {
let repeated_revision = repeated_revision_from_repeated_revision_pb(repeated_revision)?;
let document_info = DocumentInfo::from_revisions(&doc_id, repeated_revision.into_inner())?;
inner.insert(doc_id, document_info.clone());
Ok(document_info)
})
}
fn read_revisions(
&self,
_doc_id: &str,
_rev_ids: Option<Vec<i64>>,
) -> BoxResultFuture<Vec<RevisionPB>, CollaborateError> {
Box::pin(async move { Ok(vec![]) })
}
fn reset_document(&self, _doc_id: &str, _revisions: RepeatedRevisionPB) -> BoxResultFuture<(), CollaborateError> {
unimplemented!()
}
}

View File

@@ -94,31 +94,27 @@ impl FlowyWebSocketConnect {
#[tracing::instrument(level = "debug", skip(ws_conn))]
pub fn listen_on_websocket(ws_conn: Arc<FlowyWebSocketConnect>) {
if cfg!(feature = "http_server") {
let ws = ws_conn.inner.clone();
let mut notify = ws_conn.inner.subscribe_connect_state();
let _ = tokio::spawn(async move {
loop {
match notify.recv().await {
Ok(state) => {
tracing::info!("Websocket state changed: {}", state);
match state {
WSConnectState::Init => {},
WSConnectState::Connected => {},
WSConnectState::Connecting => {},
WSConnectState::Disconnected => retry_connect(ws.clone(), 100).await,
}
},
Err(e) => {
tracing::error!("Websocket state notify error: {:?}", e);
break;
},
}
let ws = ws_conn.inner.clone();
let mut notify = ws_conn.inner.subscribe_connect_state();
let _ = tokio::spawn(async move {
loop {
match notify.recv().await {
Ok(state) => {
tracing::info!("Websocket state changed: {}", state);
match state {
WSConnectState::Init => {},
WSConnectState::Connected => {},
WSConnectState::Connecting => {},
WSConnectState::Disconnected => retry_connect(ws.clone(), 100).await,
}
},
Err(e) => {
tracing::error!("Websocket state notify error: {:?}", e);
break;
},
}
});
} else {
// do nothing
};
}
});
}
async fn retry_connect(ws: Arc<dyn FlowyRawWebSocket>, count: usize) {

View File

@@ -1,4 +1,4 @@
use crate::services::ws_conn::{FlowyRawWebSocket, FlowyWSSender};
use crate::ws::connection::{FlowyRawWebSocket, FlowyWSSender};
use flowy_error::internal_error;
pub use flowy_error::FlowyError;
use lib_infra::future::FutureResult;

View File

@@ -1,4 +1,4 @@
use crate::services::local::persistence::LocalServerDocumentPersistence;
use crate::ws::local::persistence::LocalDocumentCloudPersistence;
use bytes::Bytes;
use flowy_collaboration::{
entities::ws::{DocumentClientWSData, DocumentClientWSDataType},
@@ -13,12 +13,12 @@ use tokio::sync::{mpsc, mpsc::UnboundedSender};
pub struct LocalDocumentServer {
pub doc_manager: Arc<ServerDocumentManager>,
sender: mpsc::UnboundedSender<WebSocketRawMessage>,
persistence: Arc<dyn ServerDocumentPersistence>,
persistence: Arc<LocalDocumentCloudPersistence>,
}
impl LocalDocumentServer {
pub fn new(sender: mpsc::UnboundedSender<WebSocketRawMessage>) -> Self {
let persistence = Arc::new(LocalServerDocumentPersistence::default());
let persistence = Arc::new(LocalDocumentCloudPersistence::default());
let doc_manager = Arc::new(ServerDocumentManager::new(persistence.clone()));
LocalDocumentServer {
doc_manager,
@@ -41,7 +41,6 @@ impl LocalDocumentServer {
let user = Arc::new(LocalDocumentUser {
user_id,
ws_sender: self.sender.clone(),
persistence: self.persistence.clone(),
});
let ty = client_data.ty.clone();
let document_client_data: DocumentClientWSDataPB = client_data.try_into().unwrap();
@@ -64,7 +63,6 @@ impl LocalDocumentServer {
struct LocalDocumentUser {
user_id: String,
ws_sender: mpsc::UnboundedSender<WebSocketRawMessage>,
persistence: Arc<dyn ServerDocumentPersistence>,
}
impl RevisionUser for LocalDocumentUser {
@@ -105,9 +103,6 @@ impl RevisionUser for LocalDocumentUser {
};
send_fn(sender, msg);
},
SyncResponse::NewRevision(mut _repeated_revision) => {
// unimplemented!()
},
}
});
}

View File

@@ -1,6 +1,6 @@
use crate::services::{
use crate::ws::{
connection::{FlowyRawWebSocket, FlowyWSSender},
local::local_server::LocalDocumentServer,
ws_conn::{FlowyRawWebSocket, FlowyWSSender},
};
use bytes::Bytes;
use dashmap::DashMap;

View File

@@ -0,0 +1,24 @@
mod local_server;
mod local_ws;
mod persistence;
use flowy_collaboration::errors::CollaborateError;
pub use local_ws::*;
use flowy_collaboration::protobuf::RepeatedRevision as RepeatedRevisionPB;
use lib_infra::future::BoxResultFuture;
pub trait DocumentCloudStorage: Send + Sync {
fn set_revisions(&self, repeated_revision: RepeatedRevisionPB) -> BoxResultFuture<(), CollaborateError>;
fn get_revisions(
&self,
doc_id: &str,
rev_ids: Option<Vec<i64>>,
) -> BoxResultFuture<RepeatedRevisionPB, CollaborateError>;
fn reset_document(
&self,
doc_id: &str,
repeated_revision: RepeatedRevisionPB,
) -> BoxResultFuture<(), CollaborateError>;
}
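Note for readers of this diff: the DocumentCloudStorage trait above is the generic storage abstraction the commit title refers to. A minimal sketch of what a non-memory backend might look like follows; the RemoteDocumentCloudStorage struct and its comments are illustrative assumptions, not code from this change (the method signatures are copied from the trait above).
use crate::ws::local::DocumentCloudStorage;
use flowy_collaboration::{errors::CollaborateError, protobuf::RepeatedRevision as RepeatedRevisionPB};
use lib_infra::future::BoxResultFuture;

// Hypothetical remote backend; the struct name and its (empty) body are placeholders.
struct RemoteDocumentCloudStorage {}

impl DocumentCloudStorage for RemoteDocumentCloudStorage {
    fn set_revisions(&self, repeated_revision: RepeatedRevisionPB) -> BoxResultFuture<(), CollaborateError> {
        // A real backend would serialize `repeated_revision` and upload it here.
        Box::pin(async move {
            let _ = repeated_revision;
            Ok(())
        })
    }

    fn get_revisions(
        &self,
        doc_id: &str,
        rev_ids: Option<Vec<i64>>,
    ) -> BoxResultFuture<RepeatedRevisionPB, CollaborateError> {
        // A real backend would fetch the requested revisions for `doc_id`.
        let _ = (doc_id, rev_ids);
        Box::pin(async move { Ok(RepeatedRevisionPB::new()) })
    }

    fn reset_document(
        &self,
        doc_id: &str,
        repeated_revision: RepeatedRevisionPB,
    ) -> BoxResultFuture<(), CollaborateError> {
        // A real backend would replace the document's revision history remotely.
        let _ = (doc_id, repeated_revision);
        Box::pin(async move { Ok(()) })
    }
}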

View File

@@ -0,0 +1,130 @@
use crate::ws::local::DocumentCloudStorage;
use dashmap::DashMap;
use flowy_collaboration::{
entities::doc::DocumentInfo,
errors::CollaborateError,
protobuf::{RepeatedRevision as RepeatedRevisionPB, Revision as RevisionPB},
sync::*,
util::{make_doc_from_revisions, repeated_revision_from_repeated_revision_pb},
};
use lib_infra::future::BoxResultFuture;
use std::{
convert::TryInto,
fmt::{Debug, Formatter},
sync::Arc,
};
pub(crate) struct LocalDocumentCloudPersistence {
// For the moment, we cache the data in memory; it will later be backed by other
// storage providers such as Firestore or Dropbox.
storage: Arc<dyn DocumentCloudStorage>,
}
impl Debug for LocalDocumentCloudPersistence {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_str("LocalDocServerPersistence") }
}
impl std::default::Default for LocalDocumentCloudPersistence {
fn default() -> Self {
LocalDocumentCloudPersistence {
storage: Arc::new(MemoryDocumentCloudStorage::default()),
}
}
}
impl DocumentCloudPersistence for LocalDocumentCloudPersistence {
fn enable_sync(&self) -> bool { false }
fn read_document(&self, doc_id: &str) -> BoxResultFuture<DocumentInfo, CollaborateError> {
let storage = self.storage.clone();
let doc_id = doc_id.to_owned();
Box::pin(async move {
let repeated_revision = storage.get_revisions(&doc_id, None).await?;
match make_doc_from_revisions(&doc_id, repeated_revision) {
Ok(Some(mut document_info_pb)) => {
let document_info: DocumentInfo = (&mut document_info_pb)
.try_into()
.map_err(|e| CollaborateError::internal().context(e))?;
Ok(document_info)
},
Ok(None) => Err(CollaborateError::record_not_found()),
Err(e) => Err(CollaborateError::internal().context(e)),
}
})
}
fn create_document(
&self,
doc_id: &str,
repeated_revision: RepeatedRevisionPB,
) -> BoxResultFuture<DocumentInfo, CollaborateError> {
let doc_id = doc_id.to_owned();
let storage = self.storage.clone();
Box::pin(async move {
let _ = storage.set_revisions(repeated_revision.clone()).await?;
let repeated_revision = repeated_revision_from_repeated_revision_pb(repeated_revision)?;
let document_info = DocumentInfo::from_revisions(&doc_id, repeated_revision.into_inner())?;
Ok(document_info)
})
}
fn read_revisions(
&self,
doc_id: &str,
rev_ids: Option<Vec<i64>>,
) -> BoxResultFuture<Vec<RevisionPB>, CollaborateError> {
let doc_id = doc_id.to_owned();
let storage = self.storage.clone();
Box::pin(async move {
let mut repeated_revision = storage.get_revisions(&doc_id, rev_ids).await?;
let revisions: Vec<RevisionPB> = repeated_revision.take_items().into();
Ok(revisions)
})
}
fn save_revisions(&self, repeated_revision: RepeatedRevisionPB) -> BoxResultFuture<(), CollaborateError> {
let storage = self.storage.clone();
Box::pin(async move {
let _ = storage.set_revisions(repeated_revision).await?;
Ok(())
})
}
fn reset_document(&self, doc_id: &str, revisions: RepeatedRevisionPB) -> BoxResultFuture<(), CollaborateError> {
let storage = self.storage.clone();
let doc_id = doc_id.to_owned();
Box::pin(async move {
let _ = storage.reset_document(&doc_id, revisions).await?;
Ok(())
})
}
}
struct MemoryDocumentCloudStorage {}
impl std::default::Default for MemoryDocumentCloudStorage {
fn default() -> Self { Self {} }
}
impl DocumentCloudStorage for MemoryDocumentCloudStorage {
fn set_revisions(&self, _repeated_revision: RepeatedRevisionPB) -> BoxResultFuture<(), CollaborateError> {
Box::pin(async move { Ok(()) })
}
fn get_revisions(
&self,
_doc_id: &str,
_rev_ids: Option<Vec<i64>>,
) -> BoxResultFuture<RepeatedRevisionPB, CollaborateError> {
Box::pin(async move {
let repeated_revisions = RepeatedRevisionPB::new();
Ok(repeated_revisions)
})
}
fn reset_document(
&self,
_doc_id: &str,
_repeated_revision: RepeatedRevisionPB,
) -> BoxResultFuture<(), CollaborateError> {
Box::pin(async move { Ok(()) })
}
}
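One usage note on the block above: this commit only wires the in-memory MemoryDocumentCloudStorage into LocalDocumentCloudPersistence through Default. The comment at the top of the struct says other storage (Firestore, Dropbox, etc.) will come later; a hypothetical constructor like the one below, which is not part of this change, would be the natural injection point for an alternative DocumentCloudStorage implementation such as the sketch shown after the trait definition.
impl LocalDocumentCloudPersistence {
    // Hypothetical constructor (not in this commit): inject any DocumentCloudStorage
    // implementation instead of the in-memory default.
    pub(crate) fn new_with_storage(storage: Arc<dyn DocumentCloudStorage>) -> Self {
        LocalDocumentCloudPersistence { storage }
    }
}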

View File

@@ -1,3 +1,3 @@
pub mod connection;
pub mod http;
pub mod local;
pub mod ws_conn;

View File

@@ -13,7 +13,7 @@ use flowy_document::{
};
use flowy_net::{
cloud::document::{DocumentHttpCloudService, DocumentLocalCloudService},
services::ws_conn::FlowyWebSocketConnect,
ws::connection::FlowyWebSocketConnect,
};
use flowy_user::services::UserSession;
use lib_infra::future::FutureResult;

View File

@@ -6,9 +6,9 @@ use flowy_core::{context::CoreContext, errors::FlowyError, module::init_core};
use flowy_document::context::DocumentContext;
use flowy_net::{
entities::NetworkType,
services::{
ws::{
connection::{listen_on_websocket, FlowyRawWebSocket, FlowyWebSocketConnect},
local::LocalWebSocket,
ws_conn::{listen_on_websocket, FlowyRawWebSocket, FlowyWebSocketConnect},
},
};
use flowy_user::services::{notifier::UserStatus, UserSession, UserSessionConfig};

View File

@@ -1,5 +1,5 @@
use flowy_core::context::CoreContext;
use flowy_net::services::ws_conn::FlowyWebSocketConnect;
use flowy_net::ws::connection::FlowyWebSocketConnect;
use flowy_user::services::UserSession;
use lib_dispatch::prelude::Module;
use std::sync::Arc;