build doc from local revision or fetch from remote

appflowy 2021-10-02 17:19:54 +08:00
parent 4e3ebf8876
commit af6afafd0f
29 changed files with 764 additions and 540 deletions

View File

@ -114,7 +114,7 @@ fn user_scope() -> Scope {
.route(web::patch().to(view::update_handler))
)
.service(web::resource("/doc")
.route(web::get().to(doc::create_handler))
.route(web::post().to(doc::create_handler))
.route(web::get().to(doc::read_handler))
.route(web::patch().to(doc::update_handler))
)
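Before this change the /doc resource routed both create_handler and read_handler through web::get(), so the two GET registrations collided and document creation was unreachable; the fix moves creation to web::post(). A minimal sketch of the corrected resource shape, assuming actix-web with placeholder handler bodies (the real handlers live in doc::):

use actix_web::{web, App, HttpServer, Scope};

// Placeholder handlers standing in for doc::create_handler and friends.
async fn create_handler() -> &'static str { "create" }
async fn read_handler() -> &'static str { "read" }
async fn update_handler() -> &'static str { "update" }

fn user_scope() -> Scope {
    web::scope("/api").service(
        web::resource("/doc")
            .route(web::post().to(create_handler))   // POST creates a doc
            .route(web::get().to(read_handler))      // GET reads it
            .route(web::patch().to(update_handler)), // PATCH applies updates
    )
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().service(user_scope()))
        .bind(("127.0.0.1", 8088))?
        .run()
        .await
}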

View File

@ -1,8 +1,8 @@
use crate::service::{
doc::{
actor::{DocWsMsg, DocWsMsgActor},
edit::EditDoc,
edit::DocHandle,
read_doc,
ws_actor::{DocWsActor, DocWsMsg},
},
ws::{WsBizHandler, WsClientData},
};
@ -27,7 +27,7 @@ impl DocBiz {
pub fn new(pg_pool: Data<PgPool>) -> Self {
let manager = Arc::new(DocManager::new());
let (tx, rx) = mpsc::channel(100);
let actor = DocWsMsgActor::new(rx, manager.clone());
let actor = DocWsActor::new(rx, manager.clone());
tokio::task::spawn(actor.run());
Self {
manager,
@ -58,7 +58,7 @@ impl WsBizHandler for DocBiz {
}
pub struct DocManager {
docs_map: DashMap<String, Arc<EditDoc>>,
docs_map: DashMap<String, Arc<DocHandle>>,
}
impl DocManager {
@ -68,7 +68,7 @@ impl DocManager {
}
}
pub async fn get(&self, doc_id: &str, pg_pool: Data<PgPool>) -> Result<Option<Arc<EditDoc>>, ServerError> {
pub async fn get(&self, doc_id: &str, pg_pool: Data<PgPool>) -> Result<Option<Arc<DocHandle>>, ServerError> {
match self.docs_map.get(doc_id) {
None => {
let params = QueryDocParams {
@ -76,10 +76,12 @@ impl DocManager {
..Default::default()
};
let doc = read_doc(pg_pool.get_ref(), params).await?;
let edit_doc = spawn_blocking(|| EditDoc::new(doc)).await.map_err(internal_error)?;
let edit_doc = Arc::new(edit_doc?);
self.docs_map.insert(doc_id.to_string(), edit_doc.clone());
Ok(Some(edit_doc))
let handle = spawn_blocking(|| DocHandle::new(doc, pg_pool))
.await
.map_err(internal_error)?;
let handle = Arc::new(handle?);
self.docs_map.insert(doc_id.to_string(), handle.clone());
Ok(Some(handle))
},
Some(ctx) => Ok(Some(ctx.clone())),
}
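DocManager::get is a read-through cache: on a miss it loads the doc from Postgres, builds the DocHandle on a blocking thread, and stores the Arc in the DashMap. Note the check-then-insert is not atomic, so two concurrent misses on the same doc_id can each build a handle and the later insert wins. When construction is synchronous, DashMap's entry API closes that window; a minimal sketch with a stand-in Handle type:

use dashmap::DashMap;
use std::sync::Arc;

struct Handle; // stand-in for DocHandle

struct Manager {
    docs: DashMap<String, Arc<Handle>>,
}

impl Manager {
    fn get_or_create(&self, doc_id: &str) -> Arc<Handle> {
        // entry() holds the shard lock across lookup and insert,
        // so only one Handle is ever built per doc_id.
        self.docs
            .entry(doc_id.to_string())
            .or_insert_with(|| Arc::new(Handle))
            .clone()
    }
}

fn main() {
    let m = Manager { docs: DashMap::new() };
    let h1 = m.get_or_create("doc-1");
    let h2 = m.get_or_create("doc-1");
    assert!(Arc::ptr_eq(&h1, &h2)); // same handle both times
}

An entry guard cannot be held across an .await, which is presumably why the commit keeps the lookup-then-insert form for its async load path and accepts the benign race.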

View File

@ -1,194 +0,0 @@
use crate::service::{doc::edit::actor::EditUser, util::md5, ws::WsMessageAdaptor};
use byteorder::{BigEndian, WriteBytesExt};
use bytes::Bytes;
use dashmap::DashMap;
use flowy_document::{
entities::ws::{WsDataType, WsDocumentData},
protobuf::{Doc, RevType, Revision, RevisionRange, UpdateDocParams},
services::doc::Document,
};
use flowy_net::errors::{internal_error, ServerError};
use flowy_ot::{
core::{Delta, OperationTransformable},
errors::OTError,
};
use flowy_ws::WsMessage;
use parking_lot::RwLock;
use protobuf::Message;
use std::{
convert::TryInto,
sync::{
atomic::{AtomicI64, Ordering::SeqCst},
Arc,
},
time::Duration,
};
pub struct EditDocContext {
doc_id: String,
rev_id: AtomicI64,
document: Arc<RwLock<Document>>,
users: DashMap<String, EditUser>,
}
impl EditDocContext {
pub fn new(doc: Doc) -> Result<Self, ServerError> {
let delta = Delta::from_bytes(&doc.data).map_err(internal_error)?;
let document = Arc::new(RwLock::new(Document::from_delta(delta)));
let users = DashMap::new();
Ok(Self {
doc_id: doc.id.clone(),
rev_id: AtomicI64::new(doc.rev_id),
document,
users,
})
}
pub fn document_json(&self) -> String { self.document.read().to_json() }
pub async fn apply_revision(&self, user: EditUser, revision: Revision) -> Result<(), ServerError> {
// Opti: find out another way to keep the user socket available.
self.users.insert(user.id(), user.clone());
log::debug!(
"cur_base_rev_id: {}, expect_base_rev_id: {} rev_id: {}",
self.rev_id.load(SeqCst),
revision.base_rev_id,
revision.rev_id
);
let cur_rev_id = self.rev_id.load(SeqCst);
if cur_rev_id > revision.rev_id {
// The client document is outdated. Transform the client revision delta and then
// send the prime delta to the client. The client should compose this prime
// delta.
let (cli_prime, server_prime) = self.transform(&revision.delta_data).map_err(internal_error)?;
let _ = self.update_document_delta(server_prime)?;
log::debug!("{} client delta: {}", self.doc_id, cli_prime.to_json());
let cli_revision = self.mk_revision(revision.rev_id, cli_prime);
let ws_cli_revision = mk_push_rev_ws_message(&self.doc_id, cli_revision);
user.socket.do_send(ws_cli_revision).map_err(internal_error)?;
Ok(())
} else if cur_rev_id < revision.rev_id {
if cur_rev_id != revision.base_rev_id {
// The server document is outdated; try to get the missing revision from the
// client.
user.socket
.do_send(mk_pull_rev_ws_message(&self.doc_id, cur_rev_id, revision.rev_id))
.map_err(internal_error)?;
} else {
let delta = Delta::from_bytes(&revision.delta_data).map_err(internal_error)?;
let _ = self.update_document_delta(delta)?;
user.socket
.do_send(mk_acked_ws_message(&revision))
.map_err(internal_error)?;
self.rev_id.fetch_update(SeqCst, SeqCst, |_e| Some(revision.rev_id));
let _ = self.save_revision(&revision).await?;
}
Ok(())
} else {
log::error!("Client rev_id should not equal to server rev_id");
Ok(())
}
}
fn mk_revision(&self, base_rev_id: i64, delta: Delta) -> Revision {
let delta_data = delta.to_bytes().to_vec();
let md5 = md5(&delta_data);
let revision = Revision {
base_rev_id,
rev_id: self.rev_id.load(SeqCst),
delta_data,
md5,
doc_id: self.doc_id.to_string(),
ty: RevType::Remote,
..Default::default()
};
revision
}
#[tracing::instrument(level = "debug", skip(self, delta_data))]
fn transform(&self, delta_data: &Vec<u8>) -> Result<(Delta, Delta), OTError> {
log::debug!("Document: {}", self.document.read().to_json());
let doc_delta = self.document.read().delta().clone();
let cli_delta = Delta::from_bytes(delta_data)?;
log::debug!("Compose delta: {}", cli_delta);
let (cli_prime, server_prime) = doc_delta.transform(&cli_delta)?;
Ok((cli_prime, server_prime))
}
#[tracing::instrument(level = "debug", skip(self, delta))]
fn update_document_delta(&self, delta: Delta) -> Result<(), ServerError> {
// Opti: push each revision into a queue and process them one by one.
match self.document.try_write_for(Duration::from_millis(300)) {
None => {
log::error!("Failed to acquire write lock of document");
},
Some(mut write_guard) => {
let _ = write_guard.compose_delta(&delta).map_err(internal_error)?;
log::debug!("Document: {}", write_guard.to_json());
},
}
Ok(())
}
#[tracing::instrument(level = "debug", skip(self, revision))]
async fn save_revision(&self, revision: &Revision) -> Result<(), ServerError> {
// Opti: save with multiple revisions
let mut params = UpdateDocParams::new();
params.set_doc_id(self.doc_id.clone());
params.set_data(self.document.read().to_json());
params.set_rev_id(revision.rev_id);
// let _ = update_doc(self.pg_pool.get_ref(), params).await?;
Ok(())
}
}
fn mk_push_rev_ws_message(doc_id: &str, revision: Revision) -> WsMessageAdaptor {
let bytes = revision.write_to_bytes().unwrap();
let data = WsDocumentData {
id: doc_id.to_string(),
ty: WsDataType::PushRev,
data: bytes,
};
mk_ws_message(data)
}
fn mk_pull_rev_ws_message(doc_id: &str, from_rev_id: i64, to_rev_id: i64) -> WsMessageAdaptor {
let range = RevisionRange {
doc_id: doc_id.to_string(),
from_rev_id,
to_rev_id,
..Default::default()
};
let bytes = range.write_to_bytes().unwrap();
let data = WsDocumentData {
id: doc_id.to_string(),
ty: WsDataType::PullRev,
data: bytes,
};
mk_ws_message(data)
}
fn mk_acked_ws_message(revision: &Revision) -> WsMessageAdaptor {
let mut wtr = vec![];
let _ = wtr.write_i64::<BigEndian>(revision.rev_id);
let data = WsDocumentData {
id: revision.doc_id.clone(),
ty: WsDataType::Acked,
data: wtr,
};
mk_ws_message(data)
}
fn mk_ws_message<T: Into<WsMessage>>(data: T) -> WsMessageAdaptor {
let msg: WsMessage = data.into();
let bytes: Bytes = msg.try_into().unwrap();
WsMessageAdaptor(bytes)
}

View File

@ -1,11 +1,13 @@
use crate::service::{
doc::edit::EditDocContext,
doc::edit::ServerEditDoc,
ws::{entities::Socket, WsUser},
};
use actix_web::web::Data;
use async_stream::stream;
use flowy_document::protobuf::Revision;
use flowy_net::errors::{internal_error, Result as DocResult};
use flowy_document::protobuf::{Doc, Revision};
use flowy_net::errors::{internal_error, Result as DocResult, ServerError};
use futures::stream::StreamExt;
use sqlx::PgPool;
use std::sync::Arc;
use tokio::{
sync::{mpsc, oneshot},
@ -37,15 +39,18 @@ pub enum EditMsg {
pub struct EditDocActor {
receiver: Option<mpsc::Receiver<EditMsg>>,
edit_context: Arc<EditDocContext>,
edit_doc: Arc<ServerEditDoc>,
pg_pool: Data<PgPool>,
}
impl EditDocActor {
pub fn new(receiver: mpsc::Receiver<EditMsg>, edit_context: Arc<EditDocContext>) -> Self {
Self {
pub fn new(receiver: mpsc::Receiver<EditMsg>, doc: Doc, pg_pool: Data<PgPool>) -> Result<Self, ServerError> {
let edit_doc = Arc::new(ServerEditDoc::new(doc)?);
Ok(Self {
receiver: Some(receiver),
edit_context,
}
edit_doc,
pg_pool,
})
}
pub async fn run(mut self) {
@ -78,10 +83,10 @@ impl EditDocActor {
user: user.clone(),
socket: socket.clone(),
};
let _ = ret.send(self.edit_context.apply_revision(user, revision).await);
let _ = ret.send(self.edit_doc.apply_revision(user, revision, self.pg_pool.clone()).await);
},
EditMsg::DocumentJson { ret } => {
let edit_context = self.edit_context.clone();
let edit_context = self.edit_doc.clone();
let json = spawn_blocking(move || edit_context.document_json())
.await
.map_err(internal_error);
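EditDocActor follows the usual Tokio actor shape: the owner keeps an mpsc::Sender, the actor drains the Receiver, and every request carries a oneshot::Sender for its reply. A self-contained sketch of the loop with a hypothetical Msg type (not the commit's EditMsg):

use tokio::sync::{mpsc, oneshot};

enum Msg {
    // Hypothetical request/response message: echo a string back.
    Echo { text: String, ret: oneshot::Sender<String> },
}

struct Actor {
    receiver: mpsc::Receiver<Msg>,
}

impl Actor {
    async fn run(mut self) {
        // The loop ends once every Sender is dropped and recv() yields None.
        while let Some(msg) = self.receiver.recv().await {
            match msg {
                Msg::Echo { text, ret } => {
                    // Ignore the send error: the caller may have given up waiting.
                    let _ = ret.send(text);
                },
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(100);
    tokio::spawn(Actor { receiver: rx }.run());
    let (ret, rx) = oneshot::channel();
    tx.send(Msg::Echo { text: "ping".into(), ret }).await.unwrap();
    assert_eq!(rx.await.unwrap(), "ping");
}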

View File

@ -1,56 +1,206 @@
use crate::service::{
doc::edit::{
actor::{EditDocActor, EditMsg},
EditDocContext,
},
ws::{entities::Socket, WsUser},
doc::{edit::edit_actor::EditUser, update_doc},
util::md5,
ws::WsMessageAdaptor,
};
use flowy_document::protobuf::{Doc, Revision};
use flowy_net::errors::{internal_error, Result as DocResult, ServerError};
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
pub struct EditDoc {
sender: mpsc::Sender<EditMsg>,
use actix_web::web::Data;
use byteorder::{BigEndian, WriteBytesExt};
use bytes::Bytes;
use dashmap::DashMap;
use flowy_document::{
entities::ws::{WsDataType, WsDocumentData},
protobuf::{Doc, RevType, Revision, RevisionRange, UpdateDocParams},
services::doc::Document,
};
use flowy_net::errors::{internal_error, ServerError};
use flowy_ot::{
core::{Delta, OperationTransformable},
errors::OTError,
};
use flowy_ws::WsMessage;
use parking_lot::RwLock;
use protobuf::Message;
use sqlx::PgPool;
use std::{
convert::TryInto,
sync::{
atomic::{AtomicI64, Ordering::SeqCst},
Arc,
},
time::Duration,
};
pub struct ServerEditDoc {
doc_id: String,
rev_id: AtomicI64,
document: Arc<RwLock<Document>>,
users: DashMap<String, EditUser>,
}
impl EditDoc {
impl ServerEditDoc {
pub fn new(doc: Doc) -> Result<Self, ServerError> {
let (sender, receiver) = mpsc::channel(100);
let edit_context = Arc::new(EditDocContext::new(doc)?);
let actor = EditDocActor::new(receiver, edit_context);
tokio::task::spawn(actor.run());
Ok(Self { sender })
let delta = Delta::from_bytes(&doc.data).map_err(internal_error)?;
let document = Arc::new(RwLock::new(Document::from_delta(delta)));
let users = DashMap::new();
Ok(Self {
doc_id: doc.id.clone(),
rev_id: AtomicI64::new(doc.rev_id),
document,
users,
})
}
#[tracing::instrument(level = "debug", skip(self, user, socket, revision))]
pub fn document_json(&self) -> String { self.document.read().to_json() }
pub async fn apply_revision(
&self,
user: Arc<WsUser>,
socket: Socket,
user: EditUser,
revision: Revision,
pg_pool: Data<PgPool>,
) -> Result<(), ServerError> {
let (ret, rx) = oneshot::channel();
let msg = EditMsg::Revision {
user,
socket,
revision,
ret,
// Opti: find out another way to keep the user socket available.
self.users.insert(user.id(), user.clone());
log::debug!(
"cur_base_rev_id: {}, expect_base_rev_id: {} rev_id: {}",
self.rev_id.load(SeqCst),
revision.base_rev_id,
revision.rev_id
);
let cur_rev_id = self.rev_id.load(SeqCst);
if cur_rev_id > revision.rev_id {
// The client document is outdated. Transform the client revision delta and then
// send the prime delta to the client. The client should compose this prime
// delta.
let (cli_prime, server_prime) = self.transform(&revision.delta_data).map_err(internal_error)?;
let _ = self.update_document_delta(server_prime)?;
log::debug!("{} client delta: {}", self.doc_id, cli_prime.to_json());
let cli_revision = self.mk_revision(revision.rev_id, cli_prime);
let ws_cli_revision = mk_push_rev_ws_message(&self.doc_id, cli_revision);
user.socket.do_send(ws_cli_revision).map_err(internal_error)?;
Ok(())
} else if cur_rev_id < revision.rev_id {
if cur_rev_id != revision.base_rev_id {
// The server document is outdated; try to get the missing revision from the
// client.
user.socket
.do_send(mk_pull_rev_ws_message(&self.doc_id, cur_rev_id, revision.rev_id))
.map_err(internal_error)?;
} else {
let delta = Delta::from_bytes(&revision.delta_data).map_err(internal_error)?;
let _ = self.update_document_delta(delta)?;
user.socket
.do_send(mk_acked_ws_message(&revision))
.map_err(internal_error)?;
self.rev_id.fetch_update(SeqCst, SeqCst, |_e| Some(revision.rev_id));
let _ = self.save_revision(&revision, pg_pool).await?;
}
Ok(())
} else {
log::error!("Client rev_id should not equal to server rev_id");
Ok(())
}
}
fn mk_revision(&self, base_rev_id: i64, delta: Delta) -> Revision {
let delta_data = delta.to_bytes().to_vec();
let md5 = md5(&delta_data);
let revision = Revision {
base_rev_id,
rev_id: self.rev_id.load(SeqCst),
delta_data,
md5,
doc_id: self.doc_id.to_string(),
ty: RevType::Remote,
..Default::default()
};
let _ = self.send(msg, rx).await?;
revision
}
#[tracing::instrument(level = "debug", skip(self, delta_data))]
fn transform(&self, delta_data: &Vec<u8>) -> Result<(Delta, Delta), OTError> {
log::debug!("Document: {}", self.document.read().to_json());
let doc_delta = self.document.read().delta().clone();
let cli_delta = Delta::from_bytes(delta_data)?;
log::debug!("Compose delta: {}", cli_delta);
let (cli_prime, server_prime) = doc_delta.transform(&cli_delta)?;
Ok((cli_prime, server_prime))
}
#[tracing::instrument(level = "debug", skip(self), err)]
fn update_document_delta(&self, delta: Delta) -> Result<(), ServerError> {
// Opti: push each revision into a queue and process them one by one.
match self.document.try_write_for(Duration::from_millis(300)) {
None => {
log::error!("Failed to acquire write lock of document");
},
Some(mut write_guard) => {
let _ = write_guard.compose_delta(&delta).map_err(internal_error)?;
log::debug!("Document: {}", write_guard.to_json());
},
}
Ok(())
}
pub async fn document_json(&self) -> DocResult<String> {
let (ret, rx) = oneshot::channel();
let msg = EditMsg::DocumentJson { ret };
self.send(msg, rx).await?
}
async fn send<T>(&self, msg: EditMsg, rx: oneshot::Receiver<T>) -> DocResult<T> {
let _ = self.sender.send(msg).await.map_err(internal_error)?;
let result = rx.await?;
Ok(result)
#[tracing::instrument(level = "debug", skip(self, pg_pool), err)]
async fn save_revision(&self, revision: &Revision, pg_pool: Data<PgPool>) -> Result<(), ServerError> {
// Opti: save with multiple revisions
let mut params = UpdateDocParams::new();
params.set_doc_id(self.doc_id.clone());
params.set_data(self.document.read().to_json());
params.set_rev_id(revision.rev_id);
let _ = update_doc(pg_pool.get_ref(), params).await?;
Ok(())
}
}
fn mk_push_rev_ws_message(doc_id: &str, revision: Revision) -> WsMessageAdaptor {
let bytes = revision.write_to_bytes().unwrap();
let data = WsDocumentData {
id: doc_id.to_string(),
ty: WsDataType::PushRev,
data: bytes,
};
mk_ws_message(data)
}
fn mk_pull_rev_ws_message(doc_id: &str, from_rev_id: i64, to_rev_id: i64) -> WsMessageAdaptor {
let range = RevisionRange {
doc_id: doc_id.to_string(),
from_rev_id,
to_rev_id,
..Default::default()
};
let bytes = range.write_to_bytes().unwrap();
let data = WsDocumentData {
id: doc_id.to_string(),
ty: WsDataType::PullRev,
data: bytes,
};
mk_ws_message(data)
}
fn mk_acked_ws_message(revision: &Revision) -> WsMessageAdaptor {
let mut wtr = vec![];
let _ = wtr.write_i64::<BigEndian>(revision.rev_id);
let data = WsDocumentData {
id: revision.doc_id.clone(),
ty: WsDataType::Acked,
data: wtr,
};
mk_ws_message(data)
}
fn mk_ws_message<T: Into<WsMessage>>(data: T) -> WsMessageAdaptor {
let msg: WsMessage = data.into();
let bytes: Bytes = msg.try_into().unwrap();
WsMessageAdaptor(bytes)
}
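apply_revision resolves an incoming revision against the server's current rev_id in three ways: if the server is ahead, it transforms the client delta against the server document, applies the server-prime locally, and pushes the client-prime back (PushRev); if the server is behind and cur_rev_id doesn't match the revision's base, it asks the client for the missing range (PullRev); if the revision is contiguous, it composes the delta and acks it. Equal rev_ids are treated as a protocol error. The branch logic, extracted into a plain function for illustration:

// What to do with an incoming revision, given the server's current rev_id.
#[derive(Debug, PartialEq)]
enum Action {
    PushTransformed, // server ahead: transform, apply server-prime, push client-prime
    PullMissing,     // server behind with a gap: ask the client for the range
    ComposeAndAck,   // contiguous: compose the delta and ack the revision
    Reject,          // equal rev_ids should never happen
}

fn decide(cur_rev_id: i64, base_rev_id: i64, rev_id: i64) -> Action {
    if cur_rev_id > rev_id {
        Action::PushTransformed
    } else if cur_rev_id < rev_id {
        if cur_rev_id != base_rev_id {
            Action::PullMissing
        } else {
            Action::ComposeAndAck
        }
    } else {
        Action::Reject
    }
}

fn main() {
    assert_eq!(decide(5, 2, 3), Action::PushTransformed);
    assert_eq!(decide(3, 5, 6), Action::PullMissing); // revisions 4..=5 are missing
    assert_eq!(decide(3, 3, 4), Action::ComposeAndAck);
    assert_eq!(decide(3, 3, 3), Action::Reject);
}

The transform branch relies on the usual OT convergence property: for concurrent deltas a and b, a.compose(b_prime) equals b.compose(a_prime), so server and client converge on the same document.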

View File

@ -1,6 +1,6 @@
mod actor;
mod context;
mod edit_actor;
mod edit_doc;
mod open_handle;
pub use context::*;
pub use edit_doc::*;
pub use open_handle::*;

View File

@ -0,0 +1,57 @@
use crate::service::{
doc::edit::{
edit_actor::{EditDocActor, EditMsg},
ServerEditDoc,
},
ws::{entities::Socket, WsUser},
};
use actix_web::web::Data;
use flowy_document::protobuf::{Doc, Revision};
use flowy_net::errors::{internal_error, Result as DocResult, ServerError};
use sqlx::PgPool;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
pub struct DocHandle {
sender: mpsc::Sender<EditMsg>,
}
impl DocHandle {
pub fn new(doc: Doc, pg_pool: Data<PgPool>) -> Result<Self, ServerError> {
let (sender, receiver) = mpsc::channel(100);
let actor = EditDocActor::new(receiver, doc, pg_pool)?;
tokio::task::spawn(actor.run());
Ok(Self { sender })
}
#[tracing::instrument(level = "debug", skip(self, user, socket, revision))]
pub async fn apply_revision(
&self,
user: Arc<WsUser>,
socket: Socket,
revision: Revision,
) -> Result<(), ServerError> {
let (ret, rx) = oneshot::channel();
let msg = EditMsg::Revision {
user,
socket,
revision,
ret,
};
let _ = self.send(msg, rx).await?;
Ok(())
}
pub async fn document_json(&self) -> DocResult<String> {
let (ret, rx) = oneshot::channel();
let msg = EditMsg::DocumentJson { ret };
self.send(msg, rx).await?
}
async fn send<T>(&self, msg: EditMsg, rx: oneshot::Receiver<T>) -> DocResult<T> {
let _ = self.sender.send(msg).await.map_err(internal_error)?;
let result = rx.await?;
Ok(result)
}
}
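DocHandle is the caller-side half of the actor: each public method allocates a oneshot channel, sends the request down the mpsc channel, and awaits the reply, so the transport error and the actor's own Result are unwrapped separately. A trimmed sketch of that send-and-await shape with simplified error types (String instead of ServerError):

use tokio::sync::{mpsc, oneshot};

enum Msg {
    DocumentJson { ret: oneshot::Sender<Result<String, String>> },
}

struct Handle {
    sender: mpsc::Sender<Msg>,
}

impl Handle {
    async fn document_json(&self) -> Result<String, String> {
        let (ret, rx) = oneshot::channel();
        self.sender
            .send(Msg::DocumentJson { ret })
            .await
            .map_err(|e| e.to_string())?; // channel closed: the actor is gone
        // Outer ? is the oneshot RecvError; the inner Result is the actor's answer.
        rx.await.map_err(|e| e.to_string())?
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(8);
    // A stand-in actor that answers every request with an empty document.
    tokio::spawn(async move {
        while let Some(Msg::DocumentJson { ret }) = rx.recv().await {
            let _ = ret.send(Ok("{}".to_string()));
        }
    });
    let handle = Handle { sender: tx };
    assert_eq!(handle.document_json().await.unwrap(), "{}");
}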

View File

@ -1,8 +1,8 @@
pub(crate) use crud::*;
pub use router::*;
mod actor;
pub mod crud;
pub mod doc;
mod edit;
pub mod router;
mod ws_actor;

View File

@ -29,6 +29,7 @@ pub async fn create_handler(payload: Payload, pool: Data<PgPool>) -> Result<Http
Ok(FlowyResponse::success().into())
}
#[tracing::instrument(level = "debug", skip(payload, pool), err)]
pub async fn read_handler(payload: Payload, pool: Data<PgPool>) -> Result<HttpResponse, ServerError> {
let params: QueryDocParams = parse_from_payload(payload).await?;
let doc = read_doc(pool.get_ref(), params).await?;

View File

@ -21,12 +21,12 @@ pub enum DocWsMsg {
},
}
pub struct DocWsMsgActor {
pub struct DocWsActor {
receiver: Option<mpsc::Receiver<DocWsMsg>>,
doc_manager: Arc<DocManager>,
}
impl DocWsMsgActor {
impl DocWsActor {
pub fn new(receiver: mpsc::Receiver<DocWsMsg>, manager: Arc<DocManager>) -> Self {
Self {
receiver: Some(receiver),

View File

@ -6,7 +6,7 @@ use sqlx::PgPool;
use tokio::time::{sleep, Duration};
use backend::service::doc::doc::DocManager;
use flowy_document::{entities::doc::QueryDocParams, services::doc::edit::EditDocContext as ClientEditDocContext};
use flowy_document::{entities::doc::QueryDocParams, services::doc::edit::ClientEditDoc as ClientEditDocContext};
use flowy_net::config::ServerConfig;
use flowy_test::{workspace::ViewTest, FlowyTest};
use flowy_user::services::user::UserSession;

View File

@ -1,5 +1,6 @@
use crate::services::util::md5;
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
use flowy_ot::core::Delta;
#[derive(Debug, ProtoBuf_Enum, Clone, Eq, PartialEq)]
pub enum RevType {
@ -11,7 +12,7 @@ impl std::default::Default for RevType {
fn default() -> Self { RevType::Local }
}
#[derive(Debug, Clone, Default, ProtoBuf)]
#[derive(Clone, Default, ProtoBuf)]
pub struct Revision {
#[pb(index = 1)]
pub base_rev_id: i64,
@ -32,6 +33,22 @@ pub struct Revision {
pub ty: RevType,
}
impl std::fmt::Debug for Revision {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
f.write_fmt(format_args!("doc_id {}, ", self.doc_id));
f.write_fmt(format_args!("rev_id {}, ", self.rev_id));
match Delta::from_bytes(&self.delta_data) {
Ok(delta) => {
f.write_fmt(format_args!("delta {:?}", delta.to_json()));
},
Err(e) => {
f.write_fmt(format_args!("delta {:?}", e));
},
}
Ok(())
}
}
impl Revision {
pub fn new(base_rev_id: i64, rev_id: i64, delta_data: Vec<u8>, doc_id: &str, ty: RevType) -> Revision {
let md5 = md5(&delta_data);
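The hand-written Debug impl makes Revision logs readable by decoding delta_data back into a delta instead of dumping raw bytes. As committed, every f.write_fmt(..) result is silently dropped; the idiomatic form threads fmt::Result through with write! and ?. A sketch of that variant, substituting a UTF-8 check for Delta::from_bytes so the example stands alone:

use std::fmt;

struct Revision {
    doc_id: String,
    rev_id: i64,
    delta_data: Vec<u8>,
}

impl fmt::Debug for Revision {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // write! propagates fmt::Error instead of discarding it.
        write!(f, "doc_id {}, rev_id {}, ", self.doc_id, self.rev_id)?;
        match std::str::from_utf8(&self.delta_data) {
            // Stand-in for Delta::from_bytes(..) + to_json().
            Ok(json) => write!(f, "delta {:?}", json),
            Err(e) => write!(f, "delta <undecodable: {}>", e),
        }
    }
}

fn main() {
    let rev = Revision {
        doc_id: "doc-1".into(),
        rev_id: 2,
        delta_data: br#"[{"insert":"a"}]"#.to_vec(),
    };
    println!("{:?}", rev);
}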

View File

@ -10,7 +10,7 @@ use crate::{
entities::doc::{CreateDocParams, Doc, DocDelta, QueryDocParams},
errors::DocError,
services::{
doc::{doc_controller::DocController, edit::EditDocContext},
doc::{doc_controller::DocController, edit::ClientEditDoc},
server::construct_doc_server,
ws::WsDocumentManager,
},
@ -51,7 +51,7 @@ impl FlowyDocument {
&self,
params: QueryDocParams,
pool: Arc<ConnectionPool>,
) -> Result<Arc<EditDocContext>, DocError> {
) -> Result<Arc<ClientEditDoc>, DocError> {
let edit_context = self.doc_ctrl.open(params, pool).await?;
Ok(edit_context)
}

View File

@ -4,25 +4,25 @@ use dashmap::DashMap;
use crate::{
errors::DocError,
services::doc::edit::{DocId, EditDocContext},
services::doc::edit::{ClientEditDoc, DocId},
};
pub(crate) struct DocCache {
inner: DashMap<DocId, Arc<EditDocContext>>,
inner: DashMap<DocId, Arc<ClientEditDoc>>,
}
impl DocCache {
pub(crate) fn new() -> Self { Self { inner: DashMap::new() } }
#[allow(dead_code)]
pub(crate) fn all_docs(&self) -> Vec<Arc<EditDocContext>> {
pub(crate) fn all_docs(&self) -> Vec<Arc<ClientEditDoc>> {
self.inner
.iter()
.map(|kv| kv.value().clone())
.collect::<Vec<Arc<EditDocContext>>>()
.collect::<Vec<Arc<ClientEditDoc>>>()
}
pub(crate) fn set(&self, doc: Arc<EditDocContext>) {
pub(crate) fn set(&self, doc: Arc<ClientEditDoc>) {
let doc_id = doc.doc_id.clone();
if self.inner.contains_key(&doc_id) {
log::warn!("Doc:{} already exists in cache", &doc_id);
@ -32,7 +32,7 @@ impl DocCache {
pub(crate) fn is_opened(&self, doc_id: &str) -> bool { self.inner.get(doc_id).is_some() }
pub(crate) fn get(&self, doc_id: &str) -> Result<Arc<EditDocContext>, DocError> {
pub(crate) fn get(&self, doc_id: &str) -> Result<Arc<ClientEditDoc>, DocError> {
if !self.is_opened(&doc_id) {
return Err(doc_not_found());
}

View File

@ -5,15 +5,24 @@ use parking_lot::RwLock;
use tokio::time::{interval, Duration};
use flowy_database::{ConnectionPool, SqliteConnection};
use flowy_infra::future::{wrap_future, FnFuture};
use flowy_infra::future::{wrap_future, FnFuture, ResultFuture};
use crate::{
entities::doc::{CreateDocParams, Doc, DocDelta, QueryDocParams},
errors::{internal_error, DocError},
errors::{internal_error, DocError, DocResult},
module::DocumentUser,
services::{cache::DocCache, doc::edit::EditDocContext, server::Server, ws::WsDocumentManager},
services::{
cache::DocCache,
doc::{
edit::ClientEditDoc,
revision::{DocRevision, RevisionServer},
},
server::Server,
ws::WsDocumentManager,
},
sql_tables::doc::{DocTable, DocTableSql},
};
use flowy_ot::core::Delta;
pub(crate) struct DocController {
server: Server,
@ -53,7 +62,7 @@ impl DocController {
&self,
params: QueryDocParams,
pool: Arc<ConnectionPool>,
) -> Result<Arc<EditDocContext>, DocError> {
) -> Result<Arc<ClientEditDoc>, DocError> {
if self.cache.is_opened(&params.doc_id) == false {
let edit_ctx = self.make_edit_context(&params.doc_id, pool.clone()).await?;
return Ok(edit_ctx);
@ -105,21 +114,24 @@ impl DocController {
Ok(())
}
async fn make_edit_context(
&self,
doc_id: &str,
pool: Arc<ConnectionPool>,
) -> Result<Arc<EditDocContext>, DocError> {
async fn make_edit_context(&self, doc_id: &str, pool: Arc<ConnectionPool>) -> Result<Arc<ClientEditDoc>, DocError> {
// Opti: require upgradable_read lock and then upgrade to write lock using
// RwLockUpgradableReadGuard::upgrade(xx) of ws
let doc = self.read_doc(doc_id, pool.clone()).await?;
// let doc = self.read_doc(doc_id, pool.clone()).await?;
let ws_sender = self.ws.read().sender();
let edit_ctx = Arc::new(EditDocContext::new(doc, pool, ws_sender).await?);
let token = self.user.token()?;
let server = Arc::new(RevisionServerImpl {
token,
server: self.server.clone(),
});
let edit_ctx = Arc::new(ClientEditDoc::new(doc_id, pool, ws_sender, server).await?);
self.ws.write().register_handler(doc_id, edit_ctx.clone());
self.cache.set(edit_ctx.clone());
Ok(edit_ctx)
}
#[allow(dead_code)]
#[tracing::instrument(level = "debug", skip(self, pool), err)]
async fn read_doc(&self, doc_id: &str, pool: Arc<ConnectionPool>) -> Result<Doc, DocError> {
match self.doc_sql.read_doc_table(doc_id, pool.clone()) {
@ -146,6 +158,34 @@ impl DocController {
}
}
struct RevisionServerImpl {
token: String,
server: Server,
}
impl RevisionServer for RevisionServerImpl {
fn fetch_document_from_remote(&self, doc_id: &str) -> ResultFuture<DocRevision, DocError> {
let params = QueryDocParams {
doc_id: doc_id.to_string(),
};
let server = self.server.clone();
let token = self.token.clone();
ResultFuture::new(async move {
match server.read_doc(&token, params).await? {
None => Err(DocError::not_found()),
Some(doc) => {
let delta = Delta::from_bytes(doc.data)?;
Ok(DocRevision {
rev_id: doc.rev_id,
delta,
})
},
}
})
}
}
#[allow(dead_code)]
fn event_loop(_cache: Arc<DocCache>) -> FnFuture<()> {
let mut i = interval(Duration::from_secs(3));
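RevisionServerImpl is the client half of the commit title: it captures the user token and the Server handle and wraps the remote read in a ResultFuture, so RevisionStore can treat "fetch from remote" as a swappable trait. That also makes a canned test double easy. A sketch with ResultFuture spelled out as a pinned boxed future, since flowy_infra's wrapper isn't reproduced here:

use std::{future::Future, pin::Pin};

struct DocRevision {
    rev_id: i64,
    json: String, // stand-in for the composed Delta
}

// Same shape as RevisionServer, with the boxed future written out.
trait RevisionServer: Send + Sync {
    fn fetch_document_from_remote(
        &self,
        doc_id: &str,
    ) -> Pin<Box<dyn Future<Output = Result<DocRevision, String>> + Send>>;
}

struct FixedServer; // a test double returning canned data

impl RevisionServer for FixedServer {
    fn fetch_document_from_remote(
        &self,
        doc_id: &str,
    ) -> Pin<Box<dyn Future<Output = Result<DocRevision, String>> + Send>> {
        let doc_id = doc_id.to_string();
        Box::pin(async move {
            Ok(DocRevision { rev_id: 1, json: format!("{{\"doc\":\"{}\"}}", doc_id) })
        })
    }
}

#[tokio::main]
async fn main() {
    let rev = FixedServer.fetch_document_from_remote("doc-1").await.unwrap();
    assert_eq!(rev.rev_id, 1);
    println!("{}", rev.json);
}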

View File

@ -152,7 +152,7 @@ impl Document {
match self.history.undo() {
None => Err(DocError::undo().context("Undo stack is empty")),
Some(undo_delta) => {
let (new_delta, inverted_delta) = self.invert_change(&undo_delta)?;
let (new_delta, inverted_delta) = self.invert(&undo_delta)?;
let result = UndoResult::success(new_delta.target_len as usize);
self.set_delta(new_delta);
self.history.add_redo(inverted_delta);
@ -166,7 +166,7 @@ impl Document {
match self.history.redo() {
None => Err(DocError::redo()),
Some(redo_delta) => {
let (new_delta, inverted_delta) = self.invert_change(&redo_delta)?;
let (new_delta, inverted_delta) = self.invert(&redo_delta)?;
let result = UndoResult::success(new_delta.target_len as usize);
self.set_delta(new_delta);
@ -178,13 +178,13 @@ impl Document {
}
impl Document {
fn invert_change(&self, change: &Delta) -> Result<(Delta, Delta), DocError> {
fn invert(&self, delta: &Delta) -> Result<(Delta, Delta), DocError> {
// c = a.compose(b)
// d = b.invert(a)
// a = c.compose(d)
log::trace!("👉invert change {}", change);
let new_delta = self.delta.compose(change)?;
let inverted_delta = change.invert(&self.delta);
log::trace!("Invert {}", delta);
let new_delta = self.delta.compose(delta)?;
let inverted_delta = delta.invert(&self.delta);
Ok((new_delta, inverted_delta))
}
}
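The renamed invert leans on the delta identities in its comment: c = a.compose(b), d = b.invert(a), a = c.compose(d). In words, d is the delta that rolls the new document c back to a, which is exactly what undo pushes onto the redo stack. A toy round trip with append/truncate operations standing in for flowy_ot deltas:

// Toy model: a "delta" appends a suffix; its inverse truncates it again.
enum Delta {
    Append(String),
    Truncate(usize),
}

fn compose(doc: &str, delta: &Delta) -> String {
    match delta {
        Delta::Append(s) => format!("{}{}", doc, s),
        Delta::Truncate(n) => doc[..doc.len() - n].to_string(),
    }
}

// invert(b, a): the delta that undoes b when applied to a.compose(b).
fn invert(b: &Delta, _a: &str) -> Delta {
    match b {
        Delta::Append(s) => Delta::Truncate(s.len()),
        Delta::Truncate(_) => unimplemented!("not needed for this demo"),
    }
}

fn main() {
    let a = "hello".to_string();
    let b = Delta::Append(" world".into());
    let c = compose(&a, &b); // c = a.compose(b)
    let d = invert(&b, &a);  // d = b.invert(a)
    assert_eq!(compose(&c, &d), a); // a = c.compose(d)
}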

View File

@ -7,9 +7,10 @@ use crate::{
services::{
doc::{
edit::{actor::DocumentEditActor, message::EditMsg},
rev_manager::RevisionManager,
revision::{RevisionManager, RevisionServer},
UndoResult,
},
server::Server,
util::bytes_to_rev_id,
ws::{WsDocumentHandler, WsDocumentSender},
},
@ -22,27 +23,28 @@ use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot};
pub type DocId = String;
pub struct EditDocContext {
pub struct ClientEditDoc {
pub doc_id: DocId,
rev_manager: Arc<RevisionManager>,
document: UnboundedSender<EditMsg>,
pool: Arc<ConnectionPool>,
}
impl EditDocContext {
impl ClientEditDoc {
pub(crate) async fn new(
doc: Doc,
doc_id: &str,
pool: Arc<ConnectionPool>,
ws_sender: Arc<dyn WsDocumentSender>,
) -> Result<Self, DocError> {
let delta = Delta::from_bytes(doc.data)?;
server: Arc<dyn RevisionServer>,
) -> DocResult<Self> {
let (rev_manager, delta) = RevisionManager::new(doc_id, pool.clone(), ws_sender, server).await?;
let rev_manager = Arc::new(rev_manager);
let (sender, receiver) = mpsc::unbounded_channel::<EditMsg>();
let edit_actor = DocumentEditActor::new(&doc.id, delta, pool.clone(), receiver);
let edit_actor = DocumentEditActor::new(doc_id, delta, pool.clone(), receiver);
tokio::spawn(edit_actor.run());
let rev_manager = Arc::new(RevisionManager::new(&doc.id, doc.rev_id, pool.clone(), ws_sender));
let edit_context = Self {
doc_id: doc.id,
doc_id: doc_id.to_string(),
rev_manager,
document: sender,
pool,
@ -166,7 +168,7 @@ impl EditDocContext {
}
}
impl WsDocumentHandler for EditDocContext {
impl WsDocumentHandler for ClientEditDoc {
fn receive(&self, doc_data: WsDocumentData) {
let document = self.document.clone();
let rev_manager = self.rev_manager.clone();

View File

@ -1,5 +1,5 @@
mod actor;
mod context;
mod edit_doc;
mod message;
pub use context::*;
pub use edit_doc::*;

View File

@ -9,4 +9,4 @@ mod view;
pub(crate) mod doc_controller;
pub mod edit;
pub mod extensions;
mod rev_manager;
mod revision;

View File

@ -1,5 +0,0 @@
mod rev_manager;
mod store;
mod util;
pub use rev_manager::*;

View File

@ -1,191 +0,0 @@
use crate::{
entities::doc::{Revision, RevisionRange},
errors::{internal_error, DocError, DocResult},
services::doc::rev_manager::util::RevisionOperation,
sql_tables::{OpTableSql, RevChangeset, RevState},
};
use async_stream::stream;
use dashmap::DashMap;
use flowy_database::ConnectionPool;
use futures::stream::StreamExt;
use std::{cell::RefCell, sync::Arc, time::Duration};
use tokio::{
sync::{mpsc, oneshot, RwLock},
task::JoinHandle,
};
pub enum StoreMsg {
Revision {
revision: Revision,
},
AckRevision {
rev_id: i64,
},
SendRevisions {
range: RevisionRange,
ret: oneshot::Sender<DocResult<Vec<Revision>>>,
},
}
pub struct Store {
doc_id: String,
op_sql: Arc<OpTableSql>,
pool: Arc<ConnectionPool>,
revs: Arc<DashMap<i64, RevisionOperation>>,
delay_save: RwLock<Option<JoinHandle<()>>>,
receiver: Option<mpsc::Receiver<StoreMsg>>,
}
impl Store {
pub fn new(doc_id: &str, pool: Arc<ConnectionPool>, receiver: mpsc::Receiver<StoreMsg>) -> Store {
let op_sql = Arc::new(OpTableSql {});
let revs = Arc::new(DashMap::new());
let doc_id = doc_id.to_owned();
Self {
doc_id,
op_sql,
pool,
revs,
delay_save: RwLock::new(None),
receiver: Some(receiver),
}
}
pub async fn run(mut self) {
let mut receiver = self.receiver.take().expect("Should only call once");
let stream = stream! {
loop {
match receiver.recv().await {
Some(msg) => yield msg,
None => break,
}
}
};
stream.for_each(|msg| self.handle_message(msg)).await;
}
async fn handle_message(&self, msg: StoreMsg) {
match msg {
StoreMsg::Revision { revision } => {
self.handle_new_revision(revision).await;
},
StoreMsg::AckRevision { rev_id } => {
self.handle_revision_acked(rev_id).await;
},
StoreMsg::SendRevisions { range: _, ret: _ } => {
unimplemented!()
},
}
}
async fn handle_new_revision(&self, revision: Revision) {
let mut operation = RevisionOperation::new(&revision);
let _receiver = operation.receiver();
self.revs.insert(revision.rev_id, operation);
self.save_revisions().await;
}
async fn handle_revision_acked(&self, rev_id: i64) {
match self.revs.get_mut(&rev_id) {
None => {},
Some(mut rev) => rev.value_mut().finish(),
}
self.save_revisions().await;
}
pub fn revs_in_range(&self, _range: RevisionRange) -> DocResult<Vec<Revision>> { unimplemented!() }
async fn save_revisions(&self) {
if let Some(handler) = self.delay_save.write().await.take() {
handler.abort();
}
if self.revs.is_empty() {
return;
}
let revs = self.revs.clone();
let pool = self.pool.clone();
let op_sql = self.op_sql.clone();
*self.delay_save.write().await = Some(tokio::spawn(async move {
tokio::time::sleep(Duration::from_millis(300)).await;
let ids = revs.iter().map(|kv| kv.key().clone()).collect::<Vec<i64>>();
let revisions = revs
.iter()
.map(|kv| ((*kv.value()).clone(), kv.state))
.collect::<Vec<(Revision, RevState)>>();
let conn = &*pool.get().map_err(internal_error).unwrap();
let result = conn.immediate_transaction::<_, DocError, _>(|| {
let _ = op_sql.create_rev_table(revisions, conn).unwrap();
Ok(())
});
match result {
Ok(_) => revs.retain(|k, _| !ids.contains(k)),
Err(e) => log::error!("Save revision failed: {:?}", e),
}
}));
}
// fn update_revisions(&self) {
// let rev_ids = self
// .revs
// .iter()
// .flat_map(|kv| match kv.state == RevState::Acked {
// true => None,
// false => Some(kv.key().clone()),
// })
// .collect::<Vec<i64>>();
//
// if rev_ids.is_empty() {
// return;
// }
//
// log::debug!("Try to update {:?} state", rev_ids);
// match self.update(&rev_ids) {
// Ok(_) => {
// self.revs.retain(|k, _| !rev_ids.contains(k));
// },
// Err(e) => log::error!("Save revision failed: {:?}", e),
// }
// }
//
// fn update(&self, rev_ids: &Vec<i64>) -> Result<(), DocError> {
// let conn = &*self.pool.get().map_err(internal_error).unwrap();
// let result = conn.immediate_transaction::<_, DocError, _>(|| {
// for rev_id in rev_ids {
// let changeset = RevChangeset {
// doc_id: self.doc_id.clone(),
// rev_id: rev_id.clone(),
// state: RevState::Acked,
// };
// let _ = self.op_sql.update_rev_table(changeset, conn)?;
// }
// Ok(())
// });
//
// result
// }
// fn delete_revision(&self, rev_id: i64) {
// let op_sql = self.op_sql.clone();
// let pool = self.pool.clone();
// let doc_id = self.doc_id.clone();
// tokio::spawn(async move {
// let conn = &*pool.get().map_err(internal_error).unwrap();
// let result = conn.immediate_transaction::<_, DocError, _>(|| {
// let _ = op_sql.delete_rev_table(&doc_id, rev_id, conn)?;
// Ok(())
// });
//
// match result {
// Ok(_) => {},
// Err(e) => log::error!("Delete revision failed: {:?}", e),
// }
// });
// }
}

View File

@ -2,56 +2,76 @@ use crate::{
entities::doc::{RevType, Revision, RevisionRange},
errors::DocError,
services::{
doc::rev_manager::store::{Store, StoreMsg},
doc::revision::store::{RevisionStore, StoreCmd},
util::RevIdCounter,
ws::WsDocumentSender,
},
};
use crate::{entities::doc::Doc, errors::DocResult, services::server::Server};
use flowy_database::ConnectionPool;
use flowy_infra::future::ResultFuture;
use flowy_ot::core::Delta;
use parking_lot::RwLock;
use std::{collections::VecDeque, sync::Arc};
use tokio::sync::{mpsc, oneshot};
use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError};
pub struct DocRevision {
pub rev_id: i64,
pub delta: Delta,
}
pub trait RevisionServer: Send + Sync {
fn fetch_document_from_remote(&self, doc_id: &str) -> ResultFuture<DocRevision, DocError>;
}
pub struct RevisionManager {
doc_id: String,
rev_id_counter: RevIdCounter,
ws_sender: Arc<dyn WsDocumentSender>,
store_sender: mpsc::Sender<StoreMsg>,
ws: Arc<dyn WsDocumentSender>,
store: mpsc::Sender<StoreCmd>,
pending_revs: RwLock<VecDeque<Revision>>,
}
// tokio::time::timeout
impl RevisionManager {
pub fn new(doc_id: &str, rev_id: i64, pool: Arc<ConnectionPool>, ws_sender: Arc<dyn WsDocumentSender>) -> Self {
let (sender, receiver) = mpsc::channel::<StoreMsg>(50);
let store = Store::new(doc_id, pool, receiver);
pub async fn new(
doc_id: &str,
pool: Arc<ConnectionPool>,
ws_sender: Arc<dyn WsDocumentSender>,
server: Arc<dyn RevisionServer>,
) -> DocResult<(Self, Delta)> {
let (sender, receiver) = mpsc::channel::<StoreCmd>(50);
let store = RevisionStore::new(doc_id, pool, receiver, server);
tokio::spawn(store.run());
let DocRevision { rev_id, delta } = fetch_document(sender.clone()).await?;
let doc_id = doc_id.to_string();
let rev_id_counter = RevIdCounter::new(rev_id);
let pending_revs = RwLock::new(VecDeque::new());
Self {
let manager = Self {
doc_id,
rev_id_counter,
ws_sender,
ws: ws_sender,
pending_revs,
store_sender: sender,
}
store: sender,
};
Ok((manager, delta))
}
pub fn push_compose_revision(&self, revision: Revision) { self.pending_revs.write().push_front(revision); }
pub fn next_compose_revision(&self) -> Option<Revision> { self.pending_revs.write().pop_front() }
#[tracing::instrument(level = "debug", skip(self, revision))]
#[tracing::instrument(level = "debug", skip(self))]
pub async fn add_revision(&self, revision: Revision) -> Result<(), DocError> {
let msg = StoreMsg::Revision {
let cmd = StoreCmd::Revision {
revision: revision.clone(),
};
let _ = self.store_sender.send(msg).await;
let _ = self.store.send(cmd).await;
match revision.ty {
RevType::Local => match self.ws_sender.send(revision.into()) {
RevType::Local => match self.ws.send(revision.into()) {
Ok(_) => {},
Err(e) => log::error!("Send delta failed: {:?}", e),
},
@ -64,9 +84,9 @@ impl RevisionManager {
}
pub fn ack_rev(&self, rev_id: i64) -> Result<(), DocError> {
let sender = self.store_sender.clone();
let sender = self.store.clone();
tokio::spawn(async move {
let _ = sender.send(StoreMsg::AckRevision { rev_id }).await;
let _ = sender.send(StoreCmd::AckRevision { rev_id }).await;
});
Ok(())
}
@ -82,12 +102,25 @@ impl RevisionManager {
pub fn send_revisions(&self, range: RevisionRange) -> Result<(), DocError> {
debug_assert!(&range.doc_id == &self.doc_id);
let (ret, _rx) = oneshot::channel();
let sender = self.store_sender.clone();
let sender = self.store.clone();
tokio::spawn(async move {
let _ = sender.send(StoreMsg::SendRevisions { range, ret }).await;
let _ = sender.send(StoreCmd::SendRevisions { range, ret }).await;
});
unimplemented!()
}
}
async fn fetch_document(sender: mpsc::Sender<StoreCmd>) -> DocResult<DocRevision> {
let (ret, rx) = oneshot::channel();
let _ = sender.send(StoreCmd::DocumentDelta { ret }).await;
match rx.await {
Ok(result) => Ok(result?),
Err(e) => {
log::error!("fetch_document: {}", e);
Err(DocError::internal().context(format!("fetch_document: {}", e)))
},
}
}

View File

@ -0,0 +1,5 @@
mod manager;
mod store;
mod util;
pub use manager::*;

View File

@ -0,0 +1,301 @@
use crate::{
entities::doc::{Doc, Revision, RevisionRange},
errors::{internal_error, DocError, DocResult},
services::{
doc::revision::{util::RevisionOperation, DocRevision, RevisionServer},
server::Server,
},
sql_tables::{DocTableSql, RevChangeset, RevState, RevTableSql},
};
use async_stream::stream;
use dashmap::DashMap;
use flowy_database::{ConnectionPool, SqliteConnection};
use flowy_ot::{
core::{Attributes, Delta, OperationTransformable},
errors::OTError,
};
use futures::{stream::StreamExt, TryFutureExt};
use std::{cell::RefCell, sync::Arc, time::Duration};
use tokio::{
sync::{mpsc, oneshot, RwLock},
task::{spawn_blocking, JoinHandle},
};
pub enum StoreCmd {
Revision {
revision: Revision,
},
AckRevision {
rev_id: i64,
},
SendRevisions {
range: RevisionRange,
ret: oneshot::Sender<DocResult<Vec<Revision>>>,
},
DocumentDelta {
ret: oneshot::Sender<DocResult<DocRevision>>,
},
}
pub struct RevisionStore {
doc_id: String,
persistence: Arc<Persistence>,
revs: Arc<DashMap<i64, RevisionOperation>>,
delay_save: RwLock<Option<JoinHandle<()>>>,
receiver: Option<mpsc::Receiver<StoreCmd>>,
server: Arc<dyn RevisionServer>,
}
impl RevisionStore {
pub fn new(
doc_id: &str,
pool: Arc<ConnectionPool>,
receiver: mpsc::Receiver<StoreCmd>,
server: Arc<dyn RevisionServer>,
) -> RevisionStore {
let persistence = Arc::new(Persistence::new(pool));
let revs = Arc::new(DashMap::new());
let doc_id = doc_id.to_owned();
Self {
doc_id,
persistence,
revs,
delay_save: RwLock::new(None),
receiver: Some(receiver),
server,
}
}
pub async fn run(mut self) {
let mut receiver = self.receiver.take().expect("Should only call once");
let stream = stream! {
loop {
match receiver.recv().await {
Some(msg) => yield msg,
None => break,
}
}
};
stream.for_each(|msg| self.handle_message(msg)).await;
}
async fn handle_message(&self, cmd: StoreCmd) {
match cmd {
StoreCmd::Revision { revision } => {
self.handle_new_revision(revision).await;
},
StoreCmd::AckRevision { rev_id } => {
self.handle_revision_acked(rev_id).await;
},
StoreCmd::SendRevisions { range, ret } => {
let result = revs_in_range(&self.doc_id, self.persistence.clone(), range).await;
let _ = ret.send(result);
},
StoreCmd::DocumentDelta { ret } => {
let delta = fetch_document(&self.doc_id, self.server.clone(), self.persistence.clone()).await;
let _ = ret.send(delta);
},
}
}
async fn handle_new_revision(&self, revision: Revision) {
let mut operation = RevisionOperation::new(&revision);
let _receiver = operation.receiver();
self.revs.insert(revision.rev_id, operation);
self.save_revisions().await;
}
async fn handle_revision_acked(&self, rev_id: i64) {
match self.revs.get_mut(&rev_id) {
None => {},
Some(mut rev) => rev.value_mut().finish(),
}
self.save_revisions().await;
}
async fn save_revisions(&self) {
if let Some(handler) = self.delay_save.write().await.take() {
handler.abort();
}
if self.revs.is_empty() {
return;
}
let revs = self.revs.clone();
let persistence = self.persistence.clone();
*self.delay_save.write().await = Some(tokio::spawn(async move {
tokio::time::sleep(Duration::from_millis(300)).await;
let ids = revs.iter().map(|kv| kv.key().clone()).collect::<Vec<i64>>();
let revisions = revs
.iter()
.map(|kv| ((*kv.value()).clone(), kv.state))
.collect::<Vec<(Revision, RevState)>>();
// TODO: Ok to unwrap?
let conn = &*persistence.pool.get().map_err(internal_error).unwrap();
let result = conn.immediate_transaction::<_, DocError, _>(|| {
let _ = persistence.rev_sql.create_rev_table(revisions, conn).unwrap();
Ok(())
});
match result {
Ok(_) => revs.retain(|k, _| !ids.contains(k)),
Err(e) => log::error!("Save revision failed: {:?}", e),
}
}));
}
}
async fn fetch_document(
doc_id: &str,
server: Arc<dyn RevisionServer>,
persistence: Arc<Persistence>,
) -> DocResult<DocRevision> {
let fetch_from_remote = server.fetch_document_from_remote(doc_id).or_else(|result| {
log::error!(
"Fetch document delta from remote failed: {:?}, try to fetch from local",
result
);
fetch_from_local(doc_id, persistence.clone())
});
let fetch_from_local = fetch_from_local(doc_id, persistence.clone()).or_else(|result| async move {
log::error!(
"Fetch document delta from local failed: {:?}, try to fetch from remote",
result
);
server.fetch_document_from_remote(doc_id).await
});
tokio::select! {
result = fetch_from_remote => {
log::debug!("Finish fetching document from remote");
result
},
result = fetch_from_local => {
log::debug!("Finish fetching document from local");
result
},
}
}
async fn fetch_from_local(doc_id: &str, persistence: Arc<Persistence>) -> DocResult<DocRevision> {
let doc_id = doc_id.to_owned();
spawn_blocking(move || {
// tokio::time::timeout
let conn = &*persistence.pool.get().map_err(internal_error)?;
let revisions = persistence.rev_sql.read_rev_tables(&doc_id, None, conn)?;
if revisions.is_empty() {
return Err(DocError::not_found());
}
let rev_id = revisions.last().unwrap().rev_id;
let mut delta = Delta::new();
for revision in revisions {
match Delta::from_bytes(revision.delta_data) {
Ok(local_delta) => {
delta = delta.compose(&local_delta)?;
},
Err(e) => {
log::error!("Deserialize delta from revision failed: {}", e);
},
}
}
delta.insert("\n", Attributes::default());
Result::<DocRevision, DocError>::Ok(DocRevision { rev_id, delta })
})
.await
.map_err(internal_error)?
}
async fn revs_in_range(doc_id: &str, persistence: Arc<Persistence>, range: RevisionRange) -> DocResult<Vec<Revision>> {
let doc_id = doc_id.to_owned();
let result = spawn_blocking(move || {
let conn = &*persistence.pool.get().map_err(internal_error)?;
let revisions = persistence.rev_sql.read_rev_tables_with_range(&doc_id, range, conn)?;
Ok(revisions)
})
.await
.map_err(internal_error)?;
result
}
struct Persistence {
rev_sql: Arc<RevTableSql>,
doc_sql: Arc<DocTableSql>,
pool: Arc<ConnectionPool>,
}
impl Persistence {
fn new(pool: Arc<ConnectionPool>) -> Self {
let rev_sql = Arc::new(RevTableSql {});
let doc_sql = Arc::new(DocTableSql {});
Self { rev_sql, doc_sql, pool }
}
}
// fn update_revisions(&self) {
// let rev_ids = self
// .revs
// .iter()
// .flat_map(|kv| match kv.state == RevState::Acked {
// true => None,
// false => Some(kv.key().clone()),
// })
// .collect::<Vec<i64>>();
//
// if rev_ids.is_empty() {
// return;
// }
//
// log::debug!("Try to update {:?} state", rev_ids);
// match self.update(&rev_ids) {
// Ok(_) => {
// self.revs.retain(|k, _| !rev_ids.contains(k));
// },
// Err(e) => log::error!("Save revision failed: {:?}", e),
// }
// }
//
// fn update(&self, rev_ids: &Vec<i64>) -> Result<(), DocError> {
// let conn = &*self.pool.get().map_err(internal_error).unwrap();
// let result = conn.immediate_transaction::<_, DocError, _>(|| {
// for rev_id in rev_ids {
// let changeset = RevChangeset {
// doc_id: self.doc_id.clone(),
// rev_id: rev_id.clone(),
// state: RevState::Acked,
// };
// let _ = self.op_sql.update_rev_table(changeset, conn)?;
// }
// Ok(())
// });
//
// result
// }
// fn delete_revision(&self, rev_id: i64) {
// let op_sql = self.op_sql.clone();
// let pool = self.pool.clone();
// let doc_id = self.doc_id.clone();
// tokio::spawn(async move {
// let conn = &*pool.get().map_err(internal_error).unwrap();
// let result = conn.immediate_transaction::<_, DocError, _>(|| {
// let _ = op_sql.delete_rev_table(&doc_id, rev_id, conn)?;
// Ok(())
// });
//
// match result {
// Ok(_) => {},
// Err(e) => log::error!("Delete revision failed: {:?}", e),
// }
// });
// }
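fetch_document is where the commit title plays out: one future tries the remote server and falls back to the local revision table, the other tries local first and falls back to remote, and tokio::select! keeps whichever finishes first while dropping, i.e. cancelling, the loser. The local path rebuilds the document by composing every stored revision's delta in rev_id order. A reduced sketch of the racing shape, with stub sources in place of the RevisionServer and the SQLite reads:

use tokio::time::{sleep, Duration};

async fn from_remote() -> Result<String, String> {
    sleep(Duration::from_millis(50)).await; // pretend network latency
    Ok("remote doc".to_string())
}

async fn from_local() -> Result<String, String> {
    Err("no local revisions".to_string()) // pretend an empty rev table
}

#[tokio::main]
async fn main() {
    // Each branch falls back to the other source on failure,
    // mirroring fetch_document in this commit.
    let remote_first = async {
        match from_remote().await {
            Ok(doc) => Ok(doc),
            Err(_) => from_local().await,
        }
    };
    let local_first = async {
        match from_local().await {
            Ok(doc) => Ok(doc),
            Err(_) => from_remote().await,
        }
    };
    let doc = tokio::select! {
        r = remote_first => r,
        r = local_first => r,
    };
    println!("{:?}", doc); // whichever branch resolved first wins
}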

View File

@ -1,9 +1,9 @@
mod doc_op_sql;
mod doc_op_table;
mod doc_sql;
mod doc_table;
mod rev_sql;
mod rev_table;
pub(crate) use doc_op_sql::*;
pub(crate) use doc_op_table::*;
pub(crate) use doc_sql::*;
pub(crate) use doc_table::*;
pub(crate) use rev_sql::*;
pub(crate) use rev_table::*;

View File

@ -1,5 +1,5 @@
use crate::{
entities::doc::Revision,
entities::doc::{Revision, RevisionRange},
errors::DocError,
sql_tables::{doc::RevTable, RevChangeset, RevState, RevTableType},
};
@ -11,9 +11,9 @@ use flowy_database::{
SqliteConnection,
};
pub struct OpTableSql {}
pub struct RevTableSql {}
impl OpTableSql {
impl RevTableSql {
pub(crate) fn create_rev_table(
&self,
revisions: Vec<(Revision, RevState)>,
@ -49,16 +49,22 @@ impl OpTableSql {
Ok(())
}
pub(crate) fn read_rev_table(
pub(crate) fn read_rev_tables(
&self,
doc_id_s: &str,
rev_id_s: i64,
rev_id_s: Option<i64>,
conn: &SqliteConnection,
) -> Result<Vec<Revision>, DocError> {
let rev_tables: Vec<RevTable> = dsl::rev_table
.filter(rev_id.eq(rev_id_s))
let mut filter = dsl::rev_table
.filter(doc_id.eq(doc_id_s))
.load::<RevTable>(conn)?;
.order(rev_id.asc())
.into_boxed();
if let Some(rev_id_s) = rev_id_s {
filter = filter.filter(rev_id.eq(rev_id_s))
}
let rev_tables = filter.load::<RevTable>(conn)?;
let revisions = rev_tables
.into_iter()
@ -67,16 +73,15 @@ impl OpTableSql {
Ok(revisions)
}
pub(crate) fn read_revs_table(
pub(crate) fn read_rev_tables_with_range(
&self,
doc_id_s: &str,
from_rev_id: i64,
to_rev_id: i64,
range: RevisionRange,
conn: &SqliteConnection,
) -> Result<Vec<Revision>, DocError> {
let rev_tables = dsl::rev_table
.filter(rev_id.ge(from_rev_id))
.filter(rev_id.lt(to_rev_id))
.filter(rev_id.ge(range.from_rev_id))
.filter(rev_id.lt(range.to_rev_id))
.filter(doc_id.eq(doc_id_s))
.load::<RevTable>(conn)?;
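read_rev_tables now takes Option<i64> and calls into_boxed() so the filter chain can grow at runtime; without boxing, the extra .filter() would change the query's static type and the two branches could not be assigned to one variable. A generic sketch of the pattern against a hypothetical schema, using the diesel 1.x API this crate targets (the table! definition below is illustrative, not the real rev_table):

#[macro_use]
extern crate diesel;

use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;

table! {
    rev_table (rev_id) {
        doc_id -> Text,
        rev_id -> BigInt,
    }
}

fn read_revs(conn: &SqliteConnection, doc: &str, rev: Option<i64>) -> QueryResult<Vec<(String, i64)>> {
    use self::rev_table::dsl::*;

    // into_boxed() erases the query's type so both branches unify.
    let mut query = rev_table
        .filter(doc_id.eq(doc))
        .order(rev_id.asc())
        .into_boxed();

    if let Some(r) = rev {
        query = query.filter(rev_id.eq(r));
    }

    query.load::<(String, i64)>(conn)
}

fn main() {
    let conn = SqliteConnection::establish(":memory:").unwrap();
    conn.execute("CREATE TABLE rev_table (doc_id TEXT NOT NULL, rev_id BIGINT PRIMARY KEY)")
        .unwrap();
    assert!(read_revs(&conn, "doc-1", None).unwrap().is_empty());
}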

View File

@ -2,13 +2,13 @@ use crate::{config::HEADER_TOKEN, errors::ServerError, response::FlowyResponse};
use bytes::Bytes;
use hyper::http;
use protobuf::ProtobufError;
use reqwest::{header::HeaderMap, Client, Method, Response};
use reqwest::{header::HeaderMap, Client, Error, Method, Response};
use std::{
convert::{TryFrom, TryInto},
sync::Arc,
time::Duration,
};
use tokio::sync::oneshot;
use tokio::sync::{oneshot, oneshot::error::RecvError};
pub trait ResponseMiddleware {
fn receive_response(&self, token: &Option<String>, response: &FlowyResponse);
@ -144,15 +144,11 @@ impl HttpRequestBuilder {
}
let response = builder.send().await;
match tx.send(response) {
Ok(_) => {},
Err(e) => {
log::error!("[{}] Send http request failed: {:?}", method, e);
},
}
let _ = tx.send(response);
});
let response = rx.await??;
log::trace!("Http Response: {:?}", response);
let flowy_response = flowy_response_from(response).await?;
let token = self.token();
self.middleware.iter().for_each(|middleware| {