feat: add a suffix to the user data folder when the current cloud type is AppFlowy Cloud (#3918)

* fix: database load failure caused by spawning a long-running task

* chore: yield within the long-running task

* chore: fmt

* chore: update client api

* feat: copy data between servers

* ci: fix af cloud test
Author: Nathan.fooo
Date: 2023-11-12 18:00:07 +08:00
Committed by: GitHub
Parent: 3c7e636b65
Commit: 7eb20b232a
51 changed files with 559 additions and 386 deletions


@@ -71,7 +71,7 @@ impl std::convert::From<&RowDetail> for RowMetaPB {
       document_id: row_detail.document_id.clone(),
       icon: row_detail.meta.icon_url.clone(),
       cover: row_detail.meta.cover_url.clone(),
-      is_document_empty: row_detail.meta.is_document_empty.clone(),
+      is_document_empty: row_detail.meta.is_document_empty,
     }
   }
 }
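The dropped `.clone()` works because the field is a `Copy` value; a minimal sketch, assuming the field is a plain `bool` (which matches the name but is not confirmed here):

fn main() {
  // bool implements Copy, so a plain read already duplicates the value;
  // calling .clone() compiles but is flagged by clippy::clone_on_copy.
  let is_document_empty: bool = true;
  let copied = is_document_empty;
  let cloned = is_document_empty.clone();
  assert_eq!(copied, cloned);
}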


@@ -1275,14 +1275,42 @@ impl DatabaseViewOperation for DatabaseViewOperationImpl {
   }

   fn get_rows(&self, view_id: &str) -> Fut<Vec<Arc<RowDetail>>> {
-    let database = self.database.lock();
-    let rows = database.get_rows_for_view(view_id);
-    let row_details = rows
-      .into_iter()
-      .flat_map(|row| database.get_row_detail(&row.id))
-      .collect::<Vec<RowDetail>>();
+    let database = self.database.clone();
+    let view_id = view_id.to_string();
+    to_fut(async move {
+      let cloned_database = database.clone();
+      // Offload the blocking operation to a thread where blocking is acceptable. This prevents
+      // blocking the main asynchronous runtime.
+      let row_orders = tokio::task::spawn_blocking(move || {
+        cloned_database.lock().get_row_orders_for_view(&view_id)
+      })
+      .await
+      .unwrap_or_default();
+      tokio::task::yield_now().await;

-    to_fut(async move { row_details.into_iter().map(Arc::new).collect() })
+      let mut all_rows = vec![];
+      // Load the rows in chunks of 10 to avoid blocking the main asynchronous runtime.
+      for chunk in row_orders.chunks(10) {
+        let cloned_database = database.clone();
+        let chunk = chunk.to_vec();
+        let rows = tokio::task::spawn_blocking(move || {
+          let orders = cloned_database.lock().get_rows_from_row_orders(&chunk);
+          let lock_guard = cloned_database.lock();
+          orders
+            .into_iter()
+            .flat_map(|row| lock_guard.get_row_detail(&row.id))
+            .collect::<Vec<RowDetail>>()
+        })
+        .await
+        .unwrap_or_default();
+        all_rows.extend(rows);
+        tokio::task::yield_now().await;
+      }
+      all_rows.into_iter().map(Arc::new).collect()
+    })
   }

   fn get_cells_for_field(&self, view_id: &str, field_id: &str) -> Fut<Vec<Arc<RowCell>>> {
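The new `get_rows` follows a general pattern: move each lock-guarded, blocking read onto Tokio's blocking thread pool, fetch row data in fixed-size chunks, and `yield_now` between chunks so the async runtime stays responsive. A minimal self-contained sketch of that pattern, assuming the `tokio` and `parking_lot` crates; `SyncStore`, `row_ids`, and `load` are hypothetical stand-ins for the real database types:

use std::sync::Arc;

use parking_lot::Mutex;

// Hypothetical blocking data source standing in for the lock-guarded database.
struct SyncStore {
  rows: Vec<u64>,
}

impl SyncStore {
  fn row_ids(&self) -> Vec<u64> {
    self.rows.clone()
  }

  fn load(&self, id: u64) -> Option<String> {
    Some(format!("row-{id}")) // pretend this is a blocking read
  }
}

async fn load_all(store: Arc<Mutex<SyncStore>>) -> Vec<String> {
  // Offload the blocking call so the async worker threads stay free.
  let cloned = store.clone();
  let ids = tokio::task::spawn_blocking(move || cloned.lock().row_ids())
    .await
    .unwrap_or_default();

  let mut all = Vec::new();
  // Load in small chunks and yield between them so other tasks can make progress.
  for chunk in ids.chunks(10) {
    let cloned = store.clone();
    let chunk = chunk.to_vec();
    let rows = tokio::task::spawn_blocking(move || {
      let guard = cloned.lock();
      chunk.iter().filter_map(|id| guard.load(*id)).collect::<Vec<_>>()
    })
    .await
    .unwrap_or_default();
    all.extend(rows);
    tokio::task::yield_now().await;
  }
  all
}

#[tokio::main]
async fn main() {
  let store = Arc::new(Mutex::new(SyncStore { rows: (0..25).collect() }));
  println!("loaded {} rows", load_all(store).await.len());
}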


@@ -7,6 +7,7 @@ use collab_database::fields::{Field, TypeOptionData};
 use collab_database::rows::{Cells, Row, RowDetail, RowId};
 use collab_database::views::{DatabaseLayout, DatabaseView};
 use tokio::sync::{broadcast, RwLock};
+use tracing::instrument;

 use flowy_error::{FlowyError, FlowyResult};
 use lib_dispatch::prelude::af_spawn;
@@ -256,6 +257,7 @@ impl DatabaseViewEditor {
       .await
   }

+  #[instrument(level = "info", skip(self))]
   pub async fn v_get_rows(&self) -> Vec<Arc<RowDetail>> {
     let mut rows = self.delegate.get_rows(&self.view_id).await;
     self.v_filter_rows(&mut rows).await;
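The `#[instrument]` attribute added above wraps each call to `v_get_rows` in a tracing span. A minimal sketch of the mechanism, assuming the `tracing`, `tracing-subscriber`, and `tokio` crates; the function and field names below are illustrative only:

use tracing::instrument;

// Each call creates an info-level span named after the function; arguments become
// span fields unless skipped (the real code skips `self`). Events inside inherit the span.
#[instrument(level = "info")]
async fn v_get_rows_demo(view_id: &str) -> usize {
  tracing::info!("loading rows");
  view_id.len()
}

#[tokio::main]
async fn main() {
  tracing_subscriber::fmt().init();
  let _ = v_get_rows_demo("view-1").await;
}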


@@ -7,7 +7,6 @@ use collab_database::views::{
 use strum::IntoEnumIterator;
 use crate::entities::FieldVisibility;
 use crate::services::field_settings::{FieldSettings, VISIBILITY};

 /// Helper struct to create a new field setting
@@ -52,7 +51,7 @@ pub fn default_field_visibility(layout_type: DatabaseLayout) -> FieldVisibility
 }

 pub fn default_field_settings_for_fields(
-  fields: &Vec<Field>,
+  fields: &[Field],
   layout_type: DatabaseLayout,
 ) -> FieldSettingsByFieldIdMap {
   fields
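Switching the parameter from `&Vec<Field>` to `&[Field]` is the usual Rust slice-parameter idiom; a minimal sketch with a hypothetical `Field` struct (not the real collab_database type):

#[derive(Debug)]
struct Field {
  id: String,
}

// Accepting &[Field] lets callers pass a borrowed Vec, an array, or a sub-slice,
// whereas &Vec<Field> only accepts a borrowed Vec.
fn field_ids(fields: &[Field]) -> Vec<String> {
  fields.iter().map(|field| field.id.clone()).collect()
}

fn main() {
  let owned = vec![Field { id: "f1".into() }, Field { id: "f2".into() }];
  let from_vec = field_ids(&owned); // &Vec<Field> coerces to &[Field]
  let from_slice = field_ids(&owned[..1]); // a sub-slice works too
  println!("{from_vec:?} {from_slice:?}");
}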


@@ -462,14 +462,20 @@ fn merge_groups(
 ) -> MergeGroupResult {
   let mut merge_result = MergeGroupResult::new();
   // new_group_map is a helper map used to filter out the new groups.
-  let mut new_group_map: IndexMap<String, Group> = IndexMap::new();
-  new_groups.into_iter().for_each(|group_rev| {
-    new_group_map.insert(group_rev.id.clone(), group_rev);
-  });
+  let mut new_group_map: IndexMap<String, Group> = new_groups
+    .into_iter()
+    .map(|group| (group.id.clone(), group))
+    .collect();

   // The groups are ordered as in old_groups. Add them before adding the new groups.
   for old in old_groups {
-    if let Some(new) = new_group_map.remove(&old.id) {
+    if let Some(index) = new_group_map.get_index_of(&old.id) {
+      let right = new_group_map.split_off(index);
+      merge_result.all_groups.extend(new_group_map.into_values());
+      new_group_map = right;
+    }
+    if let Some(new) = new_group_map.shift_remove(&old.id) {
       merge_result.all_groups.push(new.clone());
     } else {
       merge_result.deleted_groups.push(old);
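The rewritten merge relies on `IndexMap` preserving insertion order: `get_index_of` finds where an old group sits among the new ones, `split_off` flushes everything ordered before it, and `shift_remove` pulls the match out without disturbing the remaining order. A minimal sketch, assuming the `indexmap` crate, with strings standing in for `Group`:

use indexmap::IndexMap;

fn main() {
  let mut new_groups: IndexMap<String, String> = ["a", "b", "c", "d"]
    .into_iter()
    .map(|id| (id.to_string(), format!("group-{id}")))
    .collect();

  // Flush every group that was inserted before "c", preserving insertion order.
  if let Some(index) = new_groups.get_index_of("c") {
    let right = new_groups.split_off(index); // left = {a, b}, right = {c, d}
    let flushed: Vec<String> = new_groups.into_values().collect();
    new_groups = right;
    println!("flushed before c: {flushed:?}");
  }

  // Remove "c" itself while keeping the relative order of what remains.
  if let Some(found) = new_groups.shift_remove("c") {
    println!("matched {found}, remaining: {:?}", new_groups.keys().collect::<Vec<_>>());
  }
}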