feat: Import appflowy data (#4236)

* refactor: traits

* feat: import data

* chore: track database view

* fix: import

* refactor: collab doc state

* refactor: get collab doc state

* feat: batch create collab object

* fix: test

* ci: run docker compose if the server is not up

* chore: bump collab

* chore: update ci

* chore: update ci

* chore: update ci

* chore: implement ui

* chore: implement ui

* chore: implement ui
This commit is contained in:
Nathan.fooo 2023-12-29 13:02:27 +08:00 committed by GitHub
parent c821b8c4fe
commit 69469e9989
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
100 changed files with 2728 additions and 886 deletions

View File

@ -253,6 +253,7 @@ jobs:
- name: Run Docker-Compose
working-directory: AppFlowy-Cloud
run: |
#docker compose down -v --remove-orphans
docker compose up -d
- name: Checkout source code

View File

@ -90,7 +90,6 @@ jobs:
- name: Run Docker-Compose
working-directory: AppFlowy-Cloud
run: |
docker compose down -v --remove-orphans
docker compose up -d
- name: Run rust-lib tests

View File

@ -0,0 +1,58 @@
import 'package:appflowy_backend/dispatch/dispatch.dart';
import 'package:appflowy_backend/log.dart';
import 'package:appflowy_backend/protobuf/flowy-error/errors.pb.dart';
import 'package:appflowy_backend/protobuf/flowy-folder2/import.pb.dart';
import 'package:dartz/dartz.dart';
import 'package:flutter_bloc/flutter_bloc.dart';
import 'package:freezed_annotation/freezed_annotation.dart';
part 'setting_file_importer_bloc.freezed.dart';
/// Bloc backing the "import external AppFlowy data folder" settings action.
///
/// On [SettingFileImportEvent.importAppFlowyDataFolder] it forwards the chosen
/// folder path to the backend via [FolderEventImportAppFlowyDataFolder] and
/// records the outcome in [SettingFileImportState.successOrFail]:
/// `some(left(unit))` on success, `some(right(error))` on failure.
class SettingFileImporterBloc
    extends Bloc<SettingFileImportEvent, SettingFileImportState> {
  SettingFileImporterBloc() : super(SettingFileImportState.initial()) {
    on<SettingFileImportEvent>(_handleEvent);
  }

  /// Dispatches each incoming event to its handler via freezed's `when`.
  Future<void> _handleEvent(
    SettingFileImportEvent event,
    Emitter<SettingFileImportState> emit,
  ) async {
    await event.when(
      importAppFlowyDataFolder: (String path) async {
        final request = ImportAppFlowyDataPB.create()..path = path;
        final response =
            await FolderEventImportAppFlowyDataFolder(request).send();
        response.fold(
          // Left branch of the dispatch result is the success case.
          (_) => emit(
            state.copyWith(
              successOrFail: some(left(unit)),
            ),
          ),
          (error) {
            // Right branch carries the backend error; log it for diagnosis.
            Log.error(error);
            emit(
              state.copyWith(
                successOrFail: some(right(error)),
              ),
            );
          },
        );
      },
    );
  }
}
/// Events handled by [SettingFileImporterBloc].
@freezed
class SettingFileImportEvent with _$SettingFileImportEvent {
  /// Requests importing the AppFlowy data folder located at [path] into the
  /// current workspace.
  const factory SettingFileImportEvent.importAppFlowyDataFolder(String path) =
      _ImportAppFlowyDataFolder;
}
/// State for [SettingFileImporterBloc].
@freezed
class SettingFileImportState with _$SettingFileImportState {
  const factory SettingFileImportState({
    // none() until an import attempt completes, then some(left(unit)) on
    // success or some(right(error)) on failure.
    required Option<Either<Unit, FlowyError>> successOrFail,
  }) = _SettingFileImportState;

  /// Initial state: no import attempted yet.
  factory SettingFileImportState.initial() => SettingFileImportState(
        successOrFail: none(),
      );
}

View File

@ -0,0 +1,110 @@
import 'package:appflowy/generated/locale_keys.g.dart';
import 'package:appflowy/startup/startup.dart';
import 'package:appflowy/workspace/application/settings/setting_file_importer_bloc.dart';
import 'package:appflowy/workspace/presentation/home/toast.dart';
import 'package:easy_localization/easy_localization.dart';
import 'package:flowy_infra/file_picker/file_picker_service.dart';
import 'package:flowy_infra_ui/flowy_infra_ui.dart';
import 'package:flutter/material.dart';
import 'package:flutter_bloc/flutter_bloc.dart';
import 'package:fluttertoast/fluttertoast.dart';
/// Settings entry that lets the user import data from another AppFlowy data
/// folder on disk into the current one.
class ImportAppFlowyData extends StatefulWidget {
  const ImportAppFlowyData({super.key});

  @override
  State<ImportAppFlowyData> createState() => _ImportAppFlowyDataState();
}
class _ImportAppFlowyDataState extends State<ImportAppFlowyData> {
  // Toast used to report the import outcome; bound to this State's context
  // in initState before it can be shown.
  final _fToast = FToast();

  @override
  void initState() {
    super.initState();
    _fToast.init(context);
  }

  @override
  Widget build(BuildContext context) {
    return BlocProvider(
      create: (context) => SettingFileImporterBloc(),
      child: BlocListener<SettingFileImporterBloc, SettingFileImportState>(
        listener: (context, state) {
          // successOrFail stays none() until an import attempt completes;
          // left = success, right = backend error (see the bloc).
          state.successOrFail.fold(
            () {},
            (either) {
              either.fold(
                (unit) {
                  _showToast(LocaleKeys.settings_menu_importSuccess.tr());
                },
                (err) {
                  _showToast(LocaleKeys.settings_menu_importFailed.tr());
                },
              );
            },
          );
        },
        // NOTE(review): the builder does not read `state`; the subtree is
        // static — a plain `child:` would presumably suffice. Confirm before
        // simplifying.
        child: BlocBuilder<SettingFileImporterBloc, SettingFileImportState>(
          builder: (context, state) {
            return Column(
              children: [
                const ImportAppFlowyDataButton(),
                const VSpace(6),
                IntrinsicHeight(
                  child: Opacity(
                    opacity: 0.6,
                    child: FlowyText.medium(
                      LocaleKeys.settings_menu_importAppFlowyDataDescription
                          .tr(),
                      maxLines: 13,
                    ),
                  ),
                ),
              ],
            );
          },
        ),
      ),
    );
  }

  /// Shows [message] as a centered toast.
  void _showToast(String message) {
    _fToast.showToast(
      child: FlowyMessageToast(message: message),
      gravity: ToastGravity.CENTER,
    );
  }
}
/// Button that opens a directory picker and triggers the AppFlowy data import.
class ImportAppFlowyDataButton extends StatefulWidget {
  const ImportAppFlowyDataButton({super.key});

  @override
  State<ImportAppFlowyDataButton> createState() =>
      _ImportAppFlowyDataButtonState();
}
class _ImportAppFlowyDataButtonState extends State<ImportAppFlowyDataButton> {
  /// Opens a directory picker and, when a folder was chosen and this widget
  /// is still mounted, asks the nearest [SettingFileImporterBloc] to import
  /// it. Does nothing if the picker is cancelled.
  Future<void> _pickFolderAndImport(BuildContext context) async {
    final directory = await getIt<FilePickerService>().getDirectoryPath();
    // Guard against a cancelled picker and against the widget having been
    // disposed while the picker dialog was open.
    if (directory == null || !mounted) {
      return;
    }
    context
        .read<SettingFileImporterBloc>()
        .add(SettingFileImportEvent.importAppFlowyDataFolder(directory));
  }

  @override
  Widget build(BuildContext context) {
    return SizedBox(
      height: 40,
      child: FlowyButton(
        text: FlowyText(LocaleKeys.settings_menu_importAppFlowyData.tr()),
        onTap: () => _pickFolderAndImport(context),
      ),
    );
  }
}

View File

@ -1,3 +1,4 @@
import 'package:appflowy/workspace/presentation/settings/widgets/setting_file_import_appflowy_data_view.dart';
import 'package:appflowy/workspace/presentation/settings/widgets/settings_export_file_widget.dart';
import 'package:appflowy/workspace/presentation/settings/widgets/settings_file_customize_location_view.dart';
import 'package:flutter/foundation.dart';
@ -17,6 +18,7 @@ class _SettingsFileSystemViewState extends State<SettingsFileSystemView> {
const SettingsFileLocationCustomizer(),
// disable export data for v0.2.0 in release mode.
if (kDebugMode) const SettingsExportFileWidget(),
const ImportAppFlowyData(),
];
@override

View File

@ -883,7 +883,7 @@ dependencies = [
[[package]]
name = "collab"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -902,7 +902,7 @@ dependencies = [
[[package]]
name = "collab-database"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -932,7 +932,7 @@ dependencies = [
[[package]]
name = "collab-derive"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"proc-macro2",
"quote",
@ -944,7 +944,7 @@ dependencies = [
[[package]]
name = "collab-document"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"collab",
@ -963,7 +963,7 @@ dependencies = [
[[package]]
name = "collab-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"bytes",
@ -977,7 +977,7 @@ dependencies = [
[[package]]
name = "collab-folder"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"chrono",
@ -1019,7 +1019,7 @@ dependencies = [
[[package]]
name = "collab-persistence"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -1040,7 +1040,7 @@ dependencies = [
[[package]]
name = "collab-plugins"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -1066,7 +1066,7 @@ dependencies = [
[[package]]
name = "collab-user"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"collab",
@ -2010,6 +2010,7 @@ name = "flowy-database-deps"
version = "0.1.0"
dependencies = [
"anyhow",
"collab",
"collab-entity",
"flowy-error",
"lib-infra",
@ -2097,6 +2098,7 @@ name = "flowy-document-deps"
version = "0.1.0"
dependencies = [
"anyhow",
"collab",
"collab-document",
"flowy-error",
"lib-infra",
@ -2183,6 +2185,8 @@ name = "flowy-folder-deps"
version = "0.1.0"
dependencies = [
"anyhow",
"collab",
"collab-entity",
"collab-folder",
"flowy-error",
"lib-infra",
@ -2217,6 +2221,7 @@ dependencies = [
"tracing",
"unicode-segmentation",
"uuid",
"validator",
]
[[package]]
@ -2276,6 +2281,7 @@ dependencies = [
"tracing",
"url",
"uuid",
"yrs",
]
[[package]]
@ -2384,13 +2390,16 @@ version = "0.1.0"
dependencies = [
"anyhow",
"chrono",
"collab",
"collab-entity",
"flowy-error",
"flowy-folder-deps",
"lib-infra",
"serde",
"serde_json",
"serde_repr",
"tokio",
"tokio-stream",
"uuid",
]
@ -3518,6 +3527,7 @@ dependencies = [
"rand 0.8.5",
"tempfile",
"tokio",
"validator",
"walkdir",
"zip",
]

View File

@ -67,14 +67,14 @@ client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "a45
# To switch to the local path, run:
# scripts/tool/update_collab_source.sh
# ⚠️⚠️⚠️️
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-persistence = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-persistence = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }

View File

@ -299,7 +299,11 @@
"historicalUserList": "User login history",
"historicalUserListTooltip": "This list displays your anonymous accounts. You can click on an account to view its details. Anonymous accounts are created by clicking the 'Get Started' button",
"openHistoricalUser": "Click to open the anonymous account",
"customPathPrompt": "Storing the AppFlowy data folder in a cloud-synced folder such as Google Drive can pose risks. If the database within this folder is accessed or modified from multiple locations at the same time, it may result in synchronization conflicts and potential data corruption"
"customPathPrompt": "Storing the AppFlowy data folder in a cloud-synced folder such as Google Drive can pose risks. If the database within this folder is accessed or modified from multiple locations at the same time, it may result in synchronization conflicts and potential data corruption",
"importAppFlowyData": "Import Data from External AppFlowy Folder",
"importAppFlowyDataDescription": "Copy data from an external AppFlowy data folder and import it into the current AppFlowy data folder",
"importSuccess": "Successfully imported the AppFlowy data folder",
"importFailed": "Importing the AppFlowy data folder failed"
},
"notifications": {
"enableNotifications": {

View File

@ -733,7 +733,7 @@ dependencies = [
[[package]]
name = "collab"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -752,7 +752,7 @@ dependencies = [
[[package]]
name = "collab-database"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -782,7 +782,7 @@ dependencies = [
[[package]]
name = "collab-derive"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"proc-macro2",
"quote",
@ -794,7 +794,7 @@ dependencies = [
[[package]]
name = "collab-document"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"collab",
@ -813,7 +813,7 @@ dependencies = [
[[package]]
name = "collab-entity"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"bytes",
@ -827,7 +827,7 @@ dependencies = [
[[package]]
name = "collab-folder"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"chrono",
@ -869,7 +869,7 @@ dependencies = [
[[package]]
name = "collab-persistence"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -890,7 +890,7 @@ dependencies = [
[[package]]
name = "collab-plugins"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"async-trait",
@ -916,7 +916,7 @@ dependencies = [
[[package]]
name = "collab-user"
version = "0.1.0"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=c74b3c3fe500305cc114a06fce911be9269b61bd#c74b3c3fe500305cc114a06fce911be9269b61bd"
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=bdc26b9a37399c9bc02e2309c54e31c664a9574d#bdc26b9a37399c9bc02e2309c54e31c664a9574d"
dependencies = [
"anyhow",
"collab",
@ -1812,6 +1812,7 @@ name = "flowy-database-deps"
version = "0.1.0"
dependencies = [
"anyhow",
"collab",
"collab-entity",
"flowy-error",
"lib-infra",
@ -1900,6 +1901,7 @@ name = "flowy-document-deps"
version = "0.1.0"
dependencies = [
"anyhow",
"collab",
"collab-document",
"flowy-error",
"lib-infra",
@ -1988,9 +1990,12 @@ name = "flowy-folder-deps"
version = "0.1.0"
dependencies = [
"anyhow",
"collab",
"collab-entity",
"collab-folder",
"flowy-error",
"lib-infra",
"tokio",
"uuid",
]
@ -2022,6 +2027,7 @@ dependencies = [
"tracing",
"unicode-segmentation",
"uuid",
"validator",
]
[[package]]
@ -2204,13 +2210,16 @@ version = "0.1.0"
dependencies = [
"anyhow",
"chrono",
"collab",
"collab-entity",
"flowy-error",
"flowy-folder-deps",
"lib-infra",
"serde",
"serde_json",
"serde_repr",
"tokio",
"tokio-stream",
"uuid",
]
@ -3012,6 +3021,7 @@ dependencies = [
"rand 0.8.5",
"tempfile",
"tokio",
"validator",
"walkdir",
"zip",
]

View File

@ -109,11 +109,11 @@ client-api = { git = "https://github.com/AppFlowy-IO/AppFlowy-Cloud", rev = "a45
# To switch to the local path, run:
# scripts/tool/update_collab_source.sh
# ⚠️⚠️⚠️️
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab-persistence = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "c74b3c3fe500305cc114a06fce911be9269b61bd" }
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-plugins = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }
collab-persistence = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "bdc26b9a37399c9bc02e2309c54e31c664a9574d" }

View File

@ -2,7 +2,7 @@ use std::fmt::Debug;
use std::sync::{Arc, Weak};
use anyhow::Error;
use collab::core::collab::{CollabRawData, MutexCollab};
use collab::core::collab::{CollabDocState, MutexCollab};
use collab::preclude::{CollabBuilder, CollabPlugin};
use collab_entity::{CollabObject, CollabType};
use collab_persistence::kv::rocks_kv::RocksCollabDB;
@ -153,7 +153,7 @@ impl AppFlowyCollabBuilder {
/// - `uid`: The user ID associated with the collaboration.
/// - `object_id`: A string reference representing the ID of the object.
/// - `object_type`: The type of the collaboration, defined by the [CollabType] enum.
/// - `raw_data`: The raw data of the collaboration object, defined by the [CollabRawData] type.
/// - `raw_data`: The raw data of the collaboration object, defined by the [CollabDocState] type.
/// - `collab_db`: A weak reference to the [RocksCollabDB].
///
pub async fn build(
@ -161,7 +161,7 @@ impl AppFlowyCollabBuilder {
uid: i64,
object_id: &str,
object_type: CollabType,
doc_state: CollabRawData,
collab_doc_state: CollabDocState,
collab_db: Weak<RocksCollabDB>,
build_config: CollabBuilderConfig,
) -> Result<Arc<MutexCollab>, Error> {
@ -172,7 +172,7 @@ impl AppFlowyCollabBuilder {
object_id,
object_type,
collab_db,
doc_state,
collab_doc_state,
&persistence_config,
build_config,
)
@ -190,7 +190,7 @@ impl AppFlowyCollabBuilder {
/// - `uid`: The user ID associated with the collaboration.
/// - `object_id`: A string reference representing the ID of the object.
/// - `object_type`: The type of the collaboration, defined by the [CollabType] enum.
/// - `raw_data`: The raw data of the collaboration object, defined by the [CollabRawData] type.
/// - `raw_data`: The raw data of the collaboration object, defined by the [CollabDocState] type.
/// - `collab_db`: A weak reference to the [RocksCollabDB].
///
#[allow(clippy::too_many_arguments)]
@ -200,13 +200,13 @@ impl AppFlowyCollabBuilder {
object_id: &str,
object_type: CollabType,
collab_db: Weak<RocksCollabDB>,
doc_state: CollabRawData,
collab_doc_state: CollabDocState,
persistence_config: &CollabPersistenceConfig,
build_config: CollabBuilderConfig,
) -> Result<Arc<MutexCollab>, Error> {
let collab = Arc::new(
CollabBuilder::new(uid, object_id)
.with_raw_data(doc_state)
.with_doc_state(collab_doc_state)
.with_plugin(RocksdbDiskPlugin::new_with_config(
uid,
collab_db.clone(),

View File

@ -1,9 +1,9 @@
use std::convert::TryFrom;
use bytes::Bytes;
use flowy_database2::entities::*;
use flowy_database2::event_map::DatabaseEvent;
use flowy_database2::services::share::csv::CSVFormat;
use flowy_folder2::entities::*;
use flowy_folder2::event_map::FolderEvent;
use flowy_user::errors::FlowyError;
@ -12,6 +12,18 @@ use crate::event_builder::EventBuilder;
use crate::EventIntegrationTest;
impl EventIntegrationTest {
pub async fn get_database_export_data(&self, database_view_id: &str) -> String {
self
.appflowy_core
.database_manager
.get_database_with_view_id(database_view_id)
.await
.unwrap()
.export_csv(CSVFormat::Original)
.await
.unwrap()
}
pub async fn create_grid(&self, parent_id: &str, name: String, initial_data: Vec<u8>) -> ViewPB {
let payload = CreateViewPayloadPB {
parent_view_id: parent_id.to_string(),

View File

@ -3,9 +3,10 @@ use std::sync::Arc;
use collab::core::collab::MutexCollab;
use collab::core::origin::CollabOrigin;
use collab::preclude::updates::decoder::Decode;
use collab::preclude::{merge_updates_v1, Update};
use collab::preclude::Update;
use collab_document::blocks::DocumentData;
use collab_document::document::Document;
use collab_entity::CollabType;
use flowy_document2::entities::{DocumentDataPB, OpenDocumentPayloadPB};
use flowy_document2::event_map::DocumentEvent;
@ -87,23 +88,10 @@ impl EventIntegrationTest {
}
pub async fn get_document_doc_state(&self, document_id: &str) -> Vec<u8> {
let workspace_id = self.user_manager.workspace_id().unwrap();
let cloud_service = self.document_manager.get_cloud_service().clone();
let remote_updates = cloud_service
.get_document_doc_state(document_id, &workspace_id)
self
.get_collab_doc_state(document_id, CollabType::Document)
.await
.unwrap();
if remote_updates.is_empty() {
return vec![];
}
let updates = remote_updates
.iter()
.map(|update| update.as_ref())
.collect::<Vec<&[u8]>>();
merge_updates_v1(&updates).unwrap()
.unwrap()
}
}

View File

@ -1,3 +1,8 @@
use collab::core::collab::CollabDocState;
use collab::core::origin::CollabOrigin;
use collab_document::blocks::DocumentData;
use collab_document::document::Document;
use collab_entity::CollabType;
use std::env::temp_dir;
use std::path::PathBuf;
use std::sync::Arc;
@ -13,6 +18,7 @@ use flowy_core::AppFlowyCore;
use flowy_notification::register_notification_sender;
use flowy_server::AppFlowyServer;
use flowy_user::entities::AuthenticatorPB;
use flowy_user::errors::FlowyError;
use crate::user_event::TestNotificationSender;
@ -34,16 +40,23 @@ pub struct EventIntegrationTest {
impl EventIntegrationTest {
pub async fn new() -> Self {
Self::new_with_name(nanoid!(6)).await
}
pub async fn new_with_name<T: ToString>(name: T) -> Self {
let temp_dir = temp_dir().join(nanoid!(6));
std::fs::create_dir_all(&temp_dir).unwrap();
Self::new_with_user_data_path(temp_dir, nanoid!(6)).await
Self::new_with_user_data_path(temp_dir, name.to_string()).await
}
pub async fn new_with_user_data_path(path_buf: PathBuf, name: String) -> Self {
let path = path_buf.to_str().unwrap().to_string();
let device_id = uuid::Uuid::new_v4().to_string();
let level = "info";
std::env::set_var("RUST_LOG", level);
let config = AppFlowyCoreConfig::new(path.clone(), path, device_id, name).log_filter(
"trace",
level,
vec![
"flowy_test".to_string(),
"tokio".to_string(),
@ -102,6 +115,32 @@ impl EventIntegrationTest {
}
}
}
pub async fn get_collab_doc_state(
&self,
oid: &str,
collay_type: CollabType,
) -> Result<CollabDocState, FlowyError> {
let server = self.server_provider.get_appflowy_cloud_server().unwrap();
let workspace_id = self.get_current_workspace().await.id;
let uid = self.get_user_profile().await?.id;
let doc_state = server
.folder_service()
.get_collab_doc_state_f(&workspace_id, uid, collay_type, oid)
.await?;
Ok(doc_state)
}
}
pub fn document_data_from_document_doc_state(
doc_id: &str,
doc_state: CollabDocState,
) -> DocumentData {
Document::from_doc_state(CollabOrigin::Empty, doc_state, doc_id, vec![])
.unwrap()
.get_document_data()
.unwrap()
}
#[cfg(feature = "single_thread")]

View File

@ -3,6 +3,8 @@ use std::convert::TryFrom;
use std::sync::Arc;
use bytes::Bytes;
use flowy_folder2::entities::ImportAppFlowyDataPB;
use flowy_folder2::event_map::FolderEvent;
use nanoid::nanoid;
use protobuf::ProtobufError;
use tokio::sync::broadcast::{channel, Sender};
@ -187,6 +189,18 @@ impl EventIntegrationTest {
Ok(user_profile)
}
pub async fn import_appflowy_data(&self, path: String, name: &str) {
let payload = ImportAppFlowyDataPB {
path,
import_container_name: name.to_string(),
};
EventBuilder::new(self.clone())
.event(FolderEvent::ImportAppFlowyDataFolder)
.payload(payload)
.async_send()
.await;
}
}
#[derive(Clone)]

View File

@ -4,7 +4,7 @@ use assert_json_diff::assert_json_eq;
use collab::core::collab::MutexCollab;
use collab::core::origin::CollabOrigin;
use collab::preclude::updates::decoder::Decode;
use collab::preclude::{merge_updates_v1, JsonValue, Update};
use collab::preclude::{JsonValue, Update};
use collab_entity::CollabType;
use event_integration::event_builder::EventBuilder;
@ -69,21 +69,10 @@ impl FlowySupabaseDatabaseTest {
pub async fn get_database_collab_update(&self, database_id: &str) -> Vec<u8> {
let workspace_id = self.user_manager.workspace_id().unwrap();
let cloud_service = self.database_manager.get_cloud_service().clone();
let remote_updates = cloud_service
.get_collab_update(database_id, CollabType::Database, &workspace_id)
cloud_service
.get_collab_doc_state_db(database_id, CollabType::Database, &workspace_id)
.await
.unwrap();
if remote_updates.is_empty() {
return vec![];
}
let updates = remote_updates
.iter()
.map(|update| update.as_ref())
.collect::<Vec<&[u8]>>();
merge_updates_v1(&updates).unwrap()
.unwrap()
}
}

View File

@ -1,11 +1,14 @@
use collab_document::blocks::DocumentData;
use serde_json::json;
use std::time::Duration;
use event_integration::document_event::assert_document_data_equal;
use event_integration::user_event::user_localhost_af_cloud;
use event_integration::EventIntegrationTest;
use flowy_core::DEFAULT_NAME;
use flowy_document2::entities::DocumentSyncStatePB;
use crate::util::receive_with_timeout;
use crate::util::{receive_with_timeout, unzip_history_user_db};
#[tokio::test]
async fn af_cloud_edit_document_test() {
@ -35,3 +38,219 @@ async fn af_cloud_edit_document_test() {
assert!(!doc_state.is_empty());
assert_document_data_equal(&doc_state, &document_id, document_data);
}
#[tokio::test]
async fn af_cloud_sync_anon_user_document_test() {
let (cleaner, user_db_path) =
unzip_history_user_db("./tests/asset", "040_sync_local_document").unwrap();
user_localhost_af_cloud().await;
let test =
EventIntegrationTest::new_with_user_data_path(user_db_path.clone(), DEFAULT_NAME.to_string())
.await;
test.af_cloud_sign_up().await;
test.wait_ws_connected().await;
// In the 040_sync_local_document, the structure is:
// workspace:
// view: SyncDocument
let views = test.get_all_workspace_views().await;
assert_eq!(views.len(), 1);
let document_id = views[0].id.clone();
test.open_document(document_id.clone()).await;
// wait all update are send to the remote
let rx = test
.notification_sender
.subscribe_with_condition::<DocumentSyncStatePB, _>(&document_id, |pb| !pb.is_syncing);
let _ = receive_with_timeout(rx, Duration::from_secs(30)).await;
let doc_state = test.get_document_doc_state(&document_id).await;
assert_document_data_equal(
&doc_state,
&document_id,
expected_040_sync_local_document_data(),
);
drop(cleaner);
}
/// The document content expected after the `040_sync_local_document` fixture
/// has finished syncing.
///
/// The JSON mirrors the collab document layout: `blocks` maps block id ->
/// block, `meta.children_map`/`meta.text_map` hold the child and text
/// relations, and `page_id` is the root block. Panics (via `unwrap`) if the
/// literal no longer deserializes into [`DocumentData`].
fn expected_040_sync_local_document_data() -> DocumentData {
  serde_json::from_value(json!( {
    "blocks": {
      "2hYJqg": {
        "children": "AdDT7G",
        "data": {
          "delta": [
            {
              "insert": "bullet list format"
            }
          ]
        },
        "external_id": null,
        "external_type": null,
        "id": "2hYJqg",
        "parent": "beEtQt9xw6",
        "ty": "bulleted_list"
      },
      "9GWi-3": {
        "children": "osttqJ",
        "data": {
          "delta": [
            {
              "insert": "quote format"
            }
          ]
        },
        "external_id": null,
        "external_type": null,
        "id": "9GWi-3",
        "parent": "beEtQt9xw6",
        "ty": "quote"
      },
      "RB-9fj": {
        "children": "GNv1Bx",
        "data": {
          "delta": [
            {
              "insert": "number list format"
            }
          ]
        },
        "external_id": null,
        "external_type": null,
        "id": "RB-9fj",
        "parent": "beEtQt9xw6",
        "ty": "numbered_list"
      },
      "TtoXrhXQKK": {
        "children": "xVai4jK835",
        "data": {
          "delta": [
            {
              "insert": "Syncing the document content between server and the local."
            }
          ]
        },
        "external_id": "-qBAb5hSHZ",
        "external_type": "text",
        "id": "TtoXrhXQKK",
        "parent": "beEtQt9xw6",
        "ty": "paragraph"
      },
      "beEtQt9xw6": {
        "children": "e8O8NqDFSa",
        "data": {},
        "external_id": null,
        "external_type": null,
        "id": "beEtQt9xw6",
        "parent": "",
        "ty": "page"
      },
      "m59P6g": {
        "children": "x2Nypz",
        "data": {
          "delta": [
            {
              "insert": "Header one format"
            }
          ],
          "level": 1
        },
        "external_id": null,
        "external_type": null,
        "id": "m59P6g",
        "parent": "beEtQt9xw6",
        "ty": "heading"
      },
      "mvGqkR": {
        "children": "k7Pozf",
        "data": {
          "delta": [
            {
              "insert": "Header two format"
            }
          ],
          "level": 2
        },
        "external_id": null,
        "external_type": null,
        "id": "mvGqkR",
        "parent": "beEtQt9xw6",
        "ty": "heading"
      },
      "otbxLc": {
        "children": "QJGGOs",
        "data": {
          "checked": false,
          "delta": [
            {
              "insert": "checkbox format"
            }
          ]
        },
        "external_id": null,
        "external_type": null,
        "id": "otbxLc",
        "parent": "beEtQt9xw6",
        "ty": "todo_list"
      },
      "qOb8PS": {
        "children": "fbEQ-2",
        "data": {
          "delta": [
            {
              "insert": "It contains lots of formats."
            }
          ]
        },
        "external_id": null,
        "external_type": null,
        "id": "qOb8PS",
        "parent": "beEtQt9xw6",
        "ty": "paragraph"
      }
    },
    "meta": {
      "children_map": {
        "AdDT7G": [],
        "GNv1Bx": [],
        "QJGGOs": [],
        "e8O8NqDFSa": [
          "TtoXrhXQKK",
          "qOb8PS",
          "m59P6g",
          "mvGqkR",
          "RB-9fj",
          "2hYJqg",
          "otbxLc",
          "9GWi-3"
        ],
        "fbEQ-2": [],
        "k7Pozf": [],
        "osttqJ": [],
        "x2Nypz": [],
        "xVai4jK835": []
      },
      "text_map": {
        "-qBAb5hSHZ": "[{\"insert\":\"Syncing the document content between server and the local.\"}]",
        "0qTSZK": "[]",
        "1aO3pe": "[]",
        "5PVbjJ": "[{\"insert\":\"It contains lots of formats.\"}]",
        "6Up-3y": "[]",
        "GkpKE6": "[{\"insert\":\"number list format\"}]",
        "Mhpd_J": "[{\"insert\":\"Header one format\"}]",
        "OvsPP4": "[]",
        "Ozaw6E": "[]",
        "Q2lcja": "[]",
        "YrAL0L": "[{\"insert\":\"Header two format\"}]",
        "cQHJvj": "[]",
        "eiHaS2": "[]",
        "hHGl05": "[{\"insert\":\"quote format\"}]",
        "ht7dE4": "[{\"insert\":\"bullet list format\"}]",
        "iWrg77": "[{\"insert\":\"checkbox format\"}]",
        "xSTRAY": "[]"
      }
    },
    "page_id": "beEtQt9xw6"
  })).unwrap()
}

View File

@ -4,7 +4,8 @@ use assert_json_diff::assert_json_eq;
use collab::core::collab::MutexCollab;
use collab::core::origin::CollabOrigin;
use collab::preclude::updates::decoder::Decode;
use collab::preclude::{merge_updates_v1, JsonValue, Update};
use collab::preclude::{JsonValue, Update};
use collab_entity::CollabType;
use collab_folder::FolderData;
use event_integration::event_builder::EventBuilder;
@ -49,21 +50,15 @@ impl FlowySupabaseFolderTest {
pub async fn get_collab_update(&self, workspace_id: &str) -> Vec<u8> {
let cloud_service = self.folder_manager.get_cloud_service().clone();
let remote_updates = cloud_service
.get_folder_doc_state(workspace_id, self.user_manager.user_id().unwrap())
cloud_service
.get_collab_doc_state_f(
workspace_id,
self.user_manager.user_id().unwrap(),
CollabType::Folder,
workspace_id,
)
.await
.unwrap();
if remote_updates.is_empty() {
return vec![];
}
let updates = remote_updates
.iter()
.map(|update| update.as_ref())
.collect::<Vec<&[u8]>>();
merge_updates_v1(&updates).unwrap()
.unwrap()
}
}

View File

@ -111,9 +111,10 @@ async fn migrate_anon_user_data_to_af_cloud_test() {
assert_eq!(anon_third_level_views.len(), 2);
assert_eq!(user_third_level_views[0].name, "Grid1".to_string());
assert_eq!(user_third_level_views[1].name, "Grid2".to_string());
drop(cleaner);
// check the trash
assert_eq!(user_trash.items.len(), 1);
assert_eq!(user_trash.items[0].name, anon_trash.items[0].name);
drop(cleaner);
}

View File

@ -1,3 +1,4 @@
mod anon_user_test;
mod auth_test;
mod member_test;
mod sync_third_party_data_test;

View File

@ -0,0 +1,304 @@
use crate::util::unzip_history_user_db;
use assert_json_diff::assert_json_include;
use collab_entity::CollabType;
use event_integration::user_event::user_localhost_af_cloud;
use event_integration::{document_data_from_document_doc_state, EventIntegrationTest};
use flowy_core::DEFAULT_NAME;
use serde_json::{json, Value};
#[tokio::test]
async fn import_appflowy_data_folder_test() {
  let import_container_name = "040_local".to_string();
  let (cleaner, user_db_path) =
    unzip_history_user_db("./tests/asset", &import_container_name).unwrap();
  // The 040_local fixture ships with this hierarchy:
  // workspace:
  //   view: Document1
  //     view: Document2
  //       view: Grid1
  //       view: Grid2
  user_localhost_af_cloud().await;
  let harness = EventIntegrationTest::new_with_name(DEFAULT_NAME).await;
  let _ = harness.af_cloud_sign_up().await;

  // Signing up creates the initial workspace:
  // workspace:
  //   view: Getting Started
  harness
    .import_appflowy_data(
      user_db_path.to_str().unwrap().to_string(),
      &import_container_name,
    )
    .await;

  // The import is appended as a sibling of "Getting Started":
  // workspace:
  //   view: Getting Started
  //   view: 040_local
  //     view: Document1
  //       view: Document2
  //         view: Grid1
  //         view: Grid2
  let top_level_views = harness.get_all_workspace_views().await;
  assert_eq!(top_level_views.len(), 2);
  assert_eq!(top_level_views[1].name, import_container_name);

  let container_children = harness.get_views(&top_level_views[1].id).await.child_views;
  assert_eq!(container_children.len(), 1);
  assert_eq!(container_children[0].name, "Document1");

  let document1_children = harness
    .get_views(&container_children[0].id)
    .await
    .child_views;
  assert_eq!(document1_children.len(), 1);
  assert_eq!(document1_children[0].name, "Document2");

  let document2_children = harness
    .get_views(&document1_children[0].id)
    .await
    .child_views;
  assert_eq!(document2_children.len(), 2);
  assert_eq!(document2_children[0].name, "Grid1");
  assert_eq!(document2_children[1].name, "Grid2");
  drop(cleaner);
}
#[tokio::test]
async fn import_appflowy_data_folder_test2() {
  let import_container_name = "040_local_2".to_string();
  let (cleaner, user_db_path) =
    unzip_history_user_db("./tests/asset", &import_container_name).unwrap();
  user_localhost_af_cloud().await;
  let harness = EventIntegrationTest::new_with_name(DEFAULT_NAME).await;
  let _ = harness.af_cloud_sign_up().await;

  harness
    .import_appflowy_data(
      user_db_path.to_str().unwrap().to_string(),
      &import_container_name,
    )
    .await;

  // The import shows up as a second top-level view named after the
  // container; verify its full content against the fixture expectations.
  let workspace_views = harness.get_all_workspace_views().await;
  assert_eq!(workspace_views.len(), 2);
  assert_eq!(workspace_views[1].name, import_container_name);
  assert_040_local_2_import_content(&harness, &workspace_views[1].id).await;
  drop(cleaner);
}
#[tokio::test]
async fn import_appflowy_data_folder_multiple_times_test() {
  let import_container_name = "040_local_2".to_string();
  let (cleaner, user_db_path) =
    unzip_history_user_db("./tests/asset", &import_container_name).unwrap();
  // In the 040_local_2 fixture, the structure is:
  //  Getting Started
  //     Doc1
  //     Doc2
  //       Grid1
  //       Doc3
  //         Doc3_grid_1
  //         Doc3_grid_2
  //         Doc3_calendar_1
  user_localhost_af_cloud().await;
  let test = EventIntegrationTest::new_with_name(DEFAULT_NAME).await;
  let _ = test.af_cloud_sign_up().await;
  test
    .import_appflowy_data(
      user_db_path.to_str().unwrap().to_string(),
      &import_container_name,
    )
    .await;
  // After the first import, the workspace structure is:
  //  Getting Started
  //  040_local_2
  let views = test.get_all_workspace_views().await;
  assert_eq!(views.len(), 2);
  assert_eq!(views[1].name, import_container_name);
  assert_040_local_2_import_content(&test, &views[1].id).await;
  // Import the same folder a second time: it must be appended as another
  // top-level sibling, not merged into the first import.
  test
    .import_appflowy_data(
      user_db_path.to_str().unwrap().to_string(),
      &import_container_name,
    )
    .await;
  // After the second import, the workspace structure is:
  //  Getting Started
  //  040_local_2
  //    Getting started
  //  040_local_2
  //    Getting started
  let views = test.get_all_workspace_views().await;
  assert_eq!(views.len(), 3);
  assert_eq!(views[2].name, import_container_name);
  // Both imported containers carry identical content.
  assert_040_local_2_import_content(&test, &views[1].id).await;
  assert_040_local_2_import_content(&test, &views[2].id).await;
  drop(cleaner);
}
/// Asserts the full content of one imported `040_local_2` container rooted
/// at `view_id`: the view hierarchy, the local and remote document data of
/// Doc1/Doc2, and the CSV export of the two grids.
async fn assert_040_local_2_import_content(test: &EventIntegrationTest, view_id: &str) {
  // Expected hierarchy under `view_id`:
  // 040_local_2
  //  Getting started
  //     Doc1
  //     Doc2
  //       Grid1
  //       Doc3
  //         Doc3_grid_1
  //         Doc3_grid_2
  //         Doc3_calendar_1
  let _local_2_child_views = test.get_views(view_id).await.child_views;
  assert_eq!(_local_2_child_views.len(), 1);
  assert_eq!(_local_2_child_views[0].name, "Getting started");
  let local_2_getting_started_child_views = test
    .get_views(&_local_2_child_views[0].id)
    .await
    .child_views;
  // Check doc 1 local content
  let doc_1 = local_2_getting_started_child_views[0].clone();
  assert_eq!(doc_1.name, "Doc1");
  let data = test.get_document_data(&doc_1.id).await;
  assert_json_include!(actual: json!(data), expected: expected_doc_1_json());
  // Check doc 1 remote content: the doc state fetched from the cloud must
  // decode to the same document data as the local copy.
  let doc_1_doc_state = test
    .get_collab_doc_state(&doc_1.id, CollabType::Document)
    .await
    .unwrap();
  assert_json_include!(actual:document_data_from_document_doc_state(&doc_1.id, doc_1_doc_state), expected: expected_doc_1_json());
  // Check doc 2 local content
  let doc_2 = local_2_getting_started_child_views[1].clone();
  assert_eq!(doc_2.name, "Doc2");
  let data = test.get_document_data(&doc_2.id).await;
  assert_json_include!(actual: json!(data), expected: expected_doc_2_json());
  // Check doc 2 remote content
  let doc_2_doc_state = test.get_document_doc_state(&doc_2.id).await;
  assert_json_include!(actual:document_data_from_document_doc_state(&doc_2.id, doc_2_doc_state), expected: expected_doc_2_json());
  // Check Grid1: the CSV export pins both the schema and the cell values.
  let grid_1 = local_2_getting_started_child_views[2].clone();
  assert_eq!(grid_1.name, "Grid1");
  assert_eq!(
    test.get_database_export_data(&grid_1.id).await,
    "Name,Type,Done\n1,A,Yes\n2,,Yes\n3,,No\n"
  );
  // Check Doc3 and its three database children.
  assert_eq!(local_2_getting_started_child_views[3].name, "Doc3");
  let doc_3_child_views = test
    .get_views(&local_2_getting_started_child_views[3].id)
    .await
    .child_views;
  assert_eq!(doc_3_child_views.len(), 3);
  assert_eq!(doc_3_child_views[0].name, "doc3_grid_1");
  let doc3_grid_2 = doc_3_child_views[1].clone();
  assert_eq!(doc3_grid_2.name, "doc3_grid_2");
  assert_eq!(
    test.get_database_export_data(&doc3_grid_2.id).await,
    "Name,Type,Done\n1,A,Yes\n2,,\n,,\n"
  );
  assert_eq!(doc_3_child_views[2].name, "doc3_calendar_1");
}
/// Expected document JSON for "Doc1" inside the `040_local_2` fixture:
/// a page block containing a single paragraph ("Hello Document 1").
/// Used with `assert_json_include!`, so it only needs to be a subset of
/// the actual document data.
fn expected_doc_1_json() -> Value {
  json!({
    "blocks": {
      "Rnslggtr6s": {
        "children": "CoT14jXwTV",
        "data": {
          "delta": [
            {
              "insert": "Hello Document 1"
            }
          ]
        },
        "external_id": "hUDq6PrdP1",
        "external_type": "text",
        "id": "Rnslggtr6s",
        "parent": "vxWayiyi2Q",
        "ty": "paragraph"
      },
      "vxWayiyi2Q": {
        "children": "hAgnEMJtU2",
        "data": {},
        "external_id": null,
        "external_type": null,
        "id": "vxWayiyi2Q",
        "parent": "",
        "ty": "page"
      }
    },
    "meta": {
      "children_map": {
        "CoT14jXwTV": [],
        "hAgnEMJtU2": [
          "Rnslggtr6s"
        ]
      },
      "text_map": {
        "hUDq6PrdP1": "[{\"insert\":\"Hello Document 1\"}]",
        "ujncfD": "[]"
      }
    },
    "page_id": "vxWayiyi2Q"
  })
}
/// Expected document JSON for "Doc2" inside the `040_local_2` fixture:
/// a page block with one paragraph ("Hello Document 2") and one empty
/// paragraph. Used with `assert_json_include!`, so it only needs to be a
/// subset of the actual document data.
fn expected_doc_2_json() -> Value {
  json!({
    "blocks": {
      "ZVogdaK9yO": {
        "children": "cc20wCE77N",
        "data": {},
        "external_id": null,
        "external_type": null,
        "id": "ZVogdaK9yO",
        "parent": "",
        "ty": "page"
      },
      "bVRuGAvyfp": {
        "children": "pOVd5xKBal",
        "data": {
          "delta": [
            {
              "insert": "Hello Document 2"
            }
          ]
        },
        "external_id": "m7mwLgXzDF",
        "external_type": "text",
        "id": "bVRuGAvyfp",
        "parent": "ZVogdaK9yO",
        "ty": "paragraph"
      },
      "ng2b4I": {
        "children": "YMaDFs",
        "data": {
          "delta": []
        },
        "external_id": null,
        "external_type": null,
        "id": "ng2b4I",
        "parent": "ZVogdaK9yO",
        "ty": "paragraph"
      }
    },
    "meta": {
      "children_map": {
        "YMaDFs": [],
        "cc20wCE77N": [
          "bVRuGAvyfp",
          "ng2b4I"
        ],
        "pOVd5xKBal": []
      },
      "text_map": {
        "m7mwLgXzDF": "[{\"insert\":\"Hello Document 2\"}]",
        "qXQmuS": "[]"
      }
    },
    "page_id": "ZVogdaK9yO"
  })
}

View File

@ -390,7 +390,7 @@ async fn migrate_anon_data_on_cloud_signup() {
}
assert!(cloud_service
.get_collab_update(&database_id, CollabType::Database, &workspace_id)
.get_collab_doc_state_db(&database_id, CollabType::Database, &workspace_id)
.await
.is_ok());
}

View File

@ -8,6 +8,7 @@ use std::time::Duration;
use anyhow::Error;
use collab_folder::FolderData;
use collab_plugins::cloud_storage::RemoteCollabStorage;
use nanoid::nanoid;
use tokio::sync::mpsc::Receiver;
use tokio::time::timeout;
@ -24,10 +25,9 @@ use flowy_server::{AppFlowyEncryption, EncryptionImpl};
use flowy_server_config::supabase_config::SupabaseConfiguration;
use flowy_user::entities::{AuthenticatorPB, UpdateUserProfilePayloadPB};
use flowy_user::errors::FlowyError;
use flowy_user::event_map::UserCloudServiceProvider;
use flowy_user::event_map::UserEvent::*;
use flowy_user_deps::cloud::UserCloudService;
use flowy_user_deps::cloud::{UserCloudService, UserCloudServiceProvider};
use flowy_user_deps::entities::Authenticator;
pub fn get_supabase_config() -> Option<SupabaseConfiguration> {
@ -167,7 +167,7 @@ pub fn unzip_history_user_db(root: &str, folder_name: &str) -> std::io::Result<(
// Open the zip file
let zip_file_path = format!("{}/{}.zip", root, folder_name);
let reader = File::open(zip_file_path)?;
let output_folder_path = format!("{}/unit_test_{}", root, Uuid::new_v4());
let output_folder_path = format!("{}/unit_test_{}", root, nanoid!(6));
// Create a ZipArchive from the file
let mut archive = ZipArchive::new(reader)?;

View File

@ -6,7 +6,7 @@ use tracing::{error, info};
use flowy_server_config::af_cloud_config::AFCloudConfiguration;
use flowy_server_config::supabase_config::SupabaseConfiguration;
use flowy_user::manager::URL_SAFE_ENGINE;
use flowy_user::services::entities::URL_SAFE_ENGINE;
use lib_infra::file_util::copy_dir_recursive;
use crate::integrate::log::create_log_filter;

View File

@ -3,10 +3,12 @@ use std::convert::TryFrom;
use std::sync::{Arc, Weak};
use bytes::Bytes;
use collab_entity::CollabType;
use tokio::sync::RwLock;
use tracing::info;
use collab_integrate::collab_builder::AppFlowyCollabBuilder;
use collab_integrate::RocksCollabDB;
use collab_integrate::{PersistenceError, RocksCollabDB, YrsDocAction};
use flowy_database2::entities::DatabaseLayoutPB;
use flowy_database2::services::share::csv::CSVFormat;
use flowy_database2::template::{make_default_board, make_default_calendar, make_default_grid};
@ -14,17 +16,21 @@ use flowy_database2::DatabaseManager;
use flowy_document2::entities::DocumentDataPB;
use flowy_document2::manager::DocumentManager;
use flowy_document2::parser::json::parser::JsonToDocumentParser;
use flowy_error::FlowyError;
use flowy_error::{internal_error, ErrorCode, FlowyError};
use flowy_folder2::entities::ViewLayoutPB;
use flowy_folder2::manager::{FolderManager, FolderUser};
use flowy_folder2::share::ImportType;
use flowy_folder2::view_operation::{
FolderOperationHandler, FolderOperationHandlers, View, WorkspaceViewBuilder,
};
use flowy_folder2::view_operation::{FolderOperationHandler, FolderOperationHandlers, View};
use flowy_folder2::ViewLayout;
use flowy_folder_deps::cloud::FolderCloudService;
use flowy_folder_deps::cloud::{FolderCloudService, FolderCollabParams};
use flowy_folder_deps::entities::ImportData;
use flowy_folder_deps::folder_builder::{ParentChildViews, WorkspaceViewBuilder};
use flowy_user::manager::UserManager;
use flowy_user::services::data_import::{load_collab_by_oid, ImportDataSource};
use crate::integrate::server::ServerProvider;
use lib_dispatch::prelude::ToBytes;
use lib_infra::async_trait::async_trait;
use lib_infra::future::FutureResult;
pub struct FolderDepsResolver();
@ -34,15 +40,24 @@ impl FolderDepsResolver {
document_manager: &Arc<DocumentManager>,
database_manager: &Arc<DatabaseManager>,
collab_builder: Arc<AppFlowyCollabBuilder>,
folder_cloud: Arc<dyn FolderCloudService>,
server_provider: Arc<ServerProvider>,
) -> Arc<FolderManager> {
let user: Arc<dyn FolderUser> = Arc::new(FolderUserImpl(user_manager.clone()));
let user: Arc<dyn FolderUser> = Arc::new(FolderUserImpl {
user_manager: user_manager.clone(),
database_manager: Arc::downgrade(database_manager),
server_provider: server_provider.clone(),
});
let handlers = folder_operation_handlers(document_manager.clone(), database_manager.clone());
Arc::new(
FolderManager::new(user.clone(), collab_builder, handlers, folder_cloud)
.await
.unwrap(),
FolderManager::new(
user.clone(),
collab_builder,
handlers,
server_provider.clone(),
)
.await
.unwrap(),
)
}
}
@ -63,11 +78,17 @@ fn folder_operation_handlers(
Arc::new(map)
}
struct FolderUserImpl(Weak<UserManager>);
struct FolderUserImpl {
user_manager: Weak<UserManager>,
database_manager: Weak<DatabaseManager>,
server_provider: Arc<ServerProvider>,
}
#[async_trait]
impl FolderUser for FolderUserImpl {
fn user_id(&self) -> Result<i64, FlowyError> {
self
.0
.user_manager
.upgrade()
.ok_or(FlowyError::internal().with_context("Unexpected error: UserSession is None"))?
.user_id()
@ -75,7 +96,7 @@ impl FolderUser for FolderUserImpl {
fn token(&self) -> Result<Option<String>, FlowyError> {
self
.0
.user_manager
.upgrade()
.ok_or(FlowyError::internal().with_context("Unexpected error: UserSession is None"))?
.token()
@ -83,11 +104,160 @@ impl FolderUser for FolderUserImpl {
fn collab_db(&self, uid: i64) -> Result<Weak<RocksCollabDB>, FlowyError> {
self
.0
.user_manager
.upgrade()
.ok_or(FlowyError::internal().with_context("Unexpected error: UserSession is None"))?
.get_collab_db(uid)
}
/// Imports an AppFlowy data folder located at `path` into the current
/// workspace, nesting the imported hierarchy under a view named
/// `container_name`.
///
/// The import itself runs on a blocking thread via `spawn_blocking`; the
/// resulting collab objects are then uploaded to the cloud (a no-op unless
/// the active server is AppFlowy Cloud — see `upload_collab_data`) and the
/// imported database views are registered with the database manager.
/// Returns the root of the imported view tree, or an internal error when
/// either the user manager or the database manager has been dropped.
async fn import_appflowy_data_folder(
  &self,
  workspace_id: &str,
  path: &str,
  container_name: &str,
) -> Result<ParentChildViews, FlowyError> {
  match (self.user_manager.upgrade(), self.database_manager.upgrade()) {
    (Some(user_manager), Some(data_manager)) => {
      let source = ImportDataSource::AppFlowyDataFolder {
        path: path.to_string(),
        container_name: container_name.to_string(),
      };
      // `import_data` is blocking work; keep it off the async runtime.
      let cloned_user_manager = user_manager.clone();
      let import_data =
        tokio::task::spawn_blocking(move || cloned_user_manager.import_data(source))
          .await
          .map_err(internal_error)??;
      match import_data {
        ImportData::AppFlowyDataFolder {
          view,
          database_view_ids_by_database_id,
          row_object_ids,
          database_object_ids,
          document_object_ids,
        } => {
          let uid = self.user_id()?;
          // Upload the imported collabs first, then track the database
          // views locally.
          self
            .upload_collab_data(
              workspace_id,
              row_object_ids,
              database_object_ids,
              document_object_ids,
              uid,
            )
            .await?;
          data_manager
            .track_database(database_view_ids_by_database_id)
            .await?;
          Ok(view)
        },
      }
    },
    _ => Err(FlowyError::internal().with_context("Unexpected error: UserSession is None")),
  }
}
}
impl FolderUserImpl {
  /// Uploads the imported collab objects (databases, documents and database
  /// rows) to the AppFlowy Cloud server in size-limited batches.
  ///
  /// A no-op when the active server is not AppFlowy Cloud. The payload of a
  /// single batch request is capped at 2 MB: once adding the next object
  /// would exceed the cap, the accumulated batch is flushed before the
  /// object is queued for the next batch.
  async fn upload_collab_data(
    &self,
    workspace_id: &str,
    row_object_ids: Vec<String>,
    database_object_ids: Vec<String>,
    document_object_ids: Vec<String>,
    uid: i64,
  ) -> Result<(), FlowyError> {
    // Only support uploading the collab data when the current server is AppFlowy Cloud server
    if self.server_provider.get_appflowy_cloud_server().is_err() {
      return Ok(());
    }

    // Propagate the error instead of panicking (`unwrap`) when the collab db
    // cannot be obtained.
    let collab_db = self
      .collab_db(uid)?
      .upgrade()
      .ok_or(FlowyError::new(
        ErrorCode::Internal,
        "Can't get the collab db",
      ))?;

    // Reading and encoding the collabs touches disk; run it on a blocking
    // thread instead of the async runtime.
    let object_by_collab_type = tokio::task::spawn_blocking(move || {
      let collab_read = collab_db.read_txn();
      let mut object_by_collab_type = HashMap::new();
      object_by_collab_type.insert(
        CollabType::Database,
        load_and_process_collab_data(uid, &collab_read, &database_object_ids),
      );
      object_by_collab_type.insert(
        CollabType::Document,
        load_and_process_collab_data(uid, &collab_read, &document_object_ids),
      );
      object_by_collab_type.insert(
        CollabType::DatabaseRow,
        load_and_process_collab_data(uid, &collab_read, &row_object_ids),
      );
      object_by_collab_type
    })
    .await
    .map_err(internal_error)?;

    // Upload in batches whose total payload stays under the limit.
    let mut size_counter = 0;
    let mut objects: Vec<FolderCollabParams> = vec![];
    let upload_size_limit = 2 * 1024 * 1024;
    for (collab_type, encoded_v1_by_oid) in object_by_collab_type {
      info!(
        "Batch import collab:{} ids: {:?}",
        collab_type,
        encoded_v1_by_oid.keys(),
      );
      for (oid, encoded_v1) in encoded_v1_by_oid {
        let obj_size = encoded_v1.len();
        if size_counter + obj_size > upload_size_limit && !objects.is_empty() {
          // When the limit is exceeded, batch create with the current list of objects
          // and reset for the next batch.
          self
            .server_provider
            .batch_create_collab_object(workspace_id, objects)
            .await?;
          objects = Vec::new();
          size_counter = 0;
        }
        // Add the current object to the batch.
        objects.push(FolderCollabParams {
          object_id: oid,
          encoded_collab_v1: encoded_v1,
          collab_type: collab_type.clone(),
          override_if_exist: false,
        });
        size_counter += obj_size;
      }
    }

    // After the loop, upload any remaining objects.
    if !objects.is_empty() {
      info!(
        "Batch create collab objects: {}, payload size: {}",
        objects
          .iter()
          .map(|o| o.object_id.clone())
          .collect::<Vec<_>>()
          .join(", "),
        size_counter
      );
      self
        .server_provider
        .batch_create_collab_object(workspace_id, objects)
        .await?;
    }
    Ok(())
  }
}
struct DocumentFolderOperation(Arc<DocumentManager>);
@ -411,3 +581,24 @@ pub fn layout_type_from_view_layout(layout: ViewLayoutPB) -> DatabaseLayoutPB {
ViewLayoutPB::Document => DatabaseLayoutPB::Grid,
}
}
fn load_and_process_collab_data<'a, R>(
uid: i64,
collab_read: &R,
object_ids: &[String],
) -> HashMap<String, Vec<u8>>
where
R: YrsDocAction<'a>,
PersistenceError: From<R::Error>,
{
load_collab_by_oid(uid, collab_read, object_ids)
.into_iter()
.filter_map(|(oid, collab)| {
collab
.encode_collab_v1()
.encode_to_bytes()
.ok()
.map(|encoded_v1| (oid, encoded_v1))
})
.collect()
}

View File

@ -3,6 +3,7 @@ use std::sync::Arc;
use anyhow::Error;
use bytes::Bytes;
use client_api::collab_sync::{SinkConfig, SinkStrategy, SyncObject, SyncPlugin};
use collab::core::collab::CollabDocState;
use collab::core::origin::{CollabClient, CollabOrigin};
use collab::preclude::CollabPlugin;
use collab_entity::CollabType;
@ -13,20 +14,17 @@ use collab_integrate::collab_builder::{
CollabDataSource, CollabStorageProvider, CollabStorageProviderContext,
};
use collab_integrate::postgres::SupabaseDBPlugin;
use flowy_database_deps::cloud::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use flowy_database_deps::cloud::{CollabDocStateByOid, DatabaseCloudService, DatabaseSnapshot};
use flowy_document2::deps::DocumentData;
use flowy_document_deps::cloud::{DocumentCloudService, DocumentSnapshot};
use flowy_error::FlowyError;
use flowy_folder_deps::cloud::{
FolderCloudService, FolderData, FolderSnapshot, Workspace, WorkspaceRecord,
FolderCloudService, FolderCollabParams, FolderData, FolderSnapshot, Workspace, WorkspaceRecord,
};
use flowy_server_config::af_cloud_config::AFCloudConfiguration;
use flowy_server_config::supabase_config::SupabaseConfiguration;
use flowy_storage::{FileStorageService, StorageObject};
use flowy_user::event_map::UserCloudServiceProvider;
use flowy_user_deps::cloud::UserCloudService;
use flowy_user_deps::cloud::{UserCloudService, UserCloudServiceProvider};
use flowy_user_deps::entities::{Authenticator, UserTokenState};
use lib_infra::future::{to_fut, Fut, FutureResult};
@ -187,17 +185,35 @@ impl FolderCloudService for ServerProvider {
})
}
fn get_folder_doc_state(
fn get_collab_doc_state_f(
&self,
workspace_id: &str,
uid: i64,
) -> FutureResult<Vec<Vec<u8>>, Error> {
collab_type: CollabType,
object_id: &str,
) -> FutureResult<CollabDocState, Error> {
let object_id = object_id.to_string();
let workspace_id = workspace_id.to_string();
let server = self.get_server(&self.get_server_type());
FutureResult::new(async move {
server?
.folder_service()
.get_folder_doc_state(&workspace_id, uid)
.get_collab_doc_state_f(&workspace_id, uid, collab_type, &object_id)
.await
})
}
/// Forwards a batch of collab objects to the active server's folder
/// service for creation, scoped to `workspace_id`.
fn batch_create_collab_object(
  &self,
  workspace_id: &str,
  objects: Vec<FolderCollabParams>,
) -> FutureResult<(), Error> {
  // Take owned copies so the async block does not borrow `self`.
  let workspace_id = workspace_id.to_string();
  let server = self.get_server(&self.get_server_type());
  FutureResult::new(async move {
    server?
      .folder_service()
      .batch_create_collab_object(&workspace_id, objects)
      .await
  })
}
@ -211,35 +227,35 @@ impl FolderCloudService for ServerProvider {
}
impl DatabaseCloudService for ServerProvider {
fn get_collab_update(
fn get_collab_doc_state_db(
&self,
object_id: &str,
collab_type: CollabType,
workspace_id: &str,
) -> FutureResult<CollabObjectUpdate, Error> {
) -> FutureResult<CollabDocState, Error> {
let workspace_id = workspace_id.to_string();
let server = self.get_server(&self.get_server_type());
let database_id = object_id.to_string();
FutureResult::new(async move {
server?
.database_service()
.get_collab_update(&database_id, collab_type, &workspace_id)
.get_collab_doc_state_db(&database_id, collab_type, &workspace_id)
.await
})
}
fn batch_get_collab_updates(
fn batch_get_collab_doc_state_db(
&self,
object_ids: Vec<String>,
object_ty: CollabType,
workspace_id: &str,
) -> FutureResult<CollabObjectUpdateByOid, Error> {
) -> FutureResult<CollabDocStateByOid, Error> {
let workspace_id = workspace_id.to_string();
let server = self.get_server(&self.get_server_type());
FutureResult::new(async move {
server?
.database_service()
.batch_get_collab_updates(object_ids, object_ty, &workspace_id)
.batch_get_collab_doc_state_db(object_ids, object_ty, &workspace_id)
.await
})
}
@ -265,7 +281,7 @@ impl DocumentCloudService for ServerProvider {
&self,
document_id: &str,
workspace_id: &str,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
) -> FutureResult<CollabDocState, FlowyError> {
let workspace_id = workspace_id.to_string();
let document_id = document_id.to_string();
let server = self.get_server(&self.get_server_type());

View File

@ -1,6 +1,7 @@
use std::sync::Arc;
use anyhow::Context;
use collab_entity::CollabType;
use tracing::event;
use collab_integrate::collab_builder::AppFlowyCollabBuilder;
@ -8,8 +9,8 @@ use flowy_database2::DatabaseManager;
use flowy_document2::manager::DocumentManager;
use flowy_error::FlowyResult;
use flowy_folder2::manager::{FolderInitDataSource, FolderManager};
use flowy_user::event_map::{UserCloudServiceProvider, UserStatusCallback};
use flowy_user_deps::cloud::UserCloudConfig;
use flowy_user::event_map::UserStatusCallback;
use flowy_user_deps::cloud::{UserCloudConfig, UserCloudServiceProvider};
use flowy_user_deps::entities::{Authenticator, UserProfile, UserWorkspace};
use lib_infra::future::{to_fut, Fut};
@ -69,7 +70,7 @@ impl UserStatusCallback for UserStatusCallbackImpl {
.initialize(
user_id,
user_workspace.id.clone(),
user_workspace.database_storage_id,
user_workspace.database_view_tracker_id,
)
.await?;
document_manager
@ -107,7 +108,7 @@ impl UserStatusCallback for UserStatusCallbackImpl {
.initialize(
user_id,
user_workspace.id.clone(),
user_workspace.database_storage_id,
user_workspace.database_view_tracker_id,
)
.await?;
document_manager
@ -146,7 +147,12 @@ impl UserStatusCallback for UserStatusCallbackImpl {
// for initializing a default workspace differs depending on the sign-up method used.
let data_source = match folder_manager
.cloud_service
.get_folder_doc_state(&user_workspace.id, user_profile.uid)
.get_collab_doc_state_f(
&user_workspace.id,
user_profile.uid,
CollabType::Folder,
&user_workspace.id,
)
.await
{
Ok(doc_state) => FolderInitDataSource::Cloud(doc_state),
@ -170,7 +176,7 @@ impl UserStatusCallback for UserStatusCallbackImpl {
.initialize_with_new_user(
user_profile.uid,
user_workspace.id.clone(),
user_workspace.database_storage_id,
user_workspace.database_view_tracker_id,
)
.await
.context("DatabaseManager error")?;
@ -208,7 +214,7 @@ impl UserStatusCallback for UserStatusCallbackImpl {
.initialize(
user_id,
user_workspace.id.clone(),
user_workspace.database_storage_id,
user_workspace.database_view_tracker_id,
)
.await?;
document_manager

View File

@ -14,8 +14,10 @@ use flowy_folder2::manager::FolderManager;
use flowy_sqlite::kv::StorePreferences;
use flowy_storage::FileStorageService;
use flowy_task::{TaskDispatcher, TaskRunner};
use flowy_user::event_map::UserCloudServiceProvider;
use flowy_user::manager::{UserConfig, UserManager};
use flowy_user::manager::UserManager;
use flowy_user::services::entities::UserConfig;
use flowy_user_deps::cloud::UserCloudServiceProvider;
use lib_dispatch::prelude::*;
use lib_dispatch::runtime::AFPluginRuntime;
use module::make_plugins;
@ -117,6 +119,7 @@ impl AppFlowyCore {
server_provider.clone(),
config.device_id.clone(),
));
let user_manager = init_user_manager(
&config,
&store_preference,

View File

@ -9,4 +9,5 @@ edition = "2021"
lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-error = { workspace = true }
collab-entity = { version = "0.1.0" }
collab = { version = "0.1.0" }
anyhow.workspace = true

View File

@ -1,30 +1,32 @@
use std::collections::HashMap;
use anyhow::Error;
use collab::core::collab::CollabDocState;
use collab_entity::CollabType;
use lib_infra::future::FutureResult;
pub type CollabObjectUpdateByOid = HashMap<String, CollabObjectUpdate>;
pub type CollabObjectUpdate = Vec<Vec<u8>>;
pub type CollabDocStateByOid = HashMap<String, CollabDocState>;
/// A trait for database cloud service.
/// Each kind of server should implement this trait. Check out the [AppFlowyServerProvider] of
/// [flowy-server] crate for more information.
pub trait DatabaseCloudService: Send + Sync {
fn get_collab_update(
/// The suffix 'db' in the method name serves as a workaround to avoid naming conflicts with the existing method `get_collab_doc_state`.
fn get_collab_doc_state_db(
&self,
object_id: &str,
collab_type: CollabType,
workspace_id: &str,
) -> FutureResult<CollabObjectUpdate, Error>;
) -> FutureResult<CollabDocState, Error>;
fn batch_get_collab_updates(
/// The suffix 'db' in the method name serves as a workaround to avoid naming conflicts with the existing method `get_collab_doc_state`.
fn batch_get_collab_doc_state_db(
&self,
object_ids: Vec<String>,
object_ty: CollabType,
workspace_id: &str,
) -> FutureResult<CollabObjectUpdateByOid, Error>;
) -> FutureResult<CollabDocStateByOid, Error>;
fn get_collab_snapshots(
&self,

View File

@ -1,13 +1,13 @@
use std::collections::HashMap;
use std::num::NonZeroUsize;
use std::sync::{Arc, Weak};
use collab::core::collab::{CollabRawData, MutexCollab};
use collab::core::collab::{CollabDocState, MutexCollab};
use collab_database::blocks::BlockEvent;
use collab_database::database::{DatabaseData, MutexDatabase, YrsDocAction};
use collab_database::error::DatabaseError;
use collab_database::user::{
CollabFuture, CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCollabService,
WorkspaceDatabase,
CollabDocStateByOid, CollabFuture, DatabaseCollabService, WorkspaceDatabase,
};
use collab_database::views::{CreateDatabaseParams, CreateViewParams, DatabaseLayout};
use collab_entity::CollabType;
@ -95,14 +95,14 @@ impl DatabaseManager {
cloud_service: self.cloud_service.clone(),
};
let config = CollabPersistenceConfig::new().snapshot_per_update(10);
let mut collab_raw_data = CollabRawData::default();
let mut collab_raw_data = CollabDocState::default();
// If the workspace database not exist in disk, try to fetch from remote.
if !self.is_collab_exist(uid, &collab_db, &database_views_aggregate_id) {
trace!("workspace database not exist, try to fetch from remote");
match self
.cloud_service
.get_collab_update(
.get_collab_doc_state_db(
&database_views_aggregate_id,
CollabType::WorkspaceDatabase,
&workspace_id,
@ -172,6 +172,19 @@ impl DatabaseManager {
RepeatedDatabaseDescriptionPB { items }
}
/// Registers each database id, together with the view ids that belong to
/// it, with the workspace database so they are tracked from now on.
pub async fn track_database(
  &self,
  view_ids_by_database_id: HashMap<String, Vec<String>>,
) -> FlowyResult<()> {
  let wdb = self.get_workspace_database().await?;
  for (database_id, view_ids) in view_ids_by_database_id {
    wdb.track_database(&database_id, view_ids);
  }
  Ok(())
}
pub async fn get_database_with_view_id(&self, view_id: &str) -> FlowyResult<Arc<DatabaseEditor>> {
let database_id = self.get_database_id_with_view_id(view_id).await?;
self.get_database(&database_id).await
@ -394,11 +407,11 @@ struct UserDatabaseCollabServiceImpl {
}
impl DatabaseCollabService for UserDatabaseCollabServiceImpl {
fn get_collab_update(
fn get_collab_doc_state(
&self,
object_id: &str,
object_ty: CollabType,
) -> CollabFuture<Result<CollabObjectUpdate, DatabaseError>> {
) -> CollabFuture<Result<CollabDocState, DatabaseError>> {
let workspace_id = self.workspace_id.clone();
let object_id = object_id.to_string();
let weak_cloud_service = Arc::downgrade(&self.cloud_service);
@ -410,7 +423,7 @@ impl DatabaseCollabService for UserDatabaseCollabServiceImpl {
},
Some(cloud_service) => {
let updates = cloud_service
.get_collab_update(&object_id, object_ty, &workspace_id)
.get_collab_doc_state_db(&object_id, object_ty, &workspace_id)
.await?;
Ok(updates)
},
@ -422,18 +435,18 @@ impl DatabaseCollabService for UserDatabaseCollabServiceImpl {
&self,
object_ids: Vec<String>,
object_ty: CollabType,
) -> CollabFuture<Result<CollabObjectUpdateByOid, DatabaseError>> {
) -> CollabFuture<Result<CollabDocStateByOid, DatabaseError>> {
let workspace_id = self.workspace_id.clone();
let weak_cloud_service = Arc::downgrade(&self.cloud_service);
Box::pin(async move {
match weak_cloud_service.upgrade() {
None => {
tracing::warn!("Cloud service is dropped");
Ok(CollabObjectUpdateByOid::default())
Ok(CollabDocStateByOid::default())
},
Some(cloud_service) => {
let updates = cloud_service
.batch_get_collab_updates(object_ids, object_ty, &workspace_id)
.batch_get_collab_doc_state_db(object_ids, object_ty, &workspace_id)
.await?;
Ok(updates)
},
@ -447,7 +460,7 @@ impl DatabaseCollabService for UserDatabaseCollabServiceImpl {
object_id: &str,
object_type: CollabType,
collab_db: Weak<RocksCollabDB>,
collab_raw_data: CollabRawData,
collab_raw_data: CollabDocState,
config: &CollabPersistenceConfig,
) -> Arc<MutexCollab> {
block_on(self.collab_builder.build_with_config(

View File

@ -9,4 +9,5 @@ edition = "2021"
lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-error = { workspace = true }
collab-document = { version = "0.1.0" }
anyhow.workspace = true
anyhow.workspace = true
collab = { version = "0.1.0" }

View File

@ -1,4 +1,5 @@
use anyhow::Error;
use collab::core::collab::CollabDocState;
pub use collab_document::blocks::DocumentData;
use flowy_error::FlowyError;
@ -12,7 +13,7 @@ pub trait DocumentCloudService: Send + Sync + 'static {
&self,
document_id: &str,
workspace_id: &str,
) -> FutureResult<Vec<Vec<u8>>, FlowyError>;
) -> FutureResult<CollabDocState, FlowyError>;
fn get_document_snapshots(
&self,

View File

@ -12,9 +12,7 @@ use collab_document::blocks::{
use tracing::instrument;
use flowy_error::{FlowyError, FlowyResult};
use lib_dispatch::prelude::{
data_result_ok, AFPluginData, AFPluginDataValidator, AFPluginState, DataResult,
};
use lib_dispatch::prelude::{data_result_ok, AFPluginData, AFPluginState, DataResult};
use crate::entities::*;
use crate::parser::document_data_parser::DocumentDataParser;
@ -383,7 +381,7 @@ pub async fn convert_document_handler(
pub(crate) async fn convert_data_to_json_handler(
data: AFPluginData<ConvertDataToJsonPayloadPB>,
) -> DataResult<ConvertDataToJsonResponsePB, FlowyError> {
let payload: ConvertDataToJsonParams = data.validate()?.into_inner().try_into()?;
let payload: ConvertDataToJsonParams = data.try_into_inner()?.try_into()?;
let parser = ExternalDataToNestedJSONParser::new(payload.data, payload.input_type);
let result = match parser.to_nested_block() {

View File

@ -2,7 +2,7 @@ use std::num::NonZeroUsize;
use std::sync::Arc;
use std::sync::Weak;
use collab::core::collab::{CollabRawData, MutexCollab};
use collab::core::collab::{CollabDocState, MutexCollab};
use collab::core::collab_plugin::EncodedCollabV1;
use collab::core::origin::CollabOrigin;
use collab::preclude::Collab;
@ -102,12 +102,7 @@ impl DocumentManager {
let encoded_collab_v1 =
doc_state_from_document_data(doc_id, data.unwrap_or_else(default_document_data))?;
let collab = self
.collab_for_document(
uid,
doc_id,
vec![encoded_collab_v1.doc_state.to_vec()],
false,
)
.collab_for_document(uid, doc_id, encoded_collab_v1.doc_state.to_vec(), false)
.await?;
collab.lock().flush();
Ok(())
@ -121,15 +116,15 @@ impl DocumentManager {
return Ok(doc);
}
let mut updates = vec![];
let mut doc_state = vec![];
if !self.is_doc_exist(doc_id)? {
// Try to get the document from the cloud service
let result: Result<CollabRawData, FlowyError> = self
let result: Result<CollabDocState, FlowyError> = self
.cloud_service
.get_document_doc_state(doc_id, &self.user.workspace_id()?)
.await;
updates = match result {
doc_state = match result {
Ok(data) => data,
Err(err) => {
if err.is_record_not_found() {
@ -141,7 +136,7 @@ impl DocumentManager {
"can't find the document in the cloud, doc_id: {}",
doc_id
);
vec![default_document_collab_data(doc_id).doc_state.to_vec()]
default_document_collab_data(doc_id).doc_state.to_vec()
} else {
return Err(err);
}
@ -151,7 +146,9 @@ impl DocumentManager {
let uid = self.user.user_id()?;
event!(tracing::Level::DEBUG, "Initialize document: {}", doc_id);
let collab = self.collab_for_document(uid, doc_id, updates, true).await?;
let collab = self
.collab_for_document(uid, doc_id, doc_state, true)
.await?;
let document = Arc::new(MutexDocument::open(doc_id, collab)?);
// save the document to the memory and read it from the memory if we open the same document again.
@ -233,7 +230,7 @@ impl DocumentManager {
&self,
uid: i64,
doc_id: &str,
doc_state: Vec<Vec<u8>>,
doc_state: CollabDocState,
sync_enable: bool,
) -> FlowyResult<Arc<MutexCollab>> {
let db = self.user.collab_db(uid)?;

View File

@ -3,6 +3,7 @@ use std::sync::Arc;
use anyhow::Error;
use bytes::Bytes;
use collab::core::collab::CollabDocState;
use collab::preclude::CollabPlugin;
use collab_document::blocks::DocumentData;
use collab_document::document_data::default_document_data;
@ -110,7 +111,7 @@ pub async fn create_and_open_empty_document() -> (DocumentTest, Arc<MutexDocumen
let data = default_document_data();
let uid = test.user.user_id().unwrap();
// create a document
_ = test
test
.create_document(uid, &doc_id, Some(data.clone()))
.await
.unwrap();
@ -135,7 +136,7 @@ impl DocumentCloudService for LocalTestDocumentCloudServiceImpl {
&self,
_document_id: &str,
_workspace_id: &str,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
) -> FutureResult<CollabDocState, FlowyError> {
FutureResult::new(async move { Ok(vec![]) })
}

View File

@ -9,5 +9,10 @@ edition = "2021"
lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-error = { workspace = true }
collab-folder = { version = "0.1.0" }
collab = { version = "0.1.0" }
collab-entity = { version = "0.1.0" }
uuid.workspace = true
anyhow.workspace = true
anyhow.workspace = true
[dev-dependencies]
tokio.workspace = true

View File

@ -1,4 +1,6 @@
pub use anyhow::Error;
use collab::core::collab::CollabDocState;
use collab_entity::CollabType;
pub use collab_folder::{Folder, FolderData, Workspace};
use uuid::Uuid;
@ -28,12 +30,32 @@ pub trait FolderCloudService: Send + Sync + 'static {
limit: usize,
) -> FutureResult<Vec<FolderSnapshot>, Error>;
fn get_folder_doc_state(&self, workspace_id: &str, uid: i64)
-> FutureResult<Vec<Vec<u8>>, Error>;
/// The suffix 'f' in the method name serves as a workaround to avoid naming conflicts with the existing method `get_collab_doc_state`.
fn get_collab_doc_state_f(
&self,
workspace_id: &str,
uid: i64,
collab_type: CollabType,
object_id: &str,
) -> FutureResult<CollabDocState, Error>;
fn batch_create_collab_object(
&self,
workspace_id: &str,
objects: Vec<FolderCollabParams>,
) -> FutureResult<(), Error>;
fn service_name(&self) -> String;
}
/// Parameters describing one collab object to create remotely via
/// [FolderCloudService::batch_create_collab_object].
#[derive(Debug)]
pub struct FolderCollabParams {
  // Id of the collab object to create.
  pub object_id: String,
  // Encoded collab payload; per the field name this is the v1 encoding
  // (`EncodedCollabV1`) — confirm against the server contract.
  pub encoded_collab_v1: Vec<u8>,
  // The kind of collab object (document, database, folder, ...).
  pub collab_type: CollabType,
  // Whether an already-existing remote object with the same id is replaced.
  pub override_if_exist: bool,
}
pub struct FolderSnapshot {
pub snapshot_id: i64,
pub database_id: String,

View File

@ -0,0 +1,13 @@
use crate::folder_builder::ParentChildViews;
use std::collections::HashMap;
/// The result of an import operation, ready to be merged into the current
/// workspace.
pub enum ImportData {
  /// Data imported from another AppFlowy data folder.
  AppFlowyDataFolder {
    // Root view (with its nested children) to attach to the workspace.
    view: ParentChildViews,
    /// Used to update the [DatabaseViewTrackerList] when importing the database.
    database_view_ids_by_database_id: HashMap<String, Vec<String>>,
    // Collab object ids of the imported database rows.
    row_object_ids: Vec<String>,
    // Collab object ids of the imported documents.
    document_object_ids: Vec<String>,
    // Collab object ids of the imported databases.
    database_object_ids: Vec<String>,
  },
}

View File

@ -0,0 +1,317 @@
use crate::cloud::gen_view_id;
use collab_folder::{RepeatedViewIdentifier, View, ViewIcon, ViewIdentifier, ViewLayout};
use lib_infra::util::timestamp;
use std::future::Future;
/// Builds the first-level views of a workspace.
///
/// Every view produced through [`Self::with_view_builder`] becomes a direct
/// child of the workspace identified by `workspace_id`.
pub struct WorkspaceViewBuilder {
  pub uid: i64,
  pub workspace_id: String,
  pub views: Vec<ParentChildViews>,
}

impl WorkspaceViewBuilder {
  /// Creates an empty builder for the given workspace and user.
  pub fn new(workspace_id: String, uid: i64) -> Self {
    Self {
      uid,
      workspace_id,
      views: Vec::new(),
    }
  }

  /// Hands `view_builder` a fresh [`ViewBuilder`] parented to the workspace
  /// and records the view it produces.
  pub async fn with_view_builder<F, O>(&mut self, view_builder: F)
  where
    F: Fn(ViewBuilder) -> O,
    O: Future<Output = ParentChildViews>,
  {
    let child_builder = ViewBuilder::new(self.uid, self.workspace_id.clone());
    let view = view_builder(child_builder).await;
    self.views.push(view);
  }

  /// Returns the accumulated views, leaving the builder empty for reuse.
  pub fn build(&mut self) -> Vec<ParentChildViews> {
    std::mem::replace(&mut self.views, Vec::new())
  }
}
/// A builder for creating a view.
/// The default layout of the view is [ViewLayout::Document]
pub struct ViewBuilder {
uid: i64,
parent_view_id: String,
view_id: String,
name: String,
desc: String,
layout: ViewLayout,
child_views: Vec<ParentChildViews>,
is_favorite: bool,
icon: Option<ViewIcon>,
}
impl ViewBuilder {
pub fn new(uid: i64, parent_view_id: String) -> Self {
Self {
uid,
parent_view_id,
view_id: gen_view_id().to_string(),
name: Default::default(),
desc: Default::default(),
layout: ViewLayout::Document,
child_views: vec![],
is_favorite: false,
icon: None,
}
}
pub fn view_id(&self) -> &str {
&self.view_id
}
pub fn with_view_id<T: ToString>(mut self, view_id: T) -> Self {
self.view_id = view_id.to_string();
self
}
pub fn with_layout(mut self, layout: ViewLayout) -> Self {
self.layout = layout;
self
}
pub fn with_name<T: ToString>(mut self, name: T) -> Self {
self.name = name.to_string();
self
}
pub fn with_desc(mut self, desc: &str) -> Self {
self.desc = desc.to_string();
self
}
pub fn with_icon(mut self, icon: &str) -> Self {
self.icon = Some(ViewIcon {
ty: collab_folder::IconType::Emoji,
value: icon.to_string(),
});
self
}
pub fn with_view(mut self, view: ParentChildViews) -> Self {
self.child_views.push(view);
self
}
pub fn with_child_views(mut self, mut views: Vec<ParentChildViews>) -> Self {
self.child_views.append(&mut views);
self
}
/// Create a child view for the current view.
/// The view created by this builder will be the next level view of the current view.
pub async fn with_child_view_builder<F, O>(mut self, child_view_builder: F) -> Self
where
F: Fn(ViewBuilder) -> O,
O: Future<Output = ParentChildViews>,
{
let builder = ViewBuilder::new(self.uid, self.view_id.clone());
self.child_views.push(child_view_builder(builder).await);
self
}
pub fn build(self) -> ParentChildViews {
let view = View {
id: self.view_id,
parent_view_id: self.parent_view_id,
name: self.name,
desc: self.desc,
created_at: timestamp(),
is_favorite: self.is_favorite,
layout: self.layout,
icon: self.icon,
created_by: Some(self.uid),
last_edited_time: 0,
children: RepeatedViewIdentifier::new(
self
.child_views
.iter()
.map(|v| ViewIdentifier {
id: v.parent_view.id.clone(),
})
.collect(),
),
last_edited_by: Some(self.uid),
};
ParentChildViews {
parent_view: view,
child_views: self.child_views,
}
}
}
/// A view paired with its (possibly nested) child views.
#[derive(Clone)]
pub struct ParentChildViews {
  pub parent_view: View,
  pub child_views: Vec<ParentChildViews>,
}

impl ParentChildViews {
  /// Wraps a single view with no children.
  pub fn new(view: View) -> Self {
    Self {
      parent_view: view,
      child_views: Vec::new(),
    }
  }

  /// Flattens this view and all of its descendants into a list,
  /// parent first (pre-order).
  pub fn flatten(self) -> Vec<View> {
    FlattedViews::flatten_views(vec![self])
  }
}
/// Namespace for flattening nested [`ParentChildViews`] trees.
pub struct FlattedViews;

impl FlattedViews {
  /// Flattens the view trees into a single list, emitting each parent
  /// before its children (pre-order traversal).
  pub fn flatten_views(views: Vec<ParentChildViews>) -> Vec<View> {
    views
      .into_iter()
      .flat_map(|node| {
        let mut flat = vec![node.parent_view];
        flat.extend(Self::flatten_views(node.child_views));
        flat
      })
      .collect()
  }
}
// Tests for the workspace/view builders: each test builds a view hierarchy
// and checks both the tree shape and the flattened (pre-order) view count.
#[cfg(test)]
mod tests {
  use crate::folder_builder::{FlattedViews, WorkspaceViewBuilder};

  // Three sibling views directly under the workspace; flattening is a no-op
  // on depth, so the flat list has the same length.
  #[tokio::test]
  async fn create_first_level_views_test() {
    let workspace_id = "w1".to_string();
    let mut builder = WorkspaceViewBuilder::new(workspace_id, 1);
    builder
      .with_view_builder(|view_builder| async { view_builder.with_name("1").build() })
      .await;
    builder
      .with_view_builder(|view_builder| async { view_builder.with_name("2").build() })
      .await;
    builder
      .with_view_builder(|view_builder| async { view_builder.with_name("3").build() })
      .await;
    let workspace_views = builder.build();
    assert_eq!(workspace_views.len(), 3);

    let views = FlattedViews::flatten_views(workspace_views);
    assert_eq!(views.len(), 3);
  }

  // Two top-level views with 2 and 1 children respectively; flattening
  // yields 2 parents + 3 children = 5 views.
  #[tokio::test]
  async fn create_view_with_child_views_test() {
    let workspace_id = "w1".to_string();
    let mut builder = WorkspaceViewBuilder::new(workspace_id, 1);
    builder
      .with_view_builder(|view_builder| async {
        view_builder
          .with_name("1")
          .with_child_view_builder(|child_view_builder| async {
            child_view_builder.with_name("1_1").build()
          })
          .await
          .with_child_view_builder(|child_view_builder| async {
            child_view_builder.with_name("1_2").build()
          })
          .await
          .build()
      })
      .await;
    builder
      .with_view_builder(|view_builder| async {
        view_builder
          .with_name("2")
          .with_child_view_builder(|child_view_builder| async {
            child_view_builder.with_name("2_1").build()
          })
          .await
          .build()
      })
      .await;
    let workspace_views = builder.build();
    assert_eq!(workspace_views.len(), 2);
    assert_eq!(workspace_views[0].parent_view.name, "1");
    assert_eq!(workspace_views[0].child_views.len(), 2);
    assert_eq!(workspace_views[0].child_views[0].parent_view.name, "1_1");
    assert_eq!(workspace_views[0].child_views[1].parent_view.name, "1_2");
    assert_eq!(workspace_views[1].child_views.len(), 1);
    assert_eq!(workspace_views[1].child_views[0].parent_view.name, "2_1");
    let views = FlattedViews::flatten_views(workspace_views);
    assert_eq!(views.len(), 5);
  }

  // One root with two children, each of which has two grandchildren;
  // flattening yields 1 + 2 + 4 = 7 views.
  #[tokio::test]
  async fn create_three_level_view_test() {
    let workspace_id = "w1".to_string();
    let mut builder = WorkspaceViewBuilder::new(workspace_id, 1);
    builder
      .with_view_builder(|view_builder| async {
        view_builder
          .with_name("1")
          .with_child_view_builder(|child_view_builder| async {
            child_view_builder
              .with_name("1_1")
              .with_child_view_builder(|b| async { b.with_name("1_1_1").build() })
              .await
              .with_child_view_builder(|b| async { b.with_name("1_1_2").build() })
              .await
              .build()
          })
          .await
          .with_child_view_builder(|child_view_builder| async {
            child_view_builder
              .with_name("1_2")
              .with_child_view_builder(|b| async { b.with_name("1_2_1").build() })
              .await
              .with_child_view_builder(|b| async { b.with_name("1_2_2").build() })
              .await
              .build()
          })
          .await
          .build()
      })
      .await;
    let workspace_views = builder.build();
    assert_eq!(workspace_views.len(), 1);
    assert_eq!(workspace_views[0].parent_view.name, "1");
    assert_eq!(workspace_views[0].child_views.len(), 2);
    assert_eq!(workspace_views[0].child_views[0].parent_view.name, "1_1");
    assert_eq!(workspace_views[0].child_views[1].parent_view.name, "1_2");
    assert_eq!(
      workspace_views[0].child_views[0].child_views[0]
        .parent_view
        .name,
      "1_1_1"
    );
    assert_eq!(
      workspace_views[0].child_views[0].child_views[1]
        .parent_view
        .name,
      "1_1_2"
    );
    assert_eq!(
      workspace_views[0].child_views[1].child_views[0]
        .parent_view
        .name,
      "1_2_1"
    );
    assert_eq!(
      workspace_views[0].child_views[1].child_views[1]
        .parent_view
        .name,
      "1_2_2"
    );
    let views = FlattedViews::flatten_views(workspace_views);
    assert_eq!(views.len(), 7);
  }
}

View File

@ -1 +1,3 @@
pub mod cloud;
pub mod entities;
pub mod folder_builder;

View File

@ -30,6 +30,7 @@ protobuf.workspace = true
uuid.workspace = true
tokio-stream = { workspace = true, features = ["sync"] }
serde_json.workspace = true
validator = "0.16.0"
[build-dependencies]
flowy-codegen = { path = "../../../shared-lib/flowy-codegen"}

View File

@ -1,9 +1,9 @@
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
use flowy_error::FlowyError;
use crate::entities::parser::empty_str::NotEmptyStr;
use crate::entities::ViewLayoutPB;
use crate::share::{ImportParams, ImportType};
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
use flowy_error::FlowyError;
use validator::Validate;
#[derive(Clone, Debug, ProtoBuf_Enum)]
pub enum ImportTypePB {
@ -84,3 +84,13 @@ impl TryInto<ImportParams> for ImportPB {
})
}
}
/// Payload for the `ImportAppFlowyDataFolder` event.
#[derive(ProtoBuf, Validate, Default)]
pub struct ImportAppFlowyDataPB {
  #[pb(index = 1)]
  #[validate(custom = "lib_infra::validator_fn::required_not_empty_str")]
  // Path of the AppFlowy data folder to import; validated to be non-empty.
  pub path: String,

  #[pb(index = 2)]
  // Name of the container the imported views are grouped under.
  pub import_container_name: String,
}

View File

@ -329,3 +329,16 @@ pub(crate) async fn get_folder_snapshots_handler(
let snapshots = folder.get_folder_snapshots(&data.value, 10).await?;
data_result_ok(RepeatedFolderSnapshotPB { items: snapshots })
}
/// Handles the `ImportAppFlowyDataFolder` event: imports the AppFlowy data
/// folder at `data.path` into the current workspace, grouped under
/// `data.import_container_name`.
#[tracing::instrument(level = "debug", skip_all, err)]
pub async fn import_appflowy_data_folder_handler(
  data: AFPluginData<ImportAppFlowyDataPB>,
  folder: AFPluginState<Weak<FolderManager>>,
) -> Result<(), FlowyError> {
  let manager = upgrade_folder(folder)?;
  let params = data.try_into_inner()?;
  manager
    .import_appflowy_data(params.path, params.import_container_name)
    .await?;
  Ok(())
}

View File

@ -15,7 +15,6 @@ pub fn init(folder: Weak<FolderManager>) -> AFPlugin {
.event(FolderEvent::GetCurrentWorkspaceSetting, read_current_workspace_setting_handler)
.event(FolderEvent::ReadCurrentWorkspace, read_current_workspace_handler)
.event(FolderEvent::ReadWorkspaceViews, get_workspace_views_handler)
// View
.event(FolderEvent::CreateView, create_view_handler)
.event(FolderEvent::CreateOrphanView, create_orphan_view_handler)
.event(FolderEvent::GetView, read_view_handler)
@ -26,7 +25,6 @@ pub fn init(folder: Weak<FolderManager>) -> AFPlugin {
.event(FolderEvent::CloseView, close_view_handler)
.event(FolderEvent::MoveView, move_view_handler)
.event(FolderEvent::MoveNestedView, move_nested_view_handler)
// Trash
.event(FolderEvent::ListTrashItems, read_trash_handler)
.event(FolderEvent::RestoreTrashItem, putback_trash_handler)
.event(FolderEvent::PermanentlyDeleteTrashItem, delete_trash_handler)
@ -39,6 +37,7 @@ pub fn init(folder: Weak<FolderManager>) -> AFPlugin {
.event(FolderEvent::ReadRecentViews, read_recent_views_handler)
.event(FolderEvent::ToggleFavorite, toggle_favorites_handler)
.event(FolderEvent::UpdateRecentViews, update_recent_views_handler)
.event(FolderEvent::ImportAppFlowyDataFolder, import_appflowy_data_folder_handler)
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, Display, Hash, ProtoBuf_Enum, Flowy_Event)]
@ -154,4 +153,7 @@ pub enum FolderEvent {
// used for add or remove recent views, like history
#[event(input = "UpdateRecentViewPayloadPB")]
UpdateRecentViews = 37,
#[event(input = "ImportAppFlowyDataPB")]
ImportAppFlowyDataFolder = 38,
}

View File

@ -2,7 +2,7 @@ use std::fmt::{Display, Formatter};
use std::ops::Deref;
use std::sync::{Arc, Weak};
use collab::core::collab::{CollabRawData, MutexCollab};
use collab::core::collab::{CollabDocState, MutexCollab};
use collab_entity::CollabType;
use collab_folder::{
Folder, FolderData, Section, SectionItem, TrashInfo, View, ViewLayout, ViewUpdate, Workspace,
@ -12,8 +12,10 @@ use tracing::{error, event, info, instrument, Level};
use collab_integrate::collab_builder::{AppFlowyCollabBuilder, CollabBuilderConfig};
use collab_integrate::{CollabPersistenceConfig, RocksCollabDB};
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_error::{internal_error, ErrorCode, FlowyError, FlowyResult};
use flowy_folder_deps::cloud::{gen_view_id, FolderCloudService};
use flowy_folder_deps::folder_builder::ParentChildViews;
use lib_infra::async_trait::async_trait;
use crate::entities::icon::UpdateViewIconParams;
use crate::entities::{
@ -28,14 +30,24 @@ use crate::notification::{
send_notification, send_workspace_setting_notification, FolderNotification,
};
use crate::share::ImportParams;
use crate::util::{folder_not_init_error, workspace_data_not_sync_error};
use crate::util::{
folder_not_init_error, insert_parent_child_views, workspace_data_not_sync_error,
};
use crate::view_operation::{create_view, FolderOperationHandler, FolderOperationHandlers};
/// [FolderUser] represents the user for folder.
#[async_trait]
pub trait FolderUser: Send + Sync {
fn user_id(&self) -> Result<i64, FlowyError>;
fn token(&self) -> Result<Option<String>, FlowyError>;
fn collab_db(&self, uid: i64) -> Result<Weak<RocksCollabDB>, FlowyError>;
async fn import_appflowy_data_folder(
&self,
workspace_id: &str,
path: &str,
container_name: &str,
) -> Result<ParentChildViews, FlowyError>;
}
pub struct FolderManager {
@ -123,7 +135,7 @@ impl FolderManager {
uid: i64,
workspace_id: &str,
collab_db: Weak<RocksCollabDB>,
raw_data: CollabRawData,
collab_doc_state: CollabDocState,
) -> Result<Arc<MutexCollab>, FlowyError> {
let collab = self
.collab_builder
@ -132,7 +144,7 @@ impl FolderManager {
workspace_id,
CollabType::Folder,
collab_db,
raw_data,
collab_doc_state,
&CollabPersistenceConfig::new().enable_snapshot(true),
CollabBuilderConfig::default().sync_enable(true),
)
@ -150,7 +162,7 @@ impl FolderManager {
) -> FlowyResult<()> {
let folder_doc_state = self
.cloud_service
.get_folder_doc_state(workspace_id, user_id)
.get_collab_doc_state_f(workspace_id, user_id, CollabType::Folder, workspace_id)
.await?;
event!(
@ -207,7 +219,7 @@ impl FolderManager {
// when the user signs up for the first time.
let result = self
.cloud_service
.get_folder_doc_state(workspace_id, user_id)
.get_collab_doc_state_f(workspace_id, user_id, CollabType::Folder, workspace_id)
.await
.map_err(FlowyError::from);
@ -341,7 +353,6 @@ impl FolderManager {
pub async fn create_view_with_params(&self, params: CreateViewParams) -> FlowyResult<View> {
let view_layout: ViewLayout = params.layout.clone().into();
let _workspace_id = self.get_current_workspace_id().await?;
let handler = self.get_handler(&view_layout)?;
let user_id = self.user.user_id()?;
let meta = params.meta.clone();
@ -463,7 +474,7 @@ impl FolderManager {
notify_child_views_changed(
view_pb_without_child_views(view),
ChildViewChangeReason::DidDeleteView,
ChildViewChangeReason::Delete,
);
}
},
@ -821,6 +832,34 @@ impl FolderManager {
Ok(())
}
/// Imports another AppFlowy data folder located at `path` into the current
/// workspace, grouping the imported views under a container named `name`.
///
/// The import itself is delegated to `FolderUser::import_appflowy_data_folder`
/// on a spawned task; the result is relayed back over a oneshot channel so
/// this method still returns the outcome to the caller.
pub async fn import_appflowy_data(&self, path: String, name: String) -> Result<(), FlowyError> {
  let (tx, rx) = tokio::sync::oneshot::channel();
  let workspace_id = self.get_current_workspace_id().await?;
  let folder = self.mutex_folder.clone();
  let user = self.user.clone();
  tokio::spawn(async move {
    match user
      .import_appflowy_data_folder(&workspace_id, &path, &name)
      .await
    {
      Ok(view) => {
        // Attach the imported view hierarchy to the in-memory folder, if the
        // folder has been initialized.
        if let Some(folder) = &*folder.lock() {
          insert_parent_child_views(folder, view);
        }
        let _ = tx.send(Ok(()));
      },
      Err(err) => {
        let _ = tx.send(Err(err));
      },
    }
  });
  // First `?` maps a dropped sender to an internal error; the second
  // propagates the import result itself.
  rx.await.map_err(internal_error)??;
  Ok(())
}
pub(crate) async fn import(&self, import_data: ImportParams) -> FlowyResult<View> {
if import_data.data.is_none() && import_data.file_path.is_none() {
return Err(FlowyError::new(
@ -1049,7 +1088,7 @@ pub enum FolderInitDataSource {
/// It means using the data stored on local disk to initialize the folder
LocalDisk { create_if_not_exist: bool },
/// If there is no data stored on local disk, we will use the data from the server to initialize the folder
Cloud(CollabRawData),
Cloud(CollabDocState),
/// If the user is new, we use the [DefaultFolderBuilder] to create the default folder.
FolderData(FolderData),
}

View File

@ -33,7 +33,7 @@ pub(crate) fn subscribe_folder_view_changed(
ViewChange::DidCreateView { view } => {
notify_child_views_changed(
view_pb_without_child_views(Arc::new(view.clone())),
ChildViewChangeReason::DidCreateView,
ChildViewChangeReason::Create,
);
notify_parent_view_did_change(folder.clone(), vec![view.parent_view_id]);
},
@ -41,7 +41,7 @@ pub(crate) fn subscribe_folder_view_changed(
for view in views {
notify_child_views_changed(
view_pb_without_child_views(view),
ChildViewChangeReason::DidDeleteView,
ChildViewChangeReason::Delete,
);
}
},
@ -49,7 +49,7 @@ pub(crate) fn subscribe_folder_view_changed(
notify_view_did_change(view.clone());
notify_child_views_changed(
view_pb_without_child_views(Arc::new(view.clone())),
ChildViewChangeReason::DidUpdateView,
ChildViewChangeReason::Update,
);
notify_parent_view_did_change(folder.clone(), vec![view.parent_view_id.clone()]);
},
@ -198,9 +198,9 @@ fn notify_view_did_change(view: View) -> Option<()> {
}
pub enum ChildViewChangeReason {
DidCreateView,
DidDeleteView,
DidUpdateView,
Create,
Delete,
Update,
}
/// Notify the the list of parent view ids that its child views were changed.
@ -213,13 +213,13 @@ pub(crate) fn notify_child_views_changed(view_pb: ViewPB, reason: ChildViewChang
};
match reason {
ChildViewChangeReason::DidCreateView => {
ChildViewChangeReason::Create => {
payload.create_child_views.push(view_pb);
},
ChildViewChangeReason::DidDeleteView => {
ChildViewChangeReason::Delete => {
payload.delete_child_views.push(view_pb.id);
},
ChildViewChangeReason::DidUpdateView => {
ChildViewChangeReason::Update => {
payload.update_child_views.push(view_pb);
},
}

View File

@ -1,14 +1,13 @@
use std::sync::Arc;
use collab_folder::{FolderData, RepeatedViewIdentifier, ViewIdentifier, Workspace};
use flowy_folder_deps::folder_builder::{FlattedViews, ParentChildViews, WorkspaceViewBuilder};
use tokio::sync::RwLock;
use lib_infra::util::timestamp;
use crate::entities::{view_pb_with_child_views, ViewPB};
use crate::view_operation::{
FlattedViews, FolderOperationHandlers, ParentChildViews, WorkspaceViewBuilder,
};
use crate::view_operation::FolderOperationHandlers;
pub struct DefaultFolderBuilder();
impl DefaultFolderBuilder {

View File

@ -1,7 +1,9 @@
use collab_folder::Folder;
use std::sync::Arc;
use collab_integrate::YrsDocAction;
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_folder_deps::folder_builder::ParentChildViews;
use crate::entities::UserFolderPB;
use crate::manager::FolderUser;
@ -29,3 +31,10 @@ pub(crate) fn workspace_data_not_sync_error(uid: i64, workspace_id: &str) -> Flo
workspace_id: workspace_id.to_string(),
})
}
pub(crate) fn insert_parent_child_views(folder: &Folder, view: ParentChildViews) {
folder.insert_view(view.parent_view, None);
for child_view in view.child_views {
insert_parent_child_views(folder, child_view);
}
}

View File

@ -1,14 +1,14 @@
use std::collections::HashMap;
use std::future::Future;
use std::sync::Arc;
use bytes::Bytes;
pub use collab_folder::View;
use collab_folder::{RepeatedViewIdentifier, ViewIcon, ViewIdentifier, ViewLayout};
use collab_folder::ViewLayout;
use tokio::sync::RwLock;
use flowy_error::FlowyError;
use flowy_folder_deps::cloud::gen_view_id;
use flowy_folder_deps::folder_builder::WorkspaceViewBuilder;
use lib_infra::future::FutureResult;
use lib_infra::util::timestamp;
@ -17,153 +17,6 @@ use crate::share::ImportType;
pub type ViewData = Bytes;
/// A builder for creating a view for a workspace.
/// The views created by this builder will be the first level views of the workspace.
pub struct WorkspaceViewBuilder {
pub uid: i64,
pub workspace_id: String,
pub views: Vec<ParentChildViews>,
}
impl WorkspaceViewBuilder {
pub fn new(workspace_id: String, uid: i64) -> Self {
Self {
uid,
workspace_id,
views: vec![],
}
}
pub async fn with_view_builder<F, O>(&mut self, view_builder: F)
where
F: Fn(ViewBuilder) -> O,
O: Future<Output = ParentChildViews>,
{
let builder = ViewBuilder::new(self.uid, self.workspace_id.clone());
self.views.push(view_builder(builder).await);
}
pub fn build(&mut self) -> Vec<ParentChildViews> {
std::mem::take(&mut self.views)
}
}
/// A builder for creating a view.
/// The default layout of the view is [ViewLayout::Document]
pub struct ViewBuilder {
uid: i64,
parent_view_id: String,
view_id: String,
name: String,
desc: String,
layout: ViewLayout,
child_views: Vec<ParentChildViews>,
is_favorite: bool,
icon: Option<ViewIcon>,
}
impl ViewBuilder {
pub fn new(uid: i64, parent_view_id: String) -> Self {
Self {
uid,
parent_view_id,
view_id: gen_view_id().to_string(),
name: Default::default(),
desc: Default::default(),
layout: ViewLayout::Document,
child_views: vec![],
is_favorite: false,
icon: None,
}
}
pub fn view_id(&self) -> &str {
&self.view_id
}
pub fn with_layout(mut self, layout: ViewLayout) -> Self {
self.layout = layout;
self
}
pub fn with_name(mut self, name: &str) -> Self {
self.name = name.to_string();
self
}
pub fn with_desc(mut self, desc: &str) -> Self {
self.desc = desc.to_string();
self
}
pub fn with_icon(mut self, icon: &str) -> Self {
self.icon = Some(ViewIcon {
ty: collab_folder::IconType::Emoji,
value: icon.to_string(),
});
self
}
/// Create a child view for the current view.
/// The view created by this builder will be the next level view of the current view.
pub async fn with_child_view_builder<F, O>(mut self, child_view_builder: F) -> Self
where
F: Fn(ViewBuilder) -> O,
O: Future<Output = ParentChildViews>,
{
let builder = ViewBuilder::new(self.uid, self.view_id.clone());
self.child_views.push(child_view_builder(builder).await);
self
}
pub fn build(self) -> ParentChildViews {
let view = View {
id: self.view_id,
parent_view_id: self.parent_view_id,
name: self.name,
desc: self.desc,
created_at: timestamp(),
is_favorite: self.is_favorite,
layout: self.layout,
icon: self.icon,
created_by: Some(self.uid),
last_edited_time: 0,
children: RepeatedViewIdentifier::new(
self
.child_views
.iter()
.map(|v| ViewIdentifier {
id: v.parent_view.id.clone(),
})
.collect(),
),
last_edited_by: Some(self.uid),
};
ParentChildViews {
parent_view: view,
child_views: self.child_views,
}
}
}
pub struct ParentChildViews {
pub parent_view: View,
pub child_views: Vec<ParentChildViews>,
}
pub struct FlattedViews;
impl FlattedViews {
pub fn flatten_views(views: Vec<ParentChildViews>) -> Vec<View> {
let mut result = vec![];
for view in views {
result.push(view.parent_view);
result.append(&mut Self::flatten_views(view.child_views));
}
result
}
}
/// The handler will be used to handler the folder operation for a specific
/// view layout. Each [ViewLayout] will have a handler. So when creating a new
/// view, the [ViewLayout] will be used to get the handler.
@ -278,140 +131,3 @@ pub(crate) fn create_view(uid: i64, params: CreateViewParams, layout: ViewLayout
last_edited_by: Some(uid),
}
}
#[cfg(test)]
mod tests {
use crate::view_operation::{FlattedViews, WorkspaceViewBuilder};
#[tokio::test]
async fn create_first_level_views_test() {
let workspace_id = "w1".to_string();
let mut builder = WorkspaceViewBuilder::new(workspace_id, 1);
builder
.with_view_builder(|view_builder| async { view_builder.with_name("1").build() })
.await;
builder
.with_view_builder(|view_builder| async { view_builder.with_name("2").build() })
.await;
builder
.with_view_builder(|view_builder| async { view_builder.with_name("3").build() })
.await;
let workspace_views = builder.build();
assert_eq!(workspace_views.len(), 3);
let views = FlattedViews::flatten_views(workspace_views);
assert_eq!(views.len(), 3);
}
#[tokio::test]
async fn create_view_with_child_views_test() {
let workspace_id = "w1".to_string();
let mut builder = WorkspaceViewBuilder::new(workspace_id, 1);
builder
.with_view_builder(|view_builder| async {
view_builder
.with_name("1")
.with_child_view_builder(|child_view_builder| async {
child_view_builder.with_name("1_1").build()
})
.await
.with_child_view_builder(|child_view_builder| async {
child_view_builder.with_name("1_2").build()
})
.await
.build()
})
.await;
builder
.with_view_builder(|view_builder| async {
view_builder
.with_name("2")
.with_child_view_builder(|child_view_builder| async {
child_view_builder.with_name("2_1").build()
})
.await
.build()
})
.await;
let workspace_views = builder.build();
assert_eq!(workspace_views.len(), 2);
assert_eq!(workspace_views[0].parent_view.name, "1");
assert_eq!(workspace_views[0].child_views.len(), 2);
assert_eq!(workspace_views[0].child_views[0].parent_view.name, "1_1");
assert_eq!(workspace_views[0].child_views[1].parent_view.name, "1_2");
assert_eq!(workspace_views[1].child_views.len(), 1);
assert_eq!(workspace_views[1].child_views[0].parent_view.name, "2_1");
let views = FlattedViews::flatten_views(workspace_views);
assert_eq!(views.len(), 5);
}
#[tokio::test]
async fn create_three_level_view_test() {
let workspace_id = "w1".to_string();
let mut builder = WorkspaceViewBuilder::new(workspace_id, 1);
builder
.with_view_builder(|view_builder| async {
view_builder
.with_name("1")
.with_child_view_builder(|child_view_builder| async {
child_view_builder
.with_name("1_1")
.with_child_view_builder(|b| async { b.with_name("1_1_1").build() })
.await
.with_child_view_builder(|b| async { b.with_name("1_1_2").build() })
.await
.build()
})
.await
.with_child_view_builder(|child_view_builder| async {
child_view_builder
.with_name("1_2")
.with_child_view_builder(|b| async { b.with_name("1_2_1").build() })
.await
.with_child_view_builder(|b| async { b.with_name("1_2_2").build() })
.await
.build()
})
.await
.build()
})
.await;
let workspace_views = builder.build();
assert_eq!(workspace_views.len(), 1);
assert_eq!(workspace_views[0].parent_view.name, "1");
assert_eq!(workspace_views[0].child_views.len(), 2);
assert_eq!(workspace_views[0].child_views[0].parent_view.name, "1_1");
assert_eq!(workspace_views[0].child_views[1].parent_view.name, "1_2");
assert_eq!(
workspace_views[0].child_views[0].child_views[0]
.parent_view
.name,
"1_1_1"
);
assert_eq!(
workspace_views[0].child_views[0].child_views[1]
.parent_view
.name,
"1_1_2"
);
assert_eq!(
workspace_views[0].child_views[1].child_views[0]
.parent_view
.name,
"1_2_1"
);
assert_eq!(
workspace_views[0].child_views[1].child_views[1]
.parent_view
.name,
"1_2_2"
);
let views = FlattedViews::flatten_views(workspace_views);
assert_eq!(views.len(), 7);
}
}

View File

@ -45,6 +45,7 @@ tokio-util = "0.7"
tokio-stream = { workspace = true, features = ["sync"] }
client-api = { version = "0.1.0", features = ["collab-sync", "test_util"] }
lib-dispatch = { workspace = true }
yrs = "0.17.1"
[dev-dependencies]
uuid.workspace = true

View File

@ -2,13 +2,12 @@ use anyhow::Error;
use client_api::entity::QueryCollabResult::{Failed, Success};
use client_api::entity::{QueryCollab, QueryCollabParams};
use client_api::error::ErrorCode::RecordNotFound;
use collab::core::collab::CollabDocState;
use collab::core::collab_plugin::EncodedCollabV1;
use collab_entity::CollabType;
use tracing::error;
use flowy_database_deps::cloud::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use flowy_database_deps::cloud::{CollabDocStateByOid, DatabaseCloudService, DatabaseSnapshot};
use lib_infra::future::FutureResult;
use crate::af_cloud::AFServer;
@ -19,12 +18,12 @@ impl<T> DatabaseCloudService for AFCloudDatabaseCloudServiceImpl<T>
where
T: AFServer,
{
fn get_collab_update(
fn get_collab_doc_state_db(
&self,
object_id: &str,
collab_type: CollabType,
workspace_id: &str,
) -> FutureResult<CollabObjectUpdate, Error> {
) -> FutureResult<CollabDocState, Error> {
let workspace_id = workspace_id.to_string();
let object_id = object_id.to_string();
let try_get_client = self.0.try_get_client();
@ -37,7 +36,7 @@ where
},
};
match try_get_client?.get_collab(params).await {
Ok(data) => Ok(vec![data.doc_state.to_vec()]),
Ok(data) => Ok(data.doc_state.to_vec()),
Err(err) => {
if err.code == RecordNotFound {
Ok(vec![])
@ -49,12 +48,12 @@ where
})
}
fn batch_get_collab_updates(
fn batch_get_collab_doc_state_db(
&self,
object_ids: Vec<String>,
object_ty: CollabType,
workspace_id: &str,
) -> FutureResult<CollabObjectUpdateByOid, Error> {
) -> FutureResult<CollabDocStateByOid, Error> {
let workspace_id = workspace_id.to_string();
let try_get_client = self.0.try_get_client();
FutureResult::new(async move {
@ -74,7 +73,7 @@ where
.flat_map(|(object_id, result)| match result {
Success { encode_collab_v1 } => {
match EncodedCollabV1::decode_from_bytes(&encode_collab_v1) {
Ok(encode) => Some((object_id, vec![encode.doc_state.to_vec()])),
Ok(encode) => Some((object_id, encode.doc_state.to_vec())),
Err(err) => {
error!("Failed to decode collab: {}", err);
None
@ -86,7 +85,7 @@ where
None
},
})
.collect::<CollabObjectUpdateByOid>(),
.collect::<CollabDocStateByOid>(),
)
})
}

View File

@ -1,5 +1,6 @@
use anyhow::Error;
use client_api::entity::{QueryCollab, QueryCollabParams};
use collab::core::collab::CollabDocState;
use collab::core::origin::CollabOrigin;
use collab_document::document::Document;
use collab_entity::CollabType;
@ -20,7 +21,7 @@ where
&self,
document_id: &str,
workspace_id: &str,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
) -> FutureResult<CollabDocState, FlowyError> {
let workspace_id = workspace_id.to_string();
let try_get_client = self.0.try_get_client();
let document_id = document_id.to_string();
@ -32,13 +33,13 @@ where
collab_type: CollabType::Document,
},
};
let data = try_get_client?
let doc_state = try_get_client?
.get_collab(params)
.await
.map_err(FlowyError::from)?
.doc_state
.to_vec();
Ok(vec![data])
Ok(doc_state)
})
}
@ -74,7 +75,7 @@ where
.doc_state
.to_vec();
let document =
Document::from_updates(CollabOrigin::Empty, vec![doc_state], &document_id, vec![])?;
Document::from_doc_state(CollabOrigin::Empty, doc_state, &document_id, vec![])?;
Ok(document.get_document_data().ok())
})
}

View File

@ -1,11 +1,13 @@
use anyhow::{anyhow, Error};
use client_api::entity::{QueryCollab, QueryCollabParams};
use client_api::entity::{CollabParams, QueryCollab, QueryCollabParams};
use collab::core::collab::CollabDocState;
use collab::core::origin::CollabOrigin;
use collab_entity::CollabType;
use flowy_error::FlowyError;
use flowy_folder_deps::cloud::{
Folder, FolderCloudService, FolderData, FolderSnapshot, Workspace, WorkspaceRecord,
Folder, FolderCloudService, FolderCollabParams, FolderData, FolderSnapshot, Workspace,
WorkspaceRecord,
};
use lib_infra::future::FutureResult;
@ -72,13 +74,8 @@ where
.map_err(FlowyError::from)?
.doc_state
.to_vec();
let folder = Folder::from_collab_raw_data(
uid,
CollabOrigin::Empty,
vec![doc_state],
&workspace_id,
vec![],
)?;
let folder =
Folder::from_collab_raw_data(uid, CollabOrigin::Empty, doc_state, &workspace_id, vec![])?;
Ok(folder.get_folder_data())
})
}
@ -91,19 +88,22 @@ where
FutureResult::new(async move { Ok(vec![]) })
}
fn get_folder_doc_state(
fn get_collab_doc_state_f(
&self,
workspace_id: &str,
_uid: i64,
) -> FutureResult<Vec<Vec<u8>>, Error> {
collab_type: CollabType,
object_id: &str,
) -> FutureResult<CollabDocState, Error> {
let object_id = object_id.to_string();
let workspace_id = workspace_id.to_string();
let try_get_client = self.0.try_get_client();
FutureResult::new(async move {
let params = QueryCollabParams {
workspace_id: workspace_id.clone(),
workspace_id,
inner: QueryCollab {
object_id: workspace_id,
collab_type: CollabType::Folder,
object_id,
collab_type,
},
};
let doc_state = try_get_client?
@ -112,7 +112,32 @@ where
.map_err(FlowyError::from)?
.doc_state
.to_vec();
Ok(vec![doc_state])
Ok(doc_state)
})
}
fn batch_create_collab_object(
&self,
workspace_id: &str,
objects: Vec<FolderCollabParams>,
) -> FutureResult<(), Error> {
let workspace_id = workspace_id.to_string();
let try_get_client = self.0.try_get_client();
FutureResult::new(async move {
let params = objects
.into_iter()
.map(|object| CollabParams {
object_id: object.object_id,
encoded_collab_v1: object.encoded_collab_v1,
collab_type: object.collab_type,
override_if_exist: object.override_if_exist,
})
.collect::<Vec<_>>();
try_get_client?
.batch_create_collab(&workspace_id, params)
.await
.map_err(FlowyError::from)?;
Ok(())
})
}

View File

@ -4,6 +4,7 @@ use std::sync::Arc;
use anyhow::{anyhow, Error};
use client_api::entity::workspace_dto::{CreateWorkspaceMember, WorkspaceMemberChangeset};
use client_api::entity::{AFRole, AFWorkspace, AuthProvider, CollabParams, CreateCollabParams};
use collab::core::collab::CollabDocState;
use collab_entity::CollabObject;
use parking_lot::RwLock;
@ -215,7 +216,7 @@ where
})
}
fn get_user_awareness_updates(&self, _uid: i64) -> FutureResult<Vec<Vec<u8>>, Error> {
fn get_user_awareness_doc_state(&self, _uid: i64) -> FutureResult<CollabDocState, Error> {
FutureResult::new(async { Ok(vec![]) })
}
@ -292,7 +293,7 @@ fn to_user_workspace(af_workspace: AFWorkspace) -> UserWorkspace {
id: af_workspace.workspace_id.to_string(),
name: af_workspace.workspace_name,
created_at: af_workspace.created_at,
database_storage_id: af_workspace.database_storage_id.to_string(),
database_view_tracker_id: af_workspace.database_storage_id.to_string(),
}
}

View File

@ -1,30 +1,29 @@
use anyhow::Error;
use collab::core::collab::CollabDocState;
use collab_entity::CollabType;
use flowy_database_deps::cloud::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use flowy_database_deps::cloud::{CollabDocStateByOid, DatabaseCloudService, DatabaseSnapshot};
use lib_infra::future::FutureResult;
pub(crate) struct LocalServerDatabaseCloudServiceImpl();
impl DatabaseCloudService for LocalServerDatabaseCloudServiceImpl {
fn get_collab_update(
fn get_collab_doc_state_db(
&self,
_object_id: &str,
_collab_type: CollabType,
_workspace_id: &str,
) -> FutureResult<CollabObjectUpdate, Error> {
) -> FutureResult<CollabDocState, Error> {
FutureResult::new(async move { Ok(vec![]) })
}
fn batch_get_collab_updates(
fn batch_get_collab_doc_state_db(
&self,
_object_ids: Vec<String>,
_object_ty: CollabType,
_workspace_id: &str,
) -> FutureResult<CollabObjectUpdateByOid, Error> {
FutureResult::new(async move { Ok(CollabObjectUpdateByOid::default()) })
) -> FutureResult<CollabDocStateByOid, Error> {
FutureResult::new(async move { Ok(CollabDocStateByOid::default()) })
}
fn get_collab_snapshots(

View File

@ -1,4 +1,5 @@
use anyhow::Error;
use collab::core::collab::CollabDocState;
use flowy_document_deps::cloud::*;
use flowy_error::FlowyError;
@ -11,7 +12,7 @@ impl DocumentCloudService for LocalServerDocumentCloudServiceImpl {
&self,
_document_id: &str,
_workspace_id: &str,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
) -> FutureResult<CollabDocState, FlowyError> {
FutureResult::new(async move { Ok(vec![]) })
}

View File

@ -1,9 +1,12 @@
use std::sync::Arc;
use anyhow::{anyhow, Error};
use collab::core::collab::CollabDocState;
use collab_entity::CollabType;
use flowy_folder_deps::cloud::{
gen_workspace_id, FolderCloudService, FolderData, FolderSnapshot, Workspace, WorkspaceRecord,
gen_workspace_id, FolderCloudService, FolderCollabParams, FolderData, FolderSnapshot, Workspace,
WorkspaceRecord,
};
use lib_infra::future::FutureResult;
@ -50,18 +53,28 @@ impl FolderCloudService for LocalServerFolderCloudServiceImpl {
FutureResult::new(async move { Ok(vec![]) })
}
fn get_folder_doc_state(
fn get_collab_doc_state_f(
&self,
_workspace_id: &str,
_uid: i64,
) -> FutureResult<Vec<Vec<u8>>, Error> {
_collab_type: CollabType,
_object_id: &str,
) -> FutureResult<CollabDocState, Error> {
FutureResult::new(async {
Err(anyhow!(
"Local server doesn't support get folder doc state from remote"
"Local server doesn't support get collab doc state from remote"
))
})
}
fn batch_create_collab_object(
&self,
_workspace_id: &str,
_objects: Vec<FolderCollabParams>,
) -> FutureResult<(), Error> {
FutureResult::new(async { Err(anyhow!("Local server doesn't support create collab")) })
}
fn service_name(&self) -> String {
"Local".to_string()
}

View File

@ -1,6 +1,7 @@
use std::sync::Arc;
use anyhow::Error;
use collab::core::collab::CollabDocState;
use collab_entity::CollabObject;
use lazy_static::lazy_static;
use parking_lot::Mutex;
@ -132,7 +133,7 @@ impl UserCloudService for LocalServerUserAuthServiceImpl {
FutureResult::new(async { Ok(vec![]) })
}
fn get_user_awareness_updates(&self, _uid: i64) -> FutureResult<Vec<Vec<u8>>, Error> {
fn get_user_awareness_doc_state(&self, _uid: i64) -> FutureResult<CollabDocState, Error> {
FutureResult::new(async { Ok(vec![]) })
}
@ -155,6 +156,6 @@ fn make_user_workspace() -> UserWorkspace {
id: uuid::Uuid::new_v4().to_string(),
name: "My Workspace".to_string(),
created_at: Default::default(),
database_storage_id: uuid::Uuid::new_v4().to_string(),
database_view_tracker_id: uuid::Uuid::new_v4().to_string(),
}
}

View File

@ -4,6 +4,7 @@ use std::sync::{Arc, Weak};
use anyhow::Error;
use chrono::{DateTime, Utc};
use client_api::collab_sync::collab_msg::MsgId;
use collab::core::collab::CollabDocState;
use collab::preclude::merge_updates_v1;
use collab_entity::CollabObject;
use collab_plugins::cloud_storage::{
@ -61,15 +62,15 @@ where
true
}
async fn get_all_updates(&self, object: &CollabObject) -> Result<Vec<Vec<u8>>, Error> {
async fn get_doc_state(&self, object: &CollabObject) -> Result<CollabDocState, Error> {
let postgrest = self.server.try_get_weak_postgrest()?;
let action = FetchObjectUpdateAction::new(
object.object_id.clone(),
object.collab_type.clone(),
postgrest,
);
let updates = action.run().await?;
Ok(updates)
let doc_state = action.run().await?;
Ok(doc_state)
}
async fn get_snapshots(&self, object_id: &str, limit: usize) -> Vec<RemoteCollabSnapshot> {

View File

@ -1,10 +1,9 @@
use anyhow::Error;
use collab::core::collab::CollabDocState;
use collab_entity::CollabType;
use tokio::sync::oneshot::channel;
use flowy_database_deps::cloud::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use flowy_database_deps::cloud::{CollabDocStateByOid, DatabaseCloudService, DatabaseSnapshot};
use lib_dispatch::prelude::af_spawn;
use lib_infra::future::FutureResult;
@ -27,12 +26,12 @@ impl<T> DatabaseCloudService for SupabaseDatabaseServiceImpl<T>
where
T: SupabaseServerService,
{
fn get_collab_update(
fn get_collab_doc_state_db(
&self,
object_id: &str,
collab_type: CollabType,
_workspace_id: &str,
) -> FutureResult<CollabObjectUpdate, Error> {
) -> FutureResult<CollabDocState, Error> {
let try_get_postgrest = self.server.try_get_weak_postgrest();
let object_id = object_id.to_string();
let (tx, rx) = channel();
@ -51,12 +50,12 @@ where
FutureResult::new(async { rx.await? })
}
fn batch_get_collab_updates(
fn batch_get_collab_doc_state_db(
&self,
object_ids: Vec<String>,
object_ty: CollabType,
_workspace_id: &str,
) -> FutureResult<CollabObjectUpdateByOid, Error> {
) -> FutureResult<CollabDocStateByOid, Error> {
let try_get_postgrest = self.server.try_get_weak_postgrest();
let (tx, rx) = channel();
af_spawn(async move {

View File

@ -1,4 +1,5 @@
use anyhow::Error;
use collab::core::collab::CollabDocState;
use collab::core::origin::CollabOrigin;
use collab_document::blocks::DocumentData;
use collab_document::document::Document;
@ -32,7 +33,7 @@ where
&self,
document_id: &str,
workspace_id: &str,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
) -> FutureResult<CollabDocState, FlowyError> {
let try_get_postgrest = self.server.try_get_weak_postgrest();
let document_id = document_id.to_string();
let (tx, rx) = channel();
@ -41,11 +42,11 @@ where
async move {
let postgrest = try_get_postgrest?;
let action = FetchObjectUpdateAction::new(document_id, CollabType::Document, postgrest);
let updates = action.run_with_fix_interval(5, 10).await?;
if updates.is_empty() {
let collab_doc_state = action.run_with_fix_interval(5, 10).await?;
if collab_doc_state.is_empty() {
return Err(FlowyError::collab_not_sync());
}
Ok(updates)
Ok(collab_doc_state)
}
.await,
)
@ -92,9 +93,9 @@ where
let postgrest = try_get_postgrest?;
let action =
FetchObjectUpdateAction::new(document_id.clone(), CollabType::Document, postgrest);
let updates = action.run_with_fix_interval(5, 10).await?;
let doc_state = action.run_with_fix_interval(5, 10).await?;
let document =
Document::from_updates(CollabOrigin::Empty, updates, &document_id, vec![])?;
Document::from_doc_state(CollabOrigin::Empty, doc_state, &document_id, vec![])?;
Ok(document.get_document_data().ok())
}
.await,

View File

@ -1,15 +1,17 @@
use std::str::FromStr;
use anyhow::Error;
use anyhow::{anyhow, Error};
use chrono::{DateTime, Utc};
use collab::core::collab::CollabDocState;
use collab::core::origin::CollabOrigin;
use collab_entity::CollabType;
use serde_json::Value;
use tokio::sync::oneshot::channel;
use yrs::merge_updates_v1;
use flowy_folder_deps::cloud::{
gen_workspace_id, Folder, FolderCloudService, FolderData, FolderSnapshot, Workspace,
WorkspaceRecord,
gen_workspace_id, Folder, FolderCloudService, FolderCollabParams, FolderData, FolderSnapshot,
Workspace, WorkspaceRecord,
};
use lib_dispatch::prelude::af_spawn;
use lib_infra::future::FutureResult;
@ -89,18 +91,20 @@ where
let workspace_id = workspace_id.to_string();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let updates = get_updates_from_server(&workspace_id, &CollabType::Folder, &postgrest).await?;
let updates = updates
.into_iter()
.map(|item| item.value)
.collect::<Vec<_>>();
if updates.is_empty() {
let items = get_updates_from_server(&workspace_id, &CollabType::Folder, &postgrest).await?;
if items.is_empty() {
return Ok(None);
}
let updates = items
.iter()
.map(|update| update.value.as_ref())
.collect::<Vec<&[u8]>>();
let doc_state = merge_updates_v1(&updates)
.map_err(|err| anyhow::anyhow!("merge updates failed: {:?}", err))?;
let folder =
Folder::from_collab_raw_data(uid, CollabOrigin::Empty, updates, &workspace_id, vec![])?;
Folder::from_collab_raw_data(uid, CollabOrigin::Empty, doc_state, &workspace_id, vec![])?;
Ok(folder.get_folder_data())
})
}
@ -128,19 +132,21 @@ where
})
}
fn get_folder_doc_state(
fn get_collab_doc_state_f(
&self,
workspace_id: &str,
_workspace_id: &str,
_uid: i64,
) -> FutureResult<Vec<Vec<u8>>, Error> {
collab_type: CollabType,
object_id: &str,
) -> FutureResult<CollabDocState, Error> {
let try_get_postgrest = self.server.try_get_weak_postgrest();
let workspace_id = workspace_id.to_string();
let object_id = object_id.to_string();
let (tx, rx) = channel();
af_spawn(async move {
tx.send(
async move {
let postgrest = try_get_postgrest?;
let action = FetchObjectUpdateAction::new(workspace_id, CollabType::Folder, postgrest);
let action = FetchObjectUpdateAction::new(object_id, collab_type, postgrest);
action.run_with_fix_interval(5, 10).await
}
.await,
@ -149,6 +155,18 @@ where
FutureResult::new(async { rx.await? })
}
fn batch_create_collab_object(
&self,
_workspace_id: &str,
_objects: Vec<FolderCollabParams>,
) -> FutureResult<(), Error> {
FutureResult::new(async {
Err(anyhow!(
"supabase server doesn't support batch create collab"
))
})
}
fn service_name(&self) -> String {
"Supabase".to_string()
}

View File

@ -7,13 +7,15 @@ use std::time::Duration;
use anyhow::Error;
use chrono::{DateTime, Utc};
use collab::core::collab::CollabDocState;
use collab_entity::{CollabObject, CollabType};
use collab_plugins::cloud_storage::RemoteCollabSnapshot;
use serde_json::Value;
use tokio_retry::strategy::FixedInterval;
use tokio_retry::{Action, Condition, RetryIf};
use yrs::merge_updates_v1;
use flowy_database_deps::cloud::{CollabObjectUpdate, CollabObjectUpdateByOid};
use flowy_database_deps::cloud::CollabDocStateByOid;
use lib_infra::util::md5;
use crate::response::ExtendedResponse;
@ -58,7 +60,7 @@ impl FetchObjectUpdateAction {
impl Action for FetchObjectUpdateAction {
type Future = Pin<Box<dyn Future<Output = Result<Self::Item, Self::Error>> + Send>>;
type Item = CollabObjectUpdate;
type Item = CollabDocState;
type Error = anyhow::Error;
fn run(&mut self) -> Self::Future {
@ -70,7 +72,16 @@ impl Action for FetchObjectUpdateAction {
None => Ok(vec![]),
Some(postgrest) => {
match get_updates_from_server(&object_id, &object_ty, &postgrest).await {
Ok(items) => Ok(items.into_iter().map(|item| item.value).collect()),
Ok(items) => {
let updates = items
.iter()
.map(|update| update.value.as_ref())
.collect::<Vec<&[u8]>>();
let doc_state = merge_updates_v1(&updates)
.map_err(|err| anyhow::anyhow!("merge updates failed: {:?}", err))?;
Ok(doc_state)
},
Err(err) => {
tracing::error!("Get {} updates failed with error: {:?}", object_id, err);
Err(err)
@ -110,7 +121,7 @@ impl BatchFetchObjectUpdateAction {
impl Action for BatchFetchObjectUpdateAction {
type Future = Pin<Box<dyn Future<Output = Result<Self::Item, Self::Error>> + Send>>;
type Item = CollabObjectUpdateByOid;
type Item = CollabDocStateByOid;
type Error = anyhow::Error;
fn run(&mut self) -> Self::Future {
@ -119,7 +130,7 @@ impl Action for BatchFetchObjectUpdateAction {
let object_ty = self.object_ty.clone();
Box::pin(async move {
match weak_postgrest.upgrade() {
None => Ok(CollabObjectUpdateByOid::default()),
None => Ok(CollabDocStateByOid::default()),
Some(server) => {
match batch_get_updates_from_server(object_ids.clone(), &object_ty, server).await {
Ok(updates_by_oid) => Ok(updates_by_oid),
@ -251,7 +262,7 @@ pub async fn batch_get_updates_from_server(
object_ids: Vec<String>,
object_ty: &CollabType,
postgrest: Arc<PostgresWrapper>,
) -> Result<CollabObjectUpdateByOid, Error> {
) -> Result<CollabDocStateByOid, Error> {
let json = postgrest
.from(table_name(object_ty))
.select("oid, key, value, encrypt, md5")
@ -262,19 +273,22 @@ pub async fn batch_get_updates_from_server(
.get_json()
.await?;
let mut updates_by_oid = CollabObjectUpdateByOid::new();
let mut updates_by_oid = CollabDocStateByOid::new();
if let Some(records) = json.as_array() {
for record in records {
tracing::debug!("get updates from server: {:?}", record);
if let Some(oid) = record.get("oid").and_then(|value| value.as_str()) {
match parser_updates_form_json(record.clone(), &postgrest.secret()) {
Ok(updates) => {
let object_updates = updates_by_oid
.entry(oid.to_string())
.or_insert_with(Vec::new);
for update in updates {
object_updates.push(update.value);
}
Ok(items) => {
let updates = items
.iter()
.map(|update| update.value.as_ref())
.collect::<Vec<&[u8]>>();
let doc_state = merge_updates_v1(&updates)
.map_err(|err| anyhow::anyhow!("merge updates failed: {:?}", err))?;
updates_by_oid.insert(oid.to_string(), doc_state);
},
Err(e) => {
tracing::error!("parser_updates_form_json error: {:?}", e);

View File

@ -6,7 +6,7 @@ use std::sync::{Arc, Weak};
use std::time::Duration;
use anyhow::Error;
use collab::core::collab::MutexCollab;
use collab::core::collab::{CollabDocState, MutexCollab};
use collab::core::origin::CollabOrigin;
use collab_entity::{CollabObject, CollabType};
use parking_lot::RwLock;
@ -232,7 +232,7 @@ where
Ok(user_workspaces)
})
}
fn get_user_awareness_updates(&self, uid: i64) -> FutureResult<Vec<Vec<u8>>, Error> {
fn get_user_awareness_doc_state(&self, uid: i64) -> FutureResult<CollabDocState, Error> {
let try_get_postgrest = self.server.try_get_weak_postgrest();
let awareness_id = uid.to_string();
let (tx, rx) = channel();

View File

@ -45,7 +45,7 @@ async fn supabase_create_database_test() {
}
let updates_by_oid = database_service
.batch_get_collab_updates(row_ids, CollabType::DatabaseRow, "fake_workspace_id")
.batch_get_collab_doc_state_db(row_ids, CollabType::DatabaseRow, "fake_workspace_id")
.await
.unwrap();

View File

@ -69,7 +69,12 @@ async fn supabase_get_folder_test() {
// let updates = collab_service.get_all_updates(&collab_object).await.unwrap();
let updates = folder_service
.get_folder_doc_state(&user.latest_workspace.id, user.user_id)
.get_collab_doc_state_f(
&user.latest_workspace.id,
user.user_id,
CollabType::Folder,
&user.latest_workspace.id,
)
.await
.unwrap();
assert_eq!(updates.len(), 2);
@ -80,20 +85,23 @@ async fn supabase_get_folder_test() {
.await
.unwrap();
}
let updates: Vec<Vec<u8>> = folder_service
.get_folder_doc_state(&user.latest_workspace.id, user.user_id)
let updates = folder_service
.get_collab_doc_state_f(
&user.latest_workspace.id,
user.user_id,
CollabType::Folder,
&user.latest_workspace.id,
)
.await
.unwrap();
assert_eq!(updates.len(), 1);
// Other the init sync, try to get the updates from the server.
let remote_update = updates.first().unwrap().clone();
let expected_update = doc
.transact_mut()
.encode_state_as_update_v1(&StateVector::default());
// check the update is the same as local document update.
assert_eq!(remote_update, expected_update);
assert_eq!(updates, expected_update);
}
/// This async test function checks the behavior of updates duplication in Supabase.
@ -148,13 +156,15 @@ async fn supabase_duplicate_updates_test() {
.send_init_sync(&collab_object, 3, vec![])
.await
.unwrap();
let first_init_sync_update: Vec<u8> = folder_service
.get_folder_doc_state(&user.latest_workspace.id, user.user_id)
let first_init_sync_update = folder_service
.get_collab_doc_state_f(
&user.latest_workspace.id,
user.user_id,
CollabType::Folder,
&user.latest_workspace.id,
)
.await
.unwrap()
.first()
.unwrap()
.clone();
.unwrap();
// simulate the duplicated updates.
let merged_update = merge_updates_v1(
@ -168,13 +178,16 @@ async fn supabase_duplicate_updates_test() {
.send_init_sync(&collab_object, 4, merged_update)
.await
.unwrap();
let second_init_sync_update: Vec<u8> = folder_service
.get_folder_doc_state(&user.latest_workspace.id, user.user_id)
let second_init_sync_update = folder_service
.get_collab_doc_state_f(
&user.latest_workspace.id,
user.user_id,
CollabType::Folder,
&user.latest_workspace.id,
)
.await
.unwrap()
.first()
.unwrap()
.clone();
.unwrap();
let doc_2 = Doc::new();
assert_eq!(first_init_sync_update.len(), second_init_sync_update.len());
let map = { doc_2.get_or_insert_map("map") };
@ -257,16 +270,19 @@ async fn supabase_diff_state_vector_test() {
// restore the doc with given updates.
let old_version_doc = Doc::new();
let map = { old_version_doc.get_or_insert_map("map") };
let updates: Vec<Vec<u8>> = folder_service
.get_folder_doc_state(&user.latest_workspace.id, user.user_id)
let doc_state = folder_service
.get_collab_doc_state_f(
&user.latest_workspace.id,
user.user_id,
CollabType::Folder,
&user.latest_workspace.id,
)
.await
.unwrap();
{
let mut txn = old_version_doc.transact_mut();
for update in updates {
let update = Update::decode_v1(&update).unwrap();
txn.apply_update(update);
}
let update = Update::decode_v1(&doc_state).unwrap();
txn.apply_update(update);
}
let txn = old_version_doc.transact();
let json = map.to_json(&txn);

View File

@ -21,7 +21,7 @@ async fn supabase_user_sign_up_test() {
let user: AuthResponse = user_service.sign_up(BoxAny::new(params)).await.unwrap();
assert!(!user.latest_workspace.id.is_empty());
assert!(!user.user_workspaces.is_empty());
assert!(!user.latest_workspace.database_storage_id.is_empty());
assert!(!user.latest_workspace.database_view_tracker_id.is_empty());
}
#[tokio::test]
@ -38,7 +38,7 @@ async fn supabase_user_sign_up_with_existing_uuid_test() {
.unwrap();
let user: AuthResponse = user_service.sign_up(BoxAny::new(params)).await.unwrap();
assert!(!user.latest_workspace.id.is_empty());
assert!(!user.latest_workspace.database_storage_id.is_empty());
assert!(!user.latest_workspace.database_view_tracker_id.is_empty());
assert!(!user.user_workspaces.is_empty());
}

View File

@ -120,8 +120,7 @@ pub async fn print_encryption_folder_snapshot(
.pop()
.unwrap();
let collab = Arc::new(
MutexCollab::new_with_raw_data(CollabOrigin::Empty, folder_id, vec![snapshot.blob], vec![])
.unwrap(),
MutexCollab::new_with_raw_data(CollabOrigin::Empty, folder_id, snapshot.blob, vec![]).unwrap(),
);
let folder_data = Folder::open(uid, collab, None)
.unwrap()

View File

@ -10,9 +10,12 @@ lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-error = { workspace = true }
uuid.workspace = true
serde.workspace = true
collab = { version = "0.1.0" }
collab-entity = { version = "0.1.0" }
serde_json.workspace = true
serde_repr.workspace = true
chrono = { workspace = true, default-features = false, features = ["clock", "serde"] }
anyhow.workspace = true
tokio = { workspace = true, features = ["sync"] }
tokio-stream = "0.1.14"
flowy-folder-deps.workspace = true

View File

@ -1,11 +1,14 @@
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use std::sync::Arc;
use anyhow::Error;
use collab::core::collab::CollabDocState;
use collab_entity::CollabObject;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio_stream::wrappers::WatchStream;
use uuid::Uuid;
use flowy_error::{ErrorCode, FlowyError};
@ -13,8 +16,8 @@ use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
use crate::entities::{
AuthResponse, Role, UpdateUserProfileParams, UserCredentials, UserProfile, UserWorkspace,
WorkspaceMember,
AuthResponse, Authenticator, Role, UpdateUserProfileParams, UserCredentials, UserProfile,
UserTokenState, UserWorkspace, WorkspaceMember,
};
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -52,6 +55,73 @@ impl Display for UserCloudConfig {
}
}
/// `UserCloudServiceProvider` defines a set of methods for managing user cloud services,
/// including token management, synchronization settings, network reachability, and authentication.
///
/// This trait is intended for implementation by providers that offer cloud-based services for users.
/// It includes methods for handling authentication tokens, enabling/disabling synchronization,
/// setting network reachability, managing encryption secrets, and accessing user-specific cloud services.
pub trait UserCloudServiceProvider: Send + Sync + 'static {
/// Sets the authentication token for the cloud service.
///
/// # Arguments
/// * `token`: A string slice representing the authentication token.
///
/// # Returns
/// A `Result` which is `Ok` if the token is successfully set, or a `FlowyError` otherwise.
fn set_token(&self, token: &str) -> Result<(), FlowyError>;
/// Subscribes to the state of the authentication token.
///
/// # Returns
/// An `Option` containing a `WatchStream<UserTokenState>` if available, or `None` otherwise.
/// The stream allows the caller to watch for changes in the token state.
fn subscribe_token_state(&self) -> Option<WatchStream<UserTokenState>>;
/// Sets the synchronization state for a user.
///
/// # Arguments
/// * `uid`: An i64 representing the user ID.
/// * `enable_sync`: A boolean indicating whether synchronization should be enabled or disabled.
fn set_enable_sync(&self, uid: i64, enable_sync: bool);
/// Sets the network reachability status.
///
/// # Arguments
/// * `reachable`: A boolean indicating whether the network is reachable.
fn set_network_reachable(&self, reachable: bool);
/// Sets the encryption secret for secure communication.
///
/// # Arguments
/// * `secret`: A `String` representing the encryption secret.
fn set_encrypt_secret(&self, secret: String);
/// Sets the authenticator used for authentication processes.
///
/// # Arguments
/// * `authenticator`: An `Authenticator` object.
fn set_authenticator(&self, authenticator: Authenticator);
/// Retrieves the current authenticator.
///
/// # Returns
/// The current `Authenticator` object.
fn get_authenticator(&self) -> Authenticator;
/// Retrieves the user-specific cloud service.
///
/// # Returns
/// A `Result` containing an `Arc<dyn UserCloudService>` if successful, or a `FlowyError` otherwise.
fn get_user_service(&self) -> Result<Arc<dyn UserCloudService>, FlowyError>;
/// Retrieves the service URL.
///
/// # Returns
/// A `String` representing the service URL.
fn service_url(&self) -> String;
}
/// Provide the generic interface for the user cloud service
/// The user cloud service is responsible for the user authentication and user profile management
#[allow(unused_variables)]
@ -127,7 +197,7 @@ pub trait UserCloudService: Send + Sync + 'static {
FutureResult::new(async { Ok(vec![]) })
}
fn get_user_awareness_updates(&self, uid: i64) -> FutureResult<Vec<Vec<u8>>, Error>;
fn get_user_awareness_doc_state(&self, uid: i64) -> FutureResult<CollabDocState, Error>;
fn receive_realtime_event(&self, _json: Value) {}
@ -163,10 +233,3 @@ pub fn uuid_from_map(map: &HashMap<String, String>) -> Result<Uuid, Error> {
let uuid = Uuid::from_str(uuid)?;
Ok(uuid)
}
pub type UserTokenStateReceiver = tokio::sync::broadcast::Receiver<UserTokenState>;
#[derive(Debug, Clone)]
pub enum UserTokenState {
Refresh,
Invalid,
}

View File

@ -138,7 +138,8 @@ pub struct UserWorkspace {
pub name: String,
pub created_at: DateTime<Utc>,
/// The database storage id is used indexing all the database views in current workspace.
pub database_storage_id: String,
#[serde(rename = "database_storage_id")]
pub database_view_tracker_id: String,
}
impl UserWorkspace {
@ -147,7 +148,7 @@ impl UserWorkspace {
id: workspace_id.to_string(),
name: "".to_string(),
created_at: Utc::now(),
database_storage_id: Uuid::new_v4().to_string(),
database_view_tracker_id: Uuid::new_v4().to_string(),
}
}
}

View File

@ -1,4 +1,3 @@
pub mod cloud;
pub mod entities;
pub const DEFAULT_USER_NAME: fn() -> String = || "Me".to_string();

View File

@ -10,7 +10,7 @@ use collab_database::database::{
is_database_collab, mut_database_views_with_collab, reset_inline_view_id,
};
use collab_database::rows::{database_row_document_id_from_row_id, mut_row_with_collab, RowId};
use collab_database::user::DatabaseWithViewsArray;
use collab_database::user::DatabaseViewTrackerList;
use collab_folder::{Folder, UserId};
use parking_lot::{Mutex, RwLock};
use tracing::info;
@ -53,10 +53,10 @@ pub fn migration_anon_user_on_sign_up(
// Migration of all objects except the folder and database_with_views
object_ids.retain(|id| {
id != &old_user.session.user_workspace.id
&& id != &old_user.session.user_workspace.database_storage_id
&& id != &old_user.session.user_workspace.database_view_tracker_id
});
tracing::info!("migrate collab objects: {:?}", object_ids.len());
info!("migrate collab objects: {:?}", object_ids.len());
let collab_by_oid = make_collab_by_oid(old_user, &old_collab_r_txn, &object_ids);
migrate_databases(
&old_to_new_id_map,
@ -142,24 +142,24 @@ where
{
let database_with_views_collab = Collab::new(
old_user.session.user_id,
&old_user.session.user_workspace.database_storage_id,
&old_user.session.user_workspace.database_view_tracker_id,
"phantom",
vec![],
);
database_with_views_collab.with_origin_transact_mut(|txn| {
old_collab_r_txn.load_doc_with_txn(
old_user.session.user_id,
&old_user.session.user_workspace.database_storage_id,
&old_user.session.user_workspace.database_view_tracker_id,
txn,
)
})?;
let new_uid = new_user.session.user_id;
let new_object_id = &new_user.session.user_workspace.database_storage_id;
let new_object_id = &new_user.session.user_workspace.database_view_tracker_id;
let array = DatabaseWithViewsArray::from_collab(&database_with_views_collab);
for database_view in array.get_all_databases() {
array.update_database(&database_view.database_id, |update| {
let array = DatabaseViewTrackerList::from_collab(&database_with_views_collab);
for database_view_tracker in array.get_all_database_tracker() {
array.update_database(&database_view_tracker.database_id, |update| {
let new_linked_views = update
.linked_views
.iter()

View File

@ -8,7 +8,7 @@ use collab::core::collab::MutexCollab;
use collab::preclude::Collab;
use collab_database::database::get_database_row_ids;
use collab_database::rows::database_row_document_id_from_row_id;
use collab_database::user::{get_database_with_views, DatabaseViewTracker};
use collab_database::user::{get_all_database_view_trackers, DatabaseViewTracker};
use collab_entity::{CollabObject, CollabType};
use collab_folder::{Folder, View, ViewLayout};
use parking_lot::Mutex;
@ -43,7 +43,7 @@ pub async fn sync_af_user_data_to_cloud(
uid,
&workspace_id,
device_id,
&new_user.session.user_workspace.database_storage_id,
&new_user.session.user_workspace.database_view_tracker_id,
collab_db,
user_service.clone(),
)
@ -317,7 +317,7 @@ async fn sync_database_views(
})
.map(|_| {
(
get_database_with_views(&collab),
get_all_database_view_trackers(&collab),
collab.encode_collab_v1().encode_to_bytes(),
)
})

View File

@ -8,7 +8,7 @@ use collab::core::collab::MutexCollab;
use collab::preclude::Collab;
use collab_database::database::get_database_row_ids;
use collab_database::rows::database_row_document_id_from_row_id;
use collab_database::user::{get_database_with_views, DatabaseViewTracker};
use collab_database::user::{get_all_database_view_trackers, DatabaseViewTracker};
use collab_entity::{CollabObject, CollabType};
use collab_folder::{Folder, View, ViewLayout};
use parking_lot::Mutex;
@ -43,7 +43,7 @@ pub async fn sync_supabase_user_data_to_cloud(
uid,
&workspace_id,
device_id,
&new_user.session.user_workspace.database_storage_id,
&new_user.session.user_workspace.database_view_tracker_id,
collab_db,
user_service.clone(),
)
@ -317,7 +317,7 @@ async fn sync_database_views(
})
.map(|_| {
(
get_database_with_views(&collab),
get_all_database_view_trackers(&collab),
collab.encode_collab_v1().doc_state,
)
})

View File

@ -1,5 +1,3 @@
use validator::ValidationError;
pub use auth::*;
pub use realtime::*;
pub use reminder::*;
@ -15,10 +13,3 @@ mod reminder;
mod user_profile;
mod user_setting;
mod workspace_member;
pub fn required_not_empty_str(s: &str) -> Result<(), ValidationError> {
if s.is_empty() {
return Err(ValidationError::new("should not be empty string"));
}
Ok(())
}

View File

@ -1,12 +1,10 @@
use std::convert::TryInto;
use validator::Validate;
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
use flowy_user_deps::entities::*;
use crate::entities::parser::{UserEmail, UserIcon, UserName, UserOpenaiKey, UserPassword};
use crate::entities::required_not_empty_str;
use crate::entities::AuthenticatorPB;
use crate::errors::ErrorCode;
@ -222,7 +220,7 @@ impl From<Vec<UserWorkspace>> for RepeatedUserWorkspacePB {
#[derive(ProtoBuf, Default, Debug, Clone, Validate)]
pub struct UserWorkspacePB {
#[pb(index = 1)]
#[validate(custom = "required_not_empty_str")]
#[validate(custom = "lib_infra::validator_fn::required_not_empty_str")]
pub workspace_id: String,
#[pb(index = 2)]

View File

@ -2,8 +2,7 @@ use validator::Validate;
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
use flowy_user_deps::entities::{Role, WorkspaceMember};
use crate::entities::required_not_empty_str;
use lib_infra::validator_fn::required_not_empty_str;
#[derive(ProtoBuf, Default, Clone)]
pub struct WorkspaceMemberPB {

View File

@ -445,7 +445,7 @@ pub async fn open_workspace_handler(
manager: AFPluginState<Weak<UserManager>>,
) -> Result<(), FlowyError> {
let manager = upgrade_manager(manager)?;
let params = data.validate()?.into_inner();
let params = data.try_into_inner()?;
manager.open_workspace(&params.workspace_id).await?;
Ok(())
}
@ -572,7 +572,7 @@ pub async fn add_workspace_member_handler(
data: AFPluginData<AddWorkspaceMemberPB>,
manager: AFPluginState<Weak<UserManager>>,
) -> Result<(), FlowyError> {
let data = data.validate()?.into_inner();
let data = data.try_into_inner()?;
let manager = upgrade_manager(manager)?;
manager
.add_workspace_member(data.email, data.workspace_id)
@ -585,7 +585,7 @@ pub async fn delete_workspace_member_handler(
data: AFPluginData<RemoveWorkspaceMemberPB>,
manager: AFPluginState<Weak<UserManager>>,
) -> Result<(), FlowyError> {
let data = data.validate()?.into_inner();
let data = data.try_into_inner()?;
let manager = upgrade_manager(manager)?;
manager
.remove_workspace_member(data.email, data.workspace_id)
@ -598,7 +598,7 @@ pub async fn get_workspace_member_handler(
data: AFPluginData<QueryWorkspacePB>,
manager: AFPluginState<Weak<UserManager>>,
) -> DataResult<RepeatedWorkspaceMemberPB, FlowyError> {
let data = data.validate()?.into_inner();
let data = data.try_into_inner()?;
let manager = upgrade_manager(manager)?;
let members = manager
.get_workspace_members(data.workspace_id)
@ -614,7 +614,7 @@ pub async fn update_workspace_member_handler(
data: AFPluginData<UpdateWorkspaceMemberPB>,
manager: AFPluginState<Weak<UserManager>>,
) -> Result<(), FlowyError> {
let data = data.validate()?.into_inner();
let data = data.try_into_inner()?;
let manager = upgrade_manager(manager)?;
manager
.update_workspace_member(data.email, data.workspace_id, data.role.into())

View File

@ -1,17 +1,14 @@
use std::sync::{Arc, Weak};
use std::sync::Weak;
use collab_database::database::WatchStream;
use collab_folder::FolderData;
use strum_macros::Display;
use flowy_derive::{Flowy_Event, ProtoBuf_Enum};
use flowy_error::FlowyResult;
use flowy_user_deps::cloud::{UserCloudConfig, UserCloudService};
use flowy_user_deps::cloud::UserCloudConfig;
use flowy_user_deps::entities::*;
use lib_dispatch::prelude::*;
use lib_infra::future::{to_fut, Fut};
use crate::errors::FlowyError;
use crate::event_handler::*;
use crate::manager::UserManager;
@ -192,14 +189,6 @@ pub enum UserEvent {
GetWorkspaceMember = 40,
}
pub struct SignUpContext {
/// Indicate whether the user is new or not.
pub is_new: bool,
/// If the user is sign in as guest, and the is_new is true, then the folder data will be not
/// None.
pub local_folder: Option<FolderData>,
}
pub trait UserStatusCallback: Send + Sync + 'static {
/// When the [Authenticator] changed, this method will be called. Currently, the auth type
/// will be changed when the user sign in or sign up.
@ -234,73 +223,6 @@ pub trait UserStatusCallback: Send + Sync + 'static {
fn did_update_network(&self, _reachable: bool) {}
}
/// `UserCloudServiceProvider` defines a set of methods for managing user cloud services,
/// including token management, synchronization settings, network reachability, and authentication.
///
/// This trait is intended for implementation by providers that offer cloud-based services for users.
/// It includes methods for handling authentication tokens, enabling/disabling synchronization,
/// setting network reachability, managing encryption secrets, and accessing user-specific cloud services.
pub trait UserCloudServiceProvider: Send + Sync + 'static {
/// Sets the authentication token for the cloud service.
///
/// # Arguments
/// * `token`: A string slice representing the authentication token.
///
/// # Returns
/// A `Result` which is `Ok` if the token is successfully set, or a `FlowyError` otherwise.
fn set_token(&self, token: &str) -> Result<(), FlowyError>;
/// Subscribes to the state of the authentication token.
///
/// # Returns
/// An `Option` containing a `WatchStream<UserTokenState>` if available, or `None` otherwise.
/// The stream allows the caller to watch for changes in the token state.
fn subscribe_token_state(&self) -> Option<WatchStream<UserTokenState>>;
/// Sets the synchronization state for a user.
///
/// # Arguments
/// * `uid`: An i64 representing the user ID.
/// * `enable_sync`: A boolean indicating whether synchronization should be enabled or disabled.
fn set_enable_sync(&self, uid: i64, enable_sync: bool);
/// Sets the network reachability status.
///
/// # Arguments
/// * `reachable`: A boolean indicating whether the network is reachable.
fn set_network_reachable(&self, reachable: bool);
/// Sets the encryption secret for secure communication.
///
/// # Arguments
/// * `secret`: A `String` representing the encryption secret.
fn set_encrypt_secret(&self, secret: String);
/// Sets the authenticator used for authentication processes.
///
/// # Arguments
/// * `authenticator`: An `Authenticator` object.
fn set_authenticator(&self, authenticator: Authenticator);
/// Retrieves the current authenticator.
///
/// # Returns
/// The current `Authenticator` object.
fn get_authenticator(&self) -> Authenticator;
/// Retrieves the user-specific cloud service.
///
/// # Returns
/// A `Result` containing an `Arc<dyn UserCloudService>` if successful, or a `FlowyError` otherwise.
fn get_user_service(&self) -> Result<Arc<dyn UserCloudService>, FlowyError>;
/// Retrieves the service URL.
///
/// # Returns
/// A `String` representing the service URL.
fn service_url(&self) -> String;
}
/// Acts as a placeholder [UserStatusCallback] for the user session, but does not perform any function
pub(crate) struct DefaultUserStatusCallback;
impl UserStatusCallback for DefaultUserStatusCallback {

View File

@ -1,7 +1,7 @@
#[macro_use]
extern crate flowy_sqlite;
mod anon_user_upgrade;
mod anon_user;
pub mod entities;
mod event_handler;
pub mod event_map;

View File

@ -1,12 +1,7 @@
use std::fs;
use std::path::PathBuf;
use std::string::ToString;
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::{Arc, Weak};
use base64::alphabet::URL_SAFE;
use base64::engine::general_purpose::PAD;
use base64::engine::GeneralPurpose;
use collab_user::core::MutexUserAwareness;
use serde_json::Value;
use tokio::sync::{Mutex, RwLock};
@ -16,21 +11,23 @@ use tracing::{debug, error, event, info, instrument};
use collab_integrate::collab_builder::AppFlowyCollabBuilder;
use collab_integrate::RocksCollabDB;
use flowy_error::{internal_error, ErrorCode, FlowyResult};
use flowy_folder_deps::entities::ImportData;
use flowy_server_config::AuthenticatorType;
use flowy_sqlite::kv::StorePreferences;
use flowy_sqlite::schema::user_table;
use flowy_sqlite::ConnectionPool;
use flowy_sqlite::{query_dsl::*, DBConnection, ExpressionMethods};
use flowy_user_deps::cloud::UserUpdate;
use flowy_user_deps::cloud::{UserCloudServiceProvider, UserUpdate};
use flowy_user_deps::entities::*;
use lib_dispatch::prelude::af_spawn;
use lib_infra::box_any::BoxAny;
use crate::anon_user_upgrade::{
use crate::anon_user::{
migration_anon_user_on_sign_up, sync_af_user_data_to_cloud, sync_supabase_user_data_to_cloud,
};
use crate::entities::{AuthStateChangedPB, AuthStatePB, UserProfilePB, UserSettingPB};
use crate::event_map::{DefaultUserStatusCallback, UserCloudServiceProvider, UserStatusCallback};
use crate::event_map::{DefaultUserStatusCallback, UserStatusCallback};
use crate::migrations::document_empty_content::HistoricalEmptyDocumentMigration;
use crate::migrations::migration::{UserDataMigration, UserLocalDataMigration};
use crate::migrations::session_migration::migrate_session_with_user_uuid;
@ -39,46 +36,15 @@ use crate::migrations::workspace_trash_v1::WorkspaceTrashMapToSectionMigration;
use crate::migrations::MigrationUser;
use crate::services::cloud_config::get_cloud_config;
use crate::services::collab_interact::{CollabInteract, DefaultCollabInteract};
use crate::services::db::{UserDB, UserDBPath};
use crate::services::entities::{ResumableSignUp, Session};
use crate::services::data_import::importer::{import_data, ImportDataSource};
use crate::services::db::UserDB;
use crate::services::entities::{ResumableSignUp, Session, UserConfig, UserPaths};
use crate::services::user_awareness::UserAwarenessDataSource;
use crate::services::user_encryption::validate_encryption_sign;
use crate::services::user_sql::{UserTable, UserTableChangeset};
use crate::services::user_workspace::save_user_workspaces;
use crate::{errors::FlowyError, notification::*};
pub const URL_SAFE_ENGINE: GeneralPurpose = GeneralPurpose::new(&URL_SAFE, PAD);
pub struct UserConfig {
/// Used to store the user data
storage_path: String,
/// application_path is the path of the application binary. By default, the
/// storage_path is the same as the application_path. However, when the user
/// choose a custom path for the user data, the storage_path will be different from
/// the application_path.
application_path: String,
pub device_id: String,
/// Used as the key of `Session` when saving session information to KV.
pub(crate) session_cache_key: String,
}
impl UserConfig {
/// The `root_dir` represents as the root of the user folders. It must be unique for each
/// users.
pub fn new(name: &str, storage_path: &str, application_path: &str, device_id: &str) -> Self {
let session_cache_key = format!("{}_session_cache", name);
Self {
storage_path: storage_path.to_owned(),
application_path: application_path.to_owned(),
session_cache_key,
device_id: device_id.to_owned(),
}
}
/// Returns bool whether the user choose a custom path for the user data.
pub fn is_custom_storage_path(&self) -> bool {
!self.storage_path.contains(&self.application_path)
}
}
pub struct UserManager {
database: Arc<UserDB>,
user_paths: UserPaths,
@ -106,7 +72,9 @@ impl UserManager {
let user_status_callback: RwLock<Arc<dyn UserStatusCallback>> =
RwLock::new(Arc::new(DefaultUserStatusCallback));
let current_session = Arc::new(parking_lot::RwLock::new(None));
migrate_session_with_user_uuid(&user_config, &current_session, &store_preferences);
*current_session.write() =
migrate_session_with_user_uuid(&user_config.session_cache_key, &store_preferences);
let refresh_user_profile_since = AtomicI64::new(0);
let user_manager = Arc::new(Self {
@ -699,6 +667,13 @@ impl UserManager {
}
}
pub fn import_data(&self, source: ImportDataSource) -> Result<ImportData, FlowyError> {
let session = self.get_session()?;
let collab_db = self.database.get_collab_db(session.user_id)?;
let import_result = import_data(&session, source, collab_db)?;
Ok(import_result)
}
pub(crate) fn set_session(&self, session: Option<Session>) -> Result<(), FlowyError> {
debug!("Set current user: {:?}", session);
match &session {
@ -858,20 +833,6 @@ fn current_authenticator() -> Authenticator {
}
}
fn validate_encryption_sign(user_profile: &UserProfile, encryption_sign: &str) -> bool {
// If the local user profile's encryption sign is not equal to the user update's encryption sign,
// which means the user enable encryption in another device, we should logout the current user.
let is_valid = user_profile.encryption_type.sign() == encryption_sign;
if !is_valid {
send_auth_state_notification(AuthStateChangedPB {
state: AuthStatePB::InvalidAuth,
message: "Encryption configuration was changed".to_string(),
})
.send();
}
is_valid
}
fn upsert_user_profile_change(
uid: i64,
pool: Arc<ConnectionPool>,
@ -900,39 +861,3 @@ fn save_user_token(uid: i64, pool: Arc<ConnectionPool>, token: String) -> FlowyR
let changeset = UserTableChangeset::new(params);
upsert_user_profile_change(uid, pool, changeset)
}
#[derive(Clone)]
struct UserPaths {
root: String,
}
impl UserPaths {
fn new(root: String) -> Self {
Self { root }
}
/// Returns the path to the user's data directory.
fn user_data_dir(&self, uid: i64) -> String {
format!("{}/{}", self.root, uid)
}
}
impl UserDBPath for UserPaths {
fn user_db_path(&self, uid: i64) -> PathBuf {
PathBuf::from(self.user_data_dir(uid))
}
fn collab_db_path(&self, uid: i64) -> PathBuf {
let mut path = PathBuf::from(self.user_data_dir(uid));
path.push("collab_db");
path
}
fn collab_db_history(&self, uid: i64, create_if_not_exist: bool) -> std::io::Result<PathBuf> {
let path = PathBuf::from(self.user_data_dir(uid)).join("collab_db_history");
if !path.exists() && create_if_not_exist {
fs::create_dir_all(&path)?;
}
Ok(path)
}
}

View File

@ -5,22 +5,20 @@ use uuid::Uuid;
use flowy_sqlite::kv::StorePreferences;
use crate::manager::UserConfig;
use crate::services::entities::Session;
const MIGRATION_USER_NO_USER_UUID: &str = "migration_user_no_user_uuid";
pub fn migrate_session_with_user_uuid(
user_config: &UserConfig,
session: &Arc<parking_lot::RwLock<Option<Session>>>,
session_cache_key: &str,
store_preferences: &Arc<StorePreferences>,
) {
) -> Option<Session> {
if !store_preferences.get_bool(MIGRATION_USER_NO_USER_UUID)
&& store_preferences
.set_bool(MIGRATION_USER_NO_USER_UUID, true)
.is_ok()
{
if let Some(mut value) = store_preferences.get_object::<Value>(&user_config.session_cache_key) {
if let Some(mut value) = store_preferences.get_object::<Value>(session_cache_key) {
if value.get("user_uuid").is_none() {
if let Some(map) = value.as_object_mut() {
map.insert("user_uuid".to_string(), json!(Uuid::new_v4()));
@ -28,9 +26,11 @@ pub fn migrate_session_with_user_uuid(
}
if let Ok(new_session) = serde_json::from_value::<Session>(value) {
*session.write() = Some(new_session.clone());
let _ = store_preferences.set_object(&user_config.session_cache_key, &new_session);
let _ = store_preferences.set_object(session_cache_key, &new_session);
return Some(new_session);
}
}
}
None
}

View File

@ -0,0 +1,484 @@
use crate::migrations::session_migration::migrate_session_with_user_uuid;
use crate::services::data_import::importer::load_collab_by_oid;
use crate::services::db::UserDBPath;
use crate::services::entities::{Session, UserPaths};
use crate::services::user_awareness::awareness_oid_from_user_uuid;
use anyhow::anyhow;
use collab::core::collab::{CollabDocState, MutexCollab};
use collab::core::origin::CollabOrigin;
use collab::preclude::Collab;
use collab_database::database::{
is_database_collab, mut_database_views_with_collab, reset_inline_view_id,
};
use collab_database::rows::{database_row_document_id_from_row_id, mut_row_with_collab, RowId};
use collab_database::user::DatabaseViewTrackerList;
use collab_document::document_data::default_document_collab_data;
use collab_folder::{Folder, UserId, View, ViewIdentifier, ViewLayout};
use collab_integrate::{PersistenceError, RocksCollabDB, YrsDocAction};
use flowy_folder_deps::cloud::gen_view_id;
use flowy_folder_deps::entities::ImportData;
use flowy_folder_deps::folder_builder::{ParentChildViews, ViewBuilder};
use flowy_sqlite::kv::StorePreferences;
use parking_lot::{Mutex, RwLock};
use std::collections::{HashMap, HashSet};
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
/// Imports another AppFlowy data folder into the current user's workspace.
///
/// The `path` refers to the root directory where the other AppFlowy instance
/// stores its data. The directory structure is as follows:
/// root folder:
///   - cache.db (the key-value store holding the session cache)
///   - log (log files with unique identifiers)
///   - 2761499 (per-user data directories, identified by the numeric user id)
///
/// Every imported collab object receives a freshly generated id (tracked via
/// [`OldToNewIdMap`]) so the imported data cannot collide with objects already
/// present in the current workspace. All imported views are grouped under a
/// single new container view whose name is `container_name` (or a timestamped
/// default when empty).
///
/// Returns an [`ImportData::AppFlowyDataFolder`] describing the container view
/// and the sets of database/row/document object ids that were created.
pub(crate) fn import_appflowy_data_folder(
  session: &Session,
  path: String,
  container_name: String,
  collab_db: &Arc<RocksCollabDB>,
) -> anyhow::Result<ImportData> {
  let user_paths = UserPaths::new(path.clone());
  // The other folder may predate the user-uuid session format; migrate its
  // session cache in place before trying to deserialize it.
  let other_store_preferences = Arc::new(StorePreferences::new(&path)?);
  migrate_session_with_user_uuid("appflowy_session_cache", &other_store_preferences);
  let other_session = other_store_preferences
    .get_object::<Session>("appflowy_session_cache")
    .ok_or(anyhow!(
      "Can't find the session cache in the appflowy data folder at path: {}",
      path
    ))?;

  // Open the other folder's collab database read-only; all of its objects are
  // copied into `collab_db` inside a single write transaction below.
  let other_collab_db = Arc::new(RocksCollabDB::open(
    user_paths.collab_db_path(other_session.user_id),
  )?);
  let other_collab_read_txn = other_collab_db.read_txn();
  let mut database_view_ids_by_database_id: HashMap<String, Vec<String>> = HashMap::new();
  // Mutex-wrapped because they are filled from inside closures passed to the
  // write transaction and to the migration helpers.
  let row_object_ids = Mutex::new(HashSet::new());
  let document_object_ids = Mutex::new(HashSet::new());
  let database_object_ids = Mutex::new(HashSet::new());
  let import_container_view_id = gen_view_id().to_string();

  let view = collab_db.with_write_txn(|collab_write_txn| {
    // use the old_to_new_id_map to keep track of the other collab object id and the new collab object id
    let old_to_new_id_map = Arc::new(Mutex::new(OldToNewIdMap::new()));

    let mut all_object_ids = other_collab_read_txn
      .get_all_docs()
      .map(|iter| iter.collect::<Vec<String>>())
      .unwrap_or_default();

    // When doing the import, skip the other user's workspace, database view
    // tracker, and user awareness objects: they are per-user containers, not
    // content, and the current user already has their own.
    all_object_ids.retain(|id| id != &other_session.user_workspace.id);
    all_object_ids.retain(|id| id != &other_session.user_workspace.database_view_tracker_id);
    all_object_ids
      .retain(|id| id != &awareness_oid_from_user_uuid(&other_session.user_uuid).to_string());

    // Import the database view tracker, building the database-id -> view-ids
    // mapping (with renewed ids) as a side effect.
    migrate_database_view_tracker(
      &mut old_to_new_id_map.lock(),
      &other_session,
      &other_collab_read_txn,
      &mut database_view_ids_by_database_id,
      &database_object_ids,
    )?;

    // Remove the database view ids from the object ids, because there is no
    // collab object backing a database view.
    let database_view_ids: Vec<String> = database_view_ids_by_database_id
      .values()
      .flatten()
      .cloned()
      .collect();
    all_object_ids.retain(|id| !database_view_ids.contains(id));

    // Load the remaining collab objects from the other folder into memory.
    let collab_by_oid = load_collab_by_oid(
      other_session.user_id,
      &other_collab_read_txn,
      &all_object_ids,
    );
    // Import the databases; this also removes database and row ids from
    // `all_object_ids`, leaving only document ids behind.
    migrate_databases(
      &old_to_new_id_map,
      session,
      collab_write_txn,
      &mut all_object_ids,
      &collab_by_oid,
      &row_object_ids,
      &document_object_ids,
    )?;

    // The object ids now only contain the document collab object ids.
    for object_id in &all_object_ids {
      if let Some(collab) = collab_by_oid.get(object_id) {
        let new_object_id = old_to_new_id_map.lock().renew_id(object_id);
        document_object_ids.lock().insert(new_object_id.clone());
        tracing::debug!("migrate from: {}, to: {}", object_id, new_object_id,);
        import_collab_object(collab, session.user_id, &new_object_id, collab_write_txn);
      }
    }

    // Create a root view that contains all the imported top-level views.
    let child_views = import_workspace_views(
      &import_container_view_id,
      &mut old_to_new_id_map.lock(),
      &other_session,
      &other_collab_read_txn,
    )?;

    let name = if container_name.is_empty() {
      format!(
        "import_{}",
        chrono::Local::now().format("%Y-%m-%d %H:%M:%S")
      )
    } else {
      container_name
    };

    // Create the document content for the container view (an empty document).
    let import_container_doc_state = default_document_collab_data(&import_container_view_id)
      .doc_state
      .to_vec();
    import_collab_object_with_doc_state(
      import_container_doc_state,
      session.user_id,
      &import_container_view_id,
      collab_write_txn,
    )?;

    let import_container_view =
      ViewBuilder::new(session.user_id, session.user_workspace.id.clone())
        .with_view_id(import_container_view_id)
        .with_layout(ViewLayout::Document)
        .with_name(name)
        .with_child_views(child_views)
        .build();

    Ok(import_container_view)
  })?;
  Ok(ImportData::AppFlowyDataFolder {
    view,
    database_view_ids_by_database_id,
    row_object_ids: row_object_ids.into_inner().into_iter().collect(),
    database_object_ids: database_object_ids.into_inner().into_iter().collect(),
    document_object_ids: document_object_ids.into_inner().into_iter().collect(),
  })
}
/// Reads the other user's database view tracker and records, per database,
/// the list of linked view ids — all renewed through `old_to_new_id_map`.
///
/// The renewed database ids are also added to `database_object_ids` so the
/// caller knows which collab objects represent databases.
///
/// Returns an error if the tracker document cannot be loaded from the other
/// collab store.
fn migrate_database_view_tracker<'a, W>(
  old_to_new_id_map: &mut OldToNewIdMap,
  other_session: &Session,
  other_collab_read_txn: &'a W,
  database_view_ids_by_database_id: &mut HashMap<String, Vec<String>>,
  database_object_ids: &Mutex<HashSet<String>>,
) -> Result<(), PersistenceError>
where
  W: YrsDocAction<'a>,
  PersistenceError: From<W::Error>,
{
  // "phantom" is a placeholder origin: this collab exists only to receive the
  // loaded document state, it is never synced anywhere.
  let database_view_tracker_collab = Collab::new(
    other_session.user_id,
    &other_session.user_workspace.database_view_tracker_id,
    "phantom",
    vec![],
  );
  database_view_tracker_collab.with_origin_transact_mut(|txn| {
    other_collab_read_txn.load_doc_with_txn(
      other_session.user_id,
      &other_session.user_workspace.database_view_tracker_id,
      txn,
    )
  })?;

  let array = DatabaseViewTrackerList::from_collab(&database_view_tracker_collab);
  for database_view_tracker in array.get_all_database_tracker() {
    // Key: renewed database id; value: renewed ids of the views linked to it.
    database_view_ids_by_database_id.insert(
      old_to_new_id_map.renew_id(&database_view_tracker.database_id),
      database_view_tracker
        .linked_views
        .into_iter()
        .map(|view_id| old_to_new_id_map.renew_id(&view_id))
        .collect(),
    );
  }
  database_object_ids.lock().extend(
    database_view_ids_by_database_id
      .keys()
      .cloned()
      .collect::<Vec<String>>(),
  );
  Ok(())
}
/// Migrates every database collab found in `collab_by_oid` into the current
/// user's store under renewed ids, rewriting the internal view ids, database
/// ids, and row ids as it goes.
///
/// Side effects:
/// - removes the migrated database ids and their row ids from `object_ids`,
///   so the caller is left with only non-database (document) object ids;
/// - fills `row_object_ids` with the renewed row ids and `document_object_ids`
///   with the renewed per-row document ids.
///
/// Locking note: `old_to_new_id_map` is locked per call (never held across
/// the nested closures), and `database_row_object_ids` is written while the
/// collab mutation closures run, then read after the first loop finishes.
fn migrate_databases<'a, W>(
  old_to_new_id_map: &Arc<Mutex<OldToNewIdMap>>,
  session: &Session,
  collab_write_txn: &'a W,
  object_ids: &mut Vec<String>,
  collab_by_oid: &HashMap<String, Collab>,
  row_object_ids: &Mutex<HashSet<String>>,
  document_object_ids: &Mutex<HashSet<String>>,
) -> Result<(), PersistenceError>
where
  W: YrsDocAction<'a>,
  PersistenceError: From<W::Error>,
{
  // Migrate databases
  let mut database_object_ids = vec![];
  // Old row ids discovered while rewriting the database views; the rows
  // themselves are migrated in the second loop below.
  let database_row_object_ids = RwLock::new(HashSet::new());

  for object_id in &mut *object_ids {
    if let Some(collab) = collab_by_oid.get(object_id) {
      if !is_database_collab(collab) {
        continue;
      }

      database_object_ids.push(object_id.clone());
      // The inline view id must point at the renewed id of the same view.
      reset_inline_view_id(collab, |old_inline_view_id| {
        old_to_new_id_map.lock().renew_id(&old_inline_view_id)
      });

      mut_database_views_with_collab(collab, |database_view| {
        let new_view_id = old_to_new_id_map.lock().renew_id(&database_view.id);
        let new_database_id = old_to_new_id_map
          .lock()
          .renew_id(&database_view.database_id);

        database_view.id = new_view_id;
        database_view.database_id = new_database_id;
        database_view.row_orders.iter_mut().for_each(|row_order| {
          let old_row_id = String::from(row_order.id.clone());
          let old_row_document_id = database_row_document_id_from_row_id(&old_row_id);
          let new_row_id = old_to_new_id_map.lock().renew_id(&old_row_id);
          let new_row_document_id = database_row_document_id_from_row_id(&new_row_id);
          tracing::debug!("migrate row id: {} to {}", row_order.id, new_row_id);
          tracing::debug!(
            "migrate row document id: {} to {}",
            old_row_document_id,
            new_row_document_id
          );
          // Record the document-id mapping directly: the row document id is
          // derived from the row id, so it must not get a random renewed id.
          old_to_new_id_map
            .lock()
            .insert(old_row_document_id, new_row_document_id);

          row_order.id = RowId::from(new_row_id);
          database_row_object_ids.write().insert(old_row_id);
        });

        // Collect the (already renewed) row ids and their derived document ids.
        let row_ids = database_view
          .row_orders
          .iter()
          .map(|order| order.id.clone().into_inner())
          .collect::<Vec<String>>();
        let row_document_ids = row_ids
          .iter()
          .map(|id| database_row_document_id_from_row_id(id))
          .collect::<Vec<String>>();
        row_object_ids.lock().extend(row_ids);
        document_object_ids.lock().extend(row_document_ids);
      });

      let new_object_id = old_to_new_id_map.lock().renew_id(object_id);
      tracing::debug!(
        "migrate database from: {}, to: {}",
        object_id,
        new_object_id,
      );
      import_collab_object(collab, session.user_id, &new_object_id, collab_write_txn);
    }
  }
  let database_row_object_ids = database_row_object_ids.read();

  // Remove the database object ids (and their row ids) from the object ids;
  // the caller keeps only the remaining, non-database ids.
  object_ids.retain(|id| !database_object_ids.contains(id));
  object_ids.retain(|id| !database_row_object_ids.contains(id));

  for object_id in &*database_row_object_ids {
    if let Some(collab) = collab_by_oid.get(object_id) {
      let new_object_id = old_to_new_id_map.lock().renew_id(object_id);
      tracing::info!(
        "migrate database row from: {}, to: {}",
        object_id,
        new_object_id,
      );
      // Rewrite the row's own id inside its collab before persisting it.
      mut_row_with_collab(collab, |row_update| {
        row_update.set_row_id(RowId::from(new_object_id.clone()));
      });
      import_collab_object(collab, session.user_id, &new_object_id, collab_write_txn);
    }
  }
  Ok(())
}
/// Persists `collab`'s current document state into the destination collab
/// store under (`new_uid`, `new_object_id`).
///
/// Failures are logged and swallowed on purpose: one object that fails to
/// import should not abort the whole data-folder import.
fn import_collab_object<'a, W>(collab: &Collab, new_uid: i64, new_object_id: &str, w_txn: &'a W)
where
  W: YrsDocAction<'a>,
  PersistenceError: From<W::Error>,
{
  let txn = collab.transact();
  // `new_object_id` is already a reference; passing it directly avoids a
  // needless extra borrow (`&&str`) on the original call.
  if let Err(err) = w_txn.create_new_doc(new_uid, new_object_id, &txn) {
    tracing::error!("import collab failed: {:?}", err);
  }
}
fn import_collab_object_with_doc_state<'a, W>(
doc_state: CollabDocState,
new_uid: i64,
new_object_id: &str,
w_txn: &'a W,
) -> Result<(), anyhow::Error>
where
W: YrsDocAction<'a>,
PersistenceError: From<W::Error>,
{
let collab = Collab::new_with_raw_data(CollabOrigin::Empty, new_object_id, doc_state, vec![])?;
import_collab_object(&collab, new_uid, new_object_id, w_txn);
Ok(())
}
/// Loads the other user's folder, renews every view id, and returns the
/// first-level views (with their nested children) re-parented under
/// `parent_view_id`.
///
/// Trashed views are renewed too (so the id map stays consistent) but are
/// filtered out of the returned hierarchy.
///
/// Returns an error when the other folder document cannot be loaded, opened,
/// or when its folder data cannot be read.
fn import_workspace_views<'a, 'b, W>(
  parent_view_id: &str,
  old_to_new_id_map: &mut OldToNewIdMap,
  other_session: &Session,
  other_collab_read_txn: &W,
) -> Result<Vec<ParentChildViews>, PersistenceError>
where
  'a: 'b,
  W: YrsDocAction<'a>,
  PersistenceError: From<W::Error>,
{
  // "phantom" is a placeholder origin: this collab only receives the loaded
  // folder state and is never synced.
  let other_folder_collab = Collab::new(
    other_session.user_id,
    &other_session.user_workspace.id,
    "phantom",
    vec![],
  );
  other_folder_collab.with_origin_transact_mut(|txn| {
    other_collab_read_txn.load_doc_with_txn(
      other_session.user_id,
      &other_session.user_workspace.id,
      txn,
    )
  })?;
  let other_user_id = UserId::from(other_session.user_id);
  let other_folder = Folder::open(
    other_user_id,
    Arc::new(MutexCollab::from_collab(other_folder_collab)),
    None,
  )
  .map_err(|err| PersistenceError::InvalidData(err.to_string()))?;
  let other_folder_data = other_folder
    .get_folder_data()
    .ok_or(PersistenceError::Internal(anyhow!(
      "Can't read the folder data"
    )))?;

  // Map the other workspace's id to the new container view id, so views whose
  // parent was the workspace get re-parented under the container.
  old_to_new_id_map.0.insert(
    other_session.user_workspace.id.clone(),
    parent_view_id.to_string(),
  );

  // Renewed ids of trashed views; used below to exclude them from the import.
  let trash_ids = other_folder_data
    .trash
    .into_values()
    .flatten()
    .map(|item| old_to_new_id_map.renew_id(&item.id))
    .collect::<Vec<String>>();

  // 1. Replace the workspace's first-level view ids with renewed ids.
  let mut first_level_views = other_folder_data
    .workspace
    .child_views
    .items
    .into_iter()
    .filter(|view| !trash_ids.contains(&view.id))
    .collect::<Vec<ViewIdentifier>>();
  first_level_views.iter_mut().for_each(|view_identifier| {
    view_identifier.id = old_to_new_id_map.renew_id(&view_identifier.id);
  });

  let mut all_views = other_folder_data.views;
  all_views.iter_mut().for_each(|view| {
    // 2. replace the old parent view id of the view
    view.parent_view_id = old_to_new_id_map.renew_id(&view.parent_view_id);
    // 3. replace the old id of the view
    view.id = old_to_new_id_map.renew_id(&view.id);
    // 4. replace the old ids of the children views
    view.children.iter_mut().for_each(|view_identifier| {
      view_identifier.id = old_to_new_id_map.renew_id(&view_identifier.id);
    });
  });

  // Index the surviving views by their (renewed) id; note `trash_ids` holds
  // renewed ids, and `view.id` was renewed above, so the filter matches.
  let mut all_views_map = all_views
    .into_iter()
    .filter(|view| !trash_ids.contains(&view.id))
    .map(|view| (view.id.clone(), view))
    .collect::<HashMap<String, View>>();

  // Assemble the tree: each first-level view pulls its descendants out of the
  // map recursively via `parent_view_from_view`.
  let parent_views = first_level_views
    .into_iter()
    .flat_map(
      |view_identifier| match all_views_map.remove(&view_identifier.id) {
        None => None,
        Some(view) => parent_view_from_view(view, &mut all_views_map),
      },
    )
    .collect::<Vec<ParentChildViews>>();
  Ok(parent_views)
}
/// Recursively assembles the subtree rooted at `parent_view`.
///
/// Each child id found in `all_views_map` is removed from the map (so a view
/// is attached to at most one parent) and expanded into its own subtree.
/// Children missing from the map are silently skipped.
fn parent_view_from_view(
  parent_view: View,
  all_views_map: &mut HashMap<String, View>,
) -> Option<ParentChildViews> {
  let mut child_views = Vec::new();
  for child_identifier in parent_view.children.iter() {
    // Take ownership of the child out of the map, then expand it recursively.
    if let Some(child_view) = all_views_map.remove(&child_identifier.id) {
      if let Some(subtree) = parent_view_from_view(child_view, all_views_map) {
        child_views.push(subtree);
      }
    }
  }
  Some(ParentChildViews {
    parent_view,
    child_views,
  })
}
/// Maps ids from an imported (old) data set to freshly generated ids.
///
/// The first time an old id is looked up, a new view id is generated and
/// cached; every later lookup of the same old id returns the same new id.
#[derive(Default)]
struct OldToNewIdMap(HashMap<String, String>);
impl OldToNewIdMap {
  fn new() -> Self {
    Self::default()
  }
  /// Returns the new id mapped to `old_id`, generating and caching one on
  /// first use.
  fn renew_id(&mut self, old_id: &str) -> String {
    // `or_insert_with` defers id generation until an entry is actually
    // missing; the previous `or_insert(gen_view_id()...)` generated a fresh
    // view id on every call, even when the mapping already existed.
    let view_id = self
      .0
      .entry(old_id.to_string())
      .or_insert_with(|| gen_view_id().to_string());
    (*view_id).clone()
  }
}
impl Deref for OldToNewIdMap {
  type Target = HashMap<String, String>;
  fn deref(&self) -> &Self::Target {
    &self.0
  }
}
impl DerefMut for OldToNewIdMap {
  fn deref_mut(&mut self) -> &mut Self::Target {
    &mut self.0
  }
}

View File

@ -0,0 +1,53 @@
use crate::services::data_import::appflowy_data_import::import_appflowy_data_folder;
use crate::services::entities::Session;
use collab_integrate::{PersistenceError, RocksCollabDB, YrsDocAction};
use std::collections::HashMap;
use collab::preclude::Collab;
use flowy_folder_deps::entities::ImportData;
use std::sync::Arc;
/// Describes where data to be imported comes from.
pub enum ImportDataSource {
  /// Import from an AppFlowy data folder on disk.
  AppFlowyDataFolder {
    // Filesystem path of the AppFlowy data folder to import.
    path: String,
    // Presumably the name of the container view the imported content is
    // grouped under — confirm against `import_appflowy_data_folder`.
    container_name: String,
  },
}
/// Imports data from `source` into the current `session`, using `collab_db`
/// to read/write the collab documents.
pub(crate) fn import_data(
  session: &Session,
  source: ImportDataSource,
  collab_db: Arc<RocksCollabDB>,
) -> anyhow::Result<ImportData> {
  // `ImportDataSource` currently has a single variant, so the pattern is
  // irrefutable and can be destructured directly.
  let ImportDataSource::AppFlowyDataFolder {
    path,
    container_name,
  } = source;
  import_appflowy_data_folder(session, path, container_name, &collab_db)
}
/// Loads the collab document for every id in `object_ids` via
/// `collab_read_txn`, returning the successfully loaded collabs keyed by
/// object id.
///
/// Loading is best-effort: an object that fails to load is logged and
/// skipped instead of aborting the whole batch, so the returned map may
/// contain fewer entries than `object_ids`.
pub fn load_collab_by_oid<'a, R>(
  uid: i64,
  collab_read_txn: &R,
  object_ids: &[String],
) -> HashMap<String, Collab>
where
  R: YrsDocAction<'a>,
  PersistenceError: From<R::Error>,
{
  // At most one entry per requested id, so pre-size the map.
  let mut collab_by_oid = HashMap::with_capacity(object_ids.len());
  for object_id in object_ids {
    let collab = Collab::new(uid, object_id, "phantom", vec![]);
    // `object_id` is already a reference; the previous `&object_id` was a
    // needless double borrow.
    match collab
      .with_origin_transact_mut(|txn| collab_read_txn.load_doc_with_txn(uid, object_id, txn))
    {
      Ok(_) => {
        collab_by_oid.insert(object_id.clone(), collab);
      },
      Err(err) => tracing::error!("🔴Initialize migration collab failed: {:?} ", err),
    }
  }
  collab_by_oid
}

View File

@ -0,0 +1,5 @@
// Implementation of importing another AppFlowy data folder.
mod appflowy_data_import;
// Importer entry points; crate-visible module with selected public re-exports.
pub(crate) mod importer;
pub use importer::load_collab_by_oid;
pub use importer::ImportDataSource;

View File

@ -1,6 +1,9 @@
use std::fmt;
use base64::alphabet::URL_SAFE;
use std::path::PathBuf;
use std::{fmt, fs};
use base64::engine::general_purpose::STANDARD;
use base64::engine::general_purpose::{PAD, STANDARD};
use base64::engine::GeneralPurpose;
use base64::Engine;
use chrono::prelude::*;
use serde::de::{Deserializer, MapAccess, Visitor};
@ -13,6 +16,7 @@ use flowy_user_deps::entities::{Authenticator, UserAuthResponse};
use crate::entities::AuthenticatorPB;
use crate::migrations::MigrationUser;
use crate::services::db::UserDBPath;
#[derive(Debug, Clone, Serialize)]
pub struct Session {
@ -68,7 +72,7 @@ impl<'de> Visitor<'de> for SessionVisitor {
name: "My Workspace".to_string(),
created_at: Utc::now(),
// For historical reasons, the database_storage_id is constructed by the user_id.
database_storage_id: STANDARD.encode(format!("{}:user:database", user_id)),
database_view_tracker_id: STANDARD.encode(format!("{}:user:database", user_id)),
})
}
}
@ -134,32 +138,6 @@ impl From<Authenticator> for AuthenticatorPB {
}
}
/// The set of users that have previously signed in on this device.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct HistoricalUsers {
  pub(crate) users: Vec<HistoricalUser>,
}
impl HistoricalUsers {
  /// Adds `new_user`, replacing any existing record with the same `user_id`.
  pub fn add_user(&mut self, new_user: HistoricalUser) {
    // Drop all previous records for this user id first, so the list never
    // contains duplicates for the same user.
    self.users.retain(|user| user.user_id != new_user.user_id);
    self.users.push(new_user);
  }
}
/// A single record of a user who signed in on this device.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct HistoricalUser {
  pub user_id: i64,
  // Falls back to the default user name when the field is missing on deserialize.
  #[serde(default = "flowy_user_deps::DEFAULT_USER_NAME")]
  pub user_name: String,
  // Falls back to the local authenticator when the field is missing (see
  // DEFAULT_AUTH_TYPE below).
  #[serde(default = "DEFAULT_AUTH_TYPE")]
  pub auth_type: Authenticator,
  pub sign_in_timestamp: i64,
  pub storage_path: String,
  #[serde(default)]
  pub device_id: String,
}
// Serde `default` requires a path to a fn() -> T; this supplies the local
// authenticator as the fallback `auth_type`.
const DEFAULT_AUTH_TYPE: fn() -> Authenticator = || Authenticator::Local;
#[derive(Clone)]
pub(crate) struct ResumableSignUp {
pub user_profile: UserProfile,
@ -167,3 +145,72 @@ pub(crate) struct ResumableSignUp {
pub authenticator: Authenticator,
pub migration_user: Option<MigrationUser>,
}
pub const URL_SAFE_ENGINE: GeneralPurpose = GeneralPurpose::new(&URL_SAFE, PAD);
/// Describes where a user's data is stored and how the session cache is keyed.
pub struct UserConfig {
  /// Directory that holds the user's data.
  pub storage_path: String,
  /// Path of the application binary. By default `storage_path` equals the
  /// `application_path`; they differ once the user picks a custom location
  /// for their data.
  pub application_path: String,
  pub device_id: String,
  /// Key under which `Session` information is saved in the KV store.
  pub(crate) session_cache_key: String,
}
impl UserConfig {
  /// Builds a config whose session-cache key is derived from `name`.
  ///
  /// The `root_dir` represents the root of the user folders and must be
  /// unique per user.
  pub fn new(name: &str, storage_path: &str, application_path: &str, device_id: &str) -> Self {
    Self {
      storage_path: storage_path.to_string(),
      application_path: application_path.to_string(),
      device_id: device_id.to_string(),
      session_cache_key: format!("{}_session_cache", name),
    }
  }
  /// Whether the user picked a custom location for their data, i.e. the
  /// storage path no longer lies inside the application path.
  pub fn is_custom_storage_path(&self) -> bool {
    !self.storage_path.contains(self.application_path.as_str())
  }
}
/// Root directory that contains one data folder per user.
#[derive(Clone)]
pub struct UserPaths {
  root: String,
}
impl UserPaths {
  /// Creates a path helper rooted at `root`.
  pub fn new(root: String) -> Self {
    Self { root }
  }
  /// Returns the path to the user's data directory: `<root>/<uid>`.
  pub(crate) fn user_data_dir(&self, uid: i64) -> String {
    let mut dir = self.root.clone();
    dir.push('/');
    dir.push_str(&uid.to_string());
    dir
  }
}
impl UserDBPath for UserPaths {
  /// The user's main database lives directly in the user data directory.
  fn user_db_path(&self, uid: i64) -> PathBuf {
    self.user_data_dir(uid).into()
  }
  /// The collab database lives in a `collab_db` subdirectory of the user
  /// data directory.
  fn collab_db_path(&self, uid: i64) -> PathBuf {
    PathBuf::from(self.user_data_dir(uid)).join("collab_db")
  }
  /// Path of the collab-db history folder, created on disk when
  /// `create_if_not_exist` is true and it does not exist yet.
  fn collab_db_history(&self, uid: i64, create_if_not_exist: bool) -> std::io::Result<PathBuf> {
    let history = PathBuf::from(self.user_data_dir(uid)).join("collab_db_history");
    if create_if_not_exist && !history.exists() {
      fs::create_dir_all(&history)?;
    }
    Ok(history)
  }
}

View File

@ -1,5 +1,6 @@
pub mod cloud_config;
pub mod collab_interact;
pub mod data_import;
pub mod db;
pub mod entities;
pub(crate) mod historical_user;

View File

@ -1,7 +1,7 @@
use std::sync::{Arc, Weak};
use anyhow::Context;
use collab::core::collab::{CollabRawData, MutexCollab};
use collab::core::collab::{CollabDocState, MutexCollab};
use collab_entity::reminder::Reminder;
use collab_entity::CollabType;
use collab_integrate::collab_builder::CollabBuilderConfig;
@ -141,7 +141,7 @@ impl UserManager {
let data = self
.cloud_services
.get_user_service()?
.get_user_awareness_updates(session.user_id)
.get_user_awareness_doc_state(session.user_id)
.await?;
trace!("Get user awareness collab: {}", data.len());
let collab = self
@ -163,13 +163,13 @@ impl UserManager {
&self,
session: &Session,
collab_db: Weak<RocksCollabDB>,
raw_data: CollabRawData,
raw_data: CollabDocState,
) -> Result<Arc<MutexCollab>, FlowyError> {
let collab_builder = self.collab_builder.upgrade().ok_or(FlowyError::new(
ErrorCode::Internal,
"Unexpected error: collab builder is not available",
))?;
let user_awareness_id = Uuid::new_v5(&session.user_uuid, b"user_awareness");
let user_awareness_id = awareness_oid_from_user_uuid(&session.user_uuid);
let collab = collab_builder
.build(
session.user_id,
@ -214,6 +214,10 @@ impl UserManager {
}
}
/// Derives the user-awareness object id from the user's uuid.
///
/// Uses UUIDv5 (namespaced hashing), so the same user uuid always maps to
/// the same awareness object id.
pub fn awareness_oid_from_user_uuid(user_uuid: &Uuid) -> Uuid {
  Uuid::new_v5(user_uuid, b"user_awareness")
}
/// Indicate using which data source to initialize the user awareness
/// If the user is not a new user, the local data source is used. Otherwise, the remote data source is used.
/// When using the remote data source, the user awareness will be initialized from the remote server.

View File

@ -1,8 +1,12 @@
use crate::entities::{AuthStateChangedPB, AuthStatePB};
use flowy_encrypt::{decrypt_text, encrypt_text};
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_user_deps::entities::{EncryptionType, UpdateUserProfileParams, UserCredentials};
use flowy_user_deps::entities::{
EncryptionType, UpdateUserProfileParams, UserCredentials, UserProfile,
};
use crate::manager::UserManager;
use crate::notification::send_auth_state_notification;
use crate::services::cloud_config::get_encrypt_secret;
impl UserManager {
@ -60,3 +64,17 @@ impl UserManager {
}
}
}
/// Checks `encryption_sign` against the local user profile's encryption sign.
///
/// If the local user profile's encryption sign is not equal to the user
/// update's encryption sign, the user enabled encryption on another device;
/// an `InvalidAuth` state notification is sent so the current user is logged
/// out. Returns whether the signs match.
pub(crate) fn validate_encryption_sign(user_profile: &UserProfile, encryption_sign: &str) -> bool {
  if user_profile.encryption_type.sign() == encryption_sign {
    return true;
  }
  send_auth_state_notification(AuthStateChangedPB {
    state: AuthStatePB::InvalidAuth,
    message: "Encryption configuration was changed".to_string(),
  })
  .send();
  false
}

View File

@ -23,7 +23,7 @@ impl TryFrom<(i64, &UserWorkspace)> for UserWorkspaceTable {
if value.1.id.is_empty() {
return Err(FlowyError::invalid_data().with_context("The id is empty"));
}
if value.1.database_storage_id.is_empty() {
if value.1.database_view_tracker_id.is_empty() {
return Err(FlowyError::invalid_data().with_context("The database storage id is empty"));
}
@ -32,7 +32,7 @@ impl TryFrom<(i64, &UserWorkspace)> for UserWorkspaceTable {
name: value.1.name.clone(),
uid: value.0,
created_at: value.1.created_at.timestamp(),
database_storage_id: value.1.database_storage_id.clone(),
database_storage_id: value.1.database_view_tracker_id.clone(),
})
}
}
@ -46,7 +46,7 @@ impl From<UserWorkspaceTable> for UserWorkspace {
.timestamp_opt(value.created_at, 0)
.single()
.unwrap_or_default(),
database_storage_id: value.database_storage_id,
database_view_tracker_id: value.database_storage_id,
}
}
}

View File

@ -26,6 +26,16 @@ impl<T> AFPluginData<T> {
}
}
impl<T> AFPluginData<T>
where
  T: validator::Validate,
{
  /// Validates the wrapped value and returns it on success.
  ///
  /// Returns the validator's `ValidationErrors` when validation fails.
  pub fn try_into_inner(self) -> Result<T, ValidationErrors> {
    self.0.validate()?;
    Ok(self.0)
  }
}
impl<T> ops::Deref for AFPluginData<T> {
type Target = T;

86
shared-lib/Cargo.lock generated
View File

@ -570,6 +570,15 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "form_urlencoded"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
dependencies = [
"percent-encoding",
]
[[package]]
name = "futures-core"
version = "0.3.26"
@ -710,6 +719,26 @@ dependencies = [
"cxx-build",
]
[[package]]
name = "idna"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
dependencies = [
"unicode-bidi",
"unicode-normalization",
]
[[package]]
name = "idna"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [
"unicode-bidi",
"unicode-normalization",
]
[[package]]
name = "ignore"
version = "0.4.20"
@ -806,6 +835,7 @@ dependencies = [
"rand 0.8.5",
"tempfile",
"tokio",
"validator",
"walkdir",
"zip",
]
@ -1723,6 +1753,21 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "tinyvec"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
dependencies = [
"tinyvec_macros",
]
[[package]]
name = "tinyvec_macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.34.0"
@ -1879,12 +1924,27 @@ dependencies = [
"unic-common",
]
[[package]]
name = "unicode-bidi"
version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416"
[[package]]
name = "unicode-ident"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]]
name = "unicode-normalization"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-segmentation"
version = "1.10.1"
@ -1897,6 +1957,32 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
[[package]]
name = "url"
version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643"
dependencies = [
"form_urlencoded",
"idna 0.3.0",
"percent-encoding",
]
[[package]]
name = "validator"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b92f40481c04ff1f4f61f304d61793c7b56ff76ac1469f1beb199b1445b253bd"
dependencies = [
"idna 0.4.0",
"lazy_static",
"regex",
"serde",
"serde_derive",
"serde_json",
"url",
]
[[package]]
name = "version_check"
version = "0.9.3"

View File

@ -17,4 +17,5 @@ md5 = "0.7.0"
anyhow.workspace = true
walkdir = "2.4.0"
zip = "0.6.6"
tempfile = "3.8.1"
validator = "0.16.0"

View File

@ -5,3 +5,4 @@ pub mod file_util;
pub mod future;
pub mod ref_map;
pub mod util;
pub mod validator_fn;

View File

@ -0,0 +1,8 @@
use validator::ValidationError;
pub fn required_not_empty_str(s: &str) -> Result<(), ValidationError> {
if s.is_empty() {
return Err(ValidationError::new("should not be empty string"));
}
Ok(())
}