feat: support pg storage (#2935)
* refactor: using tokio-postgres
* chore: update
* chore: update env
* chore: update
* chore: upgrade supabase and add logout button
* refactor: update
* chore: update
* refactor: using message queue to handle the pg connection
* refactor: move test
* refactor: update sql
* chore: create pg database when user login
* chore: update scheme
* chore: generic user service
* chore: update
* chore: create statistics
* chore: create snapshot
* chore: add test
* chore: add database cloud service
* chore: add document cloud service
* chore: update interface
* test: add document test
* refactor: document interface
* chore: fix test
* chore: update
* chore: update test
* test: add test
* test: add test
* test: add test
* chore: update collab rev
* fix: flutter analyzer
* chore: update
* chore: update
* chore: update
* fix: tests
* chore: update
* chore: update collab rev
* ci: rust fmt

---------

Co-authored-by: Lucas.Xu <lucas.xu@appflowy.io>
parent e0ad364fa3
commit edc7933c66
frontend/.vscode/tasks.json (vendored): 8 changes
@@ -234,5 +234,13 @@
        "cwd": "${workspaceFolder}/appflowy_tauri"
      }
    },
    {
      "label": "AF: Generate Env",
      "type": "shell",
      "command": "dart run build_runner clean && dart run build_runner build --delete-conflicting-outputs ",
      "options": {
        "cwd": "${workspaceFolder}/appflowy_flutter"
      }
    },
  ]
}
frontend/appflowy_flutter/.gitignore (vendored): 1 change
@@ -70,6 +70,7 @@ windows/flutter/dart_ffi/
**/.vscode/

*.env
*.env.*

coverage/
@@ -7,32 +7,24 @@ class Config {
    required String anonKey,
    required String key,
    required String secret,
    required String pgUrl,
    required String pgUser,
    required String pgPassword,
    required String pgPort,
  }) async {
    final postgresConfig = PostgresConfigurationPB.create()
      ..url = pgUrl
      ..userName = pgUser
      ..password = pgPassword
      ..port = int.parse(pgPort);

    await ConfigEventSetSupabaseConfig(
      SupabaseConfigPB.create()
        ..supabaseUrl = url
        ..key = key
        ..anonKey = anonKey
        ..jwtSecret = secret,
        ..jwtSecret = secret
        ..postgresConfig = postgresConfig,
    ).send();
  }

  static Future<void> setSupabaseCollabPluginConfig({
    required String url,
    required String key,
    required String jwtSecret,
    required String collabTable,
  }) async {
    final payload = CollabPluginConfigPB.create();
    final collabTableConfig = CollabTableConfigPB.create()
      ..tableName = collabTable;

    payload.supabaseConfig = SupabaseDBConfigPB.create()
      ..supabaseUrl = url
      ..key = key
      ..jwtSecret = jwtSecret
      ..collabTableConfig = collabTableConfig;

    await ConfigEventSetCollabPluginConfig(payload).send();
  }
}
frontend/appflowy_flutter/lib/env/env.dart (vendored): 42 changes
@@ -3,6 +3,17 @@ import 'package:envied/envied.dart';

part 'env.g.dart';

/// The environment variables are defined in the `.env` file located in
/// appflowy_flutter.
/// Run `dart run build_runner build --delete-conflicting-outputs`
/// to generate the keys from the env file.
///
/// If you want to regenerate the keys, you need to run `dart run
/// build_runner clean` before running `dart run build_runner build
/// --delete-conflicting-outputs`.

/// Follow the guide on https://supabase.com/docs/guides/auth/social-login/auth-google to setup the auth provider.
///
@Envied(path: '.env')
abstract class Env {
  @EnviedField(
@@ -32,14 +43,39 @@ abstract class Env {

  @EnviedField(
    obfuscate: true,
    varName: 'SUPABASE_COLLAB_TABLE',
    varName: 'SUPABASE_DB',
    defaultValue: '',
  )
  static final String supabaseCollabTable = _Env.supabaseCollabTable;
  static final String supabaseDb = _Env.supabaseDb;

  @EnviedField(
    obfuscate: true,
    varName: 'SUPABASE_DB_USER',
    defaultValue: '',
  )
  static final String supabaseDbUser = _Env.supabaseDbUser;

  @EnviedField(
    obfuscate: true,
    varName: 'SUPABASE_DB_PASSWORD',
    defaultValue: '',
  )
  static final String supabaseDbPassword = _Env.supabaseDbPassword;

  @EnviedField(
    obfuscate: true,
    varName: 'SUPABASE_DB_PORT',
    defaultValue: '5432',
  )
  static final String supabaseDbPort = _Env.supabaseDbPort;
}

bool get isSupabaseEnable =>
    Env.supabaseUrl.isNotEmpty &&
    Env.supabaseAnonKey.isNotEmpty &&
    Env.supabaseKey.isNotEmpty &&
    Env.supabaseJwtSecret.isNotEmpty;
    Env.supabaseJwtSecret.isNotEmpty &&
    Env.supabaseDb.isNotEmpty &&
    Env.supabaseDbUser.isNotEmpty &&
    Env.supabaseDbPassword.isNotEmpty &&
    Env.supabaseDbPort.isNotEmpty;
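For reference, a local `.env` file in appflowy_flutter that satisfies the fields above might look like the sketch below. The SUPABASE_DB, SUPABASE_DB_USER, SUPABASE_DB_PASSWORD, and SUPABASE_DB_PORT names come from this diff; the remaining variable names are assumed to mirror the existing Env fields, and every value is a placeholder rather than a real credential.

SUPABASE_URL=https://your-project.supabase.co
SUPABASE_ANON_KEY=<anon key>
SUPABASE_KEY=<service role key>
SUPABASE_JWT_SECRET=<jwt secret>
SUPABASE_DB=db.your-project.supabase.co
SUPABASE_DB_USER=postgres
SUPABASE_DB_PASSWORD=<database password>
SUPABASE_DB_PORT=5432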
@@ -1,6 +1,5 @@
import 'dart:io';

import 'package:appflowy/env/env.dart';
import 'package:appflowy/workspace/application/settings/prelude.dart';
import 'package:appflowy_backend/appflowy_backend.dart';
import 'package:flutter/foundation.dart';
@@ -62,13 +61,7 @@ class FlowyRunner {
      // ignore in test mode
      if (!mode.isUnitTest) ...[
        const HotKeyTask(),
        InitSupabaseTask(
          url: Env.supabaseUrl,
          anonKey: Env.supabaseAnonKey,
          key: Env.supabaseKey,
          jwtSecret: Env.supabaseJwtSecret,
          collabTable: Env.supabaseCollabTable,
        ),
        InitSupabaseTask(),
        const InitAppWidgetTask(),
        const InitPlatformServiceTask()
      ],
@@ -23,31 +23,29 @@ class InitRustSDKTask extends LaunchTask {
  Future<void> initialize(LaunchContext context) async {
    final dir = directory ?? await appFlowyApplicationDataDirectory();

    context.getIt<FlowySDK>().setEnv(getAppFlowyEnv());
    final env = getAppFlowyEnv();
    context.getIt<FlowySDK>().setEnv(env);
    await context.getIt<FlowySDK>().init(dir);
  }
}

AppFlowyEnv getAppFlowyEnv() {
  final postgresConfig = PostgresConfiguration(
    url: Env.supabaseDb,
    password: Env.supabaseDbPassword,
    port: int.parse(Env.supabaseDbPort),
    user_name: Env.supabaseDbUser,
  );

  final supabaseConfig = SupabaseConfiguration(
    url: Env.supabaseUrl,
    key: Env.supabaseKey,
    jwt_secret: Env.supabaseJwtSecret,
  );

  final collabTableConfig =
      CollabTableConfig(enable: true, table_name: Env.supabaseCollabTable);

  final supabaseDBConfig = SupabaseDBConfig(
    url: Env.supabaseUrl,
    key: Env.supabaseKey,
    jwt_secret: Env.supabaseJwtSecret,
    collab_table_config: collabTableConfig,
    postgres_config: postgresConfig,
  );

  return AppFlowyEnv(
    supabase_config: supabaseConfig,
    supabase_db_config: supabaseDBConfig,
  );
}
@@ -1,49 +1,37 @@
import 'package:appflowy/core/config/config.dart';
import 'package:appflowy_backend/log.dart';
import 'package:appflowy/env/env.dart';
import 'package:supabase_flutter/supabase_flutter.dart';

import '../startup.dart';

bool isSupabaseEnable = false;
bool isSupabaseInitialized = false;

class InitSupabaseTask extends LaunchTask {
  const InitSupabaseTask({
    required this.url,
    required this.anonKey,
    required this.key,
    required this.jwtSecret,
    this.collabTable = "",
  });

  final String url;
  final String anonKey;
  final String key;
  final String jwtSecret;
  final String collabTable;

  @override
  Future<void> initialize(LaunchContext context) async {
    if (url.isEmpty || anonKey.isEmpty || jwtSecret.isEmpty || key.isEmpty) {
      isSupabaseEnable = false;
      Log.info('Supabase config is empty, skip init supabase.');
    if (!isSupabaseEnable) {
      return;
    }

    if (isSupabaseInitialized) {
      return;
    }
    await Supabase.initialize(
      url: url,
      anonKey: anonKey,
      url: Env.supabaseUrl,
      anonKey: Env.supabaseAnonKey,
      debug: false,
    );

    await Config.setSupabaseConfig(
      url: url,
      key: key,
      secret: jwtSecret,
      anonKey: anonKey,
      url: Env.supabaseUrl,
      key: Env.supabaseKey,
      secret: Env.supabaseJwtSecret,
      anonKey: Env.supabaseAnonKey,
      pgPassword: Env.supabaseDbPassword,
      pgPort: Env.supabaseDbPort,
      pgUrl: Env.supabaseDb,
      pgUser: Env.supabaseDbUser,
    );
    isSupabaseEnable = true;
    isSupabaseInitialized = true;
  }
}
@@ -14,6 +14,6 @@ class AuthError {
    ..code = -10003;

  static final supabaseGetUserError = FlowyError()
    ..msg = 'supabase sign in with oauth error'
    ..code = -10003;
    ..msg = 'unable to get user from supabase'
    ..code = -10004;
}
@@ -1,11 +1,9 @@
import 'dart:async';

import 'package:appflowy/core/config/kv.dart';
import 'package:appflowy/core/config/kv_keys.dart';
import 'package:appflowy/startup/startup.dart';
import 'package:appflowy/startup/tasks/prelude.dart';
import 'package:appflowy/env/env.dart';
import 'package:appflowy/user/application/auth/appflowy_auth_service.dart';
import 'package:appflowy/user/application/auth/auth_service.dart';
import 'package:appflowy/user/application/user_service.dart';
import 'package:appflowy_backend/dispatch/dispatch.dart';
import 'package:appflowy_backend/log.dart';
import 'package:appflowy_backend/protobuf/flowy-error/errors.pb.dart';
@@ -112,13 +110,12 @@ class SupabaseAuthService implements AuthService {
    final completer = Completer<Either<FlowyError, UserProfilePB>>();
    late final StreamSubscription<AuthState> subscription;
    subscription = _auth.onAuthStateChange.listen((event) async {
      if (event.event != AuthChangeEvent.signedIn) {
      final user = event.session?.user;
      if (event.event != AuthChangeEvent.signedIn || user == null) {
        completer.complete(left(AuthError.supabaseSignInWithOauthError));
      } else {
        final user = await getSupabaseUser();
        final Either<FlowyError, UserProfilePB> response = await user.fold(
          (l) => left(l),
          (r) async => await setupAuth(map: {AuthServiceMapKeys.uuid: r.id}),
        final Either<FlowyError, UserProfilePB> response = await setupAuth(
          map: {AuthServiceMapKeys.uuid: user.id},
        );
        completer.complete(response);
      }
@@ -164,16 +161,21 @@ class SupabaseAuthService implements AuthService {
    return _appFlowyAuthService.signUpAsGuest();
  }

  // @override
  // Future<Either<FlowyError, UserProfilePB>> getUser() async {
  //   final loginType = await getIt<KeyValueStorage>()
  //       .get(KVKeys.loginType)
  //       .then((value) => value.toOption().toNullable());
  //   if (!isSupabaseEnable || (loginType != null && loginType != 'supabase')) {
  //     return _appFlowyAuthService.getUser();
  //   }
  //   final user = await getSupabaseUser();
  //   return user.map((r) => r.toUserProfile());
  // }

  @override
  Future<Either<FlowyError, UserProfilePB>> getUser() async {
    final loginType = await getIt<KeyValueStorage>()
        .get(KVKeys.loginType)
        .then((value) => value.toOption().toNullable());
    if (!isSupabaseEnable || (loginType != null && loginType != 'supabase')) {
      return _appFlowyAuthService.getUser();
    }
    final user = await getSupabaseUser();
    return user.map((r) => r.toUserProfile());
    return UserBackendService.getCurrentUserProfile();
  }

  Future<Either<FlowyError, User>> getSupabaseUser() async {
@@ -197,14 +199,6 @@ class SupabaseAuthService implements AuthService {
    }
  }

extension on User {
  UserProfilePB toUserProfile() {
    return UserProfilePB()
      ..email = email ?? ''
      ..token = this.id;
  }
}

extension on String {
  Provider toProvider() {
    switch (this) {
@@ -1,4 +1,4 @@
import 'package:appflowy/startup/tasks/supabase_task.dart';
import 'package:appflowy/env/env.dart';
import 'package:appflowy/user/application/auth/auth_service.dart';
import 'package:appflowy_backend/dispatch/dispatch.dart';
import 'package:appflowy_backend/log.dart';
@ -2,10 +2,12 @@ import 'dart:convert';
|
||||
import 'dart:async';
|
||||
|
||||
import 'package:appflowy/generated/locale_keys.g.dart';
|
||||
import 'package:appflowy/startup/entry_point.dart';
|
||||
import 'package:appflowy/startup/startup.dart';
|
||||
import 'package:appflowy/user/application/auth/auth_service.dart';
|
||||
import 'package:appflowy/util/debounce.dart';
|
||||
import 'package:appflowy/workspace/application/user/settings_user_bloc.dart';
|
||||
import 'package:appflowy_backend/protobuf/flowy-user/user_profile.pb.dart';
|
||||
import 'package:appflowy_backend/protobuf/flowy-user/protobuf.dart';
|
||||
import 'package:easy_localization/easy_localization.dart';
|
||||
import 'package:flowy_infra/image.dart';
|
||||
import 'package:flowy_infra/size.dart';
|
||||
@ -26,24 +28,27 @@ class SettingsUserView extends StatelessWidget {
|
||||
create: (context) => getIt<SettingsUserViewBloc>(param1: user)
|
||||
..add(const SettingsUserEvent.initial()),
|
||||
child: BlocBuilder<SettingsUserViewBloc, SettingsUserState>(
|
||||
builder: (context, state) => SingleChildScrollView(
|
||||
child: Column(
|
||||
crossAxisAlignment: CrossAxisAlignment.start,
|
||||
children: [
|
||||
_renderUserNameInput(context),
|
||||
const VSpace(20),
|
||||
_renderCurrentIcon(context),
|
||||
const VSpace(20),
|
||||
_renderCurrentOpenaiKey(context)
|
||||
],
|
||||
),
|
||||
builder: (context, state) => Column(
|
||||
crossAxisAlignment: CrossAxisAlignment.start,
|
||||
mainAxisSize: MainAxisSize.min,
|
||||
children: [
|
||||
_renderUserNameInput(context),
|
||||
const VSpace(20),
|
||||
_renderCurrentIcon(context),
|
||||
const VSpace(20),
|
||||
_renderCurrentOpenaiKey(context),
|
||||
const Spacer(),
|
||||
_renderLogoutButton(context),
|
||||
const VSpace(20),
|
||||
],
|
||||
),
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
Widget _renderUserNameInput(BuildContext context) {
|
||||
final String name = context.read<SettingsUserViewBloc>().state.userProfile.name;
|
||||
final String name =
|
||||
context.read<SettingsUserViewBloc>().state.userProfile.name;
|
||||
return UserNameInput(name);
|
||||
}
|
||||
|
||||
@ -61,6 +66,23 @@ class SettingsUserView extends StatelessWidget {
|
||||
context.read<SettingsUserViewBloc>().state.userProfile.openaiKey;
|
||||
return _OpenaiKeyInput(openAIKey);
|
||||
}
|
||||
|
||||
Widget _renderLogoutButton(BuildContext context) {
|
||||
return FlowyButton(
|
||||
useIntrinsicWidth: true,
|
||||
text: const FlowyText(
|
||||
'Logout',
|
||||
),
|
||||
onTap: () async {
|
||||
await getIt<AuthService>().signOut(authType: AuthTypePB.Supabase);
|
||||
await getIt<AuthService>().signOut(authType: AuthTypePB.Local);
|
||||
await FlowyRunner.run(
|
||||
FlowyApp(),
|
||||
integrationEnv(),
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@visibleForTesting
|
||||
|
@@ -1,14 +1,15 @@
import 'package:json_annotation/json_annotation.dart';

part 'env_serde.l.dart';
// Run `dart run build_runner build` to generate the json serialization
// the file `env_serde.g.dart` will be generated in the same directory. Rename
// the file to `env_serde.i.dart` because the file is ignored by default.
part 'env_serde.i.dart';

@JsonSerializable()
class AppFlowyEnv {
  final SupabaseConfiguration supabase_config;
  final SupabaseDBConfig supabase_db_config;

  AppFlowyEnv(
      {required this.supabase_config, required this.supabase_db_config});
  AppFlowyEnv({required this.supabase_config});

  factory AppFlowyEnv.fromJson(Map<String, dynamic> json) =>
      _$AppFlowyEnvFromJson(json);
@@ -21,9 +22,14 @@ class SupabaseConfiguration {
  final String url;
  final String key;
  final String jwt_secret;
  final PostgresConfiguration postgres_config;

  SupabaseConfiguration(
      {required this.url, required this.key, required this.jwt_secret});
  SupabaseConfiguration({
    required this.url,
    required this.key,
    required this.jwt_secret,
    required this.postgres_config,
  });

  factory SupabaseConfiguration.fromJson(Map<String, dynamic> json) =>
      _$SupabaseConfigurationFromJson(json);
@@ -32,33 +38,21 @@ class SupabaseConfiguration {
}

@JsonSerializable()
class SupabaseDBConfig {
class PostgresConfiguration {
  final String url;
  final String key;
  final String jwt_secret;
  final CollabTableConfig collab_table_config;
  final String user_name;
  final String password;
  final int port;

  SupabaseDBConfig(
      {required this.url,
      required this.key,
      required this.jwt_secret,
      required this.collab_table_config});
  PostgresConfiguration({
    required this.url,
    required this.user_name,
    required this.password,
    required this.port,
  });

  factory SupabaseDBConfig.fromJson(Map<String, dynamic> json) =>
      _$SupabaseDBConfigFromJson(json);
  factory PostgresConfiguration.fromJson(Map<String, dynamic> json) =>
      _$PostgresConfigurationFromJson(json);

  Map<String, dynamic> toJson() => _$SupabaseDBConfigToJson(this);
}

@JsonSerializable()
class CollabTableConfig {
  final String table_name;
  final bool enable;

  CollabTableConfig({required this.table_name, required this.enable});

  factory CollabTableConfig.fromJson(Map<String, dynamic> json) =>
      _$CollabTableConfigFromJson(json);

  Map<String, dynamic> toJson() => _$CollabTableConfigToJson(this);
  Map<String, dynamic> toJson() => _$PostgresConfigurationToJson(this);
}
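Put together, the JSON these classes serialize to (and that the Rust AppFlowyEnv later deserializes with serde_json) is roughly the following sketch; the field names come from the classes above, and the values are placeholders.

{
  "supabase_config": {
    "url": "https://your-project.supabase.co",
    "key": "<service role key>",
    "jwt_secret": "<jwt secret>",
    "postgres_config": {
      "url": "db.your-project.supabase.co",
      "user_name": "postgres",
      "password": "<database password>",
      "port": 5432
    }
  }
}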
@ -9,14 +9,11 @@ part of 'env_serde.dart';
|
||||
AppFlowyEnv _$AppFlowyEnvFromJson(Map<String, dynamic> json) => AppFlowyEnv(
|
||||
supabase_config: SupabaseConfiguration.fromJson(
|
||||
json['supabase_config'] as Map<String, dynamic>),
|
||||
supabase_db_config: SupabaseDBConfig.fromJson(
|
||||
json['supabase_db_config'] as Map<String, dynamic>),
|
||||
);
|
||||
|
||||
Map<String, dynamic> _$AppFlowyEnvToJson(AppFlowyEnv instance) =>
|
||||
<String, dynamic>{
|
||||
'supabase_config': instance.supabase_config,
|
||||
'supabase_db_config': instance.supabase_db_config,
|
||||
};
|
||||
|
||||
SupabaseConfiguration _$SupabaseConfigurationFromJson(
|
||||
@ -25,6 +22,8 @@ SupabaseConfiguration _$SupabaseConfigurationFromJson(
|
||||
url: json['url'] as String,
|
||||
key: json['key'] as String,
|
||||
jwt_secret: json['jwt_secret'] as String,
|
||||
postgres_config: PostgresConfiguration.fromJson(
|
||||
json['postgres_config'] as Map<String, dynamic>),
|
||||
);
|
||||
|
||||
Map<String, dynamic> _$SupabaseConfigurationToJson(
|
||||
@ -33,33 +32,23 @@ Map<String, dynamic> _$SupabaseConfigurationToJson(
|
||||
'url': instance.url,
|
||||
'key': instance.key,
|
||||
'jwt_secret': instance.jwt_secret,
|
||||
'postgres_config': instance.postgres_config,
|
||||
};
|
||||
|
||||
SupabaseDBConfig _$SupabaseDBConfigFromJson(Map<String, dynamic> json) =>
|
||||
SupabaseDBConfig(
|
||||
PostgresConfiguration _$PostgresConfigurationFromJson(
|
||||
Map<String, dynamic> json) =>
|
||||
PostgresConfiguration(
|
||||
url: json['url'] as String,
|
||||
key: json['key'] as String,
|
||||
jwt_secret: json['jwt_secret'] as String,
|
||||
collab_table_config: CollabTableConfig.fromJson(
|
||||
json['collab_table_config'] as Map<String, dynamic>),
|
||||
user_name: json['user_name'] as String,
|
||||
password: json['password'] as String,
|
||||
port: json['port'] as int,
|
||||
);
|
||||
|
||||
Map<String, dynamic> _$SupabaseDBConfigToJson(SupabaseDBConfig instance) =>
|
||||
Map<String, dynamic> _$PostgresConfigurationToJson(
|
||||
PostgresConfiguration instance) =>
|
||||
<String, dynamic>{
|
||||
'url': instance.url,
|
||||
'key': instance.key,
|
||||
'jwt_secret': instance.jwt_secret,
|
||||
'collab_table_config': instance.collab_table_config,
|
||||
};
|
||||
|
||||
CollabTableConfig _$CollabTableConfigFromJson(Map<String, dynamic> json) =>
|
||||
CollabTableConfig(
|
||||
table_name: json['table_name'] as String,
|
||||
enable: json['enable'] as bool,
|
||||
);
|
||||
|
||||
Map<String, dynamic> _$CollabTableConfigToJson(CollabTableConfig instance) =>
|
||||
<String, dynamic>{
|
||||
'table_name': instance.table_name,
|
||||
'enable': instance.enable,
|
||||
'user_name': instance.user_name,
|
||||
'password': instance.password,
|
||||
'port': instance.port,
|
||||
};
|
@ -576,10 +576,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: functions_client
|
||||
sha256: "578537de508c62c2875a6fdaa5dc71033283551ac7a32b8b8ef405c6c5823273"
|
||||
sha256: "3b157b4d3ae9e38614fd80fab76d1ef1e0e39ff3412a45de2651f27cecb9d2d2"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.3.0"
|
||||
version: "1.3.2"
|
||||
get_it:
|
||||
dependency: "direct main"
|
||||
description:
|
||||
@ -608,10 +608,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: gotrue
|
||||
sha256: "3306606658484a05fc885aea15f9fa65bcc28194f35ef294de3a34d01393b928"
|
||||
sha256: "214d5050a68ce68a55da1a6d9d7a2e07e039b359f99f1a17ec685320c9101aa6"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.8.0"
|
||||
version: "1.8.4"
|
||||
graphs:
|
||||
dependency: transitive
|
||||
description:
|
||||
@ -1029,10 +1029,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: postgrest
|
||||
sha256: "42abd4bf3322af3eb0d286ca2fca7cc28baae52b805761dfa7ab0d206ee072a3"
|
||||
sha256: "78fd180ecd2274df7b04c406746495b5c627248856458f8f537bf5348de9c817"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.3.0"
|
||||
version: "1.3.2"
|
||||
process:
|
||||
dependency: transitive
|
||||
description:
|
||||
@ -1077,10 +1077,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: realtime_client
|
||||
sha256: "13f6a62244bca7562b47658e3f92e5eeeb79a46d58ad4a97ad536e4ba5e97086"
|
||||
sha256: "0342f73f42345f3547e3cdcc804a0ed108fcd9142d1537d159aead94a213e248"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.1.0"
|
||||
version: "1.1.1"
|
||||
reorderables:
|
||||
dependency: "direct main"
|
||||
description:
|
||||
@ -1362,10 +1362,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: storage_client
|
||||
sha256: e14434a4cc16b01f2e96f3c646e43fb0bb16624b279a65a34da889cffe4b083c
|
||||
sha256: a3024569213b064587d616827747b766f9bc796e80cec99bd5ffb597b8aeb018
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.4.0"
|
||||
version: "1.5.1"
|
||||
stream_channel:
|
||||
dependency: transitive
|
||||
description:
|
||||
@ -1402,18 +1402,18 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: supabase
|
||||
sha256: "8f89e406d1c0101409a9c5d5560ed391d6d3636d2e077336905f3eee18622073"
|
||||
sha256: "5f5e47fcac99a496e15274d5f6944e1323519df9f8929b4ab9eef8711abeb5f3"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.9.0"
|
||||
version: "1.9.4"
|
||||
supabase_flutter:
|
||||
dependency: "direct main"
|
||||
description:
|
||||
name: supabase_flutter
|
||||
sha256: "809c67c296d4a0690fdc8e5f952a5e18b3ebd145867f1cb3f8f80248b22a56ae"
|
||||
sha256: "1ebe89b83b992123d40dcf5aa88b87d6c2d0a3c62052380cfc94de2337aac469"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.10.0"
|
||||
version: "1.10.4"
|
||||
sync_http:
|
||||
dependency: transitive
|
||||
description:
|
||||
@ -1762,10 +1762,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: yet_another_json_isolate
|
||||
sha256: "7809f6517bafd0a7b3d0be63cd5f952ae5c030d682250e8aa9ed7002eaac5ff8"
|
||||
sha256: "86fad76026c4241a32831d6c7febd8f9bded5019e2cd36c5b148499808d8307d"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.1.0"
|
||||
version: "1.1.1"
|
||||
sdks:
|
||||
dart: ">=3.0.0 <4.0.0"
|
||||
flutter: ">=3.10.1"
|
||||
|
@@ -99,7 +99,7 @@ dependencies:
  archive: ^3.3.7
  flutter_svg: ^2.0.6
  nanoid: ^1.0.0
  supabase_flutter: ^1.10.0
  supabase_flutter: ^1.10.4
  envied: ^0.3.0+3
  dotted_border: ^2.0.0+3
@ -34,12 +34,12 @@ default = ["custom-protocol"]
|
||||
custom-protocol = ["tauri/custom-protocol"]
|
||||
|
||||
[patch.crates-io]
|
||||
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab-persistence = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
appflowy-integrate = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
collab-persistence = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
appflowy-integrate = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
|
||||
#collab = { path = "../../AppFlowy-Collab/collab" }
|
||||
#collab-folder = { path = "../../AppFlowy-Collab/collab-folder" }
|
||||
|
frontend/rust-lib/.gitignore (vendored): 3 changes
@@ -14,4 +14,5 @@ bin/
**/resources/proto
.idea/
AppFlowy-Collab/
.env
.env
.env.**
frontend/rust-lib/Cargo.lock (generated): 316 changes
@ -78,14 +78,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "anyhow"
|
||||
version = "1.0.70"
|
||||
version = "1.0.71"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
|
||||
checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
|
||||
|
||||
[[package]]
|
||||
name = "appflowy-integrate"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"collab",
|
||||
@ -112,6 +111,16 @@ version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
|
||||
|
||||
[[package]]
|
||||
name = "assert-json-diff"
|
||||
version = "2.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-stream"
|
||||
version = "0.3.5"
|
||||
@ -887,7 +896,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
@ -905,7 +913,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab-client-ws"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"collab-sync",
|
||||
@ -923,7 +930,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab-database"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@ -942,6 +948,7 @@ dependencies = [
|
||||
"serde_repr",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tracing",
|
||||
"uuid",
|
||||
]
|
||||
@ -949,7 +956,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab-derive"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -961,7 +967,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab-document"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"collab",
|
||||
@ -973,13 +978,13 @@ dependencies = [
|
||||
"serde_json",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "collab-folder"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"chrono",
|
||||
@ -999,7 +1004,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab-persistence"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"bincode",
|
||||
"chrono",
|
||||
@ -1019,7 +1023,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab-plugins"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@ -1035,13 +1038,16 @@ dependencies = [
|
||||
"parking_lot 0.12.1",
|
||||
"postgrest",
|
||||
"rand 0.8.5",
|
||||
"refinery",
|
||||
"rusoto_credential",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"similar 2.2.1",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-retry",
|
||||
"tokio-stream",
|
||||
"tracing",
|
||||
"y-sync",
|
||||
"yrs",
|
||||
@ -1050,7 +1056,6 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "collab-sync"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d1882d#d1882d6784a8863419727be92c29923cd175fd50"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"collab",
|
||||
@ -1323,6 +1328,40 @@ dependencies = [
|
||||
"parking_lot_core 0.9.7",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deadpool"
|
||||
version = "0.9.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"deadpool-runtime",
|
||||
"num_cpus",
|
||||
"retain_mut",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deadpool-postgres"
|
||||
version = "0.10.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "836a24a9d49deefe610b8b60c767a7412e9a931d79a89415cd2d2d71630ca8d7"
|
||||
dependencies = [
|
||||
"deadpool",
|
||||
"log",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deadpool-runtime"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eaa37046cc0f6c3cc6090fbdbf73ef0b8ef4cfcc37f6befc0020f63e8cf121e1"
|
||||
dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derivative"
|
||||
version = "2.2.0"
|
||||
@ -1456,6 +1495,12 @@ dependencies = [
|
||||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "equivalent"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1"
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.1"
|
||||
@ -1506,6 +1551,12 @@ dependencies = [
|
||||
"rand 0.8.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fallible-iterator"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
|
||||
|
||||
[[package]]
|
||||
name = "fancy-regex"
|
||||
version = "0.10.0"
|
||||
@ -1574,7 +1625,7 @@ dependencies = [
|
||||
"similar 1.3.0",
|
||||
"syn 1.0.109",
|
||||
"tera",
|
||||
"toml",
|
||||
"toml 0.5.11",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
@ -1649,7 +1700,7 @@ dependencies = [
|
||||
"flowy-task",
|
||||
"flowy-test",
|
||||
"futures",
|
||||
"indexmap",
|
||||
"indexmap 1.9.3",
|
||||
"lazy_static",
|
||||
"lib-dispatch",
|
||||
"lib-infra",
|
||||
@ -1697,8 +1748,10 @@ dependencies = [
|
||||
"flowy-derive",
|
||||
"flowy-error",
|
||||
"flowy-notification",
|
||||
"indexmap",
|
||||
"futures",
|
||||
"indexmap 1.9.3",
|
||||
"lib-dispatch",
|
||||
"lib-infra",
|
||||
"nanoid",
|
||||
"parking_lot 0.12.1",
|
||||
"protobuf",
|
||||
@ -1708,6 +1761,7 @@ dependencies = [
|
||||
"strum_macros",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tracing",
|
||||
"tracing-subscriber 0.3.16",
|
||||
"uuid",
|
||||
@ -1800,13 +1854,19 @@ name = "flowy-server"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"appflowy-integrate",
|
||||
"async-stream",
|
||||
"bytes",
|
||||
"chrono",
|
||||
"config",
|
||||
"deadpool-postgres",
|
||||
"dotenv",
|
||||
"flowy-database2",
|
||||
"flowy-document2",
|
||||
"flowy-error",
|
||||
"flowy-folder2",
|
||||
"flowy-user",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"hyper",
|
||||
"lazy_static",
|
||||
@ -1814,14 +1874,17 @@ dependencies = [
|
||||
"nanoid",
|
||||
"parking_lot 0.12.1",
|
||||
"postgrest",
|
||||
"refinery",
|
||||
"reqwest",
|
||||
"serde",
|
||||
"serde-aux",
|
||||
"serde_json",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"tokio-retry",
|
||||
"tracing",
|
||||
"tracing-subscriber 0.3.16",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
@ -1864,7 +1927,13 @@ dependencies = [
|
||||
name = "flowy-test"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert-json-diff",
|
||||
"bytes",
|
||||
"collab",
|
||||
"collab-database",
|
||||
"collab-document",
|
||||
"collab-folder",
|
||||
"dotenv",
|
||||
"flowy-core",
|
||||
"flowy-database2",
|
||||
@ -2164,7 +2233,7 @@ dependencies = [
|
||||
"futures-sink",
|
||||
"futures-util",
|
||||
"http",
|
||||
"indexmap",
|
||||
"indexmap 1.9.3",
|
||||
"slab",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@ -2189,6 +2258,12 @@ dependencies = [
|
||||
"ahash 0.8.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
|
||||
|
||||
[[package]]
|
||||
name = "hdrhistogram"
|
||||
version = "7.5.2"
|
||||
@ -2317,7 +2392,7 @@ dependencies = [
|
||||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"socket2",
|
||||
"socket2 0.4.9",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
@ -2437,6 +2512,16 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
|
||||
dependencies = [
|
||||
"equivalent",
|
||||
"hashbrown 0.14.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "indextree"
|
||||
version = "4.6.0"
|
||||
@ -2588,7 +2673,7 @@ name = "lib-ot"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"indexmap",
|
||||
"indexmap 1.9.3",
|
||||
"indextree",
|
||||
"lazy_static",
|
||||
"log",
|
||||
@ -2774,6 +2859,15 @@ version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"
|
||||
|
||||
[[package]]
|
||||
name = "md-5"
|
||||
version = "0.10.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca"
|
||||
dependencies = [
|
||||
"digest 0.10.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "md5"
|
||||
version = "0.7.0"
|
||||
@ -3305,6 +3399,37 @@ version = "0.3.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
|
||||
|
||||
[[package]]
|
||||
name = "postgres-protocol"
|
||||
version = "0.6.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d"
|
||||
dependencies = [
|
||||
"base64 0.21.0",
|
||||
"byteorder",
|
||||
"bytes",
|
||||
"fallible-iterator",
|
||||
"hmac",
|
||||
"md-5",
|
||||
"memchr",
|
||||
"rand 0.8.5",
|
||||
"sha2",
|
||||
"stringprep",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "postgres-types"
|
||||
version = "0.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"chrono",
|
||||
"fallible-iterator",
|
||||
"postgres-protocol",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "postgrest"
|
||||
version = "1.5.0"
|
||||
@ -3336,7 +3461,7 @@ version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785"
|
||||
dependencies = [
|
||||
"toml",
|
||||
"toml 0.5.11",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -3734,6 +3859,51 @@ dependencies = [
|
||||
"thiserror",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "refinery"
|
||||
version = "0.8.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cdb0436d0dd7bd8d4fce1e828751fa79742b08e35f27cfea7546f8a322b5ef24"
|
||||
dependencies = [
|
||||
"refinery-core",
|
||||
"refinery-macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "refinery-core"
|
||||
version = "0.8.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "19206547cd047e8f4dfa6b20c30d3ecaf24be05841b6aa0aa926a47a3d0662bb"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"cfg-if",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"regex",
|
||||
"serde",
|
||||
"siphasher",
|
||||
"thiserror",
|
||||
"time 0.3.21",
|
||||
"tokio",
|
||||
"tokio-postgres",
|
||||
"toml 0.7.5",
|
||||
"url",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "refinery-macros"
|
||||
version = "0.8.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d94d4b9241859ba19eaa5c04c86e782eb3aa0aae2c5868e0cfa90c856e58a174"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"refinery-core",
|
||||
"regex",
|
||||
"syn 2.0.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.7.3"
|
||||
@ -3820,6 +3990,12 @@ dependencies = [
|
||||
"winreg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "retain_mut"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0"
|
||||
|
||||
[[package]]
|
||||
name = "ring"
|
||||
version = "0.16.20"
|
||||
@ -4143,6 +4319,15 @@ dependencies = [
|
||||
"syn 2.0.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_spanned"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_urlencoded"
|
||||
version = "0.7.1"
|
||||
@ -4303,6 +4488,16 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spin"
|
||||
version = "0.5.2"
|
||||
@ -4315,6 +4510,16 @@ version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
|
||||
|
||||
[[package]]
|
||||
name = "stringprep"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1"
|
||||
dependencies = [
|
||||
"unicode-bidi",
|
||||
"unicode-normalization",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strum"
|
||||
version = "0.21.0"
|
||||
@ -4489,6 +4694,7 @@ version = "0.3.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"serde",
|
||||
"time-core",
|
||||
"time-macros",
|
||||
@ -4538,7 +4744,7 @@ dependencies = [
|
||||
"parking_lot 0.12.1",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"socket2",
|
||||
"socket2 0.4.9",
|
||||
"tokio-macros",
|
||||
"tracing",
|
||||
"windows-sys 0.45.0",
|
||||
@ -4575,6 +4781,30 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-postgres"
|
||||
version = "0.7.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"byteorder",
|
||||
"bytes",
|
||||
"fallible-iterator",
|
||||
"futures-channel",
|
||||
"futures-util",
|
||||
"log",
|
||||
"parking_lot 0.12.1",
|
||||
"percent-encoding",
|
||||
"phf 0.11.1",
|
||||
"pin-project-lite",
|
||||
"postgres-protocol",
|
||||
"postgres-types",
|
||||
"socket2 0.5.3",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-retry"
|
||||
version = "0.3.0"
|
||||
@ -4657,6 +4887,40 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.7.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"toml_edit",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.19.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7"
|
||||
dependencies = [
|
||||
"indexmap 2.0.0",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.8.3"
|
||||
@ -4697,7 +4961,7 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"indexmap",
|
||||
"indexmap 1.9.3",
|
||||
"pin-project",
|
||||
"pin-project-lite",
|
||||
"rand 0.8.5",
|
||||
@ -5038,6 +5302,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2"
|
||||
dependencies = [
|
||||
"getrandom 0.2.9",
|
||||
"serde",
|
||||
"sha1_smol",
|
||||
]
|
||||
|
||||
@ -5411,6 +5676,15 @@ version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
|
||||
|
||||
[[package]]
|
||||
name = "winnow"
|
||||
version = "0.4.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winreg"
|
||||
version = "0.10.1"
|
||||
|
@ -33,11 +33,11 @@ opt-level = 3
|
||||
incremental = false
|
||||
|
||||
[patch.crates-io]
|
||||
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
appflowy-integrate = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d1882d" }
|
||||
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
appflowy-integrate = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "2134c0" }
|
||||
|
||||
#collab = { path = "../AppFlowy-Collab/collab" }
|
||||
#collab-folder = { path = "../AppFlowy-Collab/collab-folder" }
|
||||
|
@@ -1,11 +1,10 @@
use appflowy_integrate::SupabaseDBConfig;
use flowy_server::supabase::SupabaseConfiguration;
use serde::Deserialize;

use flowy_server::supabase::SupabaseConfiguration;

#[derive(Deserialize, Debug)]
pub struct AppFlowyEnv {
  supabase_config: SupabaseConfiguration,
  supabase_db_config: SupabaseDBConfig,
}

impl AppFlowyEnv {
@@ -13,7 +12,6 @@ impl AppFlowyEnv {
    if let Ok(env) = serde_json::from_str::<AppFlowyEnv>(env_str) {
      tracing::trace!("{:?}", env);
      env.supabase_config.write_env();
      env.supabase_db_config.write_env();
    }
  }
}
@@ -1,8 +1,8 @@
use appflowy_integrate::config::AWSDynamoDBConfig;
use appflowy_integrate::{CollabTableConfig, SupabaseDBConfig};

use flowy_derive::ProtoBuf;
use flowy_error::FlowyError;
use flowy_server::supabase::SupabaseConfiguration;
use flowy_server::supabase::{PostgresConfiguration, SupabaseConfiguration};

#[derive(Default, ProtoBuf)]
pub struct KeyValuePB {
@@ -32,16 +32,21 @@ pub struct SupabaseConfigPB {

  #[pb(index = 4)]
  jwt_secret: String,

  #[pb(index = 5)]
  pub postgres_config: PostgresConfigurationPB,
}

impl TryFrom<SupabaseConfigPB> for SupabaseConfiguration {
  type Error = FlowyError;

  fn try_from(value: SupabaseConfigPB) -> Result<Self, Self::Error> {
    Ok(Self {
      url: value.supabase_url,
      key: value.key,
      jwt_secret: value.jwt_secret,
  fn try_from(config: SupabaseConfigPB) -> Result<Self, Self::Error> {
    let postgres_config = PostgresConfiguration::try_from(config.postgres_config)?;
    Ok(SupabaseConfiguration {
      url: config.supabase_url,
      key: config.key,
      jwt_secret: config.jwt_secret,
      postgres_config,
    })
  }
}
@@ -50,9 +55,6 @@ impl TryFrom<SupabaseConfigPB> for SupabaseConfiguration {
pub struct CollabPluginConfigPB {
  #[pb(index = 1, one_of)]
  pub aws_config: Option<AWSDynamoDBConfigPB>,

  #[pb(index = 2, one_of)]
  pub supabase_config: Option<SupabaseDBConfigPB>,
}

#[derive(Default, ProtoBuf)]
@@ -81,50 +83,29 @@ impl TryFrom<AWSDynamoDBConfigPB> for AWSDynamoDBConfig {
}

#[derive(Default, ProtoBuf)]
pub struct SupabaseDBConfigPB {
pub struct PostgresConfigurationPB {
  #[pb(index = 1)]
  pub supabase_url: String,
  pub url: String,

  #[pb(index = 2)]
  pub key: String,
  pub user_name: String,

  #[pb(index = 3)]
  pub jwt_secret: String,
  pub password: String,

  #[pb(index = 4)]
  pub collab_table_config: CollabTableConfigPB,
  pub port: u32,
}

impl TryFrom<SupabaseDBConfigPB> for SupabaseDBConfig {
impl TryFrom<PostgresConfigurationPB> for PostgresConfiguration {
  type Error = FlowyError;

  fn try_from(config: SupabaseDBConfigPB) -> Result<Self, Self::Error> {
    let update_table_config = CollabTableConfig::try_from(config.collab_table_config)?;
    Ok(SupabaseDBConfig {
      url: config.supabase_url,
      key: config.key,
      jwt_secret: config.jwt_secret,
      collab_table_config: update_table_config,
    })
  }
}

#[derive(Default, ProtoBuf)]
pub struct CollabTableConfigPB {
  #[pb(index = 1)]
  pub table_name: String,
}

impl TryFrom<CollabTableConfigPB> for CollabTableConfig {
  type Error = FlowyError;

  fn try_from(config: CollabTableConfigPB) -> Result<Self, Self::Error> {
    if config.table_name.is_empty() {
      return Err(FlowyError::internal().context("table_name is empty"));
    }
    Ok(CollabTableConfig {
      table_name: config.table_name,
      enable: true,
  fn try_from(config: PostgresConfigurationPB) -> Result<Self, Self::Error> {
    Ok(Self {
      url: config.url,
      user_name: config.user_name,
      password: config.password,
      port: config.port as u16,
    })
  }
}
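The conversion above targets flowy_server::supabase::PostgresConfiguration, whose definition is not part of this diff. Inferred from the TryFrom impl, it presumably looks roughly like the sketch below; the String field types are assumptions, while the cast implies port is a u16.

// Hypothetical sketch, not taken from the commit.
pub struct PostgresConfiguration {
  pub url: String,
  pub user_name: String,
  pub password: String,
  pub port: u16,
}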
@ -1,5 +1,5 @@
|
||||
use appflowy_integrate::config::AWSDynamoDBConfig;
|
||||
use appflowy_integrate::SupabaseDBConfig;
|
||||
|
||||
use flowy_error::{FlowyError, FlowyResult};
|
||||
use flowy_server::supabase::SupabaseConfiguration;
|
||||
use flowy_sqlite::kv::KV;
|
||||
@ -52,10 +52,5 @@ pub(crate) async fn set_collab_plugin_config_handler(
|
||||
aws_config.write_env();
|
||||
}
|
||||
}
|
||||
if let Some(supabase_config_pb) = config.supabase_config {
|
||||
if let Ok(supabase_config) = SupabaseDBConfig::try_from(supabase_config_pb) {
|
||||
supabase_config.write_env();
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
use std::sync::Arc;
|
||||
use std::sync::Weak;
|
||||
|
||||
use appflowy_integrate::{
|
||||
calculate_snapshot_diff, CollabSnapshot, PersistenceError, SnapshotPersistence,
|
||||
@ -14,19 +14,21 @@ use flowy_sqlite::{
|
||||
use flowy_user::services::UserSession;
|
||||
use lib_infra::util::timestamp;
|
||||
|
||||
pub struct SnapshotDBImpl(pub Arc<UserSession>);
|
||||
pub struct SnapshotDBImpl(pub Weak<UserSession>);
|
||||
|
||||
impl SnapshotPersistence for SnapshotDBImpl {
|
||||
fn get_snapshots(&self, _uid: i64, object_id: &str) -> Vec<CollabSnapshot> {
|
||||
self
|
||||
.0
|
||||
.db_pool()
|
||||
.and_then(|pool| Ok(pool.get()?))
|
||||
.and_then(|conn| {
|
||||
CollabSnapshotTableSql::get_all_snapshots(object_id, &conn)
|
||||
.map(|rows| rows.into_iter().map(|row| row.into()).collect())
|
||||
})
|
||||
.unwrap_or_else(|_| vec![])
|
||||
match self.0.upgrade() {
|
||||
None => vec![],
|
||||
Some(user_session) => user_session
|
||||
.db_pool()
|
||||
.and_then(|pool| Ok(pool.get()?))
|
||||
.and_then(|conn| {
|
||||
CollabSnapshotTableSql::get_all_snapshots(object_id, &conn)
|
||||
.map(|rows| rows.into_iter().map(|row| row.into()).collect())
|
||||
})
|
||||
.unwrap_or_else(|_| vec![]),
|
||||
}
|
||||
}
|
||||
|
||||
fn create_snapshot(
|
||||
@ -34,19 +36,15 @@ impl SnapshotPersistence for SnapshotDBImpl {
|
||||
uid: i64,
|
||||
object_id: &str,
|
||||
title: String,
|
||||
collab_type: String,
|
||||
snapshot_data: Vec<u8>,
|
||||
) -> Result<(), PersistenceError> {
|
||||
let object_id = object_id.to_string();
|
||||
let weak_pool = Arc::downgrade(
|
||||
&self
|
||||
.0
|
||||
.db_pool()
|
||||
.map_err(|e| PersistenceError::Internal(Box::new(e)))?,
|
||||
);
|
||||
|
||||
let weak_user_session = self.0.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
if let Some(pool) = weak_pool.upgrade() {
|
||||
if let Some(pool) = weak_user_session
|
||||
.upgrade()
|
||||
.and_then(|user_session| user_session.db_pool().ok())
|
||||
{
|
||||
let conn = pool
|
||||
.get()
|
||||
.map_err(|e| PersistenceError::Internal(Box::new(e)))?;
|
||||
@ -66,7 +64,7 @@ impl SnapshotPersistence for SnapshotDBImpl {
|
||||
object_id: object_id.clone(),
|
||||
title,
|
||||
desc,
|
||||
collab_type,
|
||||
collab_type: "".to_string(),
|
||||
timestamp: timestamp(),
|
||||
data: snapshot_data,
|
||||
},
|
||||
@ -75,7 +73,7 @@ impl SnapshotPersistence for SnapshotDBImpl {
|
||||
.map_err(|e| PersistenceError::Internal(Box::new(e)));
|
||||
|
||||
if let Err(e) = result {
|
||||
tracing::error!("create snapshot error: {:?}", e);
|
||||
tracing::warn!("create snapshot error: {:?}", e);
|
||||
}
|
||||
}
|
||||
Ok::<(), PersistenceError>(())
|
||||
|
@ -1,10 +1,11 @@
use std::sync::Arc;
use std::sync::{Arc, Weak};

use appflowy_integrate::collab_builder::AppFlowyCollabBuilder;
use appflowy_integrate::RocksCollabDB;
use tokio::sync::RwLock;

use flowy_database2::{DatabaseManager2, DatabaseUser2};
use flowy_database2::deps::{DatabaseCloudService, DatabaseUser2};
use flowy_database2::DatabaseManager2;
use flowy_error::FlowyError;
use flowy_task::TaskDispatcher;
use flowy_user::services::UserSession;
@ -13,32 +14,44 @@ pub struct Database2DepsResolver();

impl Database2DepsResolver {
  pub async fn resolve(
    user_session: Arc<UserSession>,
    user_session: Weak<UserSession>,
    task_scheduler: Arc<RwLock<TaskDispatcher>>,
    collab_builder: Arc<AppFlowyCollabBuilder>,
    cloud_service: Arc<dyn DatabaseCloudService>,
  ) -> Arc<DatabaseManager2> {
    let user = Arc::new(DatabaseUserImpl(user_session));
    Arc::new(DatabaseManager2::new(user, task_scheduler, collab_builder))
    Arc::new(DatabaseManager2::new(
      user,
      task_scheduler,
      collab_builder,
      cloud_service,
    ))
  }
}

struct DatabaseUserImpl(Arc<UserSession>);
struct DatabaseUserImpl(Weak<UserSession>);
impl DatabaseUser2 for DatabaseUserImpl {
  fn user_id(&self) -> Result<i64, FlowyError> {
    self
      .0
      .upgrade()
      .ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
      .user_id()
      .map_err(|e| FlowyError::internal().context(e))
  }

  fn token(&self) -> Result<Option<String>, FlowyError> {
    self
      .0
      .upgrade()
      .ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
      .token()
      .map_err(|e| FlowyError::internal().context(e))
  }

  fn collab_db(&self) -> Result<Arc<RocksCollabDB>, FlowyError> {
    self.0.get_collab_db()
    self
      .0
      .upgrade()
      .ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
      .get_collab_db()
  }
}
@ -1,42 +1,54 @@
use std::sync::Arc;
use std::sync::{Arc, Weak};

use appflowy_integrate::collab_builder::AppFlowyCollabBuilder;
use appflowy_integrate::RocksCollabDB;

use flowy_database2::DatabaseManager2;
use flowy_document2::manager::{DocumentManager, DocumentUser};
use flowy_document2::deps::{DocumentCloudService, DocumentUser};
use flowy_document2::manager::DocumentManager;
use flowy_error::FlowyError;
use flowy_user::services::UserSession;

pub struct Document2DepsResolver();
impl Document2DepsResolver {
  pub fn resolve(
    user_session: Arc<UserSession>,
    user_session: Weak<UserSession>,
    _database_manager: &Arc<DatabaseManager2>,
    collab_builder: Arc<AppFlowyCollabBuilder>,
    cloud_service: Arc<dyn DocumentCloudService>,
  ) -> Arc<DocumentManager> {
    let user: Arc<dyn DocumentUser> = Arc::new(DocumentUserImpl(user_session));
    Arc::new(DocumentManager::new(user.clone(), collab_builder))
    Arc::new(DocumentManager::new(
      user.clone(),
      collab_builder,
      cloud_service,
    ))
  }
}

struct DocumentUserImpl(Arc<UserSession>);
struct DocumentUserImpl(Weak<UserSession>);
impl DocumentUser for DocumentUserImpl {
  fn user_id(&self) -> Result<i64, FlowyError> {
    self
      .0
      .upgrade()
      .ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
      .user_id()
      .map_err(|e| FlowyError::internal().context(e))
  }

  fn token(&self) -> Result<Option<String>, FlowyError> {
    self
      .0
      .upgrade()
      .ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
      .token()
      .map_err(|e| FlowyError::internal().context(e))
  }

  fn collab_db(&self) -> Result<Arc<RocksCollabDB>, FlowyError> {
    self.0.get_collab_db()
    self
      .0
      .upgrade()
      .ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
      .get_collab_db()
  }
}
@ -1,6 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryFrom;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use appflowy_integrate::collab_builder::AppFlowyCollabBuilder;
|
||||
use appflowy_integrate::RocksCollabDB;
|
||||
@ -17,7 +17,7 @@ use flowy_document2::parser::json::parser::JsonToDocumentParser;
|
||||
use flowy_error::FlowyError;
|
||||
use flowy_folder2::deps::{FolderCloudService, FolderUser};
|
||||
use flowy_folder2::entities::ViewLayoutPB;
|
||||
use flowy_folder2::manager::Folder2Manager;
|
||||
use flowy_folder2::manager::FolderManager;
|
||||
use flowy_folder2::share::ImportType;
|
||||
use flowy_folder2::view_operation::{
|
||||
FolderOperationHandler, FolderOperationHandlers, View, WorkspaceViewBuilder,
|
||||
@ -30,17 +30,17 @@ use lib_infra::future::FutureResult;
|
||||
pub struct Folder2DepsResolver();
|
||||
impl Folder2DepsResolver {
|
||||
pub async fn resolve(
|
||||
user_session: Arc<UserSession>,
|
||||
user_session: Weak<UserSession>,
|
||||
document_manager: &Arc<DocumentManager>,
|
||||
database_manager: &Arc<DatabaseManager2>,
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
folder_cloud: Arc<dyn FolderCloudService>,
|
||||
) -> Arc<Folder2Manager> {
|
||||
) -> Arc<FolderManager> {
|
||||
let user: Arc<dyn FolderUser> = Arc::new(FolderUserImpl(user_session.clone()));
|
||||
|
||||
let handlers = folder_operation_handlers(document_manager.clone(), database_manager.clone());
|
||||
Arc::new(
|
||||
Folder2Manager::new(user.clone(), collab_builder, handlers, folder_cloud)
|
||||
FolderManager::new(user.clone(), collab_builder, handlers, folder_cloud)
|
||||
.await
|
||||
.unwrap(),
|
||||
)
|
||||
@ -63,24 +63,30 @@ fn folder_operation_handlers(
|
||||
Arc::new(map)
|
||||
}
|
||||
|
||||
struct FolderUserImpl(Arc<UserSession>);
|
||||
struct FolderUserImpl(Weak<UserSession>);
|
||||
impl FolderUser for FolderUserImpl {
|
||||
fn user_id(&self) -> Result<i64, FlowyError> {
|
||||
self
|
||||
.0
|
||||
.upgrade()
|
||||
.ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
|
||||
.user_id()
|
||||
.map_err(|e| FlowyError::internal().context(e))
|
||||
}
|
||||
|
||||
fn token(&self) -> Result<Option<String>, FlowyError> {
|
||||
self
|
||||
.0
|
||||
.upgrade()
|
||||
.ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
|
||||
.token()
|
||||
.map_err(|e| FlowyError::internal().context(e))
|
||||
}
|
||||
|
||||
fn collab_db(&self) -> Result<Arc<RocksCollabDB>, FlowyError> {
|
||||
self.0.get_collab_db()
|
||||
self
|
||||
.0
|
||||
.upgrade()
|
||||
.ok_or(FlowyError::internal().context("Unexpected error: UserSession is None"))?
|
||||
.get_collab_db()
|
||||
}
|
||||
}
|
||||
|
||||
@ -143,8 +149,7 @@ impl FolderOperationHandler for DocumentFolderOperation {
|
||||
let manager = self.0.clone();
|
||||
let view_id = view_id.to_string();
|
||||
FutureResult::new(async move {
|
||||
let document = manager.get_document_from_disk(&view_id)?;
|
||||
let data: DocumentDataPB = document.lock().get_document()?.into();
|
||||
let data: DocumentDataPB = manager.get_document_data(&view_id)?.into();
|
||||
let data_bytes = data.into_bytes().map_err(|_| FlowyError::invalid_data())?;
|
||||
Ok(data_bytes)
|
||||
})
|
||||
|
@ -1,11 +1,15 @@
|
||||
use lib_infra::future::FutureResult;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use appflowy_integrate::collab_builder::{CollabStorageProvider, CollabStorageType};
|
||||
use appflowy_integrate::RemoteCollabStorage;
|
||||
use parking_lot::RwLock;
|
||||
use serde_repr::*;
|
||||
|
||||
use flowy_database2::deps::{DatabaseCloudService, DatabaseSnapshot};
|
||||
use flowy_document2::deps::{DocumentCloudService, DocumentSnapshot};
|
||||
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
|
||||
use flowy_folder2::deps::{FolderCloudService, Workspace};
|
||||
use flowy_folder2::deps::{FolderCloudService, FolderSnapshot, Workspace};
|
||||
use flowy_server::local_server::LocalServer;
|
||||
use flowy_server::self_host::configuration::self_host_server_configuration;
|
||||
use flowy_server::self_host::SelfHostServer;
|
||||
@ -14,8 +18,7 @@ use flowy_server::AppFlowyServer;
|
||||
use flowy_sqlite::kv::KV;
|
||||
use flowy_user::event_map::{UserAuthService, UserCloudServiceProvider};
|
||||
use flowy_user::services::AuthType;
|
||||
|
||||
use serde_repr::*;
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
const SERVER_PROVIDER_TYPE_KEY: &str = "server_provider_type";
|
||||
|
||||
@ -115,6 +118,102 @@ impl FolderCloudService for AppFlowyServerProvider {
|
||||
let name = name.to_string();
|
||||
FutureResult::new(async move { server?.folder_service().create_workspace(uid, &name).await })
|
||||
}
|
||||
|
||||
fn get_folder_latest_snapshot(
|
||||
&self,
|
||||
workspace_id: &str,
|
||||
) -> FutureResult<Option<FolderSnapshot>, FlowyError> {
|
||||
let workspace_id = workspace_id.to_string();
|
||||
let server = self.get_provider(&self.provider_type.read());
|
||||
FutureResult::new(async move {
|
||||
server?
|
||||
.folder_service()
|
||||
.get_folder_latest_snapshot(&workspace_id)
|
||||
.await
|
||||
})
|
||||
}
|
||||
|
||||
fn get_folder_updates(&self, workspace_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
let workspace_id = workspace_id.to_string();
|
||||
let server = self.get_provider(&self.provider_type.read());
|
||||
FutureResult::new(async move {
|
||||
server?
|
||||
.folder_service()
|
||||
.get_folder_updates(&workspace_id)
|
||||
.await
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DatabaseCloudService for AppFlowyServerProvider {
|
||||
fn get_database_updates(&self, database_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
let server = self.get_provider(&self.provider_type.read());
|
||||
let database_id = database_id.to_string();
|
||||
FutureResult::new(async move {
|
||||
server?
|
||||
.database_service()
|
||||
.get_database_updates(&database_id)
|
||||
.await
|
||||
})
|
||||
}
|
||||
|
||||
fn get_database_latest_snapshot(
|
||||
&self,
|
||||
database_id: &str,
|
||||
) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
|
||||
let server = self.get_provider(&self.provider_type.read());
|
||||
let database_id = database_id.to_string();
|
||||
FutureResult::new(async move {
|
||||
server?
|
||||
.database_service()
|
||||
.get_database_latest_snapshot(&database_id)
|
||||
.await
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DocumentCloudService for AppFlowyServerProvider {
|
||||
fn get_document_updates(&self, document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
let server = self.get_provider(&self.provider_type.read());
|
||||
let document_id = document_id.to_string();
|
||||
FutureResult::new(async move {
|
||||
server?
|
||||
.document_service()
|
||||
.get_document_updates(&document_id)
|
||||
.await
|
||||
})
|
||||
}
|
||||
|
||||
fn get_document_latest_snapshot(
|
||||
&self,
|
||||
document_id: &str,
|
||||
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
|
||||
let server = self.get_provider(&self.provider_type.read());
|
||||
let document_id = document_id.to_string();
|
||||
FutureResult::new(async move {
|
||||
server?
|
||||
.document_service()
|
||||
.get_document_latest_snapshot(&document_id)
|
||||
.await
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl CollabStorageProvider for AppFlowyServerProvider {
|
||||
fn storage_type(&self) -> CollabStorageType {
|
||||
self.provider_type().into()
|
||||
}
|
||||
|
||||
fn get_storage(&self, storage_type: &CollabStorageType) -> Option<Arc<dyn RemoteCollabStorage>> {
|
||||
match storage_type {
|
||||
CollabStorageType::Local => None,
|
||||
CollabStorageType::AWS => None,
|
||||
CollabStorageType::Supabase => self
|
||||
.get_provider(&ServerProviderType::Supabase)
|
||||
.ok()
|
||||
.and_then(|provider| provider.collab_storage()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn server_from_auth_type(
|
||||
@ -137,8 +236,7 @@ fn server_from_auth_type(
|
||||
},
|
||||
ServerProviderType::Supabase => {
|
||||
let config = SupabaseConfiguration::from_env()?;
|
||||
let server = Arc::new(SupabaseServer::new(config));
|
||||
Ok(server)
|
||||
Ok(Arc::new(SupabaseServer::new(config)))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -9,14 +9,14 @@ use std::{
|
||||
},
|
||||
};
|
||||
|
||||
use appflowy_integrate::collab_builder::{AppFlowyCollabBuilder, CloudStorageType};
|
||||
use appflowy_integrate::collab_builder::{AppFlowyCollabBuilder, CollabStorageType};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::debug;
|
||||
|
||||
use flowy_database2::DatabaseManager2;
|
||||
use flowy_document2::manager::DocumentManager as DocumentManager2;
|
||||
use flowy_error::FlowyResult;
|
||||
use flowy_folder2::manager::Folder2Manager;
|
||||
use flowy_folder2::manager::FolderManager;
|
||||
use flowy_sqlite::kv::KV;
|
||||
use flowy_task::{TaskDispatcher, TaskRunner};
|
||||
use flowy_user::entities::UserProfile;
|
||||
@ -46,7 +46,7 @@ pub struct AppFlowyCoreConfig {
|
||||
/// Different `AppFlowyCoreConfig` instance should have different name
|
||||
name: String,
|
||||
/// Panics if the `root` path is not existing
|
||||
storage_path: String,
|
||||
pub storage_path: String,
|
||||
log_filter: String,
|
||||
}
|
||||
|
||||
@ -81,6 +81,7 @@ fn create_log_filter(level: String, with_crates: Vec<String>) -> String {
|
||||
.collect::<Vec<String>>();
|
||||
filters.push(format!("flowy_core={}", level));
|
||||
filters.push(format!("flowy_folder2={}", level));
|
||||
filters.push(format!("collab_sync={}", level));
|
||||
filters.push(format!("collab_folder={}", level));
|
||||
filters.push(format!("collab_persistence={}", level));
|
||||
filters.push(format!("collab_database={}", level));
|
||||
@ -90,6 +91,7 @@ fn create_log_filter(level: String, with_crates: Vec<String>) -> String {
|
||||
filters.push(format!("flowy_user={}", level));
|
||||
filters.push(format!("flowy_document2={}", level));
|
||||
filters.push(format!("flowy_database2={}", level));
|
||||
filters.push(format!("flowy_server={}", level));
|
||||
filters.push(format!("flowy_notification={}", "info"));
|
||||
filters.push(format!("lib_infra={}", level));
|
||||
filters.push(format!("flowy_task={}", level));
|
||||
@ -112,7 +114,7 @@ pub struct AppFlowyCore {
|
||||
pub config: AppFlowyCoreConfig,
|
||||
pub user_session: Arc<UserSession>,
|
||||
pub document_manager2: Arc<DocumentManager2>,
|
||||
pub folder_manager: Arc<Folder2Manager>,
|
||||
pub folder_manager: Arc<FolderManager>,
|
||||
pub database_manager: Arc<DatabaseManager2>,
|
||||
pub event_dispatcher: Arc<AFPluginDispatcher>,
|
||||
pub server_provider: Arc<AppFlowyServerProvider>,
|
||||
@ -141,64 +143,60 @@ impl AppFlowyCore {
|
||||
|
||||
let server_provider = Arc::new(AppFlowyServerProvider::new());
|
||||
|
||||
let (
|
||||
user_session,
|
||||
folder_manager,
|
||||
server_provider,
|
||||
database_manager,
|
||||
document_manager2,
|
||||
collab_builder,
|
||||
) = runtime.block_on(async {
|
||||
let user_session = mk_user_session(&config, server_provider.clone());
|
||||
/// The shared collab builder is used to build the [Collab] instance. The plugins will be loaded
|
||||
/// on demand based on the [CollabPluginConfig].
|
||||
let collab_builder = Arc::new(AppFlowyCollabBuilder::new(
|
||||
server_provider.provider_type().into(),
|
||||
Some(Arc::new(SnapshotDBImpl(user_session.clone()))),
|
||||
));
|
||||
let (user_session, folder_manager, server_provider, database_manager, document_manager2) =
|
||||
runtime.block_on(async {
|
||||
let user_session = mk_user_session(&config, server_provider.clone());
|
||||
/// The shared collab builder is used to build the [Collab] instance. The plugins will be loaded
|
||||
/// on demand based on the [CollabPluginConfig].
|
||||
let collab_builder = Arc::new(AppFlowyCollabBuilder::new(
|
||||
server_provider.clone(),
|
||||
Some(Arc::new(SnapshotDBImpl(Arc::downgrade(&user_session)))),
|
||||
));
|
||||
|
||||
let database_manager2 = Database2DepsResolver::resolve(
|
||||
user_session.clone(),
|
||||
task_dispatcher.clone(),
|
||||
collab_builder.clone(),
|
||||
)
|
||||
.await;
|
||||
let database_manager2 = Database2DepsResolver::resolve(
|
||||
Arc::downgrade(&user_session),
|
||||
task_dispatcher.clone(),
|
||||
collab_builder.clone(),
|
||||
server_provider.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let document_manager2 = Document2DepsResolver::resolve(
|
||||
user_session.clone(),
|
||||
&database_manager2,
|
||||
collab_builder.clone(),
|
||||
);
|
||||
let document_manager2 = Document2DepsResolver::resolve(
|
||||
Arc::downgrade(&user_session),
|
||||
&database_manager2,
|
||||
collab_builder.clone(),
|
||||
server_provider.clone(),
|
||||
);
|
||||
|
||||
let folder_manager = Folder2DepsResolver::resolve(
|
||||
user_session.clone(),
|
||||
&document_manager2,
|
||||
&database_manager2,
|
||||
collab_builder.clone(),
|
||||
server_provider.clone(),
|
||||
)
|
||||
.await;
|
||||
let folder_manager = Folder2DepsResolver::resolve(
|
||||
Arc::downgrade(&user_session),
|
||||
&document_manager2,
|
||||
&database_manager2,
|
||||
collab_builder,
|
||||
server_provider.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
(
|
||||
user_session,
|
||||
folder_manager,
|
||||
server_provider,
|
||||
database_manager2,
|
||||
document_manager2,
|
||||
collab_builder,
|
||||
)
|
||||
});
|
||||
(
|
||||
user_session,
|
||||
folder_manager,
|
||||
server_provider,
|
||||
database_manager2,
|
||||
document_manager2,
|
||||
)
|
||||
});
|
||||
|
||||
let user_status_listener = UserStatusCallbackImpl {
|
||||
collab_builder,
|
||||
folder_manager: folder_manager.clone(),
|
||||
database_manager: database_manager.clone(),
|
||||
config: config.clone(),
|
||||
};
|
||||
|
||||
let cloned_user_session = user_session.clone();
|
||||
let cloned_user_session = Arc::downgrade(&user_session);
|
||||
runtime.block_on(async move {
|
||||
cloned_user_session.clone().init(user_status_listener).await;
|
||||
if let Some(user_session) = cloned_user_session.upgrade() {
|
||||
user_session.init(user_status_listener).await;
|
||||
}
|
||||
});
|
||||
|
||||
let event_dispatcher = Arc::new(AFPluginDispatcher::construct(runtime, || {
|
||||
@ -253,20 +251,14 @@ fn mk_user_session(
|
||||
}
|
||||
|
||||
struct UserStatusCallbackImpl {
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
folder_manager: Arc<Folder2Manager>,
|
||||
folder_manager: Arc<FolderManager>,
|
||||
database_manager: Arc<DatabaseManager2>,
|
||||
#[allow(dead_code)]
|
||||
config: AppFlowyCoreConfig,
|
||||
}
|
||||
|
||||
impl UserStatusCallback for UserStatusCallbackImpl {
|
||||
fn auth_type_did_changed(&self, auth_type: AuthType) {
|
||||
let provider_type: ServerProviderType = auth_type.into();
|
||||
self
|
||||
.collab_builder
|
||||
.set_cloud_storage_type(provider_type.into());
|
||||
}
|
||||
fn auth_type_did_changed(&self, _auth_type: AuthType) {}
|
||||
|
||||
fn did_sign_in(&self, user_id: i64, workspace_id: &str) -> Fut<FlowyResult<()>> {
|
||||
let user_id = user_id.to_owned();
|
||||
@ -281,7 +273,7 @@ impl UserStatusCallback for UserStatusCallbackImpl {
|
||||
})
|
||||
}
|
||||
|
||||
fn did_sign_up(&self, user_profile: &UserProfile) -> Fut<FlowyResult<()>> {
|
||||
fn did_sign_up(&self, is_new: bool, user_profile: &UserProfile) -> Fut<FlowyResult<()>> {
|
||||
let user_profile = user_profile.clone();
|
||||
let folder_manager = self.folder_manager.clone();
|
||||
let database_manager = self.database_manager.clone();
|
||||
@ -290,6 +282,7 @@ impl UserStatusCallback for UserStatusCallbackImpl {
|
||||
.initialize_with_new_user(
|
||||
user_profile.id,
|
||||
&user_profile.token,
|
||||
is_new,
|
||||
&user_profile.workspace_id,
|
||||
)
|
||||
.await?;
|
||||
@ -311,12 +304,12 @@ impl UserStatusCallback for UserStatusCallbackImpl {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ServerProviderType> for CloudStorageType {
|
||||
impl From<ServerProviderType> for CollabStorageType {
|
||||
fn from(server_provider: ServerProviderType) -> Self {
|
||||
match server_provider {
|
||||
ServerProviderType::Local => CloudStorageType::Local,
|
||||
ServerProviderType::SelfHosted => CloudStorageType::Local,
|
||||
ServerProviderType::Supabase => CloudStorageType::Supabase,
|
||||
ServerProviderType::Local => CollabStorageType::Local,
|
||||
ServerProviderType::SelfHosted => CollabStorageType::Local,
|
||||
ServerProviderType::Supabase => CollabStorageType::Supabase,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,12 +2,12 @@ use std::sync::Arc;

use flowy_database2::DatabaseManager2;
use flowy_document2::manager::DocumentManager as DocumentManager2;
use flowy_folder2::manager::Folder2Manager;
use flowy_folder2::manager::FolderManager;
use flowy_user::services::UserSession;
use lib_dispatch::prelude::AFPlugin;

pub fn make_plugins(
  folder_manager: &Arc<Folder2Manager>,
  folder_manager: &Arc<FolderManager>,
  database_manager: &Arc<DatabaseManager2>,
  user_session: &Arc<UserSession>,
  document_manager2: &Arc<DocumentManager2>,
@ -45,7 +45,7 @@ strum = "0.21"
strum_macros = "0.21"

[dev-dependencies]
flowy-test = { path = "../flowy-test" }
flowy-test = { path = "../flowy-test", default-features = false }

[build-dependencies]
flowy-codegen = { path = "../../../shared-lib/flowy-codegen"}
31
frontend/rust-lib/flowy-database2/src/deps.rs
Normal file
@ -0,0 +1,31 @@
use std::sync::Arc;

use appflowy_integrate::RocksCollabDB;

use flowy_error::FlowyError;
use lib_infra::future::FutureResult;

pub trait DatabaseUser2: Send + Sync {
  fn user_id(&self) -> Result<i64, FlowyError>;
  fn token(&self) -> Result<Option<String>, FlowyError>;
  fn collab_db(&self) -> Result<Arc<RocksCollabDB>, FlowyError>;
}

/// A trait for database cloud service.
/// Each kind of server should implement this trait. Check out the [AppFlowyServerProvider] of
/// [flowy-server] crate for more information.
pub trait DatabaseCloudService: Send + Sync {
  fn get_database_updates(&self, database_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError>;

  fn get_database_latest_snapshot(
    &self,
    database_id: &str,
  ) -> FutureResult<Option<DatabaseSnapshot>, FlowyError>;
}

pub struct DatabaseSnapshot {
  pub snapshot_id: i64,
  pub database_id: String,
  pub data: Vec<u8>,
  pub created_at: i64,
}
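The two methods above are the entire cloud contract the database layer depends on. The sketch below is not part of this commit: it only illustrates how a purely local backend could satisfy DatabaseCloudService by reporting no remote updates and no snapshot, in the spirit of the LocalServer used elsewhere in this change. The name LocalDatabaseCloudServiceImpl is made up for the example.

// Hypothetical, for illustration only: a local backend with nothing to fetch.
pub struct LocalDatabaseCloudServiceImpl();

impl DatabaseCloudService for LocalDatabaseCloudServiceImpl {
  fn get_database_updates(&self, _database_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
    // A local-only setup has no remote updates to pull.
    FutureResult::new(async move { Ok(vec![]) })
  }

  fn get_database_latest_snapshot(
    &self,
    _database_id: &str,
  ) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
    // Nor does it keep any server-side snapshots.
    FutureResult::new(async move { Ok(None) })
  }
}

Keeping the trait this small lets DatabaseManager2 accept any of the server providers (local, self-hosted, or Supabase) without caring which one sits behind the Arc<dyn DatabaseCloudService>.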
@ -1,3 +1,4 @@
|
||||
use collab::core::collab_state::SyncState;
|
||||
use collab_database::rows::RowId;
|
||||
use collab_database::user::DatabaseRecord;
|
||||
use collab_database::views::DatabaseLayout;
|
||||
@ -105,7 +106,7 @@ impl TryInto<MoveFieldParams> for MoveFieldPayloadPB {
|
||||
|
||||
fn try_into(self) -> Result<MoveFieldParams, Self::Error> {
|
||||
let view_id = NotEmptyStr::parse(self.view_id).map_err(|_| ErrorCode::DatabaseViewIdIsEmpty)?;
|
||||
let item_id = NotEmptyStr::parse(self.field_id).map_err(|_| ErrorCode::InvalidData)?;
|
||||
let item_id = NotEmptyStr::parse(self.field_id).map_err(|_| ErrorCode::InvalidParams)?;
|
||||
Ok(MoveFieldParams {
|
||||
view_id: view_id.0,
|
||||
field_id: item_id.0,
|
||||
@ -264,3 +265,48 @@ impl TryInto<DatabaseLayoutMeta> for DatabaseLayoutMetaPB {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct DatabaseSyncStatePB {
|
||||
#[pb(index = 1)]
|
||||
pub is_syncing: bool,
|
||||
|
||||
#[pb(index = 2)]
|
||||
pub is_finish: bool,
|
||||
}
|
||||
|
||||
impl From<SyncState> for DatabaseSyncStatePB {
|
||||
fn from(value: SyncState) -> Self {
|
||||
Self {
|
||||
is_syncing: value.is_syncing(),
|
||||
is_finish: value.is_sync_finished(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct DatabaseSnapshotStatePB {
|
||||
#[pb(index = 1)]
|
||||
pub new_snapshot_id: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct RepeatedDatabaseSnapshotPB {
|
||||
#[pb(index = 1)]
|
||||
pub items: Vec<DatabaseSnapshotPB>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct DatabaseSnapshotPB {
|
||||
#[pb(index = 1)]
|
||||
pub snapshot_id: i64,
|
||||
|
||||
#[pb(index = 2)]
|
||||
pub snapshot_desc: String,
|
||||
|
||||
#[pb(index = 3)]
|
||||
pub created_at: i64,
|
||||
|
||||
#[pb(index = 4)]
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
@ -1,7 +1,8 @@
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
|
||||
use flowy_error::ErrorCode;
|
||||
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
|
||||
#[derive(Eq, PartialEq, ProtoBuf, Debug, Default, Clone)]
|
||||
pub struct CheckboxFilterPB {
|
||||
#[pb(index = 1)]
|
||||
@ -30,7 +31,7 @@ impl std::convert::TryFrom<u8> for CheckboxFilterConditionPB {
|
||||
match value {
|
||||
0 => Ok(CheckboxFilterConditionPB::IsChecked),
|
||||
1 => Ok(CheckboxFilterConditionPB::IsUnChecked),
|
||||
_ => Err(ErrorCode::InvalidData),
|
||||
_ => Err(ErrorCode::InvalidParams),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,8 @@
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
|
||||
use flowy_error::ErrorCode;
|
||||
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
|
||||
#[derive(Eq, PartialEq, ProtoBuf, Debug, Default, Clone)]
|
||||
pub struct ChecklistFilterPB {
|
||||
#[pb(index = 1)]
|
||||
@ -30,7 +31,7 @@ impl std::convert::TryFrom<u8> for ChecklistFilterConditionPB {
|
||||
match value {
|
||||
0 => Ok(ChecklistFilterConditionPB::IsComplete),
|
||||
1 => Ok(ChecklistFilterConditionPB::IsIncomplete),
|
||||
_ => Err(ErrorCode::InvalidData),
|
||||
_ => Err(ErrorCode::InvalidParams),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,8 +1,11 @@
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
|
||||
use flowy_error::ErrorCode;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::str::FromStr;
|
||||
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
|
||||
#[derive(Eq, PartialEq, ProtoBuf, Debug, Default, Clone)]
|
||||
pub struct DateFilterPB {
|
||||
@ -73,7 +76,7 @@ impl std::convert::TryFrom<u8> for DateFilterConditionPB {
|
||||
4 => Ok(DateFilterConditionPB::DateOnOrAfter),
|
||||
5 => Ok(DateFilterConditionPB::DateWithIn),
|
||||
6 => Ok(DateFilterConditionPB::DateIsEmpty),
|
||||
_ => Err(ErrorCode::InvalidData),
|
||||
_ => Err(ErrorCode::InvalidParams),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,8 @@
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
|
||||
use flowy_error::ErrorCode;
|
||||
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
|
||||
#[derive(Eq, PartialEq, ProtoBuf, Debug, Default, Clone)]
|
||||
pub struct NumberFilterPB {
|
||||
#[pb(index = 1)]
|
||||
@ -44,7 +45,7 @@ impl std::convert::TryFrom<u8> for NumberFilterConditionPB {
|
||||
5 => Ok(NumberFilterConditionPB::LessThanOrEqualTo),
|
||||
6 => Ok(NumberFilterConditionPB::NumberIsEmpty),
|
||||
7 => Ok(NumberFilterConditionPB::NumberIsNotEmpty),
|
||||
_ => Err(ErrorCode::InvalidData),
|
||||
_ => Err(ErrorCode::InvalidParams),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,8 +1,9 @@
|
||||
use crate::services::field::SelectOptionIds;
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
|
||||
use flowy_error::ErrorCode;
|
||||
|
||||
use crate::services::field::SelectOptionIds;
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
|
||||
#[derive(Eq, PartialEq, ProtoBuf, Debug, Default, Clone)]
|
||||
pub struct SelectOptionFilterPB {
|
||||
#[pb(index = 1)]
|
||||
@ -38,7 +39,7 @@ impl std::convert::TryFrom<u8> for SelectOptionConditionPB {
|
||||
1 => Ok(SelectOptionConditionPB::OptionIsNot),
|
||||
2 => Ok(SelectOptionConditionPB::OptionIsEmpty),
|
||||
3 => Ok(SelectOptionConditionPB::OptionIsNotEmpty),
|
||||
_ => Err(ErrorCode::InvalidData),
|
||||
_ => Err(ErrorCode::InvalidParams),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,8 @@
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
use flowy_derive::{ProtoBuf, ProtoBuf_Enum};
|
||||
use flowy_error::ErrorCode;
|
||||
|
||||
use crate::services::filter::{Filter, FromFilterString};
|
||||
|
||||
#[derive(Eq, PartialEq, ProtoBuf, Debug, Default, Clone)]
|
||||
pub struct TextFilterPB {
|
||||
#[pb(index = 1)]
|
||||
@ -45,7 +46,7 @@ impl std::convert::TryFrom<u8> for TextFilterConditionPB {
|
||||
5 => Ok(TextFilterConditionPB::EndsWith),
|
||||
6 => Ok(TextFilterConditionPB::TextIsEmpty),
|
||||
7 => Ok(TextFilterConditionPB::TextIsNotEmpty),
|
||||
_ => Err(ErrorCode::InvalidData),
|
||||
_ => Err(ErrorCode::InvalidParams),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -28,6 +28,19 @@ pub(crate) async fn get_database_data_handler(
|
||||
data_result_ok(data)
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "trace", skip_all, err)]
|
||||
pub(crate) async fn open_database_handler(
|
||||
data: AFPluginData<DatabaseViewIdPB>,
|
||||
manager: AFPluginState<Arc<DatabaseManager2>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let view_id: DatabaseViewIdPB = data.into_inner();
|
||||
let database_id = manager
|
||||
.get_database_id_with_view_id(view_id.as_ref())
|
||||
.await?;
|
||||
let _ = manager.open_database(&database_id).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "trace", skip_all, err)]
|
||||
pub(crate) async fn get_database_id_handler(
|
||||
data: AFPluginData<DatabaseViewIdPB>,
|
||||
@ -807,3 +820,13 @@ pub(crate) async fn export_csv_handler(
|
||||
data,
|
||||
})
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "debug", skip_all, err)]
|
||||
pub(crate) async fn get_snapshots_handler(
|
||||
data: AFPluginData<DatabaseViewIdPB>,
|
||||
manager: AFPluginState<Arc<DatabaseManager2>>,
|
||||
) -> DataResult<RepeatedDatabaseSnapshotPB, FlowyError> {
|
||||
let view_id = data.into_inner().value;
|
||||
let snapshots = manager.get_database_snapshots(&view_id).await?;
|
||||
data_result_ok(RepeatedDatabaseSnapshotPB { items: snapshots })
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ pub fn init(database_manager: Arc<DatabaseManager2>) -> AFPlugin {
|
||||
.state(database_manager);
|
||||
plugin
|
||||
.event(DatabaseEvent::GetDatabase, get_database_data_handler)
|
||||
.event(DatabaseEvent::OpenDatabase, get_database_data_handler)
|
||||
.event(DatabaseEvent::GetDatabaseId, get_database_id_handler)
|
||||
.event(DatabaseEvent::GetDatabaseSetting, get_database_setting_handler)
|
||||
.event(DatabaseEvent::UpdateDatabaseSetting, update_database_setting_handler)
|
||||
@ -72,6 +73,7 @@ pub fn init(database_manager: Arc<DatabaseManager2>) -> AFPlugin {
|
||||
.event(DatabaseEvent::GetLayoutSetting, get_layout_setting_handler)
|
||||
.event(DatabaseEvent::CreateDatabaseView, create_database_view)
|
||||
.event(DatabaseEvent::ExportCSV, export_csv_handler)
|
||||
.event(DatabaseEvent::GetDatabaseSnapshots, get_snapshots_handler)
|
||||
}
|
||||
|
||||
/// [DatabaseEvent] defines events that are used to interact with the Grid. You could check [this](https://appflowy.gitbook.io/docs/essential-documentation/contribute-to-appflowy/architecture/backend/protobuf)
|
||||
@ -110,6 +112,9 @@ pub enum DatabaseEvent {
|
||||
#[event(input = "DatabaseViewIdPB")]
|
||||
DeleteAllSorts = 6,
|
||||
|
||||
#[event(input = "DatabaseViewIdPB")]
|
||||
OpenDatabase = 7,
|
||||
|
||||
/// [GetFields] event is used to get the database's fields.
|
||||
///
|
||||
/// The event handler accepts a [GetFieldPayloadPB] and returns a [RepeatedFieldPB]
|
||||
@ -306,4 +311,8 @@ pub enum DatabaseEvent {
|
||||
|
||||
#[event(input = "DatabaseViewIdPB", output = "DatabaseExportDataPB")]
|
||||
ExportCSV = 141,
|
||||
|
||||
/// Returns all the snapshots of the database view.
|
||||
#[event(input = "DatabaseViewIdPB", output = "RepeatedDatabaseSnapshotPB")]
|
||||
GetDatabaseSnapshots = 150,
|
||||
}
|
||||
|
@ -1,5 +1,6 @@
|
||||
pub use manager::*;
|
||||
|
||||
pub mod deps;
|
||||
pub mod entities;
|
||||
mod event_handler;
|
||||
pub mod event_map;
|
||||
|
@ -14,23 +14,21 @@ use tokio::sync::RwLock;
|
||||
use flowy_error::{internal_error, FlowyError, FlowyResult};
|
||||
use flowy_task::TaskDispatcher;
|
||||
|
||||
use crate::entities::{DatabaseDescriptionPB, DatabaseLayoutPB, RepeatedDatabaseDescriptionPB};
|
||||
use crate::deps::{DatabaseCloudService, DatabaseUser2};
|
||||
use crate::entities::{
|
||||
DatabaseDescriptionPB, DatabaseLayoutPB, DatabaseSnapshotPB, RepeatedDatabaseDescriptionPB,
|
||||
};
|
||||
use crate::services::database::{DatabaseEditor, MutexDatabase};
|
||||
use crate::services::database_view::DatabaseLayoutDepsResolver;
|
||||
use crate::services::share::csv::{CSVFormat, CSVImporter, ImportResult};
|
||||
|
||||
pub trait DatabaseUser2: Send + Sync {
|
||||
fn user_id(&self) -> Result<i64, FlowyError>;
|
||||
fn token(&self) -> Result<Option<String>, FlowyError>;
|
||||
fn collab_db(&self) -> Result<Arc<RocksCollabDB>, FlowyError>;
|
||||
}
|
||||
|
||||
pub struct DatabaseManager2 {
|
||||
user: Arc<dyn DatabaseUser2>,
|
||||
user_database: UserDatabase,
|
||||
task_scheduler: Arc<RwLock<TaskDispatcher>>,
|
||||
editors: RwLock<HashMap<String, Arc<DatabaseEditor>>>,
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
cloud_service: Arc<dyn DatabaseCloudService>,
|
||||
}
|
||||
|
||||
impl DatabaseManager2 {
|
||||
@ -38,6 +36,7 @@ impl DatabaseManager2 {
|
||||
database_user: Arc<dyn DatabaseUser2>,
|
||||
task_scheduler: Arc<RwLock<TaskDispatcher>>,
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
cloud_service: Arc<dyn DatabaseCloudService>,
|
||||
) -> Self {
|
||||
Self {
|
||||
user: database_user,
|
||||
@ -45,6 +44,7 @@ impl DatabaseManager2 {
|
||||
task_scheduler,
|
||||
editors: Default::default(),
|
||||
collab_builder,
|
||||
cloud_service,
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,7 +98,10 @@ impl DatabaseManager2 {
|
||||
if let Some(editor) = self.editors.read().await.get(database_id) {
|
||||
return Ok(editor.clone());
|
||||
}
|
||||
self.open_database(database_id).await
|
||||
}
|
||||
|
||||
pub async fn open_database(&self, database_id: &str) -> FlowyResult<Arc<DatabaseEditor>> {
|
||||
tracing::trace!("create new editor for database {}", database_id);
|
||||
let mut editors = self.editors.write().await;
|
||||
let database = MutexDatabase::new(self.with_user_database(
|
||||
@ -117,9 +120,14 @@ impl DatabaseManager2 {
|
||||
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
pub async fn close_database_view<T: AsRef<str>>(&self, view_id: T) -> FlowyResult<()> {
|
||||
// TODO(natan): defer closing the database if the sync is not finished
|
||||
let view_id = view_id.as_ref();
|
||||
let database_id = self.with_user_database(None, |database| {
|
||||
database.get_database_id_with_view_id(view_id)
|
||||
let database_id = self.with_user_database(None, |databases| {
|
||||
let database_id = databases.get_database_id_with_view_id(view_id);
|
||||
if database_id.is_some() {
|
||||
databases.close_database(database_id.as_ref().unwrap());
|
||||
}
|
||||
database_id
|
||||
});
|
||||
|
||||
if let Some(database_id) = database_id {
|
||||
@ -151,6 +159,7 @@ impl DatabaseManager2 {
|
||||
Ok(database_data)
|
||||
}
|
||||
|
||||
/// Create a new database with the given data that can be deserialized to [DatabaseData].
|
||||
#[tracing::instrument(level = "trace", skip_all, err)]
|
||||
pub async fn create_database_with_database_data(
|
||||
&self,
|
||||
@ -251,6 +260,29 @@ impl DatabaseManager2 {
|
||||
database.update_view_layout(view_id, layout.into()).await
|
||||
}
|
||||
|
||||
pub async fn get_database_snapshots(
|
||||
&self,
|
||||
view_id: &str,
|
||||
) -> FlowyResult<Vec<DatabaseSnapshotPB>> {
|
||||
let database_id = self.get_database_id_with_view_id(view_id).await?;
|
||||
let mut snapshots = vec![];
|
||||
if let Some(snapshot) = self
|
||||
.cloud_service
|
||||
.get_database_latest_snapshot(&database_id)
|
||||
.await?
|
||||
.map(|snapshot| DatabaseSnapshotPB {
|
||||
snapshot_id: snapshot.snapshot_id,
|
||||
snapshot_desc: "".to_string(),
|
||||
created_at: snapshot.created_at,
|
||||
data: snapshot.data,
|
||||
})
|
||||
{
|
||||
snapshots.push(snapshot);
|
||||
}
|
||||
|
||||
Ok(snapshots)
|
||||
}
|
||||
|
||||
fn with_user_database<F, Output>(&self, default_value: Output, f: F) -> Output
|
||||
where
|
||||
F: FnOnce(&InnerUserDatabase) -> Output,
|
||||
@ -261,6 +293,12 @@ impl DatabaseManager2 {
|
||||
Some(folder) => f(folder),
|
||||
}
|
||||
}
|
||||
|
||||
/// Only expose this method for testing
|
||||
#[cfg(debug_assertions)]
|
||||
pub fn get_cloud_service(&self) -> &Arc<dyn DatabaseCloudService> {
|
||||
&self.cloud_service
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
|
@ -1,7 +1,7 @@
|
||||
use flowy_derive::ProtoBuf_Enum;
|
||||
use flowy_notification::NotificationBuilder;
|
||||
|
||||
const OBSERVABLE_CATEGORY: &str = "Grid";
|
||||
const DATABASE_OBSERVABLE_SOURCE: &str = "Database";
|
||||
|
||||
#[derive(ProtoBuf_Enum, Debug, Default)]
|
||||
pub enum DatabaseNotification {
|
||||
@ -45,6 +45,8 @@ pub enum DatabaseNotification {
|
||||
DidDeleteDatabaseView = 83,
|
||||
// Trigger when the database view is moved to trash
|
||||
DidMoveDatabaseViewToTrash = 84,
|
||||
DidUpdateDatabaseSyncUpdate = 85,
|
||||
DidUpdateDatabaseSnapshotState = 86,
|
||||
}
|
||||
|
||||
impl std::convert::From<DatabaseNotification> for i32 {
|
||||
@ -53,7 +55,34 @@ impl std::convert::From<DatabaseNotification> for i32 {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::convert::From<i32> for DatabaseNotification {
|
||||
fn from(notification: i32) -> Self {
|
||||
match notification {
|
||||
20 => DatabaseNotification::DidUpdateViewRows,
|
||||
21 => DatabaseNotification::DidUpdateViewRowsVisibility,
|
||||
22 => DatabaseNotification::DidUpdateFields,
|
||||
40 => DatabaseNotification::DidUpdateCell,
|
||||
50 => DatabaseNotification::DidUpdateField,
|
||||
60 => DatabaseNotification::DidUpdateNumOfGroups,
|
||||
61 => DatabaseNotification::DidUpdateGroupRow,
|
||||
62 => DatabaseNotification::DidGroupByField,
|
||||
63 => DatabaseNotification::DidUpdateFilter,
|
||||
64 => DatabaseNotification::DidUpdateSort,
|
||||
65 => DatabaseNotification::DidReorderRows,
|
||||
66 => DatabaseNotification::DidReorderSingleRow,
|
||||
67 => DatabaseNotification::DidUpdateRowMeta,
|
||||
70 => DatabaseNotification::DidUpdateSettings,
|
||||
80 => DatabaseNotification::DidUpdateLayoutSettings,
|
||||
81 => DatabaseNotification::DidSetNewLayoutField,
|
||||
82 => DatabaseNotification::DidUpdateDatabaseLayout,
|
||||
83 => DatabaseNotification::DidDeleteDatabaseView,
|
||||
84 => DatabaseNotification::DidMoveDatabaseViewToTrash,
|
||||
_ => DatabaseNotification::Unknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "trace")]
|
||||
pub fn send_notification(id: &str, ty: DatabaseNotification) -> NotificationBuilder {
|
||||
NotificationBuilder::new(id, ty, OBSERVABLE_CATEGORY)
|
||||
NotificationBuilder::new(id, ty, DATABASE_OBSERVABLE_SOURCE)
|
||||
}
|
||||
|
@ -277,7 +277,7 @@ pub struct AnyCellChangeset<T>(pub Option<T>);
|
||||
impl<T> AnyCellChangeset<T> {
|
||||
pub fn try_into_inner(self) -> FlowyResult<T> {
|
||||
match self.0 {
|
||||
None => Err(ErrorCode::InvalidData.into()),
|
||||
None => Err(ErrorCode::InvalidParams.into()),
|
||||
Some(data) => Ok(data),
|
||||
}
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ use collab_database::database::Database as InnerDatabase;
|
||||
use collab_database::fields::{Field, TypeOptionData};
|
||||
use collab_database::rows::{Cell, Cells, CreateRowParams, Row, RowCell, RowId};
|
||||
use collab_database::views::{DatabaseLayout, DatabaseView, LayoutSetting};
|
||||
use futures::StreamExt;
|
||||
use parking_lot::Mutex;
|
||||
use tokio::sync::{broadcast, RwLock};
|
||||
|
||||
@ -54,6 +55,38 @@ impl DatabaseEditor {
|
||||
cell_cache: cell_cache.clone(),
|
||||
});
|
||||
|
||||
let database_id = database.lock().get_database_id();
|
||||
|
||||
// Receive database sync state and send to frontend via the notification
|
||||
let mut sync_state = database.lock().subscribe_sync_state();
|
||||
let cloned_database_id = database_id.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Some(sync_state) = sync_state.next().await {
|
||||
send_notification(
|
||||
&cloned_database_id,
|
||||
DatabaseNotification::DidUpdateDatabaseSyncUpdate,
|
||||
)
|
||||
.payload(DatabaseSyncStatePB::from(sync_state))
|
||||
.send();
|
||||
}
|
||||
});
|
||||
|
||||
// Receive database snapshot state and send to frontend via the notification
|
||||
let mut snapshot_state = database.lock().subscribe_snapshot_state();
|
||||
tokio::spawn(async move {
|
||||
while let Some(snapshot_state) = snapshot_state.next().await {
|
||||
if let Some(new_snapshot_id) = snapshot_state.snapshot_id() {
|
||||
tracing::debug!("Did create database snapshot: {}", new_snapshot_id);
|
||||
send_notification(
|
||||
&database_id,
|
||||
DatabaseNotification::DidUpdateDatabaseSnapshotState,
|
||||
)
|
||||
.payload(DatabaseSnapshotStatePB { new_snapshot_id })
|
||||
.send();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let database_views =
|
||||
Arc::new(DatabaseViews::new(database.clone(), cell_cache.clone(), database_view_data).await?);
|
||||
Ok(Self {
|
||||
@ -1090,6 +1123,12 @@ impl DatabaseEditor {
|
||||
.filter(|f| FieldType::from(f.field_type).is_auto_update())
|
||||
.collect::<Vec<Field>>()
|
||||
}
|
||||
|
||||
/// Only expose this method for testing
|
||||
#[cfg(debug_assertions)]
|
||||
pub fn get_mutex_database(&self) -> &MutexDatabase {
|
||||
&self.database
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn notify_did_update_cell(changesets: Vec<CellChangesetNotifyPB>) {
|
||||
|
@ -0,0 +1 @@
|
||||
|
@ -1 +1 @@
|
||||
|
||||
pub mod entities;
|
||||
|
@ -14,6 +14,7 @@ flowy-derive = { path = "../../../shared-lib/flowy-derive" }
|
||||
flowy-notification = { path = "../flowy-notification" }
|
||||
flowy-error = { path = "../flowy-error", features = ["adaptor_serde", "adaptor_database", "adaptor_dispatch", "collab"] }
|
||||
lib-dispatch = { path = "../lib-dispatch" }
|
||||
lib-infra = { path = "../../../shared-lib/lib-infra" }
|
||||
|
||||
protobuf = {version = "2.28.0"}
|
||||
bytes = { version = "1.4" }
|
||||
@ -28,6 +29,8 @@ tokio = { version = "1.26", features = ["full"] }
|
||||
anyhow = "1.0"
|
||||
indexmap = {version = "1.9.2", features = ["serde"]}
|
||||
uuid = { version = "1.3.3", features = ["v4"] }
|
||||
futures = "0.3.26"
|
||||
tokio-stream = { version = "0.1.14", features = ["sync"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.4.0"
|
||||
|
31
frontend/rust-lib/flowy-document2/src/deps.rs
Normal file
@ -0,0 +1,31 @@
use std::sync::Arc;

use appflowy_integrate::RocksCollabDB;

use flowy_error::FlowyError;
use lib_infra::future::FutureResult;

pub trait DocumentUser: Send + Sync {
  fn user_id(&self) -> Result<i64, FlowyError>;
  fn token(&self) -> Result<Option<String>, FlowyError>; // unused now.
  fn collab_db(&self) -> Result<Arc<RocksCollabDB>, FlowyError>;
}

/// A trait for document cloud service.
/// Each kind of server should implement this trait. Check out the [AppFlowyServerProvider] of
/// [flowy-server] crate for more information.
pub trait DocumentCloudService: Send + Sync + 'static {
  fn get_document_updates(&self, document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError>;

  fn get_document_latest_snapshot(
    &self,
    document_id: &str,
  ) -> FutureResult<Option<DocumentSnapshot>, FlowyError>;
}

pub struct DocumentSnapshot {
  pub snapshot_id: i64,
  pub document_id: String,
  pub data: Vec<u8>,
  pub created_at: i64,
}
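As with the database trait, this commit does not show a standalone implementation at this point, so the snippet below is only a sketch of a test double that returns no updates and one canned snapshot; FakeDocumentCloudService and the literal values are invented for the example.

// Hypothetical test double, not part of this commit.
pub struct FakeDocumentCloudService;

impl DocumentCloudService for FakeDocumentCloudService {
  fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
    // Pretend the server has no pending updates for this document.
    FutureResult::new(async move { Ok(vec![]) })
  }

  fn get_document_latest_snapshot(
    &self,
    document_id: &str,
  ) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
    let document_id = document_id.to_string();
    FutureResult::new(async move {
      // Hand back a single empty snapshot so callers can exercise the snapshot path.
      Ok(Some(DocumentSnapshot {
        snapshot_id: 1,
        document_id,
        data: vec![],
        created_at: 0,
      }))
    })
  }
}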
@ -4,26 +4,33 @@ use std::{
|
||||
};
|
||||
|
||||
use collab::core::collab::MutexCollab;
|
||||
use collab_document::{blocks::DocumentData, document::Document as InnerDocument};
|
||||
use collab_document::{blocks::DocumentData, document::Document};
|
||||
use futures::StreamExt;
|
||||
use parking_lot::Mutex;
|
||||
use tokio_stream::wrappers::WatchStream;
|
||||
|
||||
use flowy_error::FlowyResult;
|
||||
|
||||
use crate::entities::{DocEventPB, DocumentSnapshotStatePB, DocumentSyncStatePB};
|
||||
use crate::notification::{send_notification, DocumentNotification};
|
||||
|
||||
/// This struct wrap the document::Document
|
||||
#[derive(Clone)]
|
||||
pub struct Document(Arc<Mutex<InnerDocument>>);
|
||||
pub struct MutexDocument(Arc<Mutex<Document>>);
|
||||
|
||||
impl Document {
|
||||
/// Creates and returns a new Document object.
|
||||
impl MutexDocument {
|
||||
/// Open a document with the given collab.
|
||||
/// # Arguments
|
||||
/// * `collab` - the identifier of the collaboration instance
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Result<Document, FlowyError>` - a Result containing either a new Document object or an Error if the document creation failed
|
||||
pub fn new(collab: Arc<MutexCollab>) -> FlowyResult<Self> {
|
||||
InnerDocument::create(collab)
|
||||
.map(|inner| Self(Arc::new(Mutex::new(inner))))
|
||||
.map_err(|err| err.into())
|
||||
pub fn open(doc_id: &str, collab: Arc<MutexCollab>) -> FlowyResult<Self> {
|
||||
let document = Document::open(collab.clone()).map(|inner| Self(Arc::new(Mutex::new(inner))))?;
|
||||
subscribe_document_changed(doc_id, &document);
|
||||
subscribe_document_snapshot_state(&collab);
|
||||
subscribe_document_sync_state(&collab);
|
||||
Ok(document)
|
||||
}
|
||||
|
||||
/// Creates and returns a new Document object with initial data.
|
||||
@ -34,24 +41,73 @@ impl Document {
|
||||
/// # Returns
|
||||
/// * `Result<Document, FlowyError>` - a Result containing either a new Document object or an Error if the document creation failed
|
||||
pub fn create_with_data(collab: Arc<MutexCollab>, data: DocumentData) -> FlowyResult<Self> {
|
||||
InnerDocument::create_with_data(collab, data)
|
||||
.map(|inner| Self(Arc::new(Mutex::new(inner))))
|
||||
.map_err(|err| err.into())
|
||||
let document =
|
||||
Document::create_with_data(collab, data).map(|inner| Self(Arc::new(Mutex::new(inner))))?;
|
||||
Ok(document)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Sync for Document {}
|
||||
unsafe impl Send for Document {}
|
||||
fn subscribe_document_changed(doc_id: &str, document: &MutexDocument) {
|
||||
let doc_id = doc_id.to_string();
|
||||
document
|
||||
.lock()
|
||||
.subscribe_block_changed(move |events, is_remote| {
|
||||
tracing::trace!(
|
||||
"document changed: {:?}, from remote: {}",
|
||||
&events,
|
||||
is_remote
|
||||
);
|
||||
// send notification to the client.
|
||||
send_notification(&doc_id, DocumentNotification::DidReceiveUpdate)
|
||||
.payload::<DocEventPB>((events, is_remote).into())
|
||||
.send();
|
||||
});
|
||||
}
|
||||
|
||||
impl Deref for Document {
|
||||
type Target = Arc<Mutex<InnerDocument>>;
|
||||
fn subscribe_document_snapshot_state(collab: &Arc<MutexCollab>) {
|
||||
let document_id = collab.lock().object_id.clone();
|
||||
let mut snapshot_state = WatchStream::new(collab.lock().subscribe_snapshot_state());
|
||||
tokio::spawn(async move {
|
||||
while let Some(snapshot_state) = snapshot_state.next().await {
|
||||
if let Some(new_snapshot_id) = snapshot_state.snapshot_id() {
|
||||
tracing::debug!("Did create document snapshot: {}", new_snapshot_id);
|
||||
send_notification(
|
||||
&document_id,
|
||||
DocumentNotification::DidUpdateDocumentSnapshotState,
|
||||
)
|
||||
.payload(DocumentSnapshotStatePB { new_snapshot_id })
|
||||
.send();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn subscribe_document_sync_state(collab: &Arc<MutexCollab>) {
|
||||
let document_id = collab.lock().object_id.clone();
|
||||
let mut sync_state_stream = WatchStream::new(collab.lock().subscribe_sync_state());
|
||||
tokio::spawn(async move {
|
||||
while let Some(sync_state) = sync_state_stream.next().await {
|
||||
send_notification(
|
||||
&document_id,
|
||||
DocumentNotification::DidUpdateDocumentSyncState,
|
||||
)
|
||||
.payload(DocumentSyncStatePB::from(sync_state))
|
||||
.send();
|
||||
}
|
||||
});
|
||||
}
|
||||
unsafe impl Sync for MutexDocument {}
|
||||
unsafe impl Send for MutexDocument {}
|
||||
|
||||
impl Deref for MutexDocument {
|
||||
type Target = Arc<Mutex<Document>>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for Document {
|
||||
impl DerefMut for MutexDocument {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
|
@ -1,2 +0,0 @@
|
||||
pub const PAGE: &str = "page";
|
||||
pub const PARAGRAPH_BLOCK_TYPE: &str = "paragraph";
|
@ -3,10 +3,10 @@ use std::{collections::HashMap, vec};
|
||||
use collab_document::blocks::{Block, DocumentData, DocumentMeta};
|
||||
use nanoid::nanoid;
|
||||
|
||||
use crate::{
|
||||
document_block_keys::{PAGE, PARAGRAPH_BLOCK_TYPE},
|
||||
entities::{BlockPB, ChildrenPB, DocumentDataPB, MetaPB},
|
||||
};
|
||||
use crate::entities::{BlockPB, ChildrenPB, DocumentDataPB, MetaPB};
|
||||
|
||||
pub const PAGE: &str = "page";
|
||||
pub const PARAGRAPH_BLOCK_TYPE: &str = "paragraph";
|
||||
|
||||
impl From<DocumentData> for DocumentDataPB {
|
||||
fn from(data: DocumentData) -> Self {
|
||||
|
@ -1,3 +1,4 @@
|
||||
use collab::core::collab_state::SyncState;
|
||||
use collab_document::blocks::{BlockAction, DocumentData};
|
||||
use std::collections::HashMap;
|
||||
|
||||
@ -336,3 +337,48 @@ impl TryInto<ConvertDataParams> for ConvertDataPayloadPB {
|
||||
Ok(ConvertDataParams { convert_type, data })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct RepeatedDocumentSnapshotPB {
|
||||
#[pb(index = 1)]
|
||||
pub items: Vec<DocumentSnapshotPB>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct DocumentSnapshotPB {
|
||||
#[pb(index = 1)]
|
||||
pub snapshot_id: i64,
|
||||
|
||||
#[pb(index = 2)]
|
||||
pub snapshot_desc: String,
|
||||
|
||||
#[pb(index = 3)]
|
||||
pub created_at: i64,
|
||||
|
||||
#[pb(index = 4)]
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct DocumentSnapshotStatePB {
|
||||
#[pb(index = 1)]
|
||||
pub new_snapshot_id: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct DocumentSyncStatePB {
|
||||
#[pb(index = 1)]
|
||||
pub is_syncing: bool,
|
||||
|
||||
#[pb(index = 2)]
|
||||
pub is_finish: bool,
|
||||
}
|
||||
|
||||
impl From<SyncState> for DocumentSyncStatePB {
|
||||
fn from(value: SyncState) -> Self {
|
||||
Self {
|
||||
is_syncing: value.is_syncing(),
|
||||
is_finish: value.is_sync_finished(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -14,20 +14,8 @@ use collab_document::blocks::{
|
||||
use flowy_error::{FlowyError, FlowyResult};
|
||||
use lib_dispatch::prelude::{data_result_ok, AFPluginData, AFPluginState, DataResult};
|
||||
|
||||
use crate::entities::{
|
||||
ApplyActionParams, CloseDocumentParams, ConvertDataParams, CreateDocumentParams,
|
||||
DocumentRedoUndoParams, OpenDocumentParams,
|
||||
};
|
||||
use crate::{
|
||||
entities::{
|
||||
ApplyActionPayloadPB, BlockActionPB, BlockActionPayloadPB, BlockActionTypePB, BlockEventPB,
|
||||
BlockEventPayloadPB, BlockPB, CloseDocumentPayloadPB, ConvertDataPayloadPB, ConvertType,
|
||||
CreateDocumentPayloadPB, DeltaTypePB, DocEventPB, DocumentDataPB, DocumentRedoUndoPayloadPB,
|
||||
DocumentRedoUndoResponsePB, OpenDocumentPayloadPB,
|
||||
},
|
||||
manager::DocumentManager,
|
||||
parser::json::parser::JsonToDocumentParser,
|
||||
};
|
||||
use crate::entities::*;
|
||||
use crate::{manager::DocumentManager, parser::json::parser::JsonToDocumentParser};
|
||||
|
||||
// Handler for creating a new document
|
||||
pub(crate) async fn create_document_handler(
|
||||
@ -46,8 +34,8 @@ pub(crate) async fn open_document_handler(
|
||||
) -> DataResult<DocumentDataPB, FlowyError> {
|
||||
let params: OpenDocumentParams = data.into_inner().try_into()?;
|
||||
let doc_id = params.document_id;
|
||||
let document = manager.get_or_open_document(&doc_id)?;
|
||||
let document_data = document.lock().get_document()?;
|
||||
let document = manager.get_document(&doc_id)?;
|
||||
let document_data = document.lock().get_document_data()?;
|
||||
data_result_ok(DocumentDataPB::from(document_data))
|
||||
}
|
||||
|
||||
@ -69,8 +57,7 @@ pub(crate) async fn get_document_data_handler(
|
||||
) -> DataResult<DocumentDataPB, FlowyError> {
|
||||
let params: OpenDocumentParams = data.into_inner().try_into()?;
|
||||
let doc_id = params.document_id;
|
||||
let document = manager.get_document_from_disk(&doc_id)?;
|
||||
let document_data = document.lock().get_document()?;
|
||||
let document_data = manager.get_document_data(&doc_id)?;
|
||||
data_result_ok(DocumentDataPB::from(document_data))
|
||||
}
|
||||
|
||||
@ -81,7 +68,7 @@ pub(crate) async fn apply_action_handler(
|
||||
) -> FlowyResult<()> {
|
||||
let params: ApplyActionParams = data.into_inner().try_into()?;
|
||||
let doc_id = params.document_id;
|
||||
let document = manager.get_or_open_document(&doc_id)?;
|
||||
let document = manager.get_document(&doc_id)?;
|
||||
let actions = params.actions;
|
||||
document.lock().apply_action(actions);
|
||||
Ok(())
|
||||
@ -117,7 +104,7 @@ pub(crate) async fn redo_handler(
|
||||
) -> DataResult<DocumentRedoUndoResponsePB, FlowyError> {
|
||||
let params: DocumentRedoUndoParams = data.into_inner().try_into()?;
|
||||
let doc_id = params.document_id;
|
||||
let document = manager.get_or_open_document(&doc_id)?;
|
||||
let document = manager.get_document(&doc_id)?;
|
||||
let document = document.lock();
|
||||
let redo = document.redo();
|
||||
let can_redo = document.can_redo();
|
||||
@ -135,7 +122,7 @@ pub(crate) async fn undo_handler(
|
||||
) -> DataResult<DocumentRedoUndoResponsePB, FlowyError> {
|
||||
let params: DocumentRedoUndoParams = data.into_inner().try_into()?;
|
||||
let doc_id = params.document_id;
|
||||
let document = manager.get_or_open_document(&doc_id)?;
|
||||
let document = manager.get_document(&doc_id)?;
|
||||
let document = document.lock();
|
||||
let undo = document.undo();
|
||||
let can_redo = document.can_redo();
|
||||
@ -153,7 +140,7 @@ pub(crate) async fn can_undo_redo_handler(
|
||||
) -> DataResult<DocumentRedoUndoResponsePB, FlowyError> {
|
||||
let params: DocumentRedoUndoParams = data.into_inner().try_into()?;
|
||||
let doc_id = params.document_id;
|
||||
let document = manager.get_or_open_document(&doc_id)?;
|
||||
let document = manager.get_document(&doc_id)?;
|
||||
let document = document.lock();
|
||||
let can_redo = document.can_redo();
|
||||
let can_undo = document.can_undo();
|
||||
@ -165,6 +152,16 @@ pub(crate) async fn can_undo_redo_handler(
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn get_snapshot_handler(
|
||||
data: AFPluginData<OpenDocumentPayloadPB>,
|
||||
manager: AFPluginState<Arc<DocumentManager>>,
|
||||
) -> DataResult<RepeatedDocumentSnapshotPB, FlowyError> {
|
||||
let params: OpenDocumentParams = data.into_inner().try_into()?;
|
||||
let doc_id = params.document_id;
|
||||
let snapshots = manager.get_document_snapshots(&doc_id).await?;
|
||||
data_result_ok(RepeatedDocumentSnapshotPB { items: snapshots })
|
||||
}
|
||||
|
||||
impl From<BlockActionPB> for BlockAction {
|
||||
fn from(pb: BlockActionPB) -> Self {
|
||||
Self {
|
||||
|
@ -4,34 +4,26 @@ use strum_macros::Display;
|
||||
use flowy_derive::{Flowy_Event, ProtoBuf_Enum};
|
||||
use lib_dispatch::prelude::AFPlugin;
|
||||
|
||||
use crate::{
|
||||
event_handler::{
|
||||
apply_action_handler, can_undo_redo_handler, close_document_handler, convert_data_to_document,
|
||||
create_document_handler, get_document_data_handler, open_document_handler, redo_handler,
|
||||
undo_handler,
|
||||
},
|
||||
manager::DocumentManager,
|
||||
};
|
||||
use crate::event_handler::get_snapshot_handler;
|
||||
use crate::{event_handler::*, manager::DocumentManager};
|
||||
|
||||
pub fn init(document_manager: Arc<DocumentManager>) -> AFPlugin {
|
||||
let mut plugin = AFPlugin::new()
|
||||
AFPlugin::new()
|
||||
.name(env!("CARGO_PKG_NAME"))
|
||||
.state(document_manager);
|
||||
|
||||
plugin = plugin.event(DocumentEvent::CreateDocument, create_document_handler);
|
||||
plugin = plugin.event(DocumentEvent::OpenDocument, open_document_handler);
|
||||
plugin = plugin.event(DocumentEvent::CloseDocument, close_document_handler);
|
||||
plugin = plugin.event(DocumentEvent::ApplyAction, apply_action_handler);
|
||||
plugin = plugin.event(DocumentEvent::GetDocumentData, get_document_data_handler);
|
||||
plugin = plugin.event(
|
||||
DocumentEvent::ConvertDataToDocument,
|
||||
convert_data_to_document,
|
||||
);
|
||||
plugin = plugin.event(DocumentEvent::Redo, redo_handler);
|
||||
plugin = plugin.event(DocumentEvent::Undo, undo_handler);
|
||||
plugin = plugin.event(DocumentEvent::CanUndoRedo, can_undo_redo_handler);
|
||||
|
||||
plugin
|
||||
.state(document_manager)
|
||||
.event(DocumentEvent::CreateDocument, create_document_handler)
|
||||
.event(DocumentEvent::OpenDocument, open_document_handler)
|
||||
.event(DocumentEvent::CloseDocument, close_document_handler)
|
||||
.event(DocumentEvent::ApplyAction, apply_action_handler)
|
||||
.event(DocumentEvent::GetDocumentData, get_document_data_handler)
|
||||
.event(
|
||||
DocumentEvent::ConvertDataToDocument,
|
||||
convert_data_to_document,
|
||||
)
|
||||
.event(DocumentEvent::Redo, redo_handler)
|
||||
.event(DocumentEvent::Undo, undo_handler)
|
||||
.event(DocumentEvent::CanUndoRedo, can_undo_redo_handler)
|
||||
.event(DocumentEvent::GetDocumentSnapshots, get_snapshot_handler)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, Display, ProtoBuf_Enum, Flowy_Event)]
|
||||
@ -49,7 +41,7 @@ pub enum DocumentEvent {
|
||||
#[event(input = "ApplyActionPayloadPB")]
|
||||
ApplyAction = 3,
|
||||
|
||||
#[event(input = "OpenDocumentPayloadPB")]
|
||||
#[event(input = "OpenDocumentPayloadPB", output = "DocumentDataPB")]
|
||||
GetDocumentData = 4,
|
||||
|
||||
#[event(input = "ConvertDataPayloadPB", output = "DocumentDataPB")]
|
||||
@ -72,4 +64,7 @@ pub enum DocumentEvent {
|
||||
output = "DocumentRedoUndoResponsePB"
|
||||
)]
|
||||
CanUndoRedo = 8,
|
||||
|
||||
#[event(input = "OpenDocumentPayloadPB", output = "RepeatedDocumentSnapshotPB")]
|
||||
GetDocumentSnapshots = 9,
|
||||
}
|
||||
|
@ -1,5 +1,4 @@
pub mod document;
pub mod document_block_keys;
pub mod document_data;
pub mod entities;
pub mod event_handler;
@ -8,5 +7,6 @@ pub mod manager;
pub mod parser;
pub mod protobuf;

pub mod deps;
mod notification;
mod parse;

@ -1,39 +1,37 @@
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use appflowy_integrate::collab_builder::AppFlowyCollabBuilder;
|
||||
use appflowy_integrate::RocksCollabDB;
|
||||
use collab::core::collab::MutexCollab;
|
||||
use collab_document::blocks::DocumentData;
|
||||
use collab_document::error::DocumentError;
|
||||
use collab_document::document::Document;
|
||||
use collab_document::YrsDocAction;
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use flowy_error::{FlowyError, FlowyResult};
|
||||
use flowy_error::{internal_error, FlowyError, FlowyResult};
|
||||
|
||||
use crate::{
|
||||
document::Document,
|
||||
document_data::default_document_data,
|
||||
entities::DocEventPB,
|
||||
notification::{send_notification, DocumentNotification},
|
||||
};
|
||||
|
||||
pub trait DocumentUser: Send + Sync {
|
||||
fn user_id(&self) -> Result<i64, FlowyError>;
|
||||
fn token(&self) -> Result<Option<String>, FlowyError>; // unused now.
|
||||
fn collab_db(&self) -> Result<Arc<RocksCollabDB>, FlowyError>;
|
||||
}
|
||||
use crate::deps::{DocumentCloudService, DocumentUser};
|
||||
use crate::entities::DocumentSnapshotPB;
|
||||
use crate::{document::MutexDocument, document_data::default_document_data};
|
||||
|
||||
pub struct DocumentManager {
|
||||
user: Arc<dyn DocumentUser>,
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
documents: Arc<RwLock<HashMap<String, Arc<Document>>>>,
|
||||
documents: Arc<RwLock<HashMap<String, Arc<MutexDocument>>>>,
|
||||
#[allow(dead_code)]
|
||||
cloud_service: Arc<dyn DocumentCloudService>,
|
||||
}
|
||||
|
||||
impl DocumentManager {
|
||||
pub fn new(user: Arc<dyn DocumentUser>, collab_builder: Arc<AppFlowyCollabBuilder>) -> Self {
|
||||
pub fn new(
|
||||
user: Arc<dyn DocumentUser>,
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
cloud_service: Arc<dyn DocumentCloudService>,
|
||||
) -> Self {
|
||||
Self {
|
||||
user,
|
||||
collab_builder,
|
||||
documents: Default::default(),
|
||||
cloud_service,
|
||||
}
|
||||
}
|
||||
|
||||
@ -45,67 +43,52 @@ impl DocumentManager {
|
||||
&self,
|
||||
doc_id: &str,
|
||||
data: Option<DocumentData>,
|
||||
) -> FlowyResult<Arc<Document>> {
|
||||
tracing::debug!("create a document: {:?}", doc_id);
|
||||
let uid = self.user.user_id()?;
|
||||
let db = self.user.collab_db()?;
|
||||
let collab = self.collab_builder.build(uid, doc_id, "document", db);
|
||||
) -> FlowyResult<Arc<MutexDocument>> {
|
||||
tracing::trace!("create a document: {:?}", doc_id);
|
||||
let collab = self.collab_for_document(doc_id)?;
|
||||
let data = data.unwrap_or_else(default_document_data);
|
||||
let document = Arc::new(Document::create_with_data(collab, data)?);
|
||||
let document = Arc::new(MutexDocument::create_with_data(collab, data)?);
|
||||
Ok(document)
|
||||
}
|
||||
|
||||
/// get document
|
||||
/// read the existing document from the map if it exists, otherwise read it from the disk and write it to the map.
|
||||
pub fn get_or_open_document(&self, doc_id: &str) -> FlowyResult<Arc<Document>> {
|
||||
/// Return the document
|
||||
pub fn get_document(&self, doc_id: &str) -> FlowyResult<Arc<MutexDocument>> {
|
||||
if let Some(doc) = self.documents.read().get(doc_id) {
|
||||
return Ok(doc.clone());
|
||||
}
|
||||
// Check if the document exists. If not, return error.
|
||||
if !self.is_doc_exist(doc_id)? {
|
||||
return Err(
|
||||
FlowyError::record_not_found().context(format!("document: {} is not exist", doc_id)),
|
||||
);
|
||||
}
|
||||
|
||||
tracing::debug!("open_document: {:?}", doc_id);
|
||||
// read the existing document from the disk.
|
||||
let document = self.get_document_from_disk(doc_id)?;
|
||||
let uid = self.user.user_id()?;
|
||||
let db = self.user.collab_db()?;
|
||||
let collab = self.collab_builder.build(uid, doc_id, "document", db);
|
||||
let document = Arc::new(MutexDocument::open(doc_id, collab)?);
|
||||
|
||||
// save the document to the memory and read it from the memory if we open the same document again.
|
||||
// and we don't want to subscribe to the document changes if we open the same document again.
|
||||
self
|
||||
.documents
|
||||
.write()
|
||||
.insert(doc_id.to_string(), document.clone());
|
||||
|
||||
// subscribe to the document changes.
|
||||
self.subscribe_document_changes(document.clone(), doc_id)?;
|
||||
|
||||
Ok(document)
|
||||
}
|
||||
|
||||
pub fn subscribe_document_changes(
|
||||
&self,
|
||||
document: Arc<Document>,
|
||||
doc_id: &str,
|
||||
) -> Result<DocumentData, DocumentError> {
|
||||
let mut document = document.lock();
|
||||
let doc_id = doc_id.to_string();
|
||||
document.open(move |events, is_remote| {
|
||||
tracing::trace!(
|
||||
"document changed: {:?}, from remote: {}",
|
||||
&events,
|
||||
is_remote
|
||||
pub fn get_document_data(&self, doc_id: &str) -> FlowyResult<DocumentData> {
|
||||
if !self.is_doc_exist(doc_id)? {
|
||||
return Err(
|
||||
FlowyError::record_not_found().context(format!("document: {} is not exist", doc_id)),
|
||||
);
|
||||
// send notification to the client.
|
||||
send_notification(&doc_id, DocumentNotification::DidReceiveUpdate)
|
||||
.payload::<DocEventPB>((events, is_remote).into())
|
||||
.send();
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// get document
|
||||
/// read the existing document from the disk.
|
||||
pub fn get_document_from_disk(&self, doc_id: &str) -> FlowyResult<Arc<Document>> {
|
||||
let uid = self.user.user_id()?;
|
||||
let db = self.user.collab_db()?;
|
||||
let collab = self.collab_builder.build(uid, doc_id, "document", db);
|
||||
// read the existing document from the disk.
|
||||
let document = Arc::new(Document::new(collab)?);
|
||||
Ok(document)
|
||||
let collab = self.collab_for_document(doc_id)?;
|
||||
Document::open(collab)?
|
||||
.get_document_data()
|
||||
.map_err(internal_error)
|
||||
}
|
||||
|
||||
pub fn close_document(&self, doc_id: &str) -> FlowyResult<()> {
|
||||
@ -123,4 +106,46 @@ impl DocumentManager {
|
||||
self.documents.write().remove(doc_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Return the list of snapshots of the document.
|
||||
pub async fn get_document_snapshots(
|
||||
&self,
|
||||
document_id: &str,
|
||||
) -> FlowyResult<Vec<DocumentSnapshotPB>> {
|
||||
let mut snapshots = vec![];
|
||||
if let Some(snapshot) = self
|
||||
.cloud_service
|
||||
.get_document_latest_snapshot(document_id)
|
||||
.await?
|
||||
.map(|snapshot| DocumentSnapshotPB {
|
||||
snapshot_id: snapshot.snapshot_id,
|
||||
snapshot_desc: "".to_string(),
|
||||
created_at: snapshot.created_at,
|
||||
data: snapshot.data,
|
||||
})
|
||||
{
|
||||
snapshots.push(snapshot);
|
||||
}
|
||||
|
||||
Ok(snapshots)
|
||||
}
|
||||
|
||||
fn collab_for_document(&self, doc_id: &str) -> FlowyResult<Arc<MutexCollab>> {
|
||||
let uid = self.user.user_id()?;
|
||||
let db = self.user.collab_db()?;
|
||||
Ok(self.collab_builder.build(uid, doc_id, "document", db))
|
||||
}
|
||||
|
||||
fn is_doc_exist(&self, doc_id: &str) -> FlowyResult<bool> {
|
||||
let uid = self.user.user_id()?;
|
||||
let db = self.user.collab_db()?;
|
||||
let read_txn = db.read_txn();
|
||||
Ok(read_txn.is_exist(uid, doc_id))
|
||||
}
|
||||
|
||||
/// Only expose this method for testing
|
||||
#[cfg(debug_assertions)]
|
||||
pub fn get_cloud_service(&self) -> &Arc<dyn DocumentCloudService> {
|
||||
&self.cloud_service
|
||||
}
|
||||
}
|
||||
|
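Editor's aside: the new get_document_snapshots API above only surfaces the latest snapshot reported by the cloud service. A minimal, hedged usage sketch (the function name and logging are illustrative and not part of this commit):

use std::sync::Arc;

use flowy_document2::manager::DocumentManager;
use flowy_error::FlowyResult;

// Hedged sketch: log whatever snapshots the cloud service returned for a document.
// Assumes a DocumentManager built with a DocumentCloudService, as in the tests below.
async fn log_document_snapshots(manager: Arc<DocumentManager>, doc_id: &str) -> FlowyResult<()> {
  for snapshot in manager.get_document_snapshots(doc_id).await? {
    tracing::info!(
      "snapshot {} created at {} ({} bytes)",
      snapshot.snapshot_id,
      snapshot.created_at,
      snapshot.data.len()
    );
  }
  Ok(())
}
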
@ -1,7 +1,7 @@
use flowy_derive::ProtoBuf_Enum;
use flowy_notification::NotificationBuilder;

const OBSERVABLE_CATEGORY: &str = "Document";
const DOCUMENT_OBSERVABLE_SOURCE: &str = "Document";

#[derive(ProtoBuf_Enum, Debug, Default)]
pub(crate) enum DocumentNotification {
@ -9,6 +9,8 @@ pub(crate) enum DocumentNotification {
Unknown = 0,

DidReceiveUpdate = 1,
DidUpdateDocumentSnapshotState = 2,
DidUpdateDocumentSyncState = 3,
}

impl std::convert::From<DocumentNotification> for i32 {
@ -16,7 +18,17 @@ impl std::convert::From<DocumentNotification> for i32 {
notification as i32
}
}
impl std::convert::From<i32> for DocumentNotification {
fn from(notification: i32) -> Self {
match notification {
1 => DocumentNotification::DidReceiveUpdate,
2 => DocumentNotification::DidUpdateDocumentSnapshotState,
3 => DocumentNotification::DidUpdateDocumentSyncState,
_ => DocumentNotification::Unknown,
}
}
}

pub(crate) fn send_notification(id: &str, ty: DocumentNotification) -> NotificationBuilder {
NotificationBuilder::new(id, ty, OBSERVABLE_CATEGORY)
NotificationBuilder::new(id, ty, DOCUMENT_OBSERVABLE_SOURCE)
}

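Editor's aside: DocumentNotification now converts in both directions, so a notification id can round-trip through the i32 wire format. A small hedged sketch (crate-internal, since the enum is pub(crate)):

#[test]
fn document_notification_round_trips_through_i32() {
  // Hedged sketch: the two From impls above are symmetric for the known variants.
  let raw: i32 = DocumentNotification::DidUpdateDocumentSyncState.into(); // 3
  assert!(matches!(
    DocumentNotification::from(raw),
    DocumentNotification::DidUpdateDocumentSyncState
  ));
}
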
@ -1,12 +1,14 @@
|
||||
use std::{collections::HashMap, vec};
|
||||
|
||||
use collab_document::blocks::{Block, BlockAction, BlockActionPayload, BlockActionType};
|
||||
|
||||
use flowy_document2::document_data::PARAGRAPH_BLOCK_TYPE;
|
||||
|
||||
use crate::document::util;
|
||||
use crate::document::util::gen_id;
|
||||
use collab_document::blocks::{Block, BlockAction, BlockActionPayload, BlockActionType};
|
||||
use flowy_document2::document_block_keys::PARAGRAPH_BLOCK_TYPE;
|
||||
|
||||
#[test]
|
||||
fn document_apply_insert_block_with_empty_parent_id() {
|
||||
#[tokio::test]
|
||||
async fn document_apply_insert_block_with_empty_parent_id() {
|
||||
let (_, document, page_id) = util::create_and_open_empty_document();
|
||||
|
||||
// create a text block with no parent
|
||||
|
@ -1,27 +1,23 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use collab_document::blocks::{Block, BlockAction, BlockActionPayload, BlockActionType};
|
||||
|
||||
use flowy_document2::document_block_keys::PARAGRAPH_BLOCK_TYPE;
|
||||
use flowy_document2::document_data::default_document_data;
|
||||
use flowy_document2::manager::DocumentManager;
|
||||
use flowy_document2::document_data::{default_document_data, PARAGRAPH_BLOCK_TYPE};
|
||||
|
||||
use crate::document::util::{default_collab_builder, gen_document_id, gen_id, FakeUser};
|
||||
use crate::document::util::{gen_document_id, gen_id, DocumentTest};
|
||||
|
||||
#[tokio::test]
|
||||
async fn undo_redo_test() {
|
||||
let user = FakeUser::new();
|
||||
let manager = DocumentManager::new(Arc::new(user), default_collab_builder());
|
||||
let test = DocumentTest::new();
|
||||
|
||||
let doc_id: String = gen_document_id();
|
||||
let data = default_document_data();
|
||||
|
||||
// create a document
|
||||
_ = manager.create_document(&doc_id, Some(data.clone()));
|
||||
_ = test.create_document(&doc_id, Some(data.clone()));
|
||||
|
||||
// open a document
|
||||
let document = manager.get_or_open_document(&doc_id).unwrap();
|
||||
let document = test.get_document(&doc_id).unwrap();
|
||||
let document = document.lock();
|
||||
let page_block = document.get_block(&data.page_id).unwrap();
|
||||
let page_id = page_block.id;
|
||||
|
@ -1,69 +1,60 @@
|
||||
use std::{collections::HashMap, sync::Arc, vec};
|
||||
use std::{collections::HashMap, vec};
|
||||
|
||||
use collab_document::blocks::{Block, BlockAction, BlockActionPayload, BlockActionType};
|
||||
use serde_json::{json, to_value, Value};
|
||||
|
||||
use flowy_document2::document_block_keys::PARAGRAPH_BLOCK_TYPE;
|
||||
use flowy_document2::document_data::default_document_data;
|
||||
use flowy_document2::manager::DocumentManager;
|
||||
use flowy_document2::document_data::{default_document_data, PARAGRAPH_BLOCK_TYPE};
|
||||
|
||||
use crate::document::util::{default_collab_builder, gen_document_id, gen_id};
|
||||
use crate::document::util::{gen_document_id, gen_id, DocumentTest};
|
||||
|
||||
use super::util::FakeUser;
|
||||
|
||||
#[test]
|
||||
fn restore_document() {
|
||||
let user = FakeUser::new();
|
||||
let manager = DocumentManager::new(Arc::new(user), default_collab_builder());
|
||||
#[tokio::test]
|
||||
async fn restore_document() {
|
||||
let test = DocumentTest::new();
|
||||
|
||||
// create a document
|
||||
let doc_id: String = gen_document_id();
|
||||
let data = default_document_data();
|
||||
let document_a = manager
|
||||
.create_document(&doc_id, Some(data.clone()))
|
||||
.unwrap();
|
||||
let data_a = document_a.lock().get_document().unwrap();
|
||||
let document_a = test.create_document(&doc_id, Some(data.clone())).unwrap();
|
||||
let data_a = document_a.lock().get_document_data().unwrap();
|
||||
assert_eq!(data_a, data);
|
||||
|
||||
// open a document
|
||||
let data_b = manager
|
||||
.get_or_open_document(&doc_id)
|
||||
let data_b = test
|
||||
.get_document(&doc_id)
|
||||
.unwrap()
|
||||
.lock()
|
||||
.get_document()
|
||||
.get_document_data()
|
||||
.unwrap();
|
||||
// close a document
|
||||
_ = manager.close_document(&doc_id);
|
||||
_ = test.close_document(&doc_id);
|
||||
assert_eq!(data_b, data);
|
||||
|
||||
// restore
|
||||
_ = manager.create_document(&doc_id, Some(data.clone()));
|
||||
_ = test.create_document(&doc_id, Some(data.clone()));
|
||||
// open a document
|
||||
let data_b = manager
|
||||
.get_or_open_document(&doc_id)
|
||||
let data_b = test
|
||||
.get_document(&doc_id)
|
||||
.unwrap()
|
||||
.lock()
|
||||
.get_document()
|
||||
.get_document_data()
|
||||
.unwrap();
|
||||
// close a document
|
||||
_ = manager.close_document(&doc_id);
|
||||
_ = test.close_document(&doc_id);
|
||||
|
||||
assert_eq!(data_b, data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn document_apply_insert_action() {
|
||||
let user = FakeUser::new();
|
||||
let manager = DocumentManager::new(Arc::new(user), default_collab_builder());
|
||||
|
||||
#[tokio::test]
|
||||
async fn document_apply_insert_action() {
|
||||
let test = DocumentTest::new();
|
||||
let doc_id: String = gen_document_id();
|
||||
let data = default_document_data();
|
||||
|
||||
// create a document
|
||||
_ = manager.create_document(&doc_id, Some(data.clone()));
|
||||
_ = test.create_document(&doc_id, Some(data.clone()));
|
||||
|
||||
// open a document
|
||||
let document = manager.get_or_open_document(&doc_id).unwrap();
|
||||
let document = test.get_document(&doc_id).unwrap();
|
||||
let page_block = document.lock().get_block(&data.page_id).unwrap();
|
||||
|
||||
// insert a text block
|
||||
@ -85,36 +76,34 @@ fn document_apply_insert_action() {
|
||||
},
|
||||
};
|
||||
document.lock().apply_action(vec![insert_text_action]);
|
||||
let data_a = document.lock().get_document().unwrap();
|
||||
let data_a = document.lock().get_document_data().unwrap();
|
||||
// close the original document
|
||||
_ = manager.close_document(&doc_id);
|
||||
_ = test.close_document(&doc_id);
|
||||
|
||||
// re-open the document
|
||||
let data_b = manager
|
||||
.get_or_open_document(&doc_id)
|
||||
let data_b = test
|
||||
.get_document(&doc_id)
|
||||
.unwrap()
|
||||
.lock()
|
||||
.get_document()
|
||||
.get_document_data()
|
||||
.unwrap();
|
||||
// close a document
|
||||
_ = manager.close_document(&doc_id);
|
||||
_ = test.close_document(&doc_id);
|
||||
|
||||
assert_eq!(data_b, data_a);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn document_apply_update_page_action() {
|
||||
let user = FakeUser::new();
|
||||
let manager = DocumentManager::new(Arc::new(user), default_collab_builder());
|
||||
|
||||
#[tokio::test]
|
||||
async fn document_apply_update_page_action() {
|
||||
let test = DocumentTest::new();
|
||||
let doc_id: String = gen_document_id();
|
||||
let data = default_document_data();
|
||||
|
||||
// create a document
|
||||
_ = manager.create_document(&doc_id, Some(data.clone()));
|
||||
_ = test.create_document(&doc_id, Some(data.clone()));
|
||||
|
||||
// open a document
|
||||
let document = manager.get_or_open_document(&doc_id).unwrap();
|
||||
let document = test.get_document(&doc_id).unwrap();
|
||||
let page_block = document.lock().get_block(&data.page_id).unwrap();
|
||||
|
||||
let mut page_block_clone = page_block;
|
||||
@ -135,28 +124,26 @@ fn document_apply_update_page_action() {
|
||||
tracing::trace!("{:?}", &actions);
|
||||
document.lock().apply_action(actions);
|
||||
let page_block_old = document.lock().get_block(&data.page_id).unwrap();
|
||||
_ = manager.close_document(&doc_id);
|
||||
_ = test.close_document(&doc_id);
|
||||
|
||||
// re-open the document
|
||||
let document = manager.get_or_open_document(&doc_id).unwrap();
|
||||
let document = test.get_document(&doc_id).unwrap();
|
||||
let page_block_new = document.lock().get_block(&data.page_id).unwrap();
|
||||
assert_eq!(page_block_old, page_block_new);
|
||||
assert!(page_block_new.data.contains_key("delta"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn document_apply_update_action() {
|
||||
let user = FakeUser::new();
|
||||
let manager = DocumentManager::new(Arc::new(user), default_collab_builder());
|
||||
|
||||
#[tokio::test]
|
||||
async fn document_apply_update_action() {
|
||||
let test = DocumentTest::new();
|
||||
let doc_id: String = gen_document_id();
|
||||
let data = default_document_data();
|
||||
|
||||
// create a document
|
||||
_ = manager.create_document(&doc_id, Some(data.clone()));
|
||||
_ = test.create_document(&doc_id, Some(data.clone()));
|
||||
|
||||
// open a document
|
||||
let document = manager.get_or_open_document(&doc_id).unwrap();
|
||||
let document = test.get_document(&doc_id).unwrap();
|
||||
let page_block = document.lock().get_block(&data.page_id).unwrap();
|
||||
|
||||
// insert a text block
|
||||
@ -203,12 +190,12 @@ fn document_apply_update_action() {
|
||||
};
|
||||
document.lock().apply_action(vec![update_text_action]);
|
||||
// close the original document
|
||||
_ = manager.close_document(&doc_id);
|
||||
_ = test.close_document(&doc_id);
|
||||
|
||||
// re-open the document
|
||||
let document = manager.get_or_open_document(&doc_id).unwrap();
|
||||
let document = test.get_document(&doc_id).unwrap();
|
||||
let block = document.lock().get_block(&text_block_id).unwrap();
|
||||
assert_eq!(block.data, updated_text_block_data);
|
||||
// close a document
|
||||
_ = manager.close_document(&doc_id);
|
||||
_ = test.close_document(&doc_id);
|
||||
}
|
||||
|
@ -1,16 +1,40 @@
|
||||
use appflowy_integrate::collab_builder::{AppFlowyCollabBuilder, CloudStorageType};
|
||||
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
|
||||
use appflowy_integrate::collab_builder::{AppFlowyCollabBuilder, DefaultCollabStorageProvider};
|
||||
use appflowy_integrate::RocksCollabDB;
|
||||
use flowy_document2::document::Document;
|
||||
use nanoid::nanoid;
|
||||
use parking_lot::Once;
|
||||
use tempfile::TempDir;
|
||||
use tracing_subscriber::{fmt::Subscriber, util::SubscriberInitExt, EnvFilter};
|
||||
|
||||
use flowy_document2::deps::{DocumentCloudService, DocumentSnapshot, DocumentUser};
|
||||
use flowy_document2::document::MutexDocument;
|
||||
use flowy_document2::document_data::default_document_data;
|
||||
use flowy_document2::manager::{DocumentManager, DocumentUser};
|
||||
use nanoid::nanoid;
|
||||
use flowy_document2::manager::DocumentManager;
|
||||
use flowy_error::FlowyError;
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
pub struct DocumentTest {
|
||||
inner: DocumentManager,
|
||||
}
|
||||
|
||||
impl DocumentTest {
|
||||
pub fn new() -> Self {
|
||||
let user = FakeUser::new();
|
||||
let cloud_service = Arc::new(LocalTestDocumentCloudServiceImpl());
|
||||
let manager = DocumentManager::new(Arc::new(user), default_collab_builder(), cloud_service);
|
||||
Self { inner: manager }
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for DocumentTest {
|
||||
type Target = DocumentManager;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FakeUser {
|
||||
kv: Arc<RocksCollabDB>,
|
||||
@ -53,25 +77,21 @@ pub fn db() -> Arc<RocksCollabDB> {
|
||||
}
|
||||
|
||||
pub fn default_collab_builder() -> Arc<AppFlowyCollabBuilder> {
|
||||
let builder = AppFlowyCollabBuilder::new(CloudStorageType::Local, None);
|
||||
let builder = AppFlowyCollabBuilder::new(DefaultCollabStorageProvider(), None);
|
||||
Arc::new(builder)
|
||||
}
|
||||
|
||||
pub fn create_and_open_empty_document() -> (DocumentManager, Arc<Document>, String) {
|
||||
let user = FakeUser::new();
|
||||
let manager = DocumentManager::new(Arc::new(user), default_collab_builder());
|
||||
|
||||
pub fn create_and_open_empty_document() -> (DocumentTest, Arc<MutexDocument>, String) {
|
||||
let test = DocumentTest::new();
|
||||
let doc_id: String = gen_document_id();
|
||||
let data = default_document_data();
|
||||
|
||||
// create a document
|
||||
_ = manager
|
||||
.create_document(&doc_id, Some(data.clone()))
|
||||
.unwrap();
|
||||
_ = test.create_document(&doc_id, Some(data.clone())).unwrap();
|
||||
|
||||
let document = manager.get_or_open_document(&doc_id).unwrap();
|
||||
let document = test.get_document(&doc_id).unwrap();
|
||||
|
||||
(manager, document, data.page_id)
|
||||
(test, document, data.page_id)
|
||||
}
|
||||
|
||||
pub fn gen_document_id() -> String {
|
||||
@ -82,3 +102,17 @@ pub fn gen_document_id() -> String {
|
||||
pub fn gen_id() -> String {
|
||||
nanoid!(10)
|
||||
}
|
||||
|
||||
pub struct LocalTestDocumentCloudServiceImpl();
|
||||
impl DocumentCloudService for LocalTestDocumentCloudServiceImpl {
|
||||
fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(vec![]) })
|
||||
}
|
||||
|
||||
fn get_document_latest_snapshot(
|
||||
&self,
|
||||
_document_id: &str,
|
||||
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(None) })
|
||||
}
|
||||
}
|
||||
|
@ -149,8 +149,8 @@ pub enum ErrorCode {
#[error("Invalid date time format")]
InvalidDateTimeFormat = 47,

#[error("Invalid data")]
InvalidData = 49,
#[error("Invalid params")]
InvalidParams = 49,

#[error("Serde")]
Serde = 50,
@ -208,6 +208,12 @@ pub enum ErrorCode {

#[error("Apply actions is empty")]
ApplyActionsIsEmpty = 68,

#[error("Connect postgres database failed")]
PgConnectError = 69,

#[error("Postgres database error")]
PgDatabaseError = 70,
}

impl ErrorCode {

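Editor's aside: a hedged sketch of how the two new Postgres codes might surface as errors. Only ErrorCode::PgConnectError and the FlowyError::new constructor come from this commit; the helper and message are illustrative:

use flowy_error::{ErrorCode, FlowyError};

// Hedged sketch: map a failed connection attempt to the new error code.
fn pg_connect_error() -> FlowyError {
  FlowyError::new(ErrorCode::PgConnectError, "failed to connect to postgres")
}
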
@ -79,7 +79,7 @@ impl FlowyError {
static_flowy_error!(user_id, ErrorCode::UserIdInvalid);
static_flowy_error!(user_not_exist, ErrorCode::UserNotExist);
static_flowy_error!(text_too_long, ErrorCode::TextTooLong);
static_flowy_error!(invalid_data, ErrorCode::InvalidData);
static_flowy_error!(invalid_data, ErrorCode::InvalidParams);
static_flowy_error!(out_of_bounds, ErrorCode::OutOfBounds);
static_flowy_error!(serde, ErrorCode::Serde);
static_flowy_error!(field_record_not_found, ErrorCode::FieldRecordNotFound);

@ -31,7 +31,7 @@ tokio-stream = { version = "0.1.14", features = ["sync"] }

[dev-dependencies]
flowy-folder2 = { path = "../flowy-folder2"}
flowy-test = { path = "../flowy-test" }
flowy-test = { path = "../flowy-test", default-features = false }

[build-dependencies]
flowy-codegen = { path = "../../../shared-lib/flowy-codegen"}

@ -14,4 +14,18 @@ pub trait FolderUser: Send + Sync {
/// [FolderCloudService] represents the cloud service for folder.
pub trait FolderCloudService: Send + Sync + 'static {
fn create_workspace(&self, uid: i64, name: &str) -> FutureResult<Workspace, FlowyError>;

fn get_folder_latest_snapshot(
&self,
workspace_id: &str,
) -> FutureResult<Option<FolderSnapshot>, FlowyError>;

fn get_folder_updates(&self, workspace_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError>;
}

pub struct FolderSnapshot {
pub snapshot_id: i64,
pub database_id: String,
pub data: Vec<u8>,
pub created_at: i64,
}

@ -2,6 +2,7 @@ use crate::{
|
||||
entities::parser::workspace::{WorkspaceDesc, WorkspaceIdentify, WorkspaceName},
|
||||
entities::view::ViewPB,
|
||||
};
|
||||
use collab::core::collab_state::SyncState;
|
||||
use collab_folder::core::Workspace;
|
||||
use flowy_derive::ProtoBuf;
|
||||
use flowy_error::ErrorCode;
|
||||
@ -151,3 +152,48 @@ impl TryInto<UpdateWorkspaceParams> for UpdateWorkspacePayloadPB {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct RepeatedFolderSnapshotPB {
|
||||
#[pb(index = 1)]
|
||||
pub items: Vec<FolderSnapshotPB>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct FolderSnapshotPB {
|
||||
#[pb(index = 1)]
|
||||
pub snapshot_id: i64,
|
||||
|
||||
#[pb(index = 2)]
|
||||
pub snapshot_desc: String,
|
||||
|
||||
#[pb(index = 3)]
|
||||
pub created_at: i64,
|
||||
|
||||
#[pb(index = 4)]
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct FolderSnapshotStatePB {
|
||||
#[pb(index = 1)]
|
||||
pub new_snapshot_id: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, ProtoBuf)]
|
||||
pub struct FolderSyncStatePB {
|
||||
#[pb(index = 1)]
|
||||
pub is_syncing: bool,
|
||||
|
||||
#[pb(index = 2)]
|
||||
pub is_finish: bool,
|
||||
}
|
||||
|
||||
impl From<SyncState> for FolderSyncStatePB {
|
||||
fn from(value: SyncState) -> Self {
|
||||
Self {
|
||||
is_syncing: value.is_syncing(),
|
||||
is_finish: value.is_sync_finished(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4,13 +4,13 @@ use flowy_error::FlowyError;
|
||||
use lib_dispatch::prelude::{data_result_ok, AFPluginData, AFPluginState, DataResult};
|
||||
|
||||
use crate::entities::*;
|
||||
use crate::manager::Folder2Manager;
|
||||
use crate::manager::FolderManager;
|
||||
use crate::share::ImportParams;
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(data, folder), err)]
|
||||
pub(crate) async fn create_workspace_handler(
|
||||
data: AFPluginData<CreateWorkspacePayloadPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<WorkspacePB, FlowyError> {
|
||||
let params: CreateWorkspaceParams = data.into_inner().try_into()?;
|
||||
let workspace = folder.create_workspace(params).await?;
|
||||
@ -19,7 +19,7 @@ pub(crate) async fn create_workspace_handler(
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(folder), err)]
|
||||
pub(crate) async fn read_workspace_views_handler(
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<RepeatedViewPB, FlowyError> {
|
||||
let child_views = folder.get_current_workspace_views().await?;
|
||||
let repeated_view: RepeatedViewPB = child_views.into();
|
||||
@ -29,7 +29,7 @@ pub(crate) async fn read_workspace_views_handler(
|
||||
#[tracing::instrument(level = "debug", skip(data, folder), err)]
|
||||
pub(crate) async fn open_workspace_handler(
|
||||
data: AFPluginData<WorkspaceIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<WorkspacePB, FlowyError> {
|
||||
let params: WorkspaceIdPB = data.into_inner();
|
||||
match params.value {
|
||||
@ -50,7 +50,7 @@ pub(crate) async fn open_workspace_handler(
|
||||
#[tracing::instrument(level = "debug", skip(data, folder), err)]
|
||||
pub(crate) async fn read_workspaces_handler(
|
||||
data: AFPluginData<WorkspaceIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<RepeatedWorkspacePB, FlowyError> {
|
||||
let params: WorkspaceIdPB = data.into_inner();
|
||||
let workspaces = match params.value {
|
||||
@ -67,7 +67,7 @@ pub(crate) async fn read_workspaces_handler(
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(folder), err)]
|
||||
pub async fn read_current_workspace_setting_handler(
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<WorkspaceSettingPB, FlowyError> {
|
||||
let workspace = folder.get_current_workspace().await?;
|
||||
let latest_view: Option<ViewPB> = folder.get_current_view().await;
|
||||
@ -79,7 +79,7 @@ pub async fn read_current_workspace_setting_handler(
|
||||
|
||||
pub(crate) async fn create_view_handler(
|
||||
data: AFPluginData<CreateViewPayloadPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<ViewPB, FlowyError> {
|
||||
let params: CreateViewParams = data.into_inner().try_into()?;
|
||||
let set_as_current = params.set_as_current;
|
||||
@ -92,7 +92,7 @@ pub(crate) async fn create_view_handler(
|
||||
|
||||
pub(crate) async fn create_orphan_view_handler(
|
||||
data: AFPluginData<CreateOrphanViewPayloadPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<ViewPB, FlowyError> {
|
||||
let params: CreateViewParams = data.into_inner().try_into()?;
|
||||
let set_as_current = params.set_as_current;
|
||||
@ -105,7 +105,7 @@ pub(crate) async fn create_orphan_view_handler(
|
||||
|
||||
pub(crate) async fn read_view_handler(
|
||||
data: AFPluginData<ViewIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<ViewPB, FlowyError> {
|
||||
let view_id: ViewIdPB = data.into_inner();
|
||||
let view_pb = folder.get_view(&view_id.value).await?;
|
||||
@ -115,7 +115,7 @@ pub(crate) async fn read_view_handler(
|
||||
#[tracing::instrument(level = "debug", skip(data, folder), err)]
|
||||
pub(crate) async fn update_view_handler(
|
||||
data: AFPluginData<UpdateViewPayloadPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let params: UpdateViewParams = data.into_inner().try_into()?;
|
||||
folder.update_view_with_params(params).await?;
|
||||
@ -124,7 +124,7 @@ pub(crate) async fn update_view_handler(
|
||||
|
||||
pub(crate) async fn delete_view_handler(
|
||||
data: AFPluginData<RepeatedViewIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let params: RepeatedViewIdPB = data.into_inner();
|
||||
for view_id in ¶ms.items {
|
||||
@ -135,7 +135,7 @@ pub(crate) async fn delete_view_handler(
|
||||
|
||||
pub(crate) async fn set_latest_view_handler(
|
||||
data: AFPluginData<ViewIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let view_id: ViewIdPB = data.into_inner();
|
||||
let _ = folder.set_current_view(&view_id.value).await;
|
||||
@ -144,7 +144,7 @@ pub(crate) async fn set_latest_view_handler(
|
||||
|
||||
pub(crate) async fn close_view_handler(
|
||||
data: AFPluginData<ViewIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let view_id: ViewIdPB = data.into_inner();
|
||||
let _ = folder.close_view(&view_id.value).await;
|
||||
@ -154,7 +154,7 @@ pub(crate) async fn close_view_handler(
|
||||
#[tracing::instrument(level = "debug", skip_all, err)]
|
||||
pub(crate) async fn move_view_handler(
|
||||
data: AFPluginData<MoveViewPayloadPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let params: MoveViewParams = data.into_inner().try_into()?;
|
||||
folder
|
||||
@ -166,7 +166,7 @@ pub(crate) async fn move_view_handler(
|
||||
#[tracing::instrument(level = "debug", skip(data, folder), err)]
|
||||
pub(crate) async fn duplicate_view_handler(
|
||||
data: AFPluginData<ViewPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let view: ViewPB = data.into_inner();
|
||||
folder.duplicate_view(&view.id).await?;
|
||||
@ -175,7 +175,7 @@ pub(crate) async fn duplicate_view_handler(
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(folder), err)]
|
||||
pub(crate) async fn read_trash_handler(
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<RepeatedTrashPB, FlowyError> {
|
||||
let trash = folder.get_all_trash().await;
|
||||
data_result_ok(trash.into())
|
||||
@ -184,7 +184,7 @@ pub(crate) async fn read_trash_handler(
|
||||
#[tracing::instrument(level = "debug", skip(identifier, folder), err)]
|
||||
pub(crate) async fn putback_trash_handler(
|
||||
identifier: AFPluginData<TrashIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
folder.restore_trash(&identifier.id).await;
|
||||
Ok(())
|
||||
@ -193,7 +193,7 @@ pub(crate) async fn putback_trash_handler(
|
||||
#[tracing::instrument(level = "debug", skip(identifiers, folder), err)]
|
||||
pub(crate) async fn delete_trash_handler(
|
||||
identifiers: AFPluginData<RepeatedTrashIdPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let trash_ids = identifiers.into_inner().items;
|
||||
for trash_id in trash_ids {
|
||||
@ -204,7 +204,7 @@ pub(crate) async fn delete_trash_handler(
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(folder), err)]
|
||||
pub(crate) async fn restore_all_trash_handler(
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
folder.restore_all_trash().await;
|
||||
Ok(())
|
||||
@ -212,7 +212,7 @@ pub(crate) async fn restore_all_trash_handler(
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(folder), err)]
|
||||
pub(crate) async fn delete_all_trash_handler(
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
folder.delete_all_trash().await;
|
||||
Ok(())
|
||||
@ -221,9 +221,22 @@ pub(crate) async fn delete_all_trash_handler(
|
||||
#[tracing::instrument(level = "debug", skip(data, folder), err)]
|
||||
pub(crate) async fn import_data_handler(
|
||||
data: AFPluginData<ImportPB>,
|
||||
folder: AFPluginState<Arc<Folder2Manager>>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> Result<(), FlowyError> {
|
||||
let params: ImportParams = data.into_inner().try_into()?;
|
||||
folder.import(params).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(folder), err)]
|
||||
pub(crate) async fn get_folder_snapshots_handler(
|
||||
data: AFPluginData<WorkspaceIdPB>,
|
||||
folder: AFPluginState<Arc<FolderManager>>,
|
||||
) -> DataResult<RepeatedFolderSnapshotPB, FlowyError> {
|
||||
if let Some(workspace_id) = &data.value {
|
||||
let snapshots = folder.get_folder_snapshots(workspace_id).await?;
|
||||
data_result_ok(RepeatedFolderSnapshotPB { items: snapshots })
|
||||
} else {
|
||||
data_result_ok(RepeatedFolderSnapshotPB { items: vec![] })
|
||||
}
|
||||
}
|
||||
|
@ -6,9 +6,9 @@ use flowy_derive::{Flowy_Event, ProtoBuf_Enum};
|
||||
use lib_dispatch::prelude::*;
|
||||
|
||||
use crate::event_handler::*;
|
||||
use crate::manager::Folder2Manager;
|
||||
use crate::manager::FolderManager;
|
||||
|
||||
pub fn init(folder: Arc<Folder2Manager>) -> AFPlugin {
|
||||
pub fn init(folder: Arc<FolderManager>) -> AFPlugin {
|
||||
AFPlugin::new().name("Flowy-Folder").state(folder)
|
||||
// Workspace
|
||||
.event(FolderEvent::CreateWorkspace, create_workspace_handler)
|
||||
@ -36,6 +36,7 @@ pub fn init(folder: Arc<Folder2Manager>) -> AFPlugin {
|
||||
.event(FolderEvent::RestoreAllTrash, restore_all_trash_handler)
|
||||
.event(FolderEvent::DeleteAllTrash, delete_all_trash_handler)
|
||||
.event(FolderEvent::ImportData, import_data_handler)
|
||||
.event(FolderEvent::GetFolderSnapshots, get_folder_snapshots_handler)
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Display, Hash, ProtoBuf_Enum, Flowy_Event)]
|
||||
@ -128,4 +129,7 @@ pub enum FolderEvent {
|
||||
|
||||
#[event(input = "ImportPB")]
|
||||
ImportData = 30,
|
||||
|
||||
#[event()]
|
||||
GetFolderSnapshots = 31,
|
||||
}
|
||||
|
@ -4,7 +4,7 @@ use std::sync::{Arc, Weak};
|
||||
|
||||
use appflowy_integrate::collab_builder::AppFlowyCollabBuilder;
|
||||
use appflowy_integrate::CollabPersistenceConfig;
|
||||
use collab::core::collab_state::CollabState;
|
||||
use collab::core::collab_state::SyncState;
|
||||
use collab_folder::core::{
|
||||
Folder, FolderContext, TrashChange, TrashChangeReceiver, TrashInfo, View, ViewChange,
|
||||
ViewChangeReceiver, ViewLayout, Workspace,
|
||||
@ -19,8 +19,8 @@ use flowy_error::{ErrorCode, FlowyError, FlowyResult};
|
||||
use crate::deps::{FolderCloudService, FolderUser};
|
||||
use crate::entities::{
|
||||
view_pb_with_child_views, view_pb_without_child_views, ChildViewUpdatePB, CreateViewParams,
|
||||
CreateWorkspaceParams, DeletedViewPB, RepeatedTrashPB, RepeatedViewPB, RepeatedWorkspacePB,
|
||||
UpdateViewParams, ViewPB, WorkspacePB,
|
||||
CreateWorkspaceParams, DeletedViewPB, FolderSnapshotPB, FolderSnapshotStatePB, FolderSyncStatePB,
|
||||
RepeatedTrashPB, RepeatedViewPB, RepeatedWorkspacePB, UpdateViewParams, ViewPB, WorkspacePB,
|
||||
};
|
||||
use crate::notification::{
|
||||
send_notification, send_workspace_notification, send_workspace_setting_notification,
|
||||
@ -32,7 +32,7 @@ use crate::view_operation::{
|
||||
create_view, gen_view_id, FolderOperationHandler, FolderOperationHandlers,
|
||||
};
|
||||
|
||||
pub struct Folder2Manager {
|
||||
pub struct FolderManager {
|
||||
mutex_folder: Arc<MutexFolder>,
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
user: Arc<dyn FolderUser>,
|
||||
@ -40,10 +40,10 @@ pub struct Folder2Manager {
|
||||
cloud_service: Arc<dyn FolderCloudService>,
|
||||
}
|
||||
|
||||
unsafe impl Send for Folder2Manager {}
|
||||
unsafe impl Sync for Folder2Manager {}
|
||||
unsafe impl Send for FolderManager {}
|
||||
unsafe impl Sync for FolderManager {}
|
||||
|
||||
impl Folder2Manager {
|
||||
impl FolderManager {
|
||||
pub async fn new(
|
||||
user: Arc<dyn FolderUser>,
|
||||
collab_builder: Arc<AppFlowyCollabBuilder>,
|
||||
@ -134,13 +134,18 @@ impl Folder2Manager {
|
||||
trash_change_tx: trash_tx,
|
||||
};
|
||||
let folder = Folder::get_or_create(collab, folder_context);
|
||||
let folder_state_rx = folder.subscribe_state_change();
|
||||
let folder_state_rx = folder.subscribe_sync_state();
|
||||
*self.mutex_folder.lock() = Some(folder);
|
||||
|
||||
let weak_mutex_folder = Arc::downgrade(&self.mutex_folder);
|
||||
listen_on_folder_state_change(workspace_id, folder_state_rx, &weak_mutex_folder);
|
||||
listen_on_trash_change(trash_rx, &weak_mutex_folder);
|
||||
listen_on_view_change(view_rx, &weak_mutex_folder);
|
||||
subscribe_folder_sync_state_changed(
|
||||
workspace_id.clone(),
|
||||
folder_state_rx,
|
||||
&weak_mutex_folder,
|
||||
);
|
||||
subscribe_folder_snapshot_state_changed(workspace_id, &weak_mutex_folder);
|
||||
subscribe_folder_trash_changed(trash_rx, &weak_mutex_folder);
|
||||
subscribe_folder_view_changed(view_rx, &weak_mutex_folder);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -151,24 +156,30 @@ impl Folder2Manager {
|
||||
&self,
|
||||
user_id: i64,
|
||||
token: &str,
|
||||
is_new: bool,
|
||||
workspace_id: &str,
|
||||
) -> FlowyResult<()> {
|
||||
self.initialize(user_id, workspace_id).await?;
|
||||
let (folder_data, workspace_pb) = DefaultFolderBuilder::build(
|
||||
self.user.user_id()?,
|
||||
workspace_id.to_string(),
|
||||
&self.operation_handlers,
|
||||
)
|
||||
.await;
|
||||
self.with_folder((), |folder| {
|
||||
folder.create_with_data(folder_data);
|
||||
});
|
||||
|
||||
send_notification(token, FolderNotification::DidCreateWorkspace)
|
||||
.payload(RepeatedWorkspacePB {
|
||||
items: vec![workspace_pb],
|
||||
})
|
||||
.send();
|
||||
// Create the default workspace if the user is new
|
||||
tracing::info!("initialize_with_user: is_new: {}", is_new);
|
||||
if is_new {
|
||||
let (folder_data, workspace_pb) = DefaultFolderBuilder::build(
|
||||
self.user.user_id()?,
|
||||
workspace_id.to_string(),
|
||||
&self.operation_handlers,
|
||||
)
|
||||
.await;
|
||||
self.with_folder((), |folder| {
|
||||
folder.create_with_data(folder_data);
|
||||
});
|
||||
|
||||
send_notification(token, FolderNotification::DidCreateWorkspace)
|
||||
.payload(RepeatedWorkspacePB {
|
||||
items: vec![workspace_pb],
|
||||
})
|
||||
.send();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -540,7 +551,7 @@ impl Folder2Manager {
|
||||
pub(crate) async fn import(&self, import_data: ImportParams) -> FlowyResult<View> {
|
||||
if import_data.data.is_none() && import_data.file_path.is_none() {
|
||||
return Err(FlowyError::new(
|
||||
ErrorCode::InvalidData,
|
||||
ErrorCode::InvalidParams,
|
||||
"data or file_path is required",
|
||||
));
|
||||
}
|
||||
@ -626,10 +637,47 @@ impl Folder2Manager {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn get_folder_snapshots(
|
||||
&self,
|
||||
workspace_id: &str,
|
||||
) -> FlowyResult<Vec<FolderSnapshotPB>> {
|
||||
let mut snapshots = vec![];
|
||||
if let Some(snapshot) = self
|
||||
.cloud_service
|
||||
.get_folder_latest_snapshot(workspace_id)
|
||||
.await?
|
||||
.map(|snapshot| FolderSnapshotPB {
|
||||
snapshot_id: snapshot.snapshot_id,
|
||||
snapshot_desc: "".to_string(),
|
||||
created_at: snapshot.created_at,
|
||||
data: snapshot.data,
|
||||
})
|
||||
{
|
||||
snapshots.push(snapshot);
|
||||
}
|
||||
|
||||
Ok(snapshots)
|
||||
}
|
||||
|
||||
/// Only expose this method for testing
|
||||
#[cfg(debug_assertions)]
|
||||
pub fn get_mutex_folder(&self) -> &Arc<MutexFolder> {
|
||||
&self.mutex_folder
|
||||
}
|
||||
|
||||
/// Only expose this method for testing
|
||||
#[cfg(debug_assertions)]
|
||||
pub fn get_cloud_service(&self) -> &Arc<dyn FolderCloudService> {
|
||||
&self.cloud_service
|
||||
}
|
||||
}
|
||||
|
||||
/// Listen on the [ViewChange] after create/delete/update events happened
|
||||
fn listen_on_view_change(mut rx: ViewChangeReceiver, weak_mutex_folder: &Weak<MutexFolder>) {
|
||||
fn subscribe_folder_view_changed(
|
||||
mut rx: ViewChangeReceiver,
|
||||
weak_mutex_folder: &Weak<MutexFolder>,
|
||||
) {
|
||||
let weak_mutex_folder = weak_mutex_folder.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Ok(value) = rx.recv().await {
|
||||
@ -664,15 +712,43 @@ fn listen_on_view_change(mut rx: ViewChangeReceiver, weak_mutex_folder: &Weak<Mu
|
||||
});
|
||||
}
|
||||
|
||||
fn listen_on_folder_state_change(
|
||||
fn subscribe_folder_snapshot_state_changed(
|
||||
workspace_id: String,
|
||||
mut folder_state_rx: WatchStream<CollabState>,
|
||||
weak_mutex_folder: &Weak<MutexFolder>,
|
||||
) {
|
||||
let weak_mutex_folder = weak_mutex_folder.clone();
|
||||
tokio::spawn(async move {
|
||||
if let Some(mutex_folder) = weak_mutex_folder.upgrade() {
|
||||
let stream = mutex_folder
|
||||
.lock()
|
||||
.as_ref()
|
||||
.map(|folder| folder.subscribe_snapshot_state());
|
||||
if let Some(mut state_stream) = stream {
|
||||
while let Some(snapshot_state) = state_stream.next().await {
|
||||
if let Some(new_snapshot_id) = snapshot_state.snapshot_id() {
|
||||
tracing::debug!("Did create folder snapshot: {}", new_snapshot_id);
|
||||
send_notification(
|
||||
&workspace_id,
|
||||
FolderNotification::DidUpdateFolderSnapshotState,
|
||||
)
|
||||
.payload(FolderSnapshotStatePB { new_snapshot_id })
|
||||
.send();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn subscribe_folder_sync_state_changed(
|
||||
workspace_id: String,
|
||||
mut folder_state_rx: WatchStream<SyncState>,
|
||||
weak_mutex_folder: &Weak<MutexFolder>,
|
||||
) {
|
||||
let weak_mutex_folder = weak_mutex_folder.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Some(state) = folder_state_rx.next().await {
|
||||
if state.is_root_changed() {
|
||||
if state.is_full_sync() {
|
||||
if let Some(mutex_folder) = weak_mutex_folder.upgrade() {
|
||||
let folder = mutex_folder.lock().take();
|
||||
if let Some(folder) = folder {
|
||||
@ -683,12 +759,19 @@ fn listen_on_folder_state_change(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
send_notification(&workspace_id, FolderNotification::DidUpdateFolderSyncUpdate)
|
||||
.payload(FolderSyncStatePB::from(state))
|
||||
.send();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// Listen on the [TrashChange]s and notify the frontend some views were changed.
|
||||
fn listen_on_trash_change(mut rx: TrashChangeReceiver, weak_mutex_folder: &Weak<MutexFolder>) {
|
||||
fn subscribe_folder_trash_changed(
|
||||
mut rx: TrashChangeReceiver,
|
||||
weak_mutex_folder: &Weak<MutexFolder>,
|
||||
) {
|
||||
let weak_mutex_folder = weak_mutex_folder.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Ok(value) = rx.recv().await {
|
||||
|
@ -8,7 +8,7 @@ use lib_dispatch::prelude::ToBytes;
|
||||
|
||||
use crate::entities::{view_pb_without_child_views, WorkspacePB, WorkspaceSettingPB};
|
||||
|
||||
const OBSERVABLE_CATEGORY: &str = "Workspace";
|
||||
const FOLDER_OBSERVABLE_SOURCE: &str = "Workspace";
|
||||
|
||||
#[derive(ProtoBuf_Enum, Debug, Default)]
|
||||
pub(crate) enum FolderNotification {
|
||||
@ -22,16 +22,18 @@ pub(crate) enum FolderNotification {
|
||||
DidUpdateWorkspaceViews = 3,
|
||||
/// Trigger when the settings of the workspace are changed. The changes including the latest visiting view, etc
|
||||
DidUpdateWorkspaceSetting = 4,
|
||||
DidUpdateView = 29,
|
||||
DidUpdateChildViews = 30,
|
||||
DidUpdateView = 10,
|
||||
DidUpdateChildViews = 11,
|
||||
/// Trigger after deleting the view
|
||||
DidDeleteView = 31,
|
||||
DidDeleteView = 12,
|
||||
/// Trigger when restore the view from trash
|
||||
DidRestoreView = 32,
|
||||
DidRestoreView = 13,
|
||||
/// Trigger after moving the view to trash
|
||||
DidMoveViewToTrash = 33,
|
||||
DidMoveViewToTrash = 14,
|
||||
/// Trigger when the number of trash is changed
|
||||
DidUpdateTrash = 34,
|
||||
DidUpdateTrash = 15,
|
||||
DidUpdateFolderSnapshotState = 16,
|
||||
DidUpdateFolderSyncUpdate = 17,
|
||||
}
|
||||
|
||||
impl std::convert::From<FolderNotification> for i32 {
|
||||
@ -40,9 +42,29 @@ impl std::convert::From<FolderNotification> for i32 {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::convert::From<i32> for FolderNotification {
|
||||
fn from(value: i32) -> Self {
|
||||
match value {
|
||||
1 => FolderNotification::DidCreateWorkspace,
|
||||
2 => FolderNotification::DidUpdateWorkspace,
|
||||
3 => FolderNotification::DidUpdateWorkspaceViews,
|
||||
4 => FolderNotification::DidUpdateWorkspaceSetting,
|
||||
10 => FolderNotification::DidUpdateView,
|
||||
11 => FolderNotification::DidUpdateChildViews,
|
||||
12 => FolderNotification::DidDeleteView,
|
||||
13 => FolderNotification::DidRestoreView,
|
||||
14 => FolderNotification::DidMoveViewToTrash,
|
||||
15 => FolderNotification::DidUpdateTrash,
|
||||
16 => FolderNotification::DidUpdateFolderSnapshotState,
|
||||
17 => FolderNotification::DidUpdateFolderSyncUpdate,
|
||||
_ => FolderNotification::Unknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "trace")]
|
||||
pub(crate) fn send_notification(id: &str, ty: FolderNotification) -> NotificationBuilder {
|
||||
NotificationBuilder::new(id, ty, OBSERVABLE_CATEGORY)
|
||||
NotificationBuilder::new(id, ty, FOLDER_OBSERVABLE_SOURCE)
|
||||
}
|
||||
|
||||
/// The [CURRENT_WORKSPACE] represents as the current workspace that opened by the
|
||||
|
@ -1,10 +1,10 @@
|
||||
use crate::entities::{CreateViewParams, ViewLayoutPB};
|
||||
use crate::manager::Folder2Manager;
|
||||
use crate::manager::FolderManager;
|
||||
use crate::view_operation::gen_view_id;
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[cfg(feature = "test_helper")]
|
||||
impl Folder2Manager {
|
||||
impl FolderManager {
|
||||
pub async fn create_test_grid_view(
|
||||
&self,
|
||||
app_id: &str,
|
||||
|
@ -1,11 +1,14 @@
pub mod entities;
mod protobuf;
use std::sync::RwLock;

use crate::entities::SubscribeObject;
use bytes::Bytes;
use lazy_static::lazy_static;

use lib_dispatch::prelude::ToBytes;
use std::sync::RwLock;

use crate::entities::SubscribeObject;

pub mod entities;
mod protobuf;

lazy_static! {
static ref NOTIFICATION_SENDER: RwLock<Vec<Box<dyn NotificationSender>>> = RwLock::new(vec![]);
@ -14,10 +17,7 @@ lazy_static! {
pub fn register_notification_sender<T: NotificationSender>(sender: T) {
let box_sender = Box::new(sender);
match NOTIFICATION_SENDER.write() {
Ok(mut write_guard) => {
write_guard.pop();
write_guard.push(box_sender)
},
Ok(mut write_guard) => write_guard.push(box_sender),
Err(err) => tracing::error!("Failed to push notification sender: {:?}", err),
}
}

@ -20,17 +20,31 @@ tokio = { version = "1.26", features = ["sync"]}
parking_lot = "0.12"
lazy_static = "1.4.0"
bytes = "1.0.1"
postgrest = "1.0"
tokio-retry = "0.3"
anyhow = "1.0"
uuid = { version = "1.3.3", features = ["v4"] }
chrono = { version = "0.4.22", default-features = false, features = ["clock"] }
appflowy-integrate = { version = "0.1.0" }

postgrest = "1.0"
tokio-postgres = { version = "0.7.8", optional = true, features = ["with-uuid-1","with-chrono-0_4"] }
deadpool-postgres = "0.10.5"
refinery = { version = "0.8.10", optional = true, features = ["tokio-postgres"] }
async-stream = "0.3.4"
futures = "0.3.26"

lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-user = { path = "../flowy-user" }
flowy-folder2 = { path = "../flowy-folder2" }
flowy-database2 = { path = "../flowy-database2" }
flowy-document2 = { path = "../flowy-document2" }
flowy-error = { path = "../flowy-error" }

[dev-dependencies]
uuid = { version = "1.3.3", features = ["v4"] }
tracing-subscriber = { version = "0.3.3", features = ["env-filter"] }
dotenv = "0.15.0"

[features]
default = ["postgres_storage"]
postgres_storage = ["tokio-postgres", "refinery"]

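Editor's aside: tokio-postgres and refinery are optional, and the postgres_storage feature (enabled by default) pulls them in. A hedged sketch of how code behind that feature would typically be gated (module name is illustrative):

// Hedged sketch: compile the Postgres-backed storage only when the optional
// `postgres_storage` feature (and therefore tokio-postgres/refinery) is enabled.
#[cfg(feature = "postgres_storage")]
pub mod postgres_storage {
  // tokio-postgres / deadpool-postgres / refinery based storage would live here.
}
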
@ -1,6 +1,10 @@
use flowy_folder2::deps::FolderCloudService;
use std::sync::Arc;

use appflowy_integrate::RemoteCollabStorage;

use flowy_database2::deps::DatabaseCloudService;
use flowy_document2::deps::DocumentCloudService;
use flowy_folder2::deps::FolderCloudService;
use flowy_user::event_map::UserAuthService;

pub mod local_server;
@ -8,6 +12,7 @@ mod request;
mod response;
pub mod self_host;
pub mod supabase;
pub mod util;

/// In order to run the supabase test, you need to create a .env file in the root directory of this project
/// and add the following environment variables:
@ -26,4 +31,7 @@ pub mod supabase;
pub trait AppFlowyServer: Send + Sync + 'static {
fn user_service(&self) -> Arc<dyn UserAuthService>;
fn folder_service(&self) -> Arc<dyn FolderCloudService>;
fn database_service(&self) -> Arc<dyn DatabaseCloudService>;
fn document_service(&self) -> Arc<dyn DocumentCloudService>;
fn collab_storage(&self) -> Option<Arc<dyn RemoteCollabStorage>>;
}

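Editor's aside: every backend (local server, self-hosted, Supabase) implements AppFlowyServer, so callers stay generic over the server kind. A hedged sketch of such a call site (the helper function is illustrative):

use std::sync::Arc;

use flowy_folder2::deps::FolderCloudService;

// Hedged sketch: depend only on the AppFlowyServer trait object and ask it for
// the cloud service that is needed.
fn folder_service_of(server: &Arc<dyn AppFlowyServer>) -> Arc<dyn FolderCloudService> {
  server.folder_service()
}
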
@ -0,0 +1,18 @@
use flowy_database2::deps::{DatabaseCloudService, DatabaseSnapshot};
use flowy_error::FlowyError;
use lib_infra::future::FutureResult;

pub(crate) struct LocalServerDatabaseCloudServiceImpl();

impl DatabaseCloudService for LocalServerDatabaseCloudServiceImpl {
fn get_database_updates(&self, _database_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
FutureResult::new(async move { Ok(vec![]) })
}

fn get_database_latest_snapshot(
&self,
_database_id: &str,
) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
FutureResult::new(async move { Ok(None) })
}
}

@ -0,0 +1,18 @@
use flowy_document2::deps::{DocumentCloudService, DocumentSnapshot};
use flowy_error::FlowyError;
use lib_infra::future::FutureResult;

pub(crate) struct LocalServerDocumentCloudServiceImpl();

impl DocumentCloudService for LocalServerDocumentCloudServiceImpl {
fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
FutureResult::new(async move { Ok(vec![]) })
}

fn get_document_latest_snapshot(
&self,
_document_id: &str,
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
FutureResult::new(async move { Ok(None) })
}
}

@ -1,5 +1,5 @@
use flowy_error::FlowyError;
use flowy_folder2::deps::{FolderCloudService, Workspace};
use flowy_folder2::deps::{FolderCloudService, FolderSnapshot, Workspace};
use flowy_folder2::gen_workspace_id;
use lib_infra::future::FutureResult;
use lib_infra::util::timestamp;
@ -18,4 +18,15 @@ impl FolderCloudService for LocalServerFolderCloudServiceImpl {
})
})
}

fn get_folder_latest_snapshot(
&self,
_workspace_id: &str,
) -> FutureResult<Option<FolderSnapshot>, FlowyError> {
FutureResult::new(async move { Ok(None) })
}

fn get_folder_updates(&self, _workspace_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
FutureResult::new(async move { Ok(vec![]) })
}
}

@ -1,5 +1,9 @@
mod folder;
mod user;

pub(crate) use database::*;
pub(crate) use document::*;
pub(crate) use folder::*;
pub(crate) use user::*;

mod database;
mod document;
mod folder;
mod user;

@ -5,7 +5,7 @@ use flowy_error::FlowyError;
|
||||
use flowy_user::entities::{
|
||||
SignInParams, SignInResponse, SignUpParams, SignUpResponse, UpdateUserProfileParams, UserProfile,
|
||||
};
|
||||
use flowy_user::event_map::UserAuthService;
|
||||
use flowy_user::event_map::{UserAuthService, UserCredentials};
|
||||
use lib_infra::box_any::BoxAny;
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
@ -27,6 +27,7 @@ impl UserAuthService for LocalServerUserAuthServiceImpl {
|
||||
user_id: uid,
|
||||
name: params.name,
|
||||
workspace_id,
|
||||
is_new: true,
|
||||
email: Some(params.email),
|
||||
token: None,
|
||||
})
|
||||
@ -54,8 +55,7 @@ impl UserAuthService for LocalServerUserAuthServiceImpl {
|
||||
|
||||
fn update_user(
|
||||
&self,
|
||||
_uid: i64,
|
||||
_token: &Option<String>,
|
||||
_credential: UserCredentials,
|
||||
_params: UpdateUserProfileParams,
|
||||
) -> FutureResult<(), FlowyError> {
|
||||
FutureResult::new(async { Ok(()) })
|
||||
@ -63,9 +63,12 @@ impl UserAuthService for LocalServerUserAuthServiceImpl {
|
||||
|
||||
fn get_user_profile(
|
||||
&self,
|
||||
_token: Option<String>,
|
||||
_uid: i64,
|
||||
_credential: UserCredentials,
|
||||
) -> FutureResult<Option<UserProfile>, FlowyError> {
|
||||
FutureResult::new(async { Ok(None) })
|
||||
}
|
||||
|
||||
fn check_user(&self, _credential: UserCredentials) -> FutureResult<(), FlowyError> {
|
||||
FutureResult::new(async { Ok(()) })
|
||||
}
|
||||
}
|
||||
|
@ -1,12 +1,16 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use flowy_folder2::deps::FolderCloudService;
|
||||
use appflowy_integrate::RemoteCollabStorage;
|
||||
use parking_lot::RwLock;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use flowy_database2::deps::DatabaseCloudService;
|
||||
use flowy_document2::deps::DocumentCloudService;
|
||||
use flowy_folder2::deps::FolderCloudService;
|
||||
use flowy_user::event_map::UserAuthService;
|
||||
|
||||
use crate::local_server::impls::{
|
||||
LocalServerDatabaseCloudServiceImpl, LocalServerDocumentCloudServiceImpl,
|
||||
LocalServerFolderCloudServiceImpl, LocalServerUserAuthServiceImpl,
|
||||
};
|
||||
use crate::AppFlowyServer;
|
||||
@ -38,4 +42,16 @@ impl AppFlowyServer for LocalServer {
|
||||
fn folder_service(&self) -> Arc<dyn FolderCloudService> {
|
||||
Arc::new(LocalServerFolderCloudServiceImpl())
|
||||
}
|
||||
|
||||
fn database_service(&self) -> Arc<dyn DatabaseCloudService> {
|
||||
Arc::new(LocalServerDatabaseCloudServiceImpl())
|
||||
}
|
||||
|
||||
fn document_service(&self) -> Arc<dyn DocumentCloudService> {
|
||||
Arc::new(LocalServerDocumentCloudServiceImpl())
|
||||
}
|
||||
|
||||
fn collab_storage(&self) -> Option<Arc<dyn RemoteCollabStorage>> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,18 @@
|
||||
use flowy_database2::deps::{DatabaseCloudService, DatabaseSnapshot};
|
||||
use flowy_error::FlowyError;
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
pub(crate) struct SelfHostedDatabaseCloudServiceImpl();
|
||||
|
||||
impl DatabaseCloudService for SelfHostedDatabaseCloudServiceImpl {
|
||||
fn get_database_updates(&self, _database_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(vec![]) })
|
||||
}
|
||||
|
||||
fn get_database_latest_snapshot(
|
||||
&self,
|
||||
_database_id: &str,
|
||||
) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(None) })
|
||||
}
|
||||
}
|
@ -0,0 +1,18 @@
|
||||
use flowy_document2::deps::{DocumentCloudService, DocumentSnapshot};
|
||||
use flowy_error::FlowyError;
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
pub(crate) struct SelfHostedDocumentCloudServiceImpl();
|
||||
|
||||
impl DocumentCloudService for SelfHostedDocumentCloudServiceImpl {
|
||||
fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(vec![]) })
|
||||
}
|
||||
|
||||
fn get_document_latest_snapshot(
|
||||
&self,
|
||||
_document_id: &str,
|
||||
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(None) })
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
use flowy_error::FlowyError;
|
||||
use flowy_folder2::deps::{FolderCloudService, Workspace};
|
||||
use flowy_folder2::deps::{FolderCloudService, FolderSnapshot, Workspace};
|
||||
use flowy_folder2::gen_workspace_id;
|
||||
use lib_infra::future::FutureResult;
|
||||
use lib_infra::util::timestamp;
|
||||
@ -18,4 +18,15 @@ impl FolderCloudService for SelfHostedServerFolderCloudServiceImpl {
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn get_folder_latest_snapshot(
|
||||
&self,
|
||||
_workspace_id: &str,
|
||||
) -> FutureResult<Option<FolderSnapshot>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(None) })
|
||||
}
|
||||
|
||||
fn get_folder_updates(&self, _workspace_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
FutureResult::new(async move { Ok(vec![]) })
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,9 @@
|
||||
mod folder;
|
||||
mod user;
|
||||
|
||||
pub(crate) use database::*;
|
||||
pub(crate) use document::*;
|
||||
pub(crate) use folder::*;
|
||||
pub(crate) use user::*;
|
||||
|
||||
mod database;
|
||||
mod document;
|
||||
mod folder;
|
||||
mod user;
|
||||
|
@ -2,7 +2,7 @@ use flowy_error::{ErrorCode, FlowyError};
|
||||
use flowy_user::entities::{
|
||||
SignInParams, SignInResponse, SignUpParams, SignUpResponse, UpdateUserProfileParams, UserProfile,
|
||||
};
|
||||
use flowy_user::event_map::UserAuthService;
|
||||
use flowy_user::event_map::{UserAuthService, UserCredentials};
|
||||
use lib_infra::box_any::BoxAny;
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
@ -42,7 +42,7 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
|
||||
match token {
|
||||
None => FutureResult::new(async {
|
||||
Err(FlowyError::new(
|
||||
ErrorCode::InvalidData,
|
||||
ErrorCode::InvalidParams,
|
||||
"Token should not be empty",
|
||||
))
|
||||
}),
|
||||
@ -59,19 +59,18 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
|
||||
|
||||
fn update_user(
|
||||
&self,
|
||||
_uid: i64,
|
||||
token: &Option<String>,
|
||||
credential: UserCredentials,
|
||||
params: UpdateUserProfileParams,
|
||||
) -> FutureResult<(), FlowyError> {
|
||||
match token {
|
||||
match credential.token {
|
||||
None => FutureResult::new(async {
|
||||
Err(FlowyError::new(
|
||||
ErrorCode::InvalidData,
|
||||
ErrorCode::InvalidParams,
|
||||
"Token should not be empty",
|
||||
))
|
||||
}),
|
||||
Some(token) => {
|
||||
let token = token.to_owned();
|
||||
let token = token;
|
||||
let url = self.config.user_profile_url();
|
||||
FutureResult::new(async move {
|
||||
update_user_profile_request(&token, params, &url).await?;
|
||||
@ -83,13 +82,11 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
|
||||
|
||||
fn get_user_profile(
|
||||
&self,
|
||||
token: Option<String>,
|
||||
_uid: i64,
|
||||
credential: UserCredentials,
|
||||
) -> FutureResult<Option<UserProfile>, FlowyError> {
|
||||
let token = token;
|
||||
let url = self.config.user_profile_url();
|
||||
FutureResult::new(async move {
|
||||
match token {
|
||||
match credential.token {
|
||||
None => Err(FlowyError::new(
|
||||
ErrorCode::UnexpectedEmpty,
|
||||
"Token should not be empty",
|
||||
@ -101,6 +98,11 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn check_user(&self, _credential: UserCredentials) -> FutureResult<(), FlowyError> {
|
||||
// TODO(nathan): implement the OpenAPI for this
|
||||
FutureResult::new(async { Ok(()) })
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn user_sign_up_request(
|
||||
|
@ -1,10 +1,15 @@
|
||||
use flowy_folder2::deps::FolderCloudService;
|
||||
use std::sync::Arc;
|
||||
|
||||
use appflowy_integrate::RemoteCollabStorage;
|
||||
|
||||
use flowy_database2::deps::DatabaseCloudService;
|
||||
use flowy_document2::deps::DocumentCloudService;
|
||||
use flowy_folder2::deps::FolderCloudService;
|
||||
use flowy_user::event_map::UserAuthService;
|
||||
|
||||
use crate::self_host::configuration::SelfHostedConfiguration;
|
||||
use crate::self_host::impls::{
|
||||
SelfHostedDatabaseCloudServiceImpl, SelfHostedDocumentCloudServiceImpl,
|
||||
SelfHostedServerFolderCloudServiceImpl, SelfHostedUserAuthServiceImpl,
|
||||
};
|
||||
use crate::AppFlowyServer;
|
||||
@ -27,4 +32,16 @@ impl AppFlowyServer for SelfHostServer {
|
||||
fn folder_service(&self) -> Arc<dyn FolderCloudService> {
|
||||
Arc::new(SelfHostedServerFolderCloudServiceImpl())
|
||||
}
|
||||
|
||||
fn database_service(&self) -> Arc<dyn DatabaseCloudService> {
|
||||
Arc::new(SelfHostedDatabaseCloudServiceImpl())
|
||||
}
|
||||
|
||||
fn document_service(&self) -> Arc<dyn DocumentCloudService> {
|
||||
Arc::new(SelfHostedDocumentCloudServiceImpl())
|
||||
}
|
||||
|
||||
fn collab_storage(&self) -> Option<Arc<dyn RemoteCollabStorage>> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
90
frontend/rust-lib/flowy-server/src/supabase/configuration.rs
Normal file
@ -0,0 +1,90 @@
|
||||
use serde::Deserialize;
|
||||
|
||||
use flowy_error::{ErrorCode, FlowyError};
|
||||
|
||||
pub const SUPABASE_URL: &str = "SUPABASE_URL";
|
||||
pub const SUPABASE_ANON_KEY: &str = "SUPABASE_ANON_KEY";
|
||||
pub const SUPABASE_KEY: &str = "SUPABASE_KEY";
|
||||
pub const SUPABASE_JWT_SECRET: &str = "SUPABASE_JWT_SECRET";
|
||||
|
||||
pub const SUPABASE_DB: &str = "SUPABASE_DB";
|
||||
pub const SUPABASE_DB_USER: &str = "SUPABASE_DB_USER";
|
||||
pub const SUPABASE_DB_PASSWORD: &str = "SUPABASE_DB_PASSWORD";
|
||||
pub const SUPABASE_DB_PORT: &str = "SUPABASE_DB_PORT";
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct SupabaseConfiguration {
|
||||
/// The url of the supabase server.
|
||||
pub url: String,
|
||||
/// The key of the supabase server.
|
||||
pub key: String,
|
||||
/// The secret used to sign the JWT tokens.
|
||||
pub jwt_secret: String,
|
||||
|
||||
pub postgres_config: PostgresConfiguration,
|
||||
}
|
||||
|
||||
impl SupabaseConfiguration {
|
||||
/// Load the configuration from the environment variables.
|
||||
/// SUPABASE_URL=https://<your-supabase-url>.supabase.co
|
||||
/// SUPABASE_KEY=<your-supabase-key>
|
||||
/// SUPABASE_JWT_SECRET=<your-supabase-jwt-secret>
|
||||
///
|
||||
pub fn from_env() -> Result<Self, FlowyError> {
|
||||
let postgres_config = PostgresConfiguration::from_env()?;
|
||||
Ok(Self {
|
||||
url: std::env::var(SUPABASE_URL)
|
||||
.map_err(|_| FlowyError::new(ErrorCode::InvalidAuthConfig, "Missing SUPABASE_URL"))?,
|
||||
key: std::env::var(SUPABASE_KEY)
|
||||
.map_err(|_| FlowyError::new(ErrorCode::InvalidAuthConfig, "Missing SUPABASE_KEY"))?,
|
||||
jwt_secret: std::env::var(SUPABASE_JWT_SECRET).map_err(|_| {
|
||||
FlowyError::new(ErrorCode::InvalidAuthConfig, "Missing SUPABASE_JWT_SECRET")
|
||||
})?,
|
||||
postgres_config,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn write_env(&self) {
|
||||
std::env::set_var(SUPABASE_URL, &self.url);
|
||||
std::env::set_var(SUPABASE_KEY, &self.key);
|
||||
std::env::set_var(SUPABASE_JWT_SECRET, &self.jwt_secret);
|
||||
self.postgres_config.write_env();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct PostgresConfiguration {
|
||||
pub url: String,
|
||||
pub user_name: String,
|
||||
pub password: String,
|
||||
pub port: u16,
|
||||
}
|
||||
|
||||
impl PostgresConfiguration {
|
||||
pub fn from_env() -> Result<Self, FlowyError> {
|
||||
let url = std::env::var(SUPABASE_DB)
|
||||
.map_err(|_| FlowyError::new(ErrorCode::InvalidAuthConfig, "Missing SUPABASE_DB"))?;
|
||||
let user_name = std::env::var(SUPABASE_DB_USER)
|
||||
.map_err(|_| FlowyError::new(ErrorCode::InvalidAuthConfig, "Missing SUPABASE_DB_USER"))?;
|
||||
let password = std::env::var(SUPABASE_DB_PASSWORD)
|
||||
.map_err(|_| FlowyError::new(ErrorCode::InvalidAuthConfig, "Missing SUPABASE_DB_PASSWORD"))?;
|
||||
let port = std::env::var(SUPABASE_DB_PORT)
|
||||
.map_err(|_| FlowyError::new(ErrorCode::InvalidAuthConfig, "Missing SUPABASE_DB_PORT"))?
|
||||
.parse::<u16>()
|
||||
.map_err(|_e| FlowyError::new(ErrorCode::InvalidAuthConfig, "Invalid SUPABASE_DB_PORT"))?;
|
||||
|
||||
Ok(Self {
|
||||
url,
|
||||
user_name,
|
||||
password,
|
||||
port,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn write_env(&self) {
|
||||
std::env::set_var(SUPABASE_DB, &self.url);
|
||||
std::env::set_var(SUPABASE_DB_USER, &self.user_name);
|
||||
std::env::set_var(SUPABASE_DB_PASSWORD, &self.password);
|
||||
std::env::set_var(SUPABASE_DB_PORT, self.port.to_string());
|
||||
}
|
||||
}
|
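The configuration above is read back from process-level environment variables, so a quick way to sanity-check it is to round-trip write_env and from_env. A minimal sketch (not part of this commit; the values are placeholders):

#[cfg(test)]
mod postgres_config_round_trip {
  use super::*;

  #[test]
  fn write_env_then_from_env_round_trips() {
    let config = PostgresConfiguration {
      url: "localhost".to_string(),
      user_name: "postgres".to_string(),
      password: "postgres".to_string(),
      port: 5432,
    };
    // write_env publishes the values; from_env must read the same ones back.
    config.write_env();
    let loaded = PostgresConfiguration::from_env().unwrap();
    assert_eq!(loaded.user_name, "postgres");
    assert_eq!(loaded.port, 5432);
  }
}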
39
frontend/rust-lib/flowy-server/src/supabase/entities.rs
Normal file
@ -0,0 +1,39 @@
|
||||
use serde::Deserialize;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::supabase::impls::WORKSPACE_ID;
|
||||
use crate::util::deserialize_null_or_default;
|
||||
|
||||
pub enum GetUserProfileParams {
|
||||
Uid(i64),
|
||||
Uuid(Uuid),
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub(crate) struct UserProfileResponse {
|
||||
pub uid: i64,
|
||||
#[serde(deserialize_with = "deserialize_null_or_default")]
|
||||
pub name: String,
|
||||
|
||||
#[serde(deserialize_with = "deserialize_null_or_default")]
|
||||
pub email: String,
|
||||
|
||||
#[serde(deserialize_with = "deserialize_null_or_default")]
|
||||
pub workspace_id: String,
|
||||
}
|
||||
|
||||
impl From<tokio_postgres::Row> for UserProfileResponse {
|
||||
fn from(row: tokio_postgres::Row) -> Self {
|
||||
let workspace_id: Uuid = row.get(WORKSPACE_ID);
|
||||
Self {
|
||||
uid: row.get("uid"),
|
||||
name: row.try_get("name").unwrap_or_default(),
|
||||
email: row.try_get("email").unwrap_or_default(),
|
||||
workspace_id: workspace_id.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub(crate) struct UserProfileResponseList(pub Vec<UserProfileResponse>);
|
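UserProfileResponse leans on deserialize_null_or_default from crate::util, which is not shown in this diff. As an assumption, that helper is the usual serde adapter that maps an SQL NULL to the type's Default instead of failing deserialization, roughly:

use serde::{Deserialize, Deserializer};

// Hypothetical sketch of the helper referenced above: accept null and fall
// back to T::default() rather than returning an error.
pub fn deserialize_null_or_default<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
  T: Default + Deserialize<'de>,
  D: Deserializer<'de>,
{
  Ok(Option::<T>::deserialize(deserializer)?.unwrap_or_default())
}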
@ -0,0 +1,304 @@
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use anyhow::Error;
|
||||
use appflowy_integrate::{
|
||||
merge_updates_v1, CollabObject, Decode, MsgId, RemoteCollabSnapshot, RemoteCollabState,
|
||||
RemoteCollabStorage, YrsUpdate,
|
||||
};
|
||||
use chrono::{DateTime, Utc};
|
||||
use deadpool_postgres::GenericClient;
|
||||
use futures_util::TryStreamExt;
|
||||
use tokio::task::spawn_blocking;
|
||||
use tokio_postgres::types::ToSql;
|
||||
use tokio_postgres::Row;
|
||||
|
||||
use flowy_error::FlowyError;
|
||||
use lib_infra::async_trait::async_trait;
|
||||
|
||||
use crate::supabase::sql_builder::{
|
||||
DeleteSqlBuilder, InsertSqlBuilder, SelectSqlBuilder, WhereCondition,
|
||||
};
|
||||
use crate::supabase::PostgresServer;
|
||||
|
||||
pub struct PgCollabStorageImpl {
|
||||
server: Arc<PostgresServer>,
|
||||
}
|
||||
|
||||
const AF_COLLAB_KEY_COLUMN: &str = "key";
|
||||
const AF_COLLAB_SNAPSHOT_OID_COLUMN: &str = "oid";
|
||||
const AF_COLLAB_SNAPSHOT_ID_COLUMN: &str = "sid";
|
||||
const AF_COLLAB_SNAPSHOT_BLOB_COLUMN: &str = "blob";
|
||||
const AF_COLLAB_SNAPSHOT_BLOB_SIZE_COLUMN: &str = "blob_size";
|
||||
const AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN: &str = "created_at";
|
||||
const AF_COLLAB_SNAPSHOT_TABLE: &str = "af_collab_snapshot";
|
||||
|
||||
impl PgCollabStorageImpl {
|
||||
pub fn new(server: Arc<PostgresServer>) -> Self {
|
||||
Self { server }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RemoteCollabStorage for PgCollabStorageImpl {
|
||||
async fn get_all_updates(&self, object_id: &str) -> Result<Vec<Vec<u8>>, Error> {
|
||||
get_updates_from_server(object_id, Arc::downgrade(&self.server)).await
|
||||
}
|
||||
|
||||
async fn get_latest_snapshot(
|
||||
&self,
|
||||
object_id: &str,
|
||||
) -> Result<Option<RemoteCollabSnapshot>, Error> {
|
||||
get_latest_snapshot_from_server(object_id, Arc::downgrade(&self.server)).await
|
||||
}
|
||||
|
||||
async fn get_collab_state(&self, object_id: &str) -> Result<Option<RemoteCollabState>, Error> {
|
||||
let client = self.server.get_pg_client().await.recv().await?;
|
||||
let (sql, params) = SelectSqlBuilder::new("af_collab_state")
|
||||
.column("*")
|
||||
.where_clause("oid", object_id.to_string())
|
||||
.order_by("snapshot_created_at", false)
|
||||
.limit(1)
|
||||
.build();
|
||||
let stmt = client.prepare_cached(&sql).await?;
|
||||
if let Some(row) = client
|
||||
.query_raw(&stmt, params)
|
||||
.await?
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?
|
||||
.first()
|
||||
{
|
||||
let created_at = row.try_get::<&str, DateTime<Utc>>("snapshot_created_at")?;
|
||||
let current_edit_count = row.try_get::<_, i64>("current_edit_count")?;
|
||||
let last_snapshot_edit_count = row.try_get::<_, i64>("snapshot_edit_count")?;
|
||||
|
||||
let state = RemoteCollabState {
|
||||
current_edit_count,
|
||||
last_snapshot_edit_count,
|
||||
last_snapshot_created_at: created_at.timestamp(),
|
||||
};
|
||||
return Ok(Some(state));
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn create_snapshot(&self, object: &CollabObject, snapshot: Vec<u8>) -> Result<i64, Error> {
|
||||
let client = self.server.get_pg_client().await.recv().await?;
|
||||
let value_size = snapshot.len() as i32;
|
||||
let (sql, params) = InsertSqlBuilder::new("af_collab_snapshot")
|
||||
.value(AF_COLLAB_SNAPSHOT_OID_COLUMN, object.id.clone())
|
||||
.value("name", object.name.clone())
|
||||
.value(AF_COLLAB_SNAPSHOT_BLOB_COLUMN, snapshot)
|
||||
.value(AF_COLLAB_SNAPSHOT_BLOB_SIZE_COLUMN, value_size)
|
||||
.returning(AF_COLLAB_SNAPSHOT_ID_COLUMN)
|
||||
.build();
|
||||
let stmt = client.prepare_cached(&sql).await?;
|
||||
|
||||
let all_rows = client
|
||||
.query_raw(&stmt, params)
|
||||
.await?
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
let row = all_rows
|
||||
.first()
|
||||
.ok_or(anyhow::anyhow!("Create snapshot failed. No row returned"))?;
|
||||
let sid = row.try_get::<&str, i64>(AF_COLLAB_SNAPSHOT_ID_COLUMN)?;
|
||||
return Ok(sid);
|
||||
}
|
||||
|
||||
async fn send_update(
|
||||
&self,
|
||||
object: &CollabObject,
|
||||
_id: MsgId,
|
||||
update: Vec<u8>,
|
||||
) -> Result<(), Error> {
|
||||
let client = self.server.get_pg_client().await.recv().await?;
|
||||
let value_size = update.len() as i32;
|
||||
let (sql, params) = InsertSqlBuilder::new("af_collab")
|
||||
.value("oid", object.id.clone())
|
||||
.value("name", object.name.clone())
|
||||
.value("value", update)
|
||||
.value("value_size", value_size)
|
||||
.build();
|
||||
|
||||
let stmt = client.prepare_cached(&sql).await?;
|
||||
client.execute_raw(&stmt, params).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send_init_sync(
|
||||
&self,
|
||||
object: &CollabObject,
|
||||
_id: MsgId,
|
||||
init_update: Vec<u8>,
|
||||
) -> Result<(), Error> {
|
||||
let mut client = self.server.get_pg_client().await.recv().await?;
|
||||
let txn = client.transaction().await?;
|
||||
|
||||
// 1.Get all updates
|
||||
let (sql, params) = SelectSqlBuilder::new("af_collab")
|
||||
.column(AF_COLLAB_KEY_COLUMN)
|
||||
.column("value")
|
||||
.order_by(AF_COLLAB_KEY_COLUMN, true)
|
||||
.where_clause("oid", object.id.clone())
|
||||
.build();
|
||||
let get_all_update_stmt = txn.prepare_cached(&sql).await?;
|
||||
let row_stream = txn.query_raw(&get_all_update_stmt, params).await?;
|
||||
let remote_updates = row_stream.try_collect::<Vec<_>>().await?;
|
||||
|
||||
let insert_builder = InsertSqlBuilder::new("af_collab")
|
||||
.value("oid", object.id.clone())
|
||||
.value("name", object.name.clone());
|
||||
|
||||
let (sql, params) = if !remote_updates.is_empty() {
|
||||
let remoted_keys = remote_updates
|
||||
.iter()
|
||||
.map(|row| row.get::<_, i64>(AF_COLLAB_KEY_COLUMN))
|
||||
.collect::<Vec<_>>();
|
||||
let last_row_key = remoted_keys.last().cloned().unwrap();
|
||||
|
||||
// 2.Merge all updates
|
||||
let merged_update =
|
||||
spawn_blocking(move || merge_update_from_rows(remote_updates, init_update)).await??;
|
||||
|
||||
// 3. Delete all updates
|
||||
let (sql, params) = DeleteSqlBuilder::new("af_collab")
|
||||
.where_condition(WhereCondition::Equals(
|
||||
"oid".to_string(),
|
||||
Box::new(object.id.clone()),
|
||||
))
|
||||
.where_condition(WhereCondition::In(
|
||||
AF_COLLAB_KEY_COLUMN.to_string(),
|
||||
remoted_keys
|
||||
.into_iter()
|
||||
.map(|key| Box::new(key) as Box<dyn ToSql + Send + Sync>)
|
||||
.collect::<Vec<_>>(),
|
||||
))
|
||||
.build();
|
||||
let delete_stmt = txn.prepare_cached(&sql).await?;
|
||||
txn.execute_raw(&delete_stmt, params).await?;
|
||||
|
||||
let value_size = merged_update.len() as i32;
|
||||
// Override the key with the last row key in case of concurrent init sync
|
||||
insert_builder
|
||||
.value("value", merged_update)
|
||||
.value("value_size", value_size)
|
||||
.value(AF_COLLAB_KEY_COLUMN, last_row_key)
|
||||
.overriding_system_value()
|
||||
.build()
|
||||
} else {
|
||||
let value_size = init_update.len() as i32;
|
||||
insert_builder
|
||||
.value("value", init_update)
|
||||
.value("value_size", value_size)
|
||||
.build()
|
||||
};
|
||||
|
||||
// 4.Insert the merged update
|
||||
let stmt = txn.prepare_cached(&sql).await?;
|
||||
txn.execute_raw(&stmt, params).await?;
|
||||
|
||||
// 5. Commit the transaction
|
||||
txn.commit().await?;
|
||||
tracing::trace!("{} init sync done", object.id);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_updates_from_server(
|
||||
object_id: &str,
|
||||
server: Weak<PostgresServer>,
|
||||
) -> Result<Vec<Vec<u8>>, Error> {
|
||||
match server.upgrade() {
|
||||
None => Ok(vec![]),
|
||||
Some(server) => {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
let (sql, params) = SelectSqlBuilder::new("af_collab")
|
||||
.column("value")
|
||||
.order_by(AF_COLLAB_KEY_COLUMN, true)
|
||||
.where_clause("oid", object_id.to_string())
|
||||
.build();
|
||||
let stmt = client.prepare_cached(&sql).await?;
|
||||
let row_stream = client.query_raw(&stmt, params).await?;
|
||||
Ok(
|
||||
row_stream
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|row| update_from_row(row).ok())
|
||||
.collect(),
|
||||
)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_latest_snapshot_from_server(
|
||||
object_id: &str,
|
||||
server: Weak<PostgresServer>,
|
||||
) -> Result<Option<RemoteCollabSnapshot>, Error> {
|
||||
match server.upgrade() {
|
||||
None => Ok(None),
|
||||
Some(server) => {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
let (sql, params) = SelectSqlBuilder::new(AF_COLLAB_SNAPSHOT_TABLE)
|
||||
.column(AF_COLLAB_SNAPSHOT_ID_COLUMN)
|
||||
.column(AF_COLLAB_SNAPSHOT_BLOB_COLUMN)
|
||||
.column(AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN)
|
||||
.order_by(AF_COLLAB_SNAPSHOT_ID_COLUMN, false)
|
||||
.limit(1)
|
||||
.where_clause(AF_COLLAB_SNAPSHOT_OID_COLUMN, object_id.to_string())
|
||||
.build();
|
||||
|
||||
let stmt = client.prepare_cached(&sql).await?;
|
||||
let all_rows = client
|
||||
.query_raw(&stmt, params)
|
||||
.await?
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
|
||||
let row = all_rows.first().ok_or(anyhow::anyhow!(
|
||||
"Get latest snapshot failed. No row returned"
|
||||
))?;
|
||||
let snapshot_id = row.try_get::<_, i64>(AF_COLLAB_SNAPSHOT_ID_COLUMN)?;
|
||||
let update = row.try_get::<_, Vec<u8>>(AF_COLLAB_SNAPSHOT_BLOB_COLUMN)?;
|
||||
let created_at = row
|
||||
.try_get::<_, DateTime<Utc>>(AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN)?
|
||||
.timestamp();
|
||||
|
||||
Ok(Some(RemoteCollabSnapshot {
|
||||
snapshot_id,
|
||||
oid: object_id.to_string(),
|
||||
data: update,
|
||||
created_at,
|
||||
}))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn update_from_row(row: Row) -> Result<Vec<u8>, FlowyError> {
|
||||
row
|
||||
.try_get::<_, Vec<u8>>("value")
|
||||
.map_err(|e| FlowyError::internal().context(format!("Failed to get value from row: {}", e)))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn decode_update_from_row(row: Row) -> Result<YrsUpdate, FlowyError> {
|
||||
let update = update_from_row(row)?;
|
||||
YrsUpdate::decode_v1(&update).map_err(|_| FlowyError::internal().context("Invalid yrs update"))
|
||||
}
|
||||
|
||||
fn merge_update_from_rows(rows: Vec<Row>, new_update: Vec<u8>) -> Result<Vec<u8>, FlowyError> {
|
||||
let mut updates = vec![];
|
||||
for row in rows {
|
||||
let update = update_from_row(row)?;
|
||||
updates.push(update);
|
||||
}
|
||||
updates.push(new_update);
|
||||
|
||||
let updates = updates
|
||||
.iter()
|
||||
.map(|update| update.as_ref())
|
||||
.collect::<Vec<&[u8]>>();
|
||||
|
||||
merge_updates_v1(&updates).map_err(|_| FlowyError::internal().context("Failed to merge updates"))
|
||||
}
|
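A hedged usage sketch (the load_collab_data helper below is not in this commit) of how a caller could combine the two read paths exposed above: prefer the latest snapshot, and fall back to replaying the raw update history when no snapshot exists yet.

async fn load_collab_data(
  storage: &dyn RemoteCollabStorage,
  object_id: &str,
) -> Result<Vec<Vec<u8>>, anyhow::Error> {
  match storage.get_latest_snapshot(object_id).await? {
    // The snapshot already holds the merged document state.
    Some(snapshot) => Ok(vec![snapshot.data]),
    // No snapshot yet: fall back to the full list of updates.
    None => storage.get_all_updates(object_id).await,
  }
}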
@ -0,0 +1,55 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::oneshot::channel;
|
||||
|
||||
use flowy_database2::deps::{DatabaseCloudService, DatabaseSnapshot};
|
||||
use flowy_error::{internal_error, FlowyError};
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
use crate::supabase::impls::{get_latest_snapshot_from_server, get_updates_from_server};
|
||||
use crate::supabase::PostgresServer;
|
||||
|
||||
pub(crate) struct SupabaseDatabaseCloudServiceImpl {
|
||||
server: Arc<PostgresServer>,
|
||||
}
|
||||
|
||||
impl SupabaseDatabaseCloudServiceImpl {
|
||||
pub fn new(server: Arc<PostgresServer>) -> Self {
|
||||
Self { server }
|
||||
}
|
||||
}
|
||||
|
||||
impl DatabaseCloudService for SupabaseDatabaseCloudServiceImpl {
|
||||
fn get_database_updates(&self, database_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
let server = Arc::downgrade(&self.server);
|
||||
let (tx, rx) = channel();
|
||||
let database_id = database_id.to_string();
|
||||
tokio::spawn(async move { tx.send(get_updates_from_server(&database_id, server).await) });
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error) })
|
||||
}
|
||||
|
||||
fn get_database_latest_snapshot(
|
||||
&self,
|
||||
database_id: &str,
|
||||
) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
|
||||
let server = Arc::downgrade(&self.server);
|
||||
let (tx, rx) = channel();
|
||||
let database_id = database_id.to_string();
|
||||
tokio::spawn(
|
||||
async move { tx.send(get_latest_snapshot_from_server(&database_id, server).await) },
|
||||
);
|
||||
FutureResult::new(async {
|
||||
Ok(
|
||||
rx.await
|
||||
.map_err(internal_error)?
|
||||
.map_err(internal_error)?
|
||||
.map(|snapshot| DatabaseSnapshot {
|
||||
snapshot_id: snapshot.snapshot_id,
|
||||
database_id: snapshot.oid,
|
||||
data: snapshot.data,
|
||||
created_at: snapshot.created_at,
|
||||
}),
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
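Each Supabase service in this file (and in document.rs and folder.rs below) repeats the same bridge: spawn the Postgres work on the runtime, ship the result back over a oneshot channel, and wrap the receiver in a FutureResult. A generic sketch of that pattern; the bridge helper itself is an assumption, not part of the commit, and the trait bounds are illustrative only.

fn bridge<T, F>(fut: F) -> FutureResult<T, FlowyError>
where
  T: Send + Sync + 'static,
  F: std::future::Future<Output = Result<T, FlowyError>> + Send + 'static,
{
  let (tx, rx) = channel();
  tokio::spawn(async move {
    // Ignoring the send error: it only fails if the receiver was dropped.
    let _ = tx.send(fut.await);
  });
  FutureResult::new(async { rx.await.map_err(internal_error)? })
}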
@ -0,0 +1,58 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::oneshot::channel;
|
||||
|
||||
use flowy_document2::deps::{DocumentCloudService, DocumentSnapshot};
|
||||
use flowy_error::{internal_error, FlowyError};
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
use crate::supabase::impls::{get_latest_snapshot_from_server, get_updates_from_server};
|
||||
use crate::supabase::PostgresServer;
|
||||
|
||||
pub(crate) struct SupabaseDocumentCloudServiceImpl {
|
||||
server: Arc<PostgresServer>,
|
||||
}
|
||||
|
||||
impl SupabaseDocumentCloudServiceImpl {
|
||||
pub fn new(server: Arc<PostgresServer>) -> Self {
|
||||
Self { server }
|
||||
}
|
||||
}
|
||||
|
||||
impl DocumentCloudService for SupabaseDocumentCloudServiceImpl {
|
||||
fn get_document_updates(&self, document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
let server = Arc::downgrade(&self.server);
|
||||
let (tx, rx) = channel();
|
||||
let document_id = document_id.to_string();
|
||||
tokio::spawn(async move { tx.send(get_updates_from_server(&document_id, server).await) });
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error) })
|
||||
}
|
||||
|
||||
fn get_document_latest_snapshot(
|
||||
&self,
|
||||
document_id: &str,
|
||||
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
|
||||
let server = Arc::downgrade(&self.server);
|
||||
let (tx, rx) = channel();
|
||||
let document_id = document_id.to_string();
|
||||
tokio::spawn(
|
||||
async move { tx.send(get_latest_snapshot_from_server(&document_id, server).await) },
|
||||
);
|
||||
|
||||
FutureResult::new(async {
|
||||
{
|
||||
Ok(
|
||||
rx.await
|
||||
.map_err(internal_error)?
|
||||
.map_err(internal_error)?
|
||||
.map(|snapshot| DocumentSnapshot {
|
||||
snapshot_id: snapshot.snapshot_id,
|
||||
document_id: snapshot.oid,
|
||||
data: snapshot.data,
|
||||
created_at: snapshot.created_at,
|
||||
}),
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,54 +1,184 @@
|
||||
use crate::supabase::request::create_workspace_with_uid;
|
||||
use flowy_error::FlowyError;
|
||||
use flowy_folder2::deps::{FolderCloudService, Workspace};
|
||||
use lib_infra::future::FutureResult;
|
||||
use postgrest::Postgrest;
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures_util::{pin_mut, StreamExt};
|
||||
use tokio::sync::oneshot::channel;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::supabase::impls::{get_latest_snapshot_from_server, get_updates_from_server};
|
||||
use flowy_error::{internal_error, ErrorCode, FlowyError};
|
||||
use flowy_folder2::deps::{FolderCloudService, FolderSnapshot, Workspace};
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
use crate::supabase::pg_db::PostgresObject;
|
||||
use crate::supabase::sql_builder::{InsertSqlBuilder, SelectSqlBuilder};
|
||||
use crate::supabase::PostgresServer;
|
||||
|
||||
pub(crate) const WORKSPACE_TABLE: &str = "af_workspace";
|
||||
pub(crate) const WORKSPACE_NAME_COLUMN: &str = "workspace_name";
|
||||
pub(crate) const WORKSPACE_ID: &str = "workspace_id";
|
||||
const WORKSPACE_NAME: &str = "workspace_name";
|
||||
const CREATED_AT: &str = "created_at";
|
||||
|
||||
pub(crate) struct SupabaseFolderCloudServiceImpl {
|
||||
postgrest: Arc<Postgrest>,
|
||||
server: Arc<PostgresServer>,
|
||||
}
|
||||
|
||||
impl SupabaseFolderCloudServiceImpl {
|
||||
pub fn new(server: Arc<PostgresServer>) -> Self {
|
||||
Self { server }
|
||||
}
|
||||
}
|
||||
|
||||
impl FolderCloudService for SupabaseFolderCloudServiceImpl {
|
||||
fn create_workspace(&self, uid: i64, name: &str) -> FutureResult<Workspace, FlowyError> {
|
||||
let server = self.server.clone();
|
||||
let (tx, rx) = channel();
|
||||
let name = name.to_string();
|
||||
let postgrest = self.postgrest.clone();
|
||||
FutureResult::new(async move { create_workspace_with_uid(postgrest, uid, &name).await })
|
||||
tokio::spawn(async move {
|
||||
tx.send(
|
||||
async move {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
create_workspace(&client, uid, &name).await
|
||||
}
|
||||
.await,
|
||||
)
|
||||
});
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)? })
|
||||
}
|
||||
|
||||
fn get_folder_latest_snapshot(
|
||||
&self,
|
||||
workspace_id: &str,
|
||||
) -> FutureResult<Option<FolderSnapshot>, FlowyError> {
|
||||
let server = Arc::downgrade(&self.server);
|
||||
let workspace_id = workspace_id.to_string();
|
||||
let (tx, rx) = channel();
|
||||
tokio::spawn(
|
||||
async move { tx.send(get_latest_snapshot_from_server(&workspace_id, server).await) },
|
||||
);
|
||||
FutureResult::new(async {
|
||||
Ok(
|
||||
rx.await
|
||||
.map_err(internal_error)?
|
||||
.map_err(internal_error)?
|
||||
.map(|snapshot| FolderSnapshot {
|
||||
snapshot_id: snapshot.snapshot_id,
|
||||
database_id: snapshot.oid,
|
||||
data: snapshot.data,
|
||||
created_at: snapshot.created_at,
|
||||
}),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn get_folder_updates(&self, workspace_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
|
||||
let server = Arc::downgrade(&self.server);
|
||||
let (tx, rx) = channel();
|
||||
let workspace_id = workspace_id.to_string();
|
||||
tokio::spawn(async move { tx.send(get_updates_from_server(&workspace_id, server).await) });
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error) })
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_workspace(
|
||||
client: &PostgresObject,
|
||||
uid: i64,
|
||||
name: &str,
|
||||
) -> Result<Workspace, FlowyError> {
|
||||
let new_workspace_id = Uuid::new_v4();
|
||||
|
||||
// Create workspace
|
||||
let (sql, params) = InsertSqlBuilder::new(WORKSPACE_TABLE)
|
||||
.value("uid", uid)
|
||||
.value(WORKSPACE_ID, new_workspace_id)
|
||||
.value(WORKSPACE_NAME, name.to_string())
|
||||
.build();
|
||||
let stmt = client
|
||||
.prepare_cached(&sql)
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
|
||||
client
|
||||
.execute_raw(&stmt, params)
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
|
||||
|
||||
// Read the workspace
|
||||
let (sql, params) = SelectSqlBuilder::new(WORKSPACE_TABLE)
|
||||
.column(WORKSPACE_ID)
|
||||
.column(WORKSPACE_NAME)
|
||||
.column(CREATED_AT)
|
||||
.where_clause(WORKSPACE_ID, new_workspace_id)
|
||||
.build();
|
||||
let stmt = client
|
||||
.prepare_cached(&sql)
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
|
||||
|
||||
let rows = Box::pin(
|
||||
client
|
||||
.query_raw(&stmt, params)
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?,
|
||||
);
|
||||
pin_mut!(rows);
|
||||
|
||||
if let Some(Ok(row)) = rows.next().await {
|
||||
let created_at = row
|
||||
.try_get::<&str, DateTime<Utc>>(CREATED_AT)
|
||||
.unwrap_or_default();
|
||||
let workspace_id: Uuid = row.get(WORKSPACE_ID);
|
||||
|
||||
Ok(Workspace {
|
||||
id: workspace_id.to_string(),
|
||||
name: row.get(WORKSPACE_NAME),
|
||||
child_views: Default::default(),
|
||||
created_at: created_at.timestamp(),
|
||||
})
|
||||
} else {
|
||||
Err(FlowyError::new(
|
||||
ErrorCode::PgDatabaseError,
|
||||
"Create workspace failed",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::supabase::request::{
|
||||
create_user_with_uuid, create_workspace_with_uid, get_user_workspace_with_uid,
|
||||
};
|
||||
use crate::supabase::{SupabaseConfiguration, SupabaseServer};
|
||||
use dotenv::dotenv;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use uuid::Uuid;
|
||||
|
||||
use flowy_folder2::deps::FolderCloudService;
|
||||
use flowy_user::event_map::UserAuthService;
|
||||
use lib_infra::box_any::BoxAny;
|
||||
|
||||
use crate::supabase::impls::folder::SupabaseFolderCloudServiceImpl;
|
||||
use crate::supabase::impls::SupabaseUserAuthServiceImpl;
|
||||
use crate::supabase::{PostgresConfiguration, PostgresServer};
|
||||
|
||||
#[tokio::test]
|
||||
async fn create_user_workspace() {
|
||||
dotenv().ok();
|
||||
if let Ok(config) = SupabaseConfiguration::from_env() {
|
||||
let server = Arc::new(SupabaseServer::new(config));
|
||||
let uuid = uuid::Uuid::new_v4();
|
||||
let uid = create_user_with_uuid(server.postgres.clone(), uuid.to_string())
|
||||
.await
|
||||
.unwrap()
|
||||
.uid;
|
||||
|
||||
create_workspace_with_uid(server.postgres.clone(), uid, "test")
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let workspaces = get_user_workspace_with_uid(server.postgres.clone(), uid)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(workspaces.len(), 2);
|
||||
assert_eq!(workspaces[0].name, "My workspace");
|
||||
assert_eq!(workspaces[1].name, "test");
|
||||
if dotenv::from_filename("./.env.workspace.test").is_err() {
|
||||
return;
|
||||
}
|
||||
let server = Arc::new(PostgresServer::new(
|
||||
PostgresConfiguration::from_env().unwrap(),
|
||||
));
|
||||
let user_service = SupabaseUserAuthServiceImpl::new(server.clone());
|
||||
|
||||
// create user
|
||||
let mut params = HashMap::new();
|
||||
params.insert("uuid".to_string(), Uuid::new_v4().to_string());
|
||||
let user = user_service.sign_up(BoxAny::new(params)).await.unwrap();
|
||||
|
||||
// create workspace
|
||||
let folder_service = SupabaseFolderCloudServiceImpl::new(server);
|
||||
let workspace = folder_service
|
||||
.create_workspace(user.user_id, "my test workspace")
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(workspace.name, "my test workspace");
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,11 @@
|
||||
pub use collab_storage::*;
|
||||
pub(crate) use database::*;
|
||||
pub(crate) use document::*;
|
||||
pub(crate) use folder::*;
|
||||
pub use user::*;
|
||||
|
||||
mod collab_storage;
|
||||
mod database;
|
||||
mod document;
|
||||
mod folder;
|
||||
mod user;
|
||||
|
||||
pub(crate) use folder::*;
|
||||
pub(crate) use user::*;
|
||||
|
@ -1,54 +1,75 @@
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use postgrest::Postgrest;
|
||||
use deadpool_postgres::GenericClient;
|
||||
use futures::pin_mut;
|
||||
use futures_util::StreamExt;
|
||||
use tokio::sync::oneshot::channel;
|
||||
use tokio_postgres::error::SqlState;
|
||||
use uuid::Uuid;
|
||||
|
||||
use flowy_error::FlowyError;
|
||||
use flowy_error::{internal_error, ErrorCode, FlowyError};
|
||||
use flowy_user::entities::{SignInResponse, SignUpResponse, UpdateUserProfileParams, UserProfile};
|
||||
use flowy_user::event_map::UserAuthService;
|
||||
use flowy_user::event_map::{UserAuthService, UserCredentials};
|
||||
use lib_infra::box_any::BoxAny;
|
||||
use lib_infra::future::FutureResult;
|
||||
|
||||
use crate::supabase::request::*;
|
||||
use crate::supabase::entities::{GetUserProfileParams, UserProfileResponse};
|
||||
use crate::supabase::pg_db::PostgresObject;
|
||||
use crate::supabase::sql_builder::{SelectSqlBuilder, UpdateSqlBuilder};
|
||||
use crate::supabase::PostgresServer;
|
||||
use crate::util::uuid_from_box_any;
|
||||
|
||||
pub(crate) const USER_TABLE: &str = "af_user";
|
||||
pub(crate) const USER_PROFILE_TABLE: &str = "af_user_profile";
|
||||
#[allow(dead_code)]
|
||||
pub(crate) const USER_WORKSPACE_TABLE: &str = "af_workspace";
|
||||
pub(crate) struct PostgrestUserAuthServiceImpl {
|
||||
postgrest: Arc<Postgrest>,
|
||||
pub const USER_UUID: &str = "uuid";
|
||||
|
||||
pub struct SupabaseUserAuthServiceImpl {
|
||||
server: Arc<PostgresServer>,
|
||||
}
|
||||
|
||||
impl PostgrestUserAuthServiceImpl {
|
||||
pub(crate) fn new(postgrest: Arc<Postgrest>) -> Self {
|
||||
Self { postgrest }
|
||||
impl SupabaseUserAuthServiceImpl {
|
||||
pub fn new(server: Arc<PostgresServer>) -> Self {
|
||||
Self { server }
|
||||
}
|
||||
}
|
||||
|
||||
impl UserAuthService for PostgrestUserAuthServiceImpl {
|
||||
impl UserAuthService for SupabaseUserAuthServiceImpl {
|
||||
fn sign_up(&self, params: BoxAny) -> FutureResult<SignUpResponse, FlowyError> {
|
||||
let postgrest = self.postgrest.clone();
|
||||
FutureResult::new(async move {
|
||||
let uuid = uuid_from_box_any(params)?;
|
||||
let user = create_user_with_uuid(postgrest, uuid).await?;
|
||||
Ok(SignUpResponse {
|
||||
user_id: user.uid,
|
||||
workspace_id: user.workspace_id,
|
||||
..Default::default()
|
||||
})
|
||||
})
|
||||
let server = self.server.clone();
|
||||
let (tx, rx) = channel();
|
||||
tokio::spawn(async move {
|
||||
tx.send(
|
||||
async {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
let uuid = uuid_from_box_any(params)?;
|
||||
create_user_with_uuid(&client, uuid).await
|
||||
}
|
||||
.await,
|
||||
)
|
||||
});
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)? })
|
||||
}
|
||||
|
||||
fn sign_in(&self, params: BoxAny) -> FutureResult<SignInResponse, FlowyError> {
|
||||
let postgrest = self.postgrest.clone();
|
||||
FutureResult::new(async move {
|
||||
let uuid = uuid_from_box_any(params)?;
|
||||
let user_profile = get_user_profile(postgrest, GetUserProfileParams::Uuid(uuid)).await?;
|
||||
Ok(SignInResponse {
|
||||
user_id: user_profile.uid,
|
||||
workspace_id: user_profile.workspace_id,
|
||||
..Default::default()
|
||||
})
|
||||
})
|
||||
let server = self.server.clone();
|
||||
let (tx, rx) = channel();
|
||||
tokio::spawn(async move {
|
||||
tx.send(
|
||||
async {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
let uuid = uuid_from_box_any(params)?;
|
||||
let user_profile = get_user_profile(&client, GetUserProfileParams::Uuid(uuid)).await?;
|
||||
Ok(SignInResponse {
|
||||
user_id: user_profile.uid,
|
||||
workspace_id: user_profile.workspace_id,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
.await,
|
||||
)
|
||||
});
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)? })
|
||||
}
|
||||
|
||||
fn sign_out(&self, _token: Option<String>) -> FutureResult<(), FlowyError> {
|
||||
@ -57,111 +78,222 @@ impl UserAuthService for PostgrestUserAuthServiceImpl {
|
||||
|
||||
fn update_user(
|
||||
&self,
|
||||
_uid: i64,
|
||||
_token: &Option<String>,
|
||||
_credential: UserCredentials,
|
||||
params: UpdateUserProfileParams,
|
||||
) -> FutureResult<(), FlowyError> {
|
||||
let postgrest = self.postgrest.clone();
|
||||
FutureResult::new(async move {
|
||||
let _ = update_user_profile(postgrest, params).await?;
|
||||
Ok(())
|
||||
})
|
||||
let server = self.server.clone();
|
||||
let (tx, rx) = channel();
|
||||
tokio::spawn(async move {
|
||||
tx.send(
|
||||
async move {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
update_user_profile(&client, params).await
|
||||
}
|
||||
.await,
|
||||
)
|
||||
});
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)? })
|
||||
}
|
||||
|
||||
fn get_user_profile(
|
||||
&self,
|
||||
_token: Option<String>,
|
||||
uid: i64,
|
||||
credential: UserCredentials,
|
||||
) -> FutureResult<Option<UserProfile>, FlowyError> {
|
||||
let postgrest = self.postgrest.clone();
|
||||
FutureResult::new(async move {
|
||||
let user_profile_resp = get_user_profile(postgrest, GetUserProfileParams::Uid(uid)).await?;
|
||||
|
||||
let profile = UserProfile {
|
||||
id: user_profile_resp.uid,
|
||||
email: user_profile_resp.email,
|
||||
name: user_profile_resp.name,
|
||||
token: "".to_string(),
|
||||
icon_url: "".to_string(),
|
||||
openai_key: "".to_string(),
|
||||
workspace_id: user_profile_resp.workspace_id,
|
||||
};
|
||||
|
||||
Ok(Some(profile))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use dotenv::dotenv;
|
||||
|
||||
use flowy_user::entities::UpdateUserProfileParams;
|
||||
|
||||
use crate::supabase::request::{
|
||||
create_user_with_uuid, get_user_id_with_uuid, get_user_profile, get_user_workspace_with_uid,
|
||||
update_user_profile, GetUserProfileParams,
|
||||
};
|
||||
use crate::supabase::{SupabaseConfiguration, SupabaseServer};
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_user_table_test() {
|
||||
dotenv().ok();
|
||||
if let Ok(config) = SupabaseConfiguration::from_env() {
|
||||
let server = Arc::new(SupabaseServer::new(config));
|
||||
let uid = get_user_id_with_uuid(
|
||||
server.postgres.clone(),
|
||||
"c8c674fc-506f-403c-b052-209e09817f6e".to_string(),
|
||||
let server = self.server.clone();
|
||||
let (tx, rx) = channel();
|
||||
tokio::spawn(async move {
|
||||
tx.send(
|
||||
async move {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
let uid = credential
|
||||
.uid
|
||||
.ok_or(FlowyError::new(ErrorCode::InvalidParams, "uid is required"))?;
|
||||
let user_profile = get_user_profile(&client, GetUserProfileParams::Uid(uid))
|
||||
.await
|
||||
.ok()
|
||||
.map(|user_profile| UserProfile {
|
||||
id: user_profile.uid,
|
||||
email: user_profile.email,
|
||||
name: user_profile.name,
|
||||
token: "".to_string(),
|
||||
icon_url: "".to_string(),
|
||||
openai_key: "".to_string(),
|
||||
workspace_id: user_profile.workspace_id,
|
||||
});
|
||||
Ok(user_profile)
|
||||
}
|
||||
.await,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
println!("uid: {:?}", uid);
|
||||
}
|
||||
});
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)? })
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn insert_user_table_test() {
|
||||
dotenv().ok();
|
||||
if let Ok(config) = SupabaseConfiguration::from_env() {
|
||||
let server = Arc::new(SupabaseServer::new(config));
|
||||
let uuid = uuid::Uuid::new_v4();
|
||||
// let uuid = "c8c674fc-506f-403c-b052-209e09817f6e";
|
||||
let uid = create_user_with_uuid(server.postgres.clone(), uuid.to_string()).await;
|
||||
println!("uid: {:?}", uid);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn create_and_then_update_user_profile_test() {
|
||||
dotenv().ok();
|
||||
if let Ok(config) = SupabaseConfiguration::from_env() {
|
||||
let server = Arc::new(SupabaseServer::new(config));
|
||||
let uuid = uuid::Uuid::new_v4();
|
||||
let uid = create_user_with_uuid(server.postgres.clone(), uuid.to_string())
|
||||
.await
|
||||
.unwrap()
|
||||
.uid;
|
||||
let params = UpdateUserProfileParams {
|
||||
id: uid,
|
||||
name: Some("nathan".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
let result = update_user_profile(server.postgres.clone(), params)
|
||||
.await
|
||||
.unwrap();
|
||||
println!("result: {:?}", result);
|
||||
|
||||
let result = get_user_profile(server.postgres.clone(), GetUserProfileParams::Uid(uid))
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(result.name, "nathan".to_string());
|
||||
|
||||
let result = get_user_workspace_with_uid(server.postgres.clone(), uid)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!result.is_empty());
|
||||
}
|
||||
fn check_user(&self, credential: UserCredentials) -> FutureResult<(), FlowyError> {
|
||||
let uuid = credential.uuid.and_then(|uuid| Uuid::from_str(&uuid).ok());
|
||||
let server = self.server.clone();
|
||||
let (tx, rx) = channel();
|
||||
tokio::spawn(async move {
|
||||
tx.send(
|
||||
async move {
|
||||
let client = server.get_pg_client().await.recv().await?;
|
||||
check_user(&client, credential.uid, uuid).await
|
||||
}
|
||||
.await,
|
||||
)
|
||||
});
|
||||
FutureResult::new(async { rx.await.map_err(internal_error)? })
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_user_with_uuid(
|
||||
client: &PostgresObject,
|
||||
uuid: Uuid,
|
||||
) -> Result<SignUpResponse, FlowyError> {
|
||||
let mut is_new = true;
|
||||
if let Err(e) = client
|
||||
.execute(
|
||||
&format!("INSERT INTO {} (uuid) VALUES ($1);", USER_TABLE),
|
||||
&[&uuid],
|
||||
)
|
||||
.await
|
||||
{
|
||||
if let Some(code) = e.code() {
|
||||
if code == &SqlState::UNIQUE_VIOLATION {
|
||||
is_new = false;
|
||||
} else {
|
||||
return Err(FlowyError::new(ErrorCode::PgDatabaseError, e));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let user_profile = get_user_profile(client, GetUserProfileParams::Uuid(uuid)).await?;
|
||||
Ok(SignUpResponse {
|
||||
user_id: user_profile.uid,
|
||||
name: user_profile.name,
|
||||
workspace_id: user_profile.workspace_id,
|
||||
is_new,
|
||||
email: Some(user_profile.email),
|
||||
token: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_user_profile(
|
||||
client: &PostgresObject,
|
||||
params: GetUserProfileParams,
|
||||
) -> Result<UserProfileResponse, FlowyError> {
|
||||
let rows = match params {
|
||||
GetUserProfileParams::Uid(uid) => {
|
||||
let stmt = client
|
||||
.prepare_cached(&format!(
|
||||
"SELECT * FROM {} WHERE uid = $1",
|
||||
USER_PROFILE_TABLE
|
||||
))
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
|
||||
|
||||
client
|
||||
.query(&stmt, &[&uid])
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?
|
||||
},
|
||||
GetUserProfileParams::Uuid(uuid) => {
|
||||
let stmt = client
|
||||
.prepare_cached(&format!(
|
||||
"SELECT * FROM {} WHERE uuid = $1",
|
||||
USER_PROFILE_TABLE
|
||||
))
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
|
||||
|
||||
client
|
||||
.query(&stmt, &[&uuid])
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?
|
||||
},
|
||||
};
|
||||
|
||||
let mut user_profiles = rows
|
||||
.into_iter()
|
||||
.map(UserProfileResponse::from)
|
||||
.collect::<Vec<_>>();
|
||||
if user_profiles.is_empty() {
|
||||
Err(FlowyError::record_not_found())
|
||||
} else {
|
||||
Ok(user_profiles.remove(0))
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_user_profile(
|
||||
client: &PostgresObject,
|
||||
params: UpdateUserProfileParams,
|
||||
) -> Result<(), FlowyError> {
|
||||
if params.is_empty() {
|
||||
return Err(FlowyError::new(
|
||||
ErrorCode::InvalidParams,
|
||||
format!("Update user profile params is empty: {:?}", params),
|
||||
));
|
||||
}
|
||||
let (sql, pg_params) = UpdateSqlBuilder::new(USER_PROFILE_TABLE)
|
||||
.set("name", params.name)
|
||||
.set("email", params.email)
|
||||
.where_clause("uid", params.id)
|
||||
.build();
|
||||
|
||||
let stmt = client.prepare_cached(&sql).await.map_err(|e| {
|
||||
FlowyError::new(
|
||||
ErrorCode::PgDatabaseError,
|
||||
format!("Prepare update user profile sql error: {}", e),
|
||||
)
|
||||
})?;
|
||||
|
||||
let affect_rows = client
|
||||
.execute_raw(&stmt, pg_params)
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
|
||||
tracing::trace!("Update user profile affect rows: {}", affect_rows);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn check_user(
|
||||
client: &PostgresObject,
|
||||
uid: Option<i64>,
|
||||
uuid: Option<Uuid>,
|
||||
) -> Result<(), FlowyError> {
|
||||
if uid.is_none() && uuid.is_none() {
|
||||
return Err(FlowyError::new(
|
||||
ErrorCode::InvalidParams,
|
||||
"uid and uuid can't be both empty",
|
||||
));
|
||||
}
|
||||
|
||||
let (sql, params) = match uid {
|
||||
None => SelectSqlBuilder::new(USER_TABLE)
|
||||
.where_clause("uuid", uuid.unwrap())
|
||||
.build(),
|
||||
Some(uid) => SelectSqlBuilder::new(USER_TABLE)
|
||||
.where_clause("uid", uid)
|
||||
.build(),
|
||||
};
|
||||
|
||||
let stmt = client
|
||||
.prepare_cached(&sql)
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
|
||||
let rows = Box::pin(
|
||||
client
|
||||
.query_raw(&stmt, params)
|
||||
.await
|
||||
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?,
|
||||
);
|
||||
pin_mut!(rows);
|
||||
|
||||
// TODO(nathan): would it be better to use the token instead?
|
||||
if rows.next().await.is_some() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(FlowyError::new(
|
||||
ErrorCode::UserNotExist,
|
||||
"Can't find the user in pg database",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
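A hedged sketch (the demo_sign_up_twice helper is not in the commit) of the idempotent sign-up behaviour implemented by create_user_with_uuid above: a repeated uuid trips the UNIQUE_VIOLATION branch, so the call still succeeds but reports is_new == false and resolves to the same user.

async fn demo_sign_up_twice(client: &PostgresObject) -> Result<(), FlowyError> {
  let uuid = Uuid::new_v4();
  let first = create_user_with_uuid(client, uuid).await?;
  let second = create_user_with_uuid(client, uuid).await?;
  // Only the first call creates a row in af_user.
  assert!(first.is_new);
  assert!(!second.is_new);
  // Both calls resolve to the same uid via the af_user_profile lookup.
  assert_eq!(first.user_id, second.user_id);
  Ok(())
}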
100
frontend/rust-lib/flowy-server/src/supabase/migration.rs
Normal file
@ -0,0 +1,100 @@
|
||||
use refinery::embed_migrations;
|
||||
use tokio_postgres::Client;
|
||||
|
||||
embed_migrations!("./src/supabase/migrations");
|
||||
|
||||
const AF_MIGRATION_HISTORY: &str = "af_migration_history";
|
||||
|
||||
pub(crate) async fn run_migrations(client: &mut Client) -> Result<(), anyhow::Error> {
|
||||
match migrations::runner()
|
||||
.set_migration_table_name(AF_MIGRATION_HISTORY)
|
||||
.run_async(client)
|
||||
.await
|
||||
{
|
||||
Ok(report) => {
|
||||
if !report.applied_migrations().is_empty() {
|
||||
tracing::info!("Run postgres db migration: {:?}", report);
|
||||
}
|
||||
Ok(())
|
||||
},
|
||||
Err(e) => {
|
||||
tracing::error!("postgres db migration error: {}", e);
|
||||
Err(anyhow::anyhow!("postgres db migration error: {}", e))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop all tables and dependencies defined in the v1_initial_up.sql.
|
||||
/// Be careful when using this function. It will drop all tables and dependencies.
|
||||
/// Mostly used for testing.
|
||||
#[allow(dead_code)]
|
||||
#[cfg(debug_assertions)]
|
||||
pub(crate) async fn run_initial_drop(client: &Client) {
|
||||
// let sql = include_str!("migrations/initial/initial_down.sql");
|
||||
let sql = r#"DROP TABLE IF EXISTS af_user;
|
||||
DROP TABLE IF EXISTS af_workspace;
|
||||
DROP TABLE IF EXISTS af_user_profile;
|
||||
DROP TABLE IF EXISTS af_collab;
|
||||
DROP VIEW IF EXISTS af_collab_state;
|
||||
DROP TABLE IF EXISTS af_collab_snapshot;
|
||||
DROP TABLE IF EXISTS af_collab_statistics;
|
||||
|
||||
DROP TRIGGER IF EXISTS create_af_user_profile_trigger ON af_user_profile CASCADE;
|
||||
DROP FUNCTION IF EXISTS create_af_user_profile_trigger_func;
|
||||
|
||||
DROP TRIGGER IF EXISTS create_af_workspace_trigger ON af_workspace CASCADE;
|
||||
DROP FUNCTION IF EXISTS create_af_workspace_trigger_func;
|
||||
|
||||
DROP TRIGGER IF EXISTS af_collab_insert_trigger ON af_collab CASCADE;
|
||||
DROP FUNCTION IF EXISTS increment_af_collab_update_count;
|
||||
|
||||
DROP TRIGGER IF EXISTS af_collab_snapshot_update_edit_count_trigger ON af_collab_snapshot;
|
||||
DROP FUNCTION IF EXISTS af_collab_snapshot_update_edit_count;
|
||||
|
||||
DROP TRIGGER IF EXISTS check_and_delete_snapshots_trigger ON af_collab_snapshot CASCADE;
|
||||
DROP FUNCTION IF EXISTS check_and_delete_snapshots;
|
||||
"#;
|
||||
client.batch_execute(sql).await.unwrap();
|
||||
client
|
||||
.batch_execute("DROP TABLE IF EXISTS af_migration_history")
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use tokio_postgres::NoTls;
|
||||
|
||||
use crate::supabase::migration::run_initial_drop;
|
||||
use crate::supabase::*;
|
||||
|
||||
// ‼️‼️‼️ Warning: this test will create a table in the database
|
||||
#[tokio::test]
|
||||
async fn test_postgres_db() -> Result<(), anyhow::Error> {
|
||||
if dotenv::from_filename(".env.test.danger").is_err() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let configuration = PostgresConfiguration::from_env().unwrap();
|
||||
let mut config = tokio_postgres::Config::new();
|
||||
config
|
||||
.host(&configuration.url)
|
||||
.user(&configuration.user_name)
|
||||
.password(&configuration.password)
|
||||
.port(configuration.port);
|
||||
|
||||
// Using the https://docs.rs/postgres-openssl/latest/postgres_openssl/ to enable tls connection.
|
||||
let (client, connection) = config.connect(NoTls).await?;
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
tracing::error!("postgres db connection error: {}", e);
|
||||
}
|
||||
});
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
run_initial_drop(&client).await;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
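A hedged sketch (not part of the commit) of how run_migrations slots into connection setup: connect with tokio-postgres, drive the connection on its own task, then apply the embedded migrations before the client is handed out.

async fn connect_and_migrate(
  config: tokio_postgres::Config,
) -> Result<tokio_postgres::Client, anyhow::Error> {
  use tokio_postgres::NoTls;

  let (mut client, connection) = config.connect(NoTls).await?;
  // The connection object owns the socket and must be polled on its own task.
  tokio::spawn(async move {
    if let Err(e) = connection.await {
      tracing::error!("postgres connection error: {}", e);
    }
  });
  // Apply the refinery migrations tracked in af_migration_history.
  run_migrations(&mut client).await?;
  Ok(client)
}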
@ -0,0 +1,23 @@
|
||||
DROP TABLE IF EXISTS af_user;
|
||||
DROP TABLE IF EXISTS af_workspace;
|
||||
DROP TABLE IF EXISTS af_user_profile;
|
||||
DROP TABLE IF EXISTS af_collab;
|
||||
DROP VIEW IF EXISTS af_collab_state;
|
||||
DROP TABLE IF EXISTS af_collab_snapshot;
|
||||
DROP TABLE IF EXISTS af_collab_statistics;
|
||||
|
||||
DROP TRIGGER IF EXISTS create_af_user_profile_trigger ON af_user_profile CASCADE;
|
||||
DROP FUNCTION IF EXISTS create_af_user_profile_trigger_func;
|
||||
|
||||
DROP TRIGGER IF EXISTS create_af_workspace_trigger ON af_workspace CASCADE;
|
||||
DROP FUNCTION IF EXISTS create_af_workspace_trigger_func;
|
||||
|
||||
DROP TRIGGER IF EXISTS af_collab_insert_trigger ON af_collab CASCADE;
|
||||
DROP FUNCTION IF EXISTS increment_af_collab_update_count;
|
||||
|
||||
DROP TRIGGER IF EXISTS af_collab_snapshot_update_edit_count_trigger ON af_collab_snapshot;
|
||||
DROP FUNCTION IF EXISTS af_collab_snapshot_update_edit_count;
|
||||
|
||||
DROP TRIGGER IF EXISTS check_and_delete_snapshots_trigger ON af_collab_snapshot CASCADE;
|
||||
DROP FUNCTION IF EXISTS check_and_delete_snapshots;
|
||||
|
@ -0,0 +1,127 @@
|
||||
-- user table
|
||||
CREATE TABLE IF NOT EXISTS af_user (
|
||||
uuid UUID PRIMARY KEY,
|
||||
uid BIGINT GENERATED ALWAYS AS IDENTITY,
|
||||
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
-- user profile table
|
||||
CREATE TABLE IF NOT EXISTS af_user_profile (
|
||||
uid BIGINT PRIMARY KEY,
|
||||
uuid UUID,
|
||||
name TEXT,
|
||||
email TEXT,
|
||||
workspace_id UUID DEFAULT uuid_generate_v4()
|
||||
);
|
||||
-- user_profile trigger
|
||||
CREATE OR REPLACE FUNCTION create_af_user_profile_trigger_func() RETURNS TRIGGER AS $$ BEGIN
|
||||
INSERT INTO af_user_profile (uid, uuid)
|
||||
VALUES (NEW.uid, NEW.uuid);
|
||||
RETURN NEW;
|
||||
END $$ LANGUAGE plpgsql;
|
||||
CREATE TRIGGER create_af_user_profile_trigger BEFORE
|
||||
INSERT ON af_user FOR EACH ROW EXECUTE FUNCTION create_af_user_profile_trigger_func();
|
||||
-- workspace table
|
||||
CREATE TABLE IF NOT EXISTS af_workspace (
|
||||
workspace_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
uid BIGINT,
|
||||
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
|
||||
workspace_name TEXT DEFAULT 'My Workspace'
|
||||
);
|
||||
-- workspace trigger
|
||||
CREATE OR REPLACE FUNCTION create_af_workspace_trigger_func() RETURNS TRIGGER AS $$ BEGIN
|
||||
INSERT INTO af_workspace (uid, workspace_id)
|
||||
VALUES (NEW.uid, NEW.workspace_id);
|
||||
RETURN NEW;
|
||||
END $$ LANGUAGE plpgsql;
|
||||
CREATE TRIGGER create_af_workspace_trigger BEFORE
|
||||
INSERT ON af_user_profile FOR EACH ROW EXECUTE FUNCTION create_af_workspace_trigger_func();
|
||||
-- collab table.
CREATE TABLE IF NOT EXISTS af_collab (
  oid TEXT NOT NULL,
  name TEXT DEFAULT '',
  key BIGINT GENERATED ALWAYS AS IDENTITY,
  value BYTEA NOT NULL,
  value_size INTEGER,
  created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (oid, key)
);
-- collab statistics. It will be used to store the edit_count of the collab.
CREATE TABLE IF NOT EXISTS af_collab_statistics (
  oid TEXT PRIMARY KEY,
  edit_count BIGINT DEFAULT 0
);
-- collab statistics trigger. It will increment the edit_count of the collab when a new row is inserted in the af_collab table.
CREATE OR REPLACE FUNCTION increment_af_collab_edit_count() RETURNS TRIGGER AS $$ BEGIN IF EXISTS(
  SELECT 1
  FROM af_collab_statistics
  WHERE oid = NEW.oid
) THEN
  UPDATE af_collab_statistics
  SET edit_count = edit_count + 1
  WHERE oid = NEW.oid;
ELSE
  INSERT INTO af_collab_statistics (oid, edit_count)
  VALUES (NEW.oid, 1);
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_collab_insert_trigger
  AFTER
  INSERT ON af_collab FOR EACH ROW EXECUTE FUNCTION increment_af_collab_edit_count();
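With the trigger above in place, a writer never maintains the edit counter itself: every row appended to af_collab bumps (or creates) the matching af_collab_statistics row. A rough sketch against a plain tokio_postgres::Client; the append_update helper and the column choices shown here are illustrative assumptions, not code from this commit:

// Illustrative only: append one update for an object and read back its edit count.
async fn append_update(
  client: &tokio_postgres::Client,
  oid: &str,
  update: &[u8],
) -> Result<i64, tokio_postgres::Error> {
  client
    .execute(
      "INSERT INTO af_collab (oid, value, value_size) VALUES ($1, $2, $3)",
      &[&oid, &update, &(update.len() as i32)],
    )
    .await?;

  // The AFTER INSERT trigger has already incremented (or created) the statistics row.
  let row = client
    .query_one(
      "SELECT edit_count FROM af_collab_statistics WHERE oid = $1",
      &[&oid],
    )
    .await?;
  Ok(row.get::<_, i64>("edit_count"))
}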
-- collab snapshot. It will be used to store the snapshots of the collab.
CREATE TABLE IF NOT EXISTS af_collab_snapshot (
  sid BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
  oid TEXT NOT NULL,
  name TEXT DEFAULT '',
  blob BYTEA NOT NULL,
  blob_size INTEGER NOT NULL,
  edit_count BIGINT DEFAULT 0,
  created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- auto insert edit_count in the snapshot table: copy the collab's current edit_count into each new snapshot row.
CREATE OR REPLACE FUNCTION af_collab_snapshot_update_edit_count() RETURNS TRIGGER AS $$ BEGIN NEW.edit_count := (
  SELECT edit_count
  FROM af_collab_statistics
  WHERE oid = NEW.oid
);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_collab_snapshot_update_edit_count_trigger BEFORE
  INSERT ON af_collab_snapshot FOR EACH ROW EXECUTE FUNCTION af_collab_snapshot_update_edit_count();
-- collab snapshot cleanup trigger. When an object has more than 20 snapshots, it deletes snapshots older than 10 days, keeping at least the 20 most recent.
-- Alternatively, this cleanup could be scheduled periodically with the PG_CRON extension instead of running as a trigger.
CREATE OR REPLACE FUNCTION check_and_delete_snapshots() RETURNS TRIGGER AS $$
DECLARE row_count INT;
BEGIN
  SELECT COUNT(*) INTO row_count
  FROM af_collab_snapshot
  WHERE oid = NEW.oid;
  IF row_count > 20 THEN
    DELETE FROM af_collab_snapshot
    WHERE sid IN (
      SELECT sid
      FROM af_collab_snapshot
      WHERE created_at < NOW() - INTERVAL '10 days'
        AND oid = NEW.oid
      ORDER BY created_at ASC
      LIMIT row_count - 20
    );
  END IF;
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER check_and_delete_snapshots_trigger
  AFTER
  INSERT
  OR
  UPDATE ON af_collab_snapshot FOR EACH ROW EXECUTE FUNCTION check_and_delete_snapshots();
-- collab state view. It will be used to get the current state of the collab.
CREATE VIEW af_collab_state AS
SELECT a.oid,
  a.created_at AS snapshot_created_at,
  a.edit_count AS snapshot_edit_count,
  b.edit_count AS current_edit_count
FROM af_collab_snapshot AS a
  JOIN af_collab_statistics AS b ON a.oid = b.oid;
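The view ties a snapshot's recorded edit_count to the live counter, which is enough to decide whether an object has accumulated enough new edits to deserve a fresh snapshot. A hedged sketch of that decision; the should_snapshot helper, the threshold, and the assumption that the edit counts are never NULL are illustrative and not part of this commit:

// Illustrative only: compare the latest snapshot's edit count with the live count.
async fn should_snapshot(
  client: &tokio_postgres::Client,
  oid: &str,
  threshold: i64,
) -> Result<bool, tokio_postgres::Error> {
  let rows = client
    .query(
      "SELECT snapshot_edit_count, current_edit_count FROM af_collab_state
       WHERE oid = $1 ORDER BY snapshot_created_at DESC LIMIT 1",
      &[&oid],
    )
    .await?;
  Ok(match rows.first() {
    // No snapshot at all yet: take one as soon as the object has been edited.
    None => true,
    Some(row) => {
      let snapshot_count: i64 = row.get("snapshot_edit_count");
      let current_count: i64 = row.get("current_edit_count");
      current_count - snapshot_count >= threshold
    },
  })
}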
@ -1,7 +1,12 @@
pub use configuration::*;
pub use server::*;

mod entities;
pub mod impls;
mod request;
mod response;
mod retry;
mod pg_db;
mod sql_builder;
// mod postgres_http;
mod configuration;
mod migration;
mod queue;
mod server;
123
frontend/rust-lib/flowy-server/src/supabase/pg_db.rs
Normal file
@ -0,0 +1,123 @@
use std::cmp::Ordering;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use deadpool_postgres::{Manager, ManagerConfig, Object, Pool, RecyclingMethod};
use tokio_postgres::NoTls;

use flowy_error::{ErrorCode, FlowyError, FlowyResult};

use crate::supabase::migration::run_migrations;
use crate::supabase::queue::RequestPayload;
use crate::supabase::PostgresConfiguration;

pub type PostgresObject = Object;
pub struct PostgresDB {
  pub configuration: PostgresConfiguration,
  pub client: Arc<Pool>,
}

impl PostgresDB {
  #[allow(dead_code)]
  pub async fn from_env() -> Result<Self, anyhow::Error> {
    let configuration = PostgresConfiguration::from_env()?;
    Self::new(configuration).await
  }

  pub async fn new(configuration: PostgresConfiguration) -> Result<Self, anyhow::Error> {
    let mut pg_config = tokio_postgres::Config::new();
    pg_config
      .host(&configuration.url)
      .user(&configuration.user_name)
      .password(&configuration.password)
      .port(configuration.port);

    let mgr_config = ManagerConfig {
      recycling_method: RecyclingMethod::Fast,
    };

    // TLS is disabled here (NoTls). https://docs.rs/postgres-openssl/latest/postgres_openssl/ could be used to enable a TLS connection.
    let mgr = Manager::from_config(pg_config, NoTls, mgr_config);
    let pool = Pool::builder(mgr).max_size(16).build()?;
    let mut client = pool.get().await?;
    // Run migrations
    run_migrations(&mut client).await?;

    Ok(Self {
      configuration,
      client: Arc::new(pool),
    })
  }
}
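As a rough usage sketch (not part of this file): PostgresDB wraps a deadpool pool, so callers borrow a PostgresObject from it and use it as a tokio_postgres client. The ping function below is illustrative, and from_env presumably requires the PostgresConfiguration environment variables to be set:

// Illustrative only: build the pool from the environment and run a trivial query.
async fn ping() -> Result<(), anyhow::Error> {
  let db = PostgresDB::from_env().await?;
  // PostgresObject derefs to tokio_postgres::Client.
  let client: PostgresObject = db.client.get().await?;
  let row = client.query_one("SELECT 1::BIGINT", &[]).await?;
  assert_eq!(row.get::<_, i64>(0), 1);
  Ok(())
}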
pub type PgClientSender = tokio::sync::mpsc::Sender<PostgresObject>;

pub struct PgClientReceiver(pub tokio::sync::mpsc::Receiver<PostgresObject>);
impl PgClientReceiver {
  pub async fn recv(&mut self) -> FlowyResult<PostgresObject> {
    match self.0.recv().await {
      None => Err(FlowyError::new(
        ErrorCode::PgConnectError,
        "Can't connect to the postgres db".to_string(),
      )),
      Some(object) => Ok(object),
    }
  }
}
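A caller obtains a client through this pair by creating an mpsc channel, handing the sender half to a GetPgClient event (the PostgresEvent type defined just below), and awaiting the receiver half. A small sketch under that assumption; pushing the event onto the crate's request queue is only represented by a closure here, not implemented:

// Illustrative only: the queue side eventually calls sender.send(object) with a
// pooled connection, which this function then receives.
async fn request_client(push_event: impl FnOnce(PostgresEvent)) -> FlowyResult<PostgresObject> {
  let (tx, rx) = tokio::sync::mpsc::channel(1);
  push_event(PostgresEvent::GetPgClient { id: 0, sender: tx });
  let mut receiver = PgClientReceiver(rx);
  receiver.recv().await
}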
#[derive(Clone)]
pub enum PostgresEvent {
  ConnectDB,
  /// The id is used to sequence the events within the priority queue.
  /// The sender is used to transmit the PostgresObject back to the original requester.
  /// At present, the sender is invoked only after the previous PostgresObject has been processed.
  /// As a future optimization, the [GetPgClient] events could be processed in batches using the [Pool].
  GetPgClient {
    id: u32,
    sender: PgClientSender,
  },
}

impl Debug for PostgresEvent {
  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
    match self {
      PostgresEvent::ConnectDB => f.write_str("ConnectDB"),
      PostgresEvent::GetPgClient { id, .. } => f.write_fmt(format_args!("GetPgClient({})", id)),
    }
  }
}

impl Ord for PostgresEvent {
  fn cmp(&self, other: &Self) -> Ordering {
    match (self, other) {
      (PostgresEvent::ConnectDB, PostgresEvent::ConnectDB) => Ordering::Equal,
      (PostgresEvent::ConnectDB, PostgresEvent::GetPgClient { .. }) => Ordering::Greater,
      (PostgresEvent::GetPgClient { .. }, PostgresEvent::ConnectDB) => Ordering::Less,
      (PostgresEvent::GetPgClient { id: id1, .. }, PostgresEvent::GetPgClient { id: id2, .. }) => {
        id1.cmp(id2).reverse()
      },
    }
  }
}

impl Eq for PostgresEvent {}

impl PartialEq<Self> for PostgresEvent {
  fn eq(&self, other: &Self) -> bool {
    match (self, other) {
      (PostgresEvent::ConnectDB, PostgresEvent::ConnectDB) => true,
      (PostgresEvent::GetPgClient { id: id1, .. }, PostgresEvent::GetPgClient { id: id2, .. }) => {
        id1 == id2
      },
      _ => false,
    }
  }
}

impl PartialOrd<Self> for PostgresEvent {
  fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    Some(self.cmp(other))
  }
}

impl RequestPayload for PostgresEvent {}
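The Ord implementation above is what gives the request queue its priority behaviour: ConnectDB always outranks client requests, and among GetPgClient events the reversed id comparison means the oldest (smallest id) request is served first. A quick illustrative check with a std BinaryHeap; the crate's actual queue lives in the queue module and is not shown in this diff:

// Illustrative only: demonstrates the ordering in a max-heap.
fn ordering_demo(sender: PgClientSender) {
  use std::collections::BinaryHeap;

  let mut heap = BinaryHeap::new();
  heap.push(PostgresEvent::GetPgClient { id: 2, sender: sender.clone() });
  heap.push(PostgresEvent::GetPgClient { id: 1, sender: sender.clone() });
  heap.push(PostgresEvent::ConnectDB);

  // ConnectDB is popped first, then the pending client requests in id order.
  assert!(matches!(heap.pop(), Some(PostgresEvent::ConnectDB)));
  assert!(matches!(heap.pop(), Some(PostgresEvent::GetPgClient { id: 1, .. })));
  assert!(matches!(heap.pop(), Some(PostgresEvent::GetPgClient { id: 2, .. })));
}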
Some files were not shown because too many files have changed in this diff.