feat: migrate user data to cloud (#3078)

* refactor: weak passed-in params in handler

* refactor: rename struct

* chore: update tables

* chore: update schema

* chore: add permission

* chore: update tables

* chore: support transaction mode

* chore: workspace database id

* chore: add user workspace

* feat: return list of workspaces

* chore: add user to workspace

* feat: separate database row table

* refactor: update schema

* chore: partition table

* chore: use transaction

* refactor: dir

* refactor: collab db ref

* fix: collab db lock

* chore: rename files

* chore: add tables descriptions

* chore: update readme

* docs: update documentation

* chore: rename crate

* chore: update ref

* chore: update tests

* chore: update tests

* refactor: crate deps

* chore: update crate ref

* chore: remove unused deps

* chore: remove unused deps

* chore: update collab crate refs

* chore: replace client with transaction in pooler

* refactor: return error type

* refactor: use anyhow error in deps

* feat: supabase postgrest user signin (wip)

* fix: Cargo.toml source git deps, changed Error to anyhow::Error

* fix: uuid serialization

* chore: fix conflict

* chore: extend the response

* feat: add implementation placeholders

* feat: impl get_user_workspaces

* feat: impl get_user_profile

* test: create workspace

* fix: postgrest: field names and alias

* chore: implement folder restful api

* chore: implement collab storage with restful api

* feat: added placeholders for impl: update_user_profile, check_user

* feat: impl: update_user_profile

* feat: impl: check_user

* fix: use UidResponse, add more debug info for serde serialization error

* fix: get_user_profile: use Optional<UserProfileResponse>

* chore: impl init sync

* chore: support soft delete

* feat: postgresql: add migration test

* feat: postgresql migration test: added UID display and colored output

* feat: postgresql migration test: workspace role

* feat: postgresql migration test: create shared common utils

* feat: postgresql migration test: fixed shebang

* chore: add flush_collab_update pg function

* chore: implement database and document restful api

* chore: migrate to use restful api

* chore: update table schema

* chore: fix tests

* chore: remove unused code

* chore: format code

* chore: remove unused env

* fix: tauri build

* fix: tauri build

---------

Co-authored-by: Fu Zi Xiang <speed2exe@live.com.sg>
Author: Nathan.fooo
Date: 2023-07-29 09:46:24 +08:00 (committed by GitHub)
Commit: 2cd88594e8 (parent a885170869)
179 changed files with 4999 additions and 5314 deletions


@ -7,6 +7,7 @@ edition = "2021"
[dependencies]
tracing = { version = "0.1" }
futures = "0.3.26"
futures-util = "0.3.26"
reqwest = "0.11.14"
hyper = "0.14"
@ -14,40 +15,31 @@ config = { version = "0.10.1", default-features = false, features = ["yaml"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde-aux = "4.2.0"
nanoid = "0.4.0"
thiserror = "1.0"
tokio = { version = "1.26", features = ["sync"]}
parking_lot = "0.12"
lazy_static = "1.4.0"
bytes = "1.0.1"
bytes = { version = "1.0.1", features = ["serde"] }
tokio-retry = "0.3"
anyhow = "1.0"
uuid = { version = "1.3.3", features = ["v4"] }
chrono = { version = "0.4.22", default-features = false, features = ["clock"] }
appflowy-integrate = { version = "0.1.0" }
postgrest = "1.0"
tokio-postgres = { version = "0.7.8", optional = true, features = ["with-uuid-1","with-chrono-0_4"] }
deadpool-postgres = "0.10.5"
refinery = { version = "0.8.10", optional = true, features = ["tokio-postgres"] }
async-stream = "0.3.4"
futures = "0.3.26"
lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-user = { path = "../flowy-user" }
flowy-folder2 = { path = "../flowy-folder2" }
flowy-database2 = { path = "../flowy-database2" }
flowy-document2 = { path = "../flowy-document2" }
flowy-error = { path = "../flowy-error" }
flowy-server-config = { path = "../flowy-server-config" }
collab-folder = { version = "0.1.0" }
collab = { version = "0.1.0" }
collab-plugins = { version = "0.1.0" }
collab-document = { version = "0.1.0" }
hex = "0.4.3"
postgrest = "1.0"
lib-infra = { path = "../../../shared-lib/lib-infra" }
flowy-user-deps = { path = "../flowy-user-deps" }
flowy-folder-deps = { path = "../flowy-folder-deps" }
flowy-database-deps = { path = "../flowy-database-deps" }
flowy-document-deps = { path = "../flowy-document-deps" }
flowy-error = { path = "../flowy-error", features = ["impl_from_postgres", "impl_from_serde", "impl_from_reqwest"] }
flowy-server-config = { path = "../flowy-server-config" }
[dev-dependencies]
uuid = { version = "1.3.3", features = ["v4"] }
tracing-subscriber = { version = "0.3.3", features = ["env-filter"] }
dotenv = "0.15.0"
yrs = "0.16.5"
[features]
default = ["postgres_storage"]
postgres_storage = ["tokio-postgres", "refinery"]


@ -0,0 +1,108 @@
# AppFlowy Cloud Architecture
AppFlowy supports multiple cloud solutions. Users can choose their preferred cloud provider, such as Supabase, Firebase,
AWS, or our own AppFlowyCloud (Self-hosted server).
![](architecture-Application.png)
## Design
AppFlowy uses the [AppFlowyServer] trait to abstract the cloud provider; each cloud provider implements this
trait, as the image below shows. Users can choose their preferred cloud provider or simply use the default option, the
LocalServer. When using the LocalServer, data is stored on the local file system. Users can migrate to a cloud provider
if needed; for instance, migrating from the LocalServer to AppFlowyCloud creates a new user in the cloud and transfers
all the data from the local database to the cloud.
![](architecture.png)
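As a minimal, self-contained sketch of this abstraction (not the real trait, which lives in the flowy-server crate and exposes user, folder, database, document, and collab-storage services), provider selection can look like this:

```rust
use std::sync::Arc;

// Simplified stand-in for the real AppFlowyServer trait.
trait AppFlowyServer: Send + Sync + 'static {
  fn name(&self) -> &'static str;
}

struct LocalServer;
impl AppFlowyServer for LocalServer {
  fn name(&self) -> &'static str { "LocalServer" }
}

struct SupabaseServer;
impl AppFlowyServer for SupabaseServer {
  fn name(&self) -> &'static str { "SupabaseServer" }
}

// The application picks one provider at startup and only ever talks
// to the trait object, so providers stay interchangeable.
fn make_server(use_cloud: bool) -> Arc<dyn AppFlowyServer> {
  if use_cloud { Arc::new(SupabaseServer) } else { Arc::new(LocalServer) }
}

fn main() {
  let server = make_server(false);
  println!("active provider: {}", server.name());
}
```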
## AppFlowy Cloud Implementation (WIP)
### Restful API
### Table schema
## Supabase Implementation
### Table schema
![](./schema.png)
1. `af_roles` table: This table contains a list of roles that are used in your application, such as 'Owner', 'Member', and 'Guest'.
2. `af_permissions` table: This table stores permissions that are used in your application. Each permission has a name, a description, and an access level.
3. `af_role_permissions` table: This is a many-to-many relation table between roles and permissions. It represents which permissions a role has.
4. `af_user` table: This table stores user details such as uuid, email, uid, name, and created_at. Here, uid is an auto-incrementing integer that uniquely identifies a user.
5. `af_workspace` table: This table contains all the workspaces. Each workspace has an owner which is associated with the uid of a user in the `af_user` table.
6. `af_workspace_member` table: This table maintains a list of all the members associated with a workspace and their roles.
7. `af_collab` and `af_collab_member` tables: These tables store the collaborations and their members respectively. Each collaboration has an owner and a workspace associated with it.
8. `af_collab_update`, `af_collab_update_document`, `af_collab_update_database`, `af_collab_update_w_database`, `af_collab_update_folder`, `af_database_row_update` tables: These tables handle updates to collaborations (see the partition sketch after this list).
9. `af_collab_statistics`, `af_collab_snapshot`, `af_collab_state`: These tables and the `af_collab_state` view maintain statistics and snapshots of collaborations.
10. `af_user_profile_view` view: This view is used to get the latest workspace_id for each user.
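The update tables in item 8 are partitions of `af_collab_update`, selected by `partition_key`. The mapping below is a sketch inferred from the schema diagram later in this document (document = 0, database = 1, workspace database = 2, folder = 3); the `CollabType` enum here is a local stand-in for the one in `collab_plugins`:

```rust
// Local stand-in for the collab type used throughout the Rust code.
enum CollabType {
  Document,
  Database,
  WorkspaceDatabase,
  Folder,
}

// Mirrors the partitioning of af_collab_update shown in the schema diagram.
fn partition_key(ty: &CollabType) -> i32 {
  match ty {
    CollabType::Document => 0,
    CollabType::Database => 1,
    CollabType::WorkspaceDatabase => 2,
    CollabType::Folder => 3,
  }
}

fn main() {
  assert_eq!(partition_key(&CollabType::Folder), 3);
}
```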
![](./schema-Triggers_in_Database.png)
Here's a detailed description for each of these triggers:
1. `create_af_workspace_trigger`:
This trigger is designed to automate the process of workspace creation in the `af_workspace` table after a new user is inserted into the `af_user` table. When a new user is added, this trigger fires and inserts a new record into the `af_workspace` table, setting the `owner_uid` to the UID of the new user.
2. `manage_af_workspace_member_role_trigger`:
This trigger helps to manage the roles of workspace members. After an insert operation on the `af_workspace` table, this trigger automatically fires and creates a new record in the `af_workspace_member` table. The new record identifies the user as a member of the workspace with the role 'Owner'. This ensures that every new workspace has an owner.
3. `insert_into_af_collab_trigger`:
The purpose of this trigger is to ensure consistency between the `af_collab_update` and `af_collab` tables. Before an insert on the `af_collab_update` table, this trigger fires and checks whether a corresponding collaboration exists in the `af_collab` table, using the oid and uid. If not, the trigger creates one with the oid, uid, and current timestamp. This way, every collab update operation corresponds to a valid collaboration (see the sketch after this list).
4. `insert_into_af_collab_member_trigger`:
This trigger helps to manage the membership of users in collaborations. After a new collaboration is inserted into the `af_collab` table, this trigger fires. It checks if a corresponding collaboration member exists in the `af_collab_member` table. If a corresponding member does not exist, the trigger creates one, using the collaboration id and user id. This ensures that every collaboration has at least one member.
5. `af_collab_snapshot_update_edit_count_trigger`:
This trigger is designed to keep track of the number of edits on each collaboration snapshot in the `af_collab_snapshot` table. When an update operation is performed on the `af_collab_snapshot` table, this trigger fires. It increments the `edit_count` of the corresponding record in the `af_collab_snapshot` table by one. This ensures that the application can keep track of how many times each collaboration snapshot has been edited.
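As an illustration of trigger 3, a client can insert straight into an update table and rely on the trigger to create the `af_collab` row. This is a hedged sketch using the `postgrest` crate; the URL, key, and payload values are illustrative placeholders:

```rust
use postgrest::Postgrest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
  let client = Postgrest::new("https://your-project.supabase.co/rest/v1")
    .insert_header("apikey", "your-anon-key");

  // Insert an update row only; insert_into_af_collab_trigger creates the
  // matching af_collab record server-side if it does not exist yet.
  let resp = client
    .from("af_collab_update_document")
    .insert(r#"{"oid": "doc-1", "partition_key": 0, "uid": 1, "value": "\\x00", "value_size": 1, "md5": "d41d8cd9", "workspace_id": "00000000-0000-0000-0000-000000000000"}"#)
    .execute()
    .await?;
  println!("status: {}", resp.status());
  Ok(())
}
```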
### Supabase configuration
#### Test
To run the tests, you need to set up the `.env.test` file:
```dotenv
# Supabase configuration
SUPABASE_URL="your-supabase-url"
SUPABASE_ANON_KEY="your-supabase-anonymous-key"
SUPABASE_KEY="your-supabase-key"
SUPABASE_JWT_SECRET="your-supabase-jwt-secret"
# Supabase Database configuration
SUPABASE_DB="your-supabase-db-url"
SUPABASE_DB_USER="your-db-username"
SUPABASE_DB_PORT="your-db-port"
SUPABASE_DB_PASSWORD="your-db-password"
```
1. `SUPABASE_URL`: This is the URL of your Supabase server instance. Your application will use this URL to interact with the Supabase service.
2. `SUPABASE_ANON_KEY`: This is the anonymous API key from Supabase, used for operations that don't require user authentication. Operations performed with this key are done as the anonymous role in the database.
3. `SUPABASE_KEY`: This is the API key with higher privileges from Supabase. It is generally used for server-side operations that require more permissions than an anonymous user.
4. `SUPABASE_JWT_SECRET`: This is the secret used to verify JWT tokens generated by Supabase. JWT or JSON Web Token is a standard method for securely transferring data between parties as a JSON object.
5. `SUPABASE_DB`: This is the URL for the database your Supabase server instance is using.
6. `SUPABASE_DB_USER`: This is the username used to authenticate with the Supabase database, typically 'postgres', the PostgreSQL default.
7. `SUPABASE_DB_PORT`: This is the port number where your Supabase database service is accessible. The default PostgreSQL port is 5432.
8. `SUPABASE_DB_PASSWORD`: This is the password used to authenticate the `SUPABASE_DB_USER` with the Supabase database.
For example, to run the supabase tests located in the flowy-test crate, put the `.env.test` file under the flowy-test folder.
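A minimal sketch of loading this file in a test, assuming the `dotenv` crate (already a dev-dependency above) and that `.env.test` sits in the working directory:

```rust
// Loads .env.test from the current directory and pulls out two of the values.
fn load_supabase_test_config() -> Option<(String, String)> {
  dotenv::from_filename(".env.test").ok()?;
  let url = std::env::var("SUPABASE_URL").ok()?;
  let anon_key = std::env::var("SUPABASE_ANON_KEY").ok()?;
  Some((url, anon_key))
}
```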


@ -0,0 +1,78 @@
@startuml
title "Application"
left to right direction
package "AppFlowy Application" {
[User]
}
cloud "Supabase Server" {
[RESTful Component]
[Realtime Component]
[Postgres DB]
}
database "LocalServer" {
[Local Server Component]
}
cloud "AppFlowy Cloud Server" {
[RESTful Component] as [AppFlowy RESTful Component]
[Realtime Component] as [AppFlowy Realtime Component]
[Postgres DB] as [AppFlowy Postgres DB]
}
User --> [AppFlowy Application]
[AppFlowy Application] --> [Local Server Component] : Connect
[AppFlowy Application] --> [RESTful Component] : RESTful API Communication
[AppFlowy Application] <..> [Realtime Component] : WebSocket Communication
[AppFlowy Application] --> [AppFlowy RESTful Component] : RESTful API Communication
[AppFlowy Application] <..> [AppFlowy Realtime Component] : WebSocket Communication
@enduml
@startuml
left to right direction
interface AppFlowyServer {
+ enable_sync(_enable: bool)
+ user_service(): Arc<dyn UserService>
+ folder_service(): Arc<dyn FolderCloudService>
+ database_service(): Arc<dyn DatabaseCloudService>
+ document_service(): Arc<dyn DocumentCloudService>
+ collab_storage(): Option<Arc<dyn RemoteCollabStorage>>
}
class SupabaseServer {
+ enable_sync(_enable: bool)
+ user_service(): Arc<dyn UserService>
+ folder_service(): Arc<dyn FolderCloudService>
+ database_service(): Arc<dyn DatabaseCloudService>
+ document_service(): Arc<dyn DocumentCloudService>
+ collab_storage(): Option<Arc<dyn RemoteCollabStorage>>
}
class SelfHostServer {
+ user_service(): Arc<dyn UserService>
+ folder_service(): Arc<dyn FolderCloudService>
+ database_service(): Arc<dyn DatabaseCloudService>
+ document_service(): Arc<dyn DocumentCloudService>
+ collab_storage(): Option<Arc<dyn RemoteCollabStorage>>
}
class LocalServer {
+ user_service(): Arc<dyn UserService>
+ folder_service(): Arc<dyn FolderCloudService>
+ database_service(): Arc<dyn DatabaseCloudService>
+ document_service(): Arc<dyn DocumentCloudService>
+ collab_storage(): Option<Arc<dyn RemoteCollabStorage>>
}
SupabaseServer -u-|> AppFlowyServer
SelfHostServer -u-|> AppFlowyServer
LocalServer -u-|> AppFlowyServer
@enduml


@ -0,0 +1,203 @@
@startuml
left to right direction
entity "af_roles" as roles {
id : SERIAL (PK)
name : TEXT
}
entity "af_permissions" as permissions {
id : SERIAL (PK)
name : VARCHAR(255)
access_level : INTEGER
description : TEXT
}
entity "af_role_permissions" as role_permissions {
role_id : INT (FK af_roles.id)
permission_id : INT (FK af_permissions.id)
--
(role_id, permission_id) : PK
}
entity "af_user" as user {
uuid : UUID (PK)
email : TEXT
uid : BIGSERIAL
name : TEXT
created_at : TIMESTAMP WITH TIME ZONE
}
entity "af_workspace" as workspace {
workspace_id : UUID (PK)
database_storage_id : UUID
owner_uid : BIGINT (FK af_user.uid)
created_at : TIMESTAMP WITH TIME ZONE
workspace_type : INTEGER
workspace_name : TEXT
}
entity "af_workspace_member" as workspace_member {
uid : BIGINT
role_id : INT (FK af_roles.id)
workspace_id : UUID (FK af_workspace.workspace_id)
created_at : TIMESTAMP WITH TIME ZONE
updated_at : TIMESTAMP WITH TIME ZONE
--
(uid, workspace_id) : PK
}
entity "af_collab" as collab {
oid : TEXT (PK)
owner_uid : BIGINT
workspace_id : UUID (FK af_workspace.workspace_id)
access_level : INTEGER
created_at : TIMESTAMP WITH TIME ZONE
}
entity "af_collab_update" as collab_update {
oid : TEXT (FK af_collab.oid)
key : BIGSERIAL
value : BYTEA
value_size : INTEGER
partition_key : INTEGER
uid : BIGINT
md5 : TEXT
created_at : TIMESTAMP WITH TIME ZONE
workspace_id : UUID (FK af_workspace.workspace_id)
--
(oid, key, partition_key) : PK
}
entity "af_collab_update_document" as af_collab_update_document {
Inherits af_collab_update (partition_key = 0)
}
entity "af_collab_update_database" as af_collab_update_database {
Inherits af_collab_update (partition_key = 1)
}
entity "af_collab_update_w_database" as af_collab_update_w_database {
Inherits af_collab_update (partition_key = 2)
}
entity "af_collab_update_folder" as af_collab_update_folder {
Inherits af_collab_update (partition_key = 3)
}
af_collab_update_document -u-|> collab_update
af_collab_update_database -u-|> collab_update
af_collab_update_w_database -u-|> collab_update
af_collab_update_folder -u-|> collab_update
entity "af_database_row_update" as database_row_update {
oid : TEXT
key : BIGSERIAL
value : BYTEA
value_size : INTEGER
partition_key : INTEGER
uid : BIGINT
md5 : TEXT
workspace_id : UUID (FK af_workspace.workspace_id)
--
(oid, key) : PK
}
entity "af_collab_member" as collab_member {
uid : BIGINT (FK af_user.uid)
oid : TEXT (FK af_collab.oid)
role_id : INTEGER (FK af_roles.id)
--
(uid, oid) : PK
}
entity "af_collab_statistics" as collab_statistics {
oid : TEXT (PK)
edit_count : BIGINT
}
entity "af_collab_snapshot" as collab_snapshot {
sid : BIGSERIAL (PK)
oid : TEXT (FK af_collab.oid)
name : TEXT
blob : BYTEA
blob_size : INTEGER
edit_count : BIGINT
created_at : TIMESTAMP WITH TIME ZONE
}
roles <-- role_permissions : FK
permissions <-u- role_permissions : FK
user <-- collab : FK
user <-- workspace : FK
user <-- collab_member : FK
roles <-- workspace_member : FK
workspace <-- workspace_member : FK
workspace <-- collab : FK
workspace <-- database_row_update : FK
collab <-- collab_update : FK
collab <-- collab_snapshot : FK
collab <-u- collab_member : FK
collab <-- collab_statistics : FK
roles <-- collab_member : FK
@enduml
@startuml
title Triggers in Database Schema
participant "af_user" as A
participant "af_workspace" as B
participant "af_workspace_member" as C
participant "af_collab" as D
participant "af_collab_update" as E
participant "af_collab_member" as F
participant "af_collab_statistics" as G
participant "af_collab_snapshot" as H
A -> B: create_af_workspace_trigger
note right
This trigger fires after an insert on af_user. It automatically creates a workspace
with the uid of the new user as the owner_uid.
end note
B -> C: manage_af_workspace_member_role_trigger
note right
This trigger fires after an insert on af_workspace. It automatically
creates a workspace member in the af_workspace_member table with the
role 'Owner'.
end note
E -> D: insert_into_af_collab_trigger
note right
This trigger fires before an insert on af_collab_update.
It checks if a corresponding collab exists in the af_collab table.
If not, it creates one with the oid, uid, and current timestamp.
end note
D -> F: insert_into_af_collab_member_trigger
note right
This trigger fires after an insert on af_collab.
It automatically adds the collab's owner to the af_collab_member
table with the role 'Owner'.
end note
E -> G: af_collab_update_edit_count_trigger
note right
This trigger fires after an insert on af_collab_update.
It increments the edit_count of the corresponding collab in
the af_collab_statistics table.
end note
H -> G: af_collab_snapshot_update_edit_count_trigger
note right
This trigger fires after an insert on af_collab_snapshot.
It sets the edit_count of the new snapshot to the current
edit_count of the collab in the af_collab_statistics table.
end note
@enduml


@ -1,11 +1,11 @@
use std::sync::Arc;
use appflowy_integrate::RemoteCollabStorage;
use collab_plugins::cloud_storage::RemoteCollabStorage;
use flowy_database2::deps::DatabaseCloudService;
use flowy_document2::deps::DocumentCloudService;
use flowy_folder2::deps::FolderCloudService;
use flowy_user::event_map::UserAuthService;
use flowy_database_deps::cloud::DatabaseCloudService;
use flowy_document_deps::cloud::DocumentCloudService;
use flowy_folder_deps::cloud::FolderCloudService;
use flowy_user_deps::cloud::UserService;
pub mod local_server;
mod request;
@ -14,23 +14,9 @@ pub mod self_host;
pub mod supabase;
pub mod util;
/// In order to run the supabase tests, you need to create a .env file in the root directory of this project
/// and add the following environment variables:
/// - SUPABASE_URL
/// - SUPABASE_ANON_KEY
/// - SUPABASE_KEY
/// - SUPABASE_JWT_SECRET
///
/// the .env file should look like this:
/// SUPABASE_URL=https://<your-supabase-url>.supabase.co
/// SUPABASE_ANON_KEY=<your-supabase-anon-key>
/// SUPABASE_KEY=<your-supabase-key>
/// SUPABASE_JWT_SECRET=<your-supabase-jwt-secret>
///
pub trait AppFlowyServer: Send + Sync + 'static {
fn enable_sync(&self, _enable: bool) {}
fn user_service(&self) -> Arc<dyn UserAuthService>;
fn user_service(&self) -> Arc<dyn UserService>;
fn folder_service(&self) -> Arc<dyn FolderCloudService>;
fn database_service(&self) -> Arc<dyn DatabaseCloudService>;
fn document_service(&self) -> Arc<dyn DocumentCloudService>;


@ -1,27 +1,35 @@
use flowy_database2::deps::{
use anyhow::Error;
use collab_plugins::cloud_storage::CollabType;
use flowy_database_deps::cloud::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use flowy_error::FlowyError;
use lib_infra::future::FutureResult;
pub(crate) struct LocalServerDatabaseCloudServiceImpl();
impl DatabaseCloudService for LocalServerDatabaseCloudServiceImpl {
fn get_collab_update(&self, _object_id: &str) -> FutureResult<CollabObjectUpdate, FlowyError> {
fn get_collab_update(
&self,
_object_id: &str,
_object_ty: CollabType,
) -> FutureResult<CollabObjectUpdate, Error> {
FutureResult::new(async move { Ok(vec![]) })
}
fn batch_get_collab_updates(
&self,
_object_ids: Vec<String>,
) -> FutureResult<CollabObjectUpdateByOid, FlowyError> {
_object_ty: CollabType,
) -> FutureResult<CollabObjectUpdateByOid, Error> {
FutureResult::new(async move { Ok(CollabObjectUpdateByOid::default()) })
}
fn get_collab_latest_snapshot(
&self,
_object_id: &str,
) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
) -> FutureResult<Option<DatabaseSnapshot>, Error> {
FutureResult::new(async move { Ok(None) })
}
}


@ -1,25 +1,23 @@
use flowy_document2::deps::{DocumentCloudService, DocumentData, DocumentSnapshot};
use flowy_error::FlowyError;
use anyhow::Error;
use flowy_document_deps::cloud::*;
use lib_infra::future::FutureResult;
pub(crate) struct LocalServerDocumentCloudServiceImpl();
impl DocumentCloudService for LocalServerDocumentCloudServiceImpl {
fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, Error> {
FutureResult::new(async move { Ok(vec![]) })
}
fn get_document_latest_snapshot(
&self,
_document_id: &str,
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
) -> FutureResult<Option<DocumentSnapshot>, Error> {
FutureResult::new(async move { Ok(None) })
}
fn get_document_data(
&self,
_document_id: &str,
) -> FutureResult<Option<DocumentData>, FlowyError> {
fn get_document_data(&self, _document_id: &str) -> FutureResult<Option<DocumentData>, Error> {
FutureResult::new(async move { Ok(None) })
}
}


@ -1,8 +1,9 @@
use anyhow::Error;
use std::sync::Arc;
use flowy_error::FlowyError;
use flowy_folder2::deps::{FolderCloudService, FolderData, FolderSnapshot, Workspace};
use flowy_folder2::gen_workspace_id;
use flowy_folder_deps::cloud::{
gen_workspace_id, FolderCloudService, FolderData, FolderSnapshot, Workspace,
};
use lib_infra::future::FutureResult;
use lib_infra::util::timestamp;
@ -13,11 +14,11 @@ pub(crate) struct LocalServerFolderCloudServiceImpl {
}
impl FolderCloudService for LocalServerFolderCloudServiceImpl {
fn create_workspace(&self, _uid: i64, name: &str) -> FutureResult<Workspace, FlowyError> {
fn create_workspace(&self, _uid: i64, name: &str) -> FutureResult<Workspace, Error> {
let name = name.to_string();
FutureResult::new(async move {
Ok(Workspace {
id: gen_workspace_id(),
id: gen_workspace_id().to_string(),
name: name.to_string(),
child_views: Default::default(),
created_at: timestamp(),
@ -25,22 +26,18 @@ impl FolderCloudService for LocalServerFolderCloudServiceImpl {
})
}
fn get_folder_data(&self, _workspace_id: &str) -> FutureResult<Option<FolderData>, FlowyError> {
fn get_folder_data(&self, _workspace_id: &str) -> FutureResult<Option<FolderData>, Error> {
FutureResult::new(async move { Ok(None) })
}
fn get_folder_latest_snapshot(
&self,
_workspace_id: &str,
) -> FutureResult<Option<FolderSnapshot>, FlowyError> {
) -> FutureResult<Option<FolderSnapshot>, Error> {
FutureResult::new(async move { Ok(None) })
}
fn get_folder_updates(
&self,
workspace_id: &str,
uid: i64,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
fn get_folder_updates(&self, workspace_id: &str, uid: i64) -> FutureResult<Vec<Vec<u8>>, Error> {
let weak_db = Arc::downgrade(&self.db);
let workspace_id = workspace_id.to_string();
FutureResult::new(async move {


@ -1,13 +1,11 @@
use anyhow::Error;
use std::sync::Arc;
use lazy_static::lazy_static;
use parking_lot::Mutex;
use flowy_error::FlowyError;
use flowy_user::entities::{
SignInParams, SignInResponse, SignUpParams, SignUpResponse, UpdateUserProfileParams, UserProfile,
};
use flowy_user::event_map::{UserAuthService, UserCredentials};
use flowy_user_deps::cloud::UserService;
use flowy_user_deps::entities::*;
use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
@ -19,19 +17,22 @@ lazy_static! {
}
pub(crate) struct LocalServerUserAuthServiceImpl {
#[allow(dead_code)]
pub db: Arc<dyn LocalServerDB>,
}
impl UserAuthService for LocalServerUserAuthServiceImpl {
fn sign_up(&self, params: BoxAny) -> FutureResult<SignUpResponse, FlowyError> {
impl UserService for LocalServerUserAuthServiceImpl {
fn sign_up(&self, params: BoxAny) -> FutureResult<SignUpResponse, Error> {
FutureResult::new(async move {
let params = params.unbox_or_error::<SignUpParams>()?;
let uid = ID_GEN.lock().next_id();
let workspace_id = uuid::Uuid::new_v4().to_string();
let user_workspace = UserWorkspace::new(&workspace_id, uid);
Ok(SignUpResponse {
user_id: uid,
name: params.name,
workspace_id,
latest_workspace: user_workspace.clone(),
user_workspaces: vec![user_workspace],
is_new: true,
email: Some(params.email),
token: None,
@ -39,8 +40,8 @@ impl UserAuthService for LocalServerUserAuthServiceImpl {
})
}
fn sign_in(&self, params: BoxAny) -> FutureResult<SignInResponse, FlowyError> {
let weak_db = Arc::downgrade(&self.db);
fn sign_in(&self, params: BoxAny) -> FutureResult<SignInResponse, Error> {
let db = self.db.clone();
FutureResult::new(async move {
let params: SignInParams = params.unbox_or_error::<SignInParams>()?;
let uid = match params.uid {
@ -48,24 +49,21 @@ impl UserAuthService for LocalServerUserAuthServiceImpl {
Some(uid) => uid,
};
// Get the workspace id from the database if it exists, otherwise generate a new one.
let workspace_id = weak_db
.upgrade()
.and_then(|db| db.get_user_profile(uid).ok())
.and_then(|user_profile| user_profile.map(|user_profile| user_profile.workspace_id))
.unwrap_or(uuid::Uuid::new_v4().to_string());
let user_workspace = db
.get_user_workspace(uid)?
.unwrap_or_else(make_user_workspace);
Ok(SignInResponse {
user_id: uid,
name: params.name,
workspace_id,
latest_workspace: user_workspace.clone(),
user_workspaces: vec![user_workspace],
email: Some(params.email),
token: None,
})
})
}
fn sign_out(&self, _token: Option<String>) -> FutureResult<(), FlowyError> {
fn sign_out(&self, _token: Option<String>) -> FutureResult<(), Error> {
FutureResult::new(async { Ok(()) })
}
@ -73,18 +71,47 @@ impl UserAuthService for LocalServerUserAuthServiceImpl {
&self,
_credential: UserCredentials,
_params: UpdateUserProfileParams,
) -> FutureResult<(), FlowyError> {
) -> FutureResult<(), Error> {
FutureResult::new(async { Ok(()) })
}
fn get_user_profile(
&self,
_credential: UserCredentials,
) -> FutureResult<Option<UserProfile>, FlowyError> {
) -> FutureResult<Option<UserProfile>, Error> {
FutureResult::new(async { Ok(None) })
}
fn check_user(&self, _credential: UserCredentials) -> FutureResult<(), FlowyError> {
fn get_user_workspaces(&self, _uid: i64) -> FutureResult<Vec<UserWorkspace>, Error> {
FutureResult::new(async { Ok(vec![]) })
}
fn check_user(&self, _credential: UserCredentials) -> FutureResult<(), Error> {
FutureResult::new(async { Ok(()) })
}
fn add_workspace_member(
&self,
_user_email: String,
_workspace_id: String,
) -> FutureResult<(), Error> {
FutureResult::new(async { Ok(()) })
}
fn remove_workspace_member(
&self,
_user_email: String,
_workspace_id: String,
) -> FutureResult<(), Error> {
FutureResult::new(async { Ok(()) })
}
}
fn make_user_workspace() -> UserWorkspace {
UserWorkspace {
id: uuid::Uuid::new_v4().to_string(),
name: "My Workspace".to_string(),
created_at: Default::default(),
database_storage_id: uuid::Uuid::new_v4().to_string(),
}
}


@ -1,17 +1,18 @@
use std::sync::Arc;
use appflowy_integrate::RemoteCollabStorage;
use collab_document::YrsDocAction;
use collab_plugins::cloud_storage::RemoteCollabStorage;
use parking_lot::RwLock;
use tokio::sync::mpsc;
use flowy_database2::deps::DatabaseCloudService;
use flowy_document2::deps::DocumentCloudService;
use flowy_database_deps::cloud::DatabaseCloudService;
use flowy_document_deps::cloud::DocumentCloudService;
use flowy_error::FlowyError;
use flowy_folder2::deps::FolderCloudService;
use flowy_user::entities::UserProfile;
use flowy_user::event_map::UserAuthService;
use flowy_user::services::database::{get_user_profile, open_collab_db, open_user_db};
use flowy_folder_deps::cloud::FolderCloudService;
// use flowy_user::services::database::{
// get_user_profile, get_user_workspace, open_collab_db, open_user_db,
// };
use flowy_user_deps::cloud::UserService;
use flowy_user_deps::entities::*;
use crate::local_server::impls::{
LocalServerDatabaseCloudServiceImpl, LocalServerDocumentCloudServiceImpl,
@ -21,18 +22,19 @@ use crate::AppFlowyServer;
pub trait LocalServerDB: Send + Sync + 'static {
fn get_user_profile(&self, uid: i64) -> Result<Option<UserProfile>, FlowyError>;
fn get_user_workspace(&self, uid: i64) -> Result<Option<UserWorkspace>, FlowyError>;
fn get_collab_updates(&self, uid: i64, object_id: &str) -> Result<Vec<Vec<u8>>, FlowyError>;
}
pub struct LocalServer {
storage_path: String,
local_db: Arc<dyn LocalServerDB>,
stop_tx: RwLock<Option<mpsc::Sender<()>>>,
}
impl LocalServer {
pub fn new(storage_path: &str) -> Self {
pub fn new(local_db: Arc<dyn LocalServerDB>) -> Self {
Self {
storage_path: storage_path.to_string(),
local_db,
stop_tx: Default::default(),
}
}
@ -46,18 +48,16 @@ impl LocalServer {
}
impl AppFlowyServer for LocalServer {
fn user_service(&self) -> Arc<dyn UserAuthService> {
let db = LocalServerDBImpl {
storage_path: self.storage_path.clone(),
};
Arc::new(LocalServerUserAuthServiceImpl { db: Arc::new(db) })
fn user_service(&self) -> Arc<dyn UserService> {
Arc::new(LocalServerUserAuthServiceImpl {
db: self.local_db.clone(),
})
}
fn folder_service(&self) -> Arc<dyn FolderCloudService> {
let db = LocalServerDBImpl {
storage_path: self.storage_path.clone(),
};
Arc::new(LocalServerFolderCloudServiceImpl { db: Arc::new(db) })
Arc::new(LocalServerFolderCloudServiceImpl {
db: self.local_db.clone(),
})
}
fn database_service(&self) -> Arc<dyn DatabaseCloudService> {
@ -72,25 +72,3 @@ impl AppFlowyServer for LocalServer {
None
}
}
struct LocalServerDBImpl {
storage_path: String,
}
impl LocalServerDB for LocalServerDBImpl {
fn get_user_profile(&self, uid: i64) -> Result<Option<UserProfile>, FlowyError> {
let sqlite_db = open_user_db(&self.storage_path, uid)?;
let user_profile = get_user_profile(&sqlite_db, uid).ok();
Ok(user_profile)
}
fn get_collab_updates(&self, uid: i64, object_id: &str) -> Result<Vec<Vec<u8>>, FlowyError> {
let collab_db = open_collab_db(&self.storage_path, uid)?;
let read_txn = collab_db.read_txn();
let updates = read_txn
.get_all_updates(uid, object_id)
.map_err(|e| FlowyError::internal().context(format!("Failed to open collab db: {:?}", e)))?;
Ok(updates)
}
}


@ -83,7 +83,7 @@ impl HttpRequestBuilder {
where
T: serde::Serialize,
{
let bytes = Bytes::from(serde_json::to_vec(&body).map_err(internal_error)?);
let bytes = Bytes::from(serde_json::to_vec(&body)?);
self.bytes(bytes)
}
@ -104,7 +104,10 @@ impl HttpRequestBuilder {
let builder = self.inner_send().await?;
match builder.response {
None => Err(unexpected_empty_payload(&builder.url)),
Some(data) => serde_json::from_slice(&data).map_err(internal_error),
Some(data) => {
let value = serde_json::from_slice(&data)?;
Ok(value)
},
}
}
@ -137,9 +140,9 @@ impl HttpRequestBuilder {
let _ = tx.send(response);
});
let response = rx.await.map_err(internal_error)?.map_err(internal_error)?;
let response = rx.await.map_err(internal_error)?;
tracing::trace!("Http Response: {:?}", response);
let flowy_response = flowy_response_from(response).await?;
let flowy_response = flowy_response_from(response?).await?;
let token = self.token();
self.middleware.iter().for_each(|middleware| {
middleware.receive_response(&token, &flowy_response);
@ -160,16 +163,16 @@ fn unexpected_empty_payload(url: &str) -> FlowyError {
}
async fn flowy_response_from(original: Response) -> Result<HttpResponse, FlowyError> {
let bytes = original.bytes().await.map_err(internal_error)?;
let response: HttpResponse = serde_json::from_slice(&bytes).map_err(internal_error)?;
let bytes = original.bytes().await?;
let response: HttpResponse = serde_json::from_slice(&bytes)?;
Ok(response)
}
#[allow(dead_code)]
async fn get_response_data(original: Response) -> Result<Bytes, FlowyError> {
if original.status() == http::StatusCode::OK {
let bytes = original.bytes().await.map_err(internal_error)?;
let response: HttpResponse = serde_json::from_slice(&bytes).map_err(internal_error)?;
let bytes = original.bytes().await?;
let response: HttpResponse = serde_json::from_slice(&bytes)?;
match response.error {
None => Ok(response.data),
Some(error) => Err(FlowyError::new(error.code, &error.msg)),


@ -1,27 +1,35 @@
use flowy_database2::deps::{
use anyhow::Error;
use collab_plugins::cloud_storage::CollabType;
use flowy_database_deps::cloud::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use flowy_error::FlowyError;
use lib_infra::future::FutureResult;
pub(crate) struct SelfHostedDatabaseCloudServiceImpl();
impl DatabaseCloudService for SelfHostedDatabaseCloudServiceImpl {
fn get_collab_update(&self, _object_id: &str) -> FutureResult<CollabObjectUpdate, FlowyError> {
fn get_collab_update(
&self,
_object_id: &str,
_object_ty: CollabType,
) -> FutureResult<CollabObjectUpdate, Error> {
FutureResult::new(async move { Ok(vec![]) })
}
fn batch_get_collab_updates(
&self,
_object_ids: Vec<String>,
) -> FutureResult<CollabObjectUpdateByOid, FlowyError> {
_object_ty: CollabType,
) -> FutureResult<CollabObjectUpdateByOid, Error> {
FutureResult::new(async move { Ok(CollabObjectUpdateByOid::default()) })
}
fn get_collab_latest_snapshot(
&self,
_object_id: &str,
) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
) -> FutureResult<Option<DatabaseSnapshot>, Error> {
FutureResult::new(async move { Ok(None) })
}
}


@ -1,25 +1,23 @@
use flowy_document2::deps::{DocumentCloudService, DocumentData, DocumentSnapshot};
use flowy_error::FlowyError;
use anyhow::Error;
use flowy_document_deps::cloud::*;
use lib_infra::future::FutureResult;
pub(crate) struct SelfHostedDocumentCloudServiceImpl();
impl DocumentCloudService for SelfHostedDocumentCloudServiceImpl {
fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
fn get_document_updates(&self, _document_id: &str) -> FutureResult<Vec<Vec<u8>>, Error> {
FutureResult::new(async move { Ok(vec![]) })
}
fn get_document_latest_snapshot(
&self,
_document_id: &str,
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
) -> FutureResult<Option<DocumentSnapshot>, Error> {
FutureResult::new(async move { Ok(None) })
}
fn get_document_data(
&self,
_document_id: &str,
) -> FutureResult<Option<DocumentData>, FlowyError> {
fn get_document_data(&self, _document_id: &str) -> FutureResult<Option<DocumentData>, Error> {
FutureResult::new(async move { Ok(None) })
}
}


@ -1,17 +1,18 @@
use flowy_error::FlowyError;
use flowy_folder2::deps::{FolderCloudService, FolderData, FolderSnapshot, Workspace};
use flowy_folder2::gen_workspace_id;
use anyhow::Error;
use flowy_folder_deps::cloud::{
gen_workspace_id, FolderCloudService, FolderData, FolderSnapshot, Workspace,
};
use lib_infra::future::FutureResult;
use lib_infra::util::timestamp;
pub(crate) struct SelfHostedServerFolderCloudServiceImpl();
impl FolderCloudService for SelfHostedServerFolderCloudServiceImpl {
fn create_workspace(&self, _uid: i64, name: &str) -> FutureResult<Workspace, FlowyError> {
fn create_workspace(&self, _uid: i64, name: &str) -> FutureResult<Workspace, Error> {
let name = name.to_string();
FutureResult::new(async move {
Ok(Workspace {
id: gen_workspace_id(),
id: gen_workspace_id().to_string(),
name: name.to_string(),
child_views: Default::default(),
created_at: timestamp(),
@ -19,14 +20,14 @@ impl FolderCloudService for SelfHostedServerFolderCloudServiceImpl {
})
}
fn get_folder_data(&self, _workspace_id: &str) -> FutureResult<Option<FolderData>, FlowyError> {
fn get_folder_data(&self, _workspace_id: &str) -> FutureResult<Option<FolderData>, Error> {
FutureResult::new(async move { Ok(None) })
}
fn get_folder_latest_snapshot(
&self,
_workspace_id: &str,
) -> FutureResult<Option<FolderSnapshot>, FlowyError> {
) -> FutureResult<Option<FolderSnapshot>, Error> {
FutureResult::new(async move { Ok(None) })
}
@ -34,7 +35,7 @@ impl FolderCloudService for SelfHostedServerFolderCloudServiceImpl {
&self,
_workspace_id: &str,
_uid: i64,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
) -> FutureResult<Vec<Vec<u8>>, Error> {
FutureResult::new(async move { Ok(vec![]) })
}


@ -1,8 +1,7 @@
use anyhow::Error;
use flowy_error::{ErrorCode, FlowyError};
use flowy_user::entities::{
SignInParams, SignInResponse, SignUpParams, SignUpResponse, UpdateUserProfileParams, UserProfile,
};
use flowy_user::event_map::{UserAuthService, UserCredentials};
use flowy_user_deps::cloud::UserService;
use flowy_user_deps::entities::*;
use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
@ -19,8 +18,8 @@ impl SelfHostedUserAuthServiceImpl {
}
}
impl UserAuthService for SelfHostedUserAuthServiceImpl {
fn sign_up(&self, params: BoxAny) -> FutureResult<SignUpResponse, FlowyError> {
impl UserService for SelfHostedUserAuthServiceImpl {
fn sign_up(&self, params: BoxAny) -> FutureResult<SignUpResponse, Error> {
let url = self.config.sign_up_url();
FutureResult::new(async move {
let params = params.unbox_or_error::<SignUpParams>()?;
@ -29,7 +28,7 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
})
}
fn sign_in(&self, params: BoxAny) -> FutureResult<SignInResponse, FlowyError> {
fn sign_in(&self, params: BoxAny) -> FutureResult<SignInResponse, Error> {
let url = self.config.sign_in_url();
FutureResult::new(async move {
let params = params.unbox_or_error::<SignInParams>()?;
@ -38,13 +37,10 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
})
}
fn sign_out(&self, token: Option<String>) -> FutureResult<(), FlowyError> {
fn sign_out(&self, token: Option<String>) -> FutureResult<(), Error> {
match token {
None => FutureResult::new(async {
Err(FlowyError::new(
ErrorCode::InvalidParams,
"Token should not be empty",
))
Err(FlowyError::new(ErrorCode::InvalidParams, "Token should not be empty").into())
}),
Some(token) => {
let token = token;
@ -61,13 +57,10 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
&self,
credential: UserCredentials,
params: UpdateUserProfileParams,
) -> FutureResult<(), FlowyError> {
) -> FutureResult<(), Error> {
match credential.token {
None => FutureResult::new(async {
Err(FlowyError::new(
ErrorCode::InvalidParams,
"Token should not be empty",
))
Err(FlowyError::new(ErrorCode::InvalidParams, "Token should not be empty").into())
}),
Some(token) => {
let token = token;
@ -83,14 +76,13 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
fn get_user_profile(
&self,
credential: UserCredentials,
) -> FutureResult<Option<UserProfile>, FlowyError> {
) -> FutureResult<Option<UserProfile>, Error> {
let url = self.config.user_profile_url();
FutureResult::new(async move {
match credential.token {
None => Err(FlowyError::new(
ErrorCode::UnexpectedEmpty,
"Token should not be empty",
)),
None => {
Err(FlowyError::new(ErrorCode::UnexpectedEmpty, "Token should not be empty").into())
},
Some(token) => {
let profile = get_user_profile_request(&token, &url).await?;
Ok(Some(profile))
@ -99,8 +91,34 @@ impl UserAuthService for SelfHostedUserAuthServiceImpl {
})
}
fn check_user(&self, _credential: UserCredentials) -> FutureResult<(), FlowyError> {
// TODO(nathan): implement the OpenAPI for this
fn get_user_workspaces(
&self,
_uid: i64,
) -> FutureResult<std::vec::Vec<flowy_user_deps::entities::UserWorkspace>, Error> {
// TODO(nathan): implement the RESTful API for this
todo!()
}
fn check_user(&self, _credential: UserCredentials) -> FutureResult<(), Error> {
// TODO(nathan): implement the RESTful API for this
FutureResult::new(async { Ok(()) })
}
fn add_workspace_member(
&self,
_user_email: String,
_workspace_id: String,
) -> FutureResult<(), Error> {
// TODO(nathan): implement the RESTful API for this
FutureResult::new(async { Ok(()) })
}
fn remove_workspace_member(
&self,
_user_email: String,
_workspace_id: String,
) -> FutureResult<(), Error> {
// TODO(nathan): implement the RESTful API for this
FutureResult::new(async { Ok(()) })
}
}


@ -1,11 +1,11 @@
use std::sync::Arc;
use appflowy_integrate::RemoteCollabStorage;
use collab_plugins::cloud_storage::RemoteCollabStorage;
use flowy_database2::deps::DatabaseCloudService;
use flowy_document2::deps::DocumentCloudService;
use flowy_folder2::deps::FolderCloudService;
use flowy_user::event_map::UserAuthService;
use flowy_database_deps::cloud::DatabaseCloudService;
use flowy_document_deps::cloud::DocumentCloudService;
use flowy_folder_deps::cloud::FolderCloudService;
use flowy_user_deps::cloud::UserService;
use crate::self_host::configuration::SelfHostedConfiguration;
use crate::self_host::impls::{
@ -25,7 +25,7 @@ impl SelfHostServer {
}
impl AppFlowyServer for SelfHostServer {
fn user_service(&self) -> Arc<dyn UserAuthService> {
fn user_service(&self) -> Arc<dyn UserService> {
Arc::new(SelfHostedUserAuthServiceImpl::new(self.config.clone()))
}


@ -0,0 +1,218 @@
use std::str::FromStr;
use std::sync::Arc;
use anyhow::Error;
use chrono::{DateTime, Utc};
use collab::preclude::merge_updates_v1;
use collab_plugins::cloud_storage::{
CollabObject, MsgId, RemoteCollabSnapshot, RemoteCollabState, RemoteCollabStorage,
RemoteUpdateReceiver,
};
use tokio::task::spawn_blocking;
use lib_infra::async_trait::async_trait;
use lib_infra::util::md5;
use crate::supabase::api::request::{
create_snapshot, get_latest_snapshot_from_server, get_updates_from_server,
FetchObjectUpdateAction, UpdateItem,
};
use crate::supabase::api::util::{ExtendedResponse, InsertParamsBuilder};
use crate::supabase::api::{PostgresWrapper, SupabaseServerService};
use crate::supabase::define::*;
pub struct RESTfulSupabaseCollabStorageImpl<T>(T);
impl<T> RESTfulSupabaseCollabStorageImpl<T> {
pub fn new(server: T) -> Self {
Self(server)
}
}
#[async_trait]
impl<T> RemoteCollabStorage for RESTfulSupabaseCollabStorageImpl<T>
where
T: SupabaseServerService,
{
fn is_enable(&self) -> bool {
true
}
async fn get_all_updates(&self, object: &CollabObject) -> Result<Vec<Vec<u8>>, Error> {
let postgrest = self.0.try_get_weak_postgrest()?;
let action = FetchObjectUpdateAction::new(object.id.clone(), object.ty.clone(), postgrest);
let updates = action.run().await?;
Ok(updates)
}
async fn get_latest_snapshot(&self, object_id: &str) -> Option<RemoteCollabSnapshot> {
let postgrest = self.0.try_get_postgrest().ok()?;
get_latest_snapshot_from_server(object_id, postgrest)
.await
.ok()?
}
async fn get_collab_state(&self, object_id: &str) -> Result<Option<RemoteCollabState>, Error> {
let postgrest = self.0.try_get_postgrest()?;
let json = postgrest
.from("af_collab_state")
.select("*")
.eq("oid", object_id)
.order("snapshot_created_at.desc".to_string())
.limit(1)
.execute()
.await?
.get_json()
.await?;
Ok(
json
.as_array()
.and_then(|array| array.first())
.and_then(|value| {
let created_at = value.get("snapshot_created_at").and_then(|created_at| {
created_at
.as_str()
.map(|id| DateTime::<Utc>::from_str(id).ok())
.and_then(|date| date)
})?;
let current_edit_count = value.get("current_edit_count").and_then(|id| id.as_i64())?;
let last_snapshot_edit_count = value
.get("last_snapshot_edit_count")
.and_then(|id| id.as_i64())?;
Some(RemoteCollabState {
current_edit_count,
last_snapshot_edit_count,
last_snapshot_created_at: created_at.timestamp(),
})
}),
)
}
async fn create_snapshot(&self, object: &CollabObject, snapshot: Vec<u8>) -> Result<i64, Error> {
let postgrest = self.0.try_get_postgrest()?;
create_snapshot(&postgrest, object, snapshot).await
}
async fn send_update(
&self,
object: &CollabObject,
_id: MsgId,
update: Vec<u8>,
) -> Result<(), Error> {
let postgrest = self.0.try_get_postgrest()?;
let workspace_id = object
.get_workspace_id()
.ok_or(anyhow::anyhow!("Invalid workspace id"))?;
send_update(workspace_id, object, update, &postgrest).await
}
async fn send_init_sync(
&self,
object: &CollabObject,
_id: MsgId,
init_update: Vec<u8>,
) -> Result<(), Error> {
let postgrest = self.0.try_get_postgrest()?;
let workspace_id = object
.get_workspace_id()
.ok_or(anyhow::anyhow!("Invalid workspace id"))?;
let update_items = get_updates_from_server(&object.id, &object.ty, postgrest.clone()).await?;
// If the update_items is empty, we can send the init_update directly
if update_items.is_empty() {
send_update(workspace_id, object, init_update, &postgrest).await?;
} else {
// Otherwise, merge the existing updates with the init update, then replace
// the merged rows in one flush_collab_updates RPC call.
let merge_result = spawn_blocking(move || merge_updates(update_items, init_update)).await??;
tracing::trace!("Merged updates count: {}", merge_result.merged_keys.len());
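// Safe to unwrap: update_items was non-empty, so merged_keys has at least one key.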
let override_key = merge_result.merged_keys.last().cloned().unwrap();
let value_size = merge_result.new_update.len() as i32;
let md5 = md5(&merge_result.new_update);
let new_update = format!("\\x{}", hex::encode(merge_result.new_update));
let params = InsertParamsBuilder::new()
.insert("oid", object.id.clone())
.insert("new_key", override_key)
.insert("new_value", new_update)
.insert("md5", md5)
.insert("value_size", value_size)
.insert("partition_key", partition_key(&object.ty))
.insert("uid", object.uid)
.insert("workspace_id", workspace_id)
.insert("removed_keys", merge_result.merged_keys)
.build();
postgrest
.rpc("flush_collab_updates", params)
.execute()
.await?
.success()
.await?;
}
Ok(())
}
async fn subscribe_remote_updates(&self, _object: &CollabObject) -> Option<RemoteUpdateReceiver> {
todo!()
}
}
async fn send_update(
workspace_id: String,
object: &CollabObject,
update: Vec<u8>,
postgrest: &Arc<PostgresWrapper>,
) -> Result<(), Error> {
let value_size = update.len() as i32;
let md5 = md5(&update);
let update = format!("\\x{}", hex::encode(update));
let builder = InsertParamsBuilder::new()
.insert("oid", object.id.clone())
.insert("partition_key", partition_key(&object.ty))
.insert("value", update)
.insert("uid", object.uid)
.insert("md5", md5)
.insert("workspace_id", workspace_id)
.insert("value_size", value_size);
let params = builder.build();
postgrest
.from(&table_name(&object.ty))
.insert(params)
.execute()
.await?
.success()
.await?;
Ok(())
}
fn merge_updates(update_items: Vec<UpdateItem>, new_update: Vec<u8>) -> Result<MergeResult, Error> {
let mut updates = vec![];
let mut merged_keys = vec![];
for item in update_items {
merged_keys.push(item.key);
updates.push(item.value);
}
if !new_update.is_empty() {
updates.push(new_update);
}
let updates = updates
.iter()
.map(|update| update.as_ref())
.collect::<Vec<&[u8]>>();
let new_update = merge_updates_v1(&updates)?;
Ok(MergeResult {
merged_keys,
new_update,
})
}
struct MergeResult {
merged_keys: Vec<i64>,
new_update: Vec<u8>,
}


@ -0,0 +1,91 @@
use anyhow::Error;
use collab_plugins::cloud_storage::CollabType;
use tokio::sync::oneshot::channel;
use flowy_database_deps::cloud::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use lib_infra::future::FutureResult;
use crate::supabase::api::request::{
get_latest_snapshot_from_server, BatchFetchObjectUpdateAction, FetchObjectUpdateAction,
};
use crate::supabase::api::SupabaseServerService;
pub struct RESTfulSupabaseDatabaseServiceImpl<T> {
server: T,
}
impl<T> RESTfulSupabaseDatabaseServiceImpl<T> {
pub fn new(server: T) -> Self {
Self { server }
}
}
impl<T> DatabaseCloudService for RESTfulSupabaseDatabaseServiceImpl<T>
where
T: SupabaseServerService,
{
fn get_collab_update(
&self,
object_id: &str,
object_ty: CollabType,
) -> FutureResult<CollabObjectUpdate, Error> {
let try_get_postgrest = self.server.try_get_weak_postgrest();
let object_id = object_id.to_string();
let (tx, rx) = channel();
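// Run the fetch on the runtime and hand the result back through a oneshot channel.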
tokio::spawn(async move {
tx.send(
async move {
let postgrest = try_get_postgrest?;
FetchObjectUpdateAction::new(object_id.to_string(), object_ty, postgrest)
.run_with_fix_interval(5, 10)
.await
}
.await,
)
});
FutureResult::new(async { rx.await? })
}
fn batch_get_collab_updates(
&self,
object_ids: Vec<String>,
object_ty: CollabType,
) -> FutureResult<CollabObjectUpdateByOid, Error> {
let try_get_postgrest = self.server.try_get_weak_postgrest();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
let postgrest = try_get_postgrest?;
BatchFetchObjectUpdateAction::new(object_ids, object_ty, postgrest)
.run()
.await
}
.await,
)
});
FutureResult::new(async { rx.await? })
}
fn get_collab_latest_snapshot(
&self,
object_id: &str,
) -> FutureResult<Option<DatabaseSnapshot>, Error> {
let try_get_postgrest = self.server.try_get_postgrest();
let object_id = object_id.to_string();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let snapshot = get_latest_snapshot_from_server(&object_id, postgrest)
.await?
.map(|snapshot| DatabaseSnapshot {
snapshot_id: snapshot.sid,
database_id: snapshot.oid,
data: snapshot.blob,
created_at: snapshot.created_at,
});
Ok(snapshot)
})
}
}


@ -0,0 +1,82 @@
use anyhow::Error;
use collab::core::origin::CollabOrigin;
use collab_document::blocks::DocumentData;
use collab_document::document::Document;
use collab_plugins::cloud_storage::CollabType;
use tokio::sync::oneshot::channel;
use flowy_document_deps::cloud::{DocumentCloudService, DocumentSnapshot};
use lib_infra::future::FutureResult;
use crate::supabase::api::request::{get_latest_snapshot_from_server, FetchObjectUpdateAction};
use crate::supabase::api::SupabaseServerService;
pub struct RESTfulSupabaseDocumentServiceImpl<T>(T);
impl<T> RESTfulSupabaseDocumentServiceImpl<T> {
pub fn new(server: T) -> Self {
Self(server)
}
}
impl<T> DocumentCloudService for RESTfulSupabaseDocumentServiceImpl<T>
where
T: SupabaseServerService,
{
fn get_document_updates(&self, document_id: &str) -> FutureResult<Vec<Vec<u8>>, Error> {
let try_get_postgrest = self.0.try_get_weak_postgrest();
let document_id = document_id.to_string();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
let postgrest = try_get_postgrest?;
let action = FetchObjectUpdateAction::new(document_id, CollabType::Document, postgrest);
action.run_with_fix_interval(5, 5).await
}
.await,
)
});
FutureResult::new(async { rx.await? })
}
fn get_document_latest_snapshot(
&self,
document_id: &str,
) -> FutureResult<Option<DocumentSnapshot>, Error> {
let try_get_postgrest = self.0.try_get_postgrest();
let document_id = document_id.to_string();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let snapshot = get_latest_snapshot_from_server(&document_id, postgrest)
.await?
.map(|snapshot| DocumentSnapshot {
snapshot_id: snapshot.sid,
document_id: snapshot.oid,
data: snapshot.blob,
created_at: snapshot.created_at,
});
Ok(snapshot)
})
}
fn get_document_data(&self, document_id: &str) -> FutureResult<Option<DocumentData>, Error> {
let try_get_postgrest = self.0.try_get_weak_postgrest();
let document_id = document_id.to_string();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
let postgrest = try_get_postgrest?;
let action =
FetchObjectUpdateAction::new(document_id.clone(), CollabType::Document, postgrest);
let updates = action.run_with_fix_interval(5, 10).await?;
let document =
Document::from_updates(CollabOrigin::Empty, updates, &document_id, vec![])?;
Ok(document.get_document_data().ok())
}
.await,
)
});
FutureResult::new(async { rx.await? })
}
}


@ -0,0 +1,149 @@
use std::str::FromStr;
use anyhow::Error;
use chrono::{DateTime, Utc};
use collab::core::origin::CollabOrigin;
use collab_plugins::cloud_storage::CollabType;
use serde_json::Value;
use tokio::sync::oneshot::channel;
use flowy_folder_deps::cloud::{
gen_workspace_id, Folder, FolderCloudService, FolderData, FolderSnapshot, Workspace,
};
use lib_infra::future::FutureResult;
use crate::supabase::api::request::{
get_latest_snapshot_from_server, get_updates_from_server, FetchObjectUpdateAction,
};
use crate::supabase::api::util::{ExtendedResponse, InsertParamsBuilder};
use crate::supabase::api::SupabaseServerService;
use crate::supabase::define::*;
pub struct RESTfulSupabaseFolderServiceImpl<T>(T);
impl<T> RESTfulSupabaseFolderServiceImpl<T> {
pub fn new(server: T) -> Self {
Self(server)
}
}
impl<T> FolderCloudService for RESTfulSupabaseFolderServiceImpl<T>
where
T: SupabaseServerService,
{
fn create_workspace(&self, uid: i64, name: &str) -> FutureResult<Workspace, Error> {
let try_get_postgrest = self.0.try_get_postgrest();
let name = name.to_string();
let new_workspace_id = gen_workspace_id().to_string();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let insert_params = InsertParamsBuilder::new()
.insert(OWNER_USER_UID, uid)
.insert(WORKSPACE_ID, new_workspace_id.clone())
.insert(WORKSPACE_NAME, name.to_string())
.build();
postgrest
.from(WORKSPACE_TABLE)
.insert(insert_params)
.execute()
.await?
.success()
.await?;
// read the workspace
let json = postgrest
.from(WORKSPACE_TABLE)
.select("*")
.eq(WORKSPACE_ID, new_workspace_id)
.execute()
.await?
.get_json()
.await?;
let workspace = workspace_from_json_value(json)?;
Ok(workspace)
})
}
fn get_folder_data(&self, workspace_id: &str) -> FutureResult<Option<FolderData>, Error> {
let try_get_postgrest = self.0.try_get_postgrest();
let workspace_id = workspace_id.to_string();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
get_updates_from_server(&workspace_id, &CollabType::Folder, postgrest)
.await
.map(|updates| {
let updates = updates.into_iter().map(|item| item.value).collect();
let folder =
Folder::from_collab_raw_data(CollabOrigin::Empty, updates, &workspace_id, vec![])
.ok()?;
folder.get_folder_data()
})
})
}
fn get_folder_latest_snapshot(
&self,
workspace_id: &str,
) -> FutureResult<Option<FolderSnapshot>, Error> {
let try_get_postgrest = self.0.try_get_postgrest();
let workspace_id = workspace_id.to_string();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let snapshot = get_latest_snapshot_from_server(&workspace_id, postgrest)
.await?
.map(|snapshot| FolderSnapshot {
snapshot_id: snapshot.sid,
database_id: snapshot.oid,
data: snapshot.blob,
created_at: snapshot.created_at,
});
Ok(snapshot)
})
}
fn get_folder_updates(&self, workspace_id: &str, _uid: i64) -> FutureResult<Vec<Vec<u8>>, Error> {
let try_get_postgrest = self.0.try_get_weak_postgrest();
let workspace_id = workspace_id.to_string();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
let postgrest = try_get_postgrest?;
let action = FetchObjectUpdateAction::new(workspace_id, CollabType::Folder, postgrest);
action.run_with_fix_interval(5, 10).await
}
.await,
)
});
FutureResult::new(async { rx.await? })
}
fn service_name(&self) -> String {
"Supabase".to_string()
}
}
fn workspace_from_json_value(value: Value) -> Result<Workspace, Error> {
let json = value
.as_array()
.and_then(|values| values.first())
.ok_or(anyhow::anyhow!("workspace not found"))?;
Ok(Workspace {
id: json
.get(WORKSPACE_ID)
.ok_or(anyhow::anyhow!("workspace id not found"))?
.to_string(),
name: json
.get(WORKSPACE_NAME)
.map(|value| value.to_string())
.unwrap_or_default(),
child_views: Default::default(),
created_at: json
.get(CREATED_AT)
.and_then(|value| value.as_str())
.and_then(|s| DateTime::<Utc>::from_str(s).ok())
.map(|date| date.timestamp())
.unwrap_or_default(),
})
}


@ -2,10 +2,14 @@ pub use collab_storage::*;
pub use database::*;
pub use document::*;
pub use folder::*;
pub use postgres_server::*;
pub use user::*;
mod collab_storage;
mod database;
mod document;
mod folder;
mod postgres_server;
mod request;
mod user;
mod util;


@ -0,0 +1,82 @@
use anyhow::Error;
use parking_lot::RwLock;
use std::ops::Deref;
use std::sync::{Arc, Weak};
use flowy_error::{ErrorCode, FlowyError};
use postgrest::Postgrest;
use flowy_server_config::supabase_config::SupabaseConfiguration;
/// A wrapper around `Postgrest` that allows us to extend its functionality.
pub struct PostgresWrapper(Postgrest);
impl Deref for PostgresWrapper {
type Target = Postgrest;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct RESTfulPostgresServer {
pub postgrest: Arc<PostgresWrapper>,
}
impl RESTfulPostgresServer {
pub fn new(config: SupabaseConfiguration) -> Self {
let url = format!("{}/rest/v1", config.url);
let auth = format!("Bearer {}", config.anon_key);
let postgrest = Postgrest::new(url)
.insert_header("apikey", config.anon_key)
.insert_header("Authorization", auth);
Self {
postgrest: Arc::new(PostgresWrapper(postgrest)),
}
}
}
pub trait SupabaseServerService: Send + Sync + 'static {
fn get_postgrest(&self) -> Option<Arc<PostgresWrapper>>;
fn try_get_postgrest(&self) -> Result<Arc<PostgresWrapper>, Error>;
fn try_get_weak_postgrest(&self) -> Result<Weak<PostgresWrapper>, Error>;
}
#[derive(Clone)]
pub struct SupabaseServerServiceImpl(pub Arc<RwLock<Option<Arc<RESTfulPostgresServer>>>>);
impl SupabaseServerServiceImpl {
pub fn new(postgrest: Arc<RESTfulPostgresServer>) -> Self {
Self(Arc::new(RwLock::new(Some(postgrest))))
}
}
impl SupabaseServerService for SupabaseServerServiceImpl {
fn get_postgrest(&self) -> Option<Arc<PostgresWrapper>> {
self
.0
.read()
.as_ref()
.map(|server| server.postgrest.clone())
}
fn try_get_postgrest(&self) -> Result<Arc<PostgresWrapper>, Error> {
self
.0
.read()
.as_ref()
.map(|server| server.postgrest.clone())
.ok_or_else(|| {
FlowyError::new(
ErrorCode::SupabaseSyncRequired,
"Supabase sync is disabled, please enable it first",
)
.into()
})
}
fn try_get_weak_postgrest(&self) -> Result<Weak<PostgresWrapper>, Error> {
let postgrest = self.try_get_postgrest()?;
Ok(Arc::downgrade(&postgrest))
}
}
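A minimal wiring sketch, assuming a `SupabaseConfiguration` has already been loaded (for example from the environment):

```rust
// Hypothetical helper; config construction is out of scope here.
fn make_service(config: SupabaseConfiguration) -> SupabaseServerServiceImpl {
  let server = Arc::new(RESTfulPostgresServer::new(config));
  SupabaseServerServiceImpl::new(server)
}
```

Keeping the server behind `RwLock<Option<...>>` lets the application detach the Supabase backend at runtime; callers then receive the `SupabaseSyncRequired` error instead of panicking.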


@ -0,0 +1,304 @@
use std::future::Future;
use std::iter::Take;
use std::pin::Pin;
use std::str::FromStr;
use std::sync::{Arc, Weak};
use std::time::Duration;
use anyhow::Error;
use chrono::{DateTime, Utc};
use collab_plugins::cloud_storage::{CollabObject, CollabType, RemoteCollabSnapshot};
use serde_json::Value;
use tokio_retry::strategy::FixedInterval;
use tokio_retry::{Action, Retry};
use flowy_database_deps::cloud::{CollabObjectUpdate, CollabObjectUpdateByOid};
use lib_infra::util::md5;
use crate::supabase::api::util::{ExtendedResponse, InsertParamsBuilder};
use crate::supabase::api::PostgresWrapper;
use crate::supabase::define::*;
pub struct FetchObjectUpdateAction {
object_id: String,
object_ty: CollabType,
postgrest: Weak<PostgresWrapper>,
}
impl FetchObjectUpdateAction {
pub fn new(object_id: String, object_ty: CollabType, postgrest: Weak<PostgresWrapper>) -> Self {
Self {
postgrest,
object_id,
object_ty,
}
}
pub fn run(self) -> Retry<Take<FixedInterval>, FetchObjectUpdateAction> {
let retry_strategy = FixedInterval::new(Duration::from_secs(5)).take(3);
Retry::spawn(retry_strategy, self)
}
pub fn run_with_fix_interval(
self,
secs: u64,
times: usize,
) -> Retry<Take<FixedInterval>, FetchObjectUpdateAction> {
let retry_strategy = FixedInterval::new(Duration::from_secs(secs)).take(times);
Retry::spawn(retry_strategy, self)
}
}
impl Action for FetchObjectUpdateAction {
type Future = Pin<Box<dyn Future<Output = Result<Self::Item, Self::Error>> + Send>>;
type Item = CollabObjectUpdate;
type Error = anyhow::Error;
fn run(&mut self) -> Self::Future {
let weak_postgres = self.postgrest.clone();
let object_id = self.object_id.clone();
let object_ty = self.object_ty.clone();
Box::pin(async move {
match weak_postgres.upgrade() {
None => Ok(vec![]),
Some(postgrest) => {
let items = get_updates_from_server(&object_id, &object_ty, postgrest).await?;
Ok(items.into_iter().map(|item| item.value).collect())
},
}
})
}
}
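A usage sketch, assuming a strong `Arc<PostgresWrapper>` is in scope; the action holds only a `Weak` reference, so a dropped server short-circuits to an empty result instead of erroring:

```rust
// Hypothetical call site: three attempts, five seconds apart (the defaults in `run`).
let action = FetchObjectUpdateAction::new(
  object_id.to_string(),
  CollabType::Folder,
  Arc::downgrade(&postgrest),
);
let updates: CollabObjectUpdate = action.run().await?;
```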
pub struct BatchFetchObjectUpdateAction {
object_ids: Vec<String>,
object_ty: CollabType,
postgrest: Weak<PostgresWrapper>,
}
impl BatchFetchObjectUpdateAction {
pub fn new(
object_ids: Vec<String>,
object_ty: CollabType,
postgrest: Weak<PostgresWrapper>,
) -> Self {
Self {
postgrest,
object_ty,
object_ids,
}
}
pub fn run(self) -> Retry<Take<FixedInterval>, BatchFetchObjectUpdateAction> {
let retry_strategy = FixedInterval::new(Duration::from_secs(5)).take(3);
Retry::spawn(retry_strategy, self)
}
}
impl Action for BatchFetchObjectUpdateAction {
type Future = Pin<Box<dyn Future<Output = Result<Self::Item, Self::Error>> + Send>>;
type Item = CollabObjectUpdateByOid;
type Error = anyhow::Error;
fn run(&mut self) -> Self::Future {
let weak_postgrest = self.postgrest.clone();
let object_ids = self.object_ids.clone();
let object_ty = self.object_ty.clone();
Box::pin(async move {
match weak_postgrest.upgrade() {
None => Ok(CollabObjectUpdateByOid::default()),
Some(server) => batch_get_updates_from_server(object_ids, &object_ty, server).await,
}
})
}
}
pub async fn create_snapshot(
postgrest: &Arc<PostgresWrapper>,
object: &CollabObject,
snapshot: Vec<u8>,
) -> Result<i64, Error> {
let value_size = snapshot.len() as i32;
let snapshot = format!("\\x{}", hex::encode(snapshot));
postgrest
.from(AF_COLLAB_SNAPSHOT_TABLE)
.insert(
InsertParamsBuilder::new()
.insert(AF_COLLAB_SNAPSHOT_OID_COLUMN, object.id.clone())
.insert("name", object.ty.to_string())
.insert(AF_COLLAB_SNAPSHOT_BLOB_COLUMN, snapshot)
.insert(AF_COLLAB_SNAPSHOT_BLOB_SIZE_COLUMN, value_size)
.build(),
)
.execute()
.await?
.success()
.await?;
Ok(1)
}
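A quick check of the bytea convention used when inserting the snapshot blob; Postgres accepts binary payloads as `\x`-prefixed hex strings:

```rust
let blob = vec![0x01u8, 0x02, 0x03];
let encoded = format!("\\x{}", hex::encode(&blob));
assert_eq!(encoded, "\\x010203");
```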
pub async fn get_latest_snapshot_from_server(
object_id: &str,
postgrest: Arc<PostgresWrapper>,
) -> Result<Option<RemoteCollabSnapshot>, Error> {
let json = postgrest
.from(AF_COLLAB_SNAPSHOT_TABLE)
.select(format!(
"{},{},{}",
AF_COLLAB_SNAPSHOT_ID_COLUMN,
AF_COLLAB_SNAPSHOT_BLOB_COLUMN,
AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN
))
.order(format!("{}.desc", AF_COLLAB_SNAPSHOT_ID_COLUMN))
.limit(1)
.eq(AF_COLLAB_SNAPSHOT_OID_COLUMN, object_id)
.execute()
.await?
.get_json()
.await?;
let snapshot = json
.as_array()
.and_then(|array| array.first())
.and_then(|value| {
let blob = value
.get("blob")
.and_then(|blob| blob.as_str())
.and_then(decode_hex_string)?;
let sid = value.get("sid").and_then(|id| id.as_i64())?;
let created_at = value.get("created_at").and_then(|created_at| {
created_at
.as_str()
.and_then(|s| DateTime::<Utc>::from_str(s).ok())
})?;
Some(RemoteCollabSnapshot {
sid,
oid: object_id.to_string(),
blob,
created_at: created_at.timestamp(),
})
});
Ok(snapshot)
}
pub async fn batch_get_updates_from_server(
object_ids: Vec<String>,
object_ty: &CollabType,
postgrest: Arc<PostgresWrapper>,
) -> Result<CollabObjectUpdateByOid, Error> {
let json = postgrest
.from(table_name(object_ty))
.select("oid, key, value, md5")
.order(format!("{}.asc", AF_COLLAB_KEY_COLUMN))
.in_("oid", object_ids)
.execute()
.await?
.get_json()
.await?;
let mut updates_by_oid = CollabObjectUpdateByOid::new();
if let Some(records) = json.as_array() {
for record in records {
if let Some(oid) = record.get("oid").and_then(|value| value.as_str()) {
if let Ok(updates) = parse_updates_from_json(record.clone()) {
let object_updates = updates_by_oid
.entry(oid.to_string())
.or_insert_with(Vec::new);
tracing::debug!("get updates from server: {:?}", record);
for update in updates {
object_updates.push(update.value);
}
}
}
}
}
Ok(updates_by_oid)
}
pub async fn get_updates_from_server(
object_id: &str,
object_ty: &CollabType,
postgrest: Arc<PostgresWrapper>,
) -> Result<Vec<UpdateItem>, Error> {
let json = postgrest
.from(table_name(object_ty))
.select("key, value, md5")
.order(format!("{}.asc", AF_COLLAB_KEY_COLUMN))
.eq("oid", object_id)
.execute()
.await?
.get_json()
.await?;
parse_updates_from_json(json)
}
/// Expected json format:
/// ```json
/// [
/// {
/// "key": 1,
/// "value": "\\x...",
/// "md5": "..."
/// },
/// {
/// "key": 2,
/// "value": "\\x...",
/// "md5": "..."
/// },
/// ...
/// ]
/// ```
fn parse_updates_from_json(json: Value) -> Result<Vec<UpdateItem>, Error> {
let mut updates = vec![];
match json.as_array() {
None => {
updates.push(parse_update_from_json(&json)?);
},
Some(values) => {
for value in values {
updates.push(parse_update_from_json(value)?);
}
},
}
Ok(updates)
}
fn parse_update_from_json(json: &Value) -> Result<UpdateItem, Error> {
let some_record = json
.get("value")
.and_then(|value| value.as_str())
.and_then(decode_hex_string);
let some_key = json.get("key").and_then(|value| value.as_i64());
if let (Some(value), Some(key)) = (some_record, some_key) {
// Check that the md5 of the value received from the server is equal to the md5
// of the value calculated locally.
if let Some(expected_md5) = json.get("md5").and_then(|v| v.as_str()) {
let value_md5 = md5(&value);
debug_assert!(
value_md5 == expected_md5,
"md5 not match: {} != {}",
value_md5,
expected_md5
);
}
Ok(UpdateItem { key, value })
} else {
Err(anyhow::anyhow!(
"missing key or value column in json: {:?}",
json
))
}
}
pub struct UpdateItem {
pub key: i64,
pub value: Vec<u8>,
}
fn decode_hex_string(s: &str) -> Option<Vec<u8>> {
let s = s.strip_prefix("\\x")?;
hex::decode(s).ok()
}
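And the inverse direction, exercised by `decode_hex_string` above:

```rust
// Values arrive from PostgREST as "\x"-prefixed hex strings.
assert_eq!(decode_hex_string("\\x010203"), Some(vec![0x01, 0x02, 0x03]));
// Anything without the prefix is rejected.
assert_eq!(decode_hex_string("010203"), None);
```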


@ -0,0 +1,308 @@
use std::str::FromStr;
use std::sync::Arc;
use anyhow::Error;
use uuid::Uuid;
use flowy_user_deps::cloud::*;
use flowy_user_deps::entities::*;
use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
use crate::supabase::api::util::{ExtendedResponse, InsertParamsBuilder};
use crate::supabase::api::{PostgresWrapper, SupabaseServerService};
use crate::supabase::define::*;
use crate::supabase::entities::GetUserProfileParams;
use crate::supabase::entities::UidResponse;
use crate::supabase::entities::UserProfileResponse;
pub struct RESTfulSupabaseUserAuthServiceImpl<T> {
server: T,
}
impl<T> RESTfulSupabaseUserAuthServiceImpl<T> {
pub fn new(server: T) -> Self {
Self { server }
}
}
impl<T> UserService for RESTfulSupabaseUserAuthServiceImpl<T>
where
T: SupabaseServerService,
{
fn sign_up(&self, params: BoxAny) -> FutureResult<SignUpResponse, Error> {
let try_get_postgrest = self.server.try_get_postgrest();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let params = third_party_params_from_box_any(params)?;
let is_new_user = postgrest
.from(USER_TABLE)
.select("uid")
.eq("uuid", params.uuid.to_string())
.execute()
.await?
.get_value::<Vec<UidResponse>>()
.await?
.is_empty();
// Insert the user if it's a new user. After the user is inserted, we can query the user profile
// and workspaces. The profile and workspaces are created by the database trigger.
if is_new_user {
let insert_params = InsertParamsBuilder::new()
.insert(USER_UUID, params.uuid.to_string())
.insert(USER_EMAIL, params.email)
.build();
let resp = postgrest
.from(USER_TABLE)
.insert(insert_params)
.execute()
.await?
.success_with_body()
.await?;
tracing::debug!("Create user response: {:?}", resp);
}
// Query the user profile and workspaces
tracing::debug!("user uuid: {}", params.uuid);
let user_profile =
get_user_profile(postgrest.clone(), GetUserProfileParams::Uuid(params.uuid))
.await?
.ok_or_else(|| anyhow::anyhow!("user profile not found after sign up"))?;
let user_workspaces = get_user_workspaces(postgrest.clone(), user_profile.uid).await?;
let latest_workspace = user_workspaces
.iter()
.find(|user_workspace| user_workspace.id == user_profile.latest_workspace_id)
.cloned();
Ok(SignUpResponse {
user_id: user_profile.uid,
name: user_profile.name,
latest_workspace: latest_workspace
.ok_or_else(|| anyhow::anyhow!("latest workspace not found"))?,
user_workspaces,
is_new: is_new_user,
email: Some(user_profile.email),
token: None,
})
})
}
fn sign_in(&self, params: BoxAny) -> FutureResult<SignInResponse, Error> {
let try_get_postgrest = self.server.try_get_postgrest();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let params = third_party_params_from_box_any(params)?;
let uuid = params.uuid;
let user_profile = get_user_profile(postgrest.clone(), GetUserProfileParams::Uuid(uuid))
.await?
.ok_or_else(|| anyhow::anyhow!("user profile not found during sign in"))?;
let user_workspaces = get_user_workspaces(postgrest.clone(), user_profile.uid).await?;
let latest_workspace = user_workspaces
.iter()
.find(|user_workspace| user_workspace.id == user_profile.latest_workspace_id)
.cloned();
Ok(SignInResponse {
user_id: user_profile.uid,
name: "".to_string(),
latest_workspace: latest_workspace
.ok_or_else(|| anyhow::anyhow!("latest workspace not found"))?,
user_workspaces,
email: None,
token: None,
})
})
}
fn sign_out(&self, _token: Option<String>) -> FutureResult<(), Error> {
FutureResult::new(async { Ok(()) })
}
fn update_user(
&self,
_credential: UserCredentials,
params: UpdateUserProfileParams,
) -> FutureResult<(), Error> {
let try_get_postgrest = self.server.try_get_postgrest();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
update_user_profile(postgrest, params).await?;
Ok(())
})
}
fn get_user_profile(
&self,
credential: UserCredentials,
) -> FutureResult<Option<UserProfile>, Error> {
let try_get_postgrest = self.server.try_get_postgrest();
let uid = credential.uid;
FutureResult::new(async move {
let uid = uid.ok_or_else(|| anyhow::anyhow!("uid is required"))?;
let postgrest = try_get_postgrest?;
let user_profile_resp = get_user_profile(postgrest, GetUserProfileParams::Uid(uid)).await?;
match user_profile_resp {
None => Ok(None),
Some(user_profile_resp) => Ok(Some(UserProfile {
id: user_profile_resp.uid,
email: user_profile_resp.email,
name: user_profile_resp.name,
token: "".to_string(),
icon_url: "".to_string(),
openai_key: "".to_string(),
workspace_id: user_profile_resp.latest_workspace_id,
auth_type: AuthType::Supabase,
})),
}
})
}
fn get_user_workspaces(&self, uid: i64) -> FutureResult<Vec<UserWorkspace>, Error> {
let try_get_postgrest = self.server.try_get_postgrest();
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
let user_workspaces = get_user_workspaces(postgrest, uid).await?;
Ok(user_workspaces)
})
}
fn check_user(&self, credential: UserCredentials) -> FutureResult<(), Error> {
let try_get_postgrest = self.server.try_get_postgrest();
let uuid = credential.uuid.and_then(|uuid| Uuid::from_str(&uuid).ok());
let uid = credential.uid;
FutureResult::new(async move {
let postgrest = try_get_postgrest?;
check_user(postgrest, uid, uuid).await?;
Ok(())
})
}
fn add_workspace_member(
&self,
_user_email: String,
_workspace_id: String,
) -> FutureResult<(), Error> {
todo!()
}
fn remove_workspace_member(
&self,
_user_email: String,
_workspace_id: String,
) -> FutureResult<(), Error> {
todo!()
}
}
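A sign-up sketch that mirrors the removed test near the end of this diff: third-party auth parameters travel as a string map inside a `BoxAny`. The map keys here are an assumption based on that test and on `third_party_params_from_box_any` above:

```rust
// Hypothetical call site; `user_service` construction is elided.
let mut params = std::collections::HashMap::new();
params.insert("uuid".to_string(), uuid::Uuid::new_v4().to_string());
params.insert("email".to_string(), "user@example.com".to_string());
let response = user_service.sign_up(BoxAny::new(params)).await?;
// A previously unseen uuid yields a fresh user plus a trigger-created workspace.
assert!(response.is_new);
```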
async fn get_user_profile(
postgrest: Arc<PostgresWrapper>,
params: GetUserProfileParams,
) -> Result<Option<UserProfileResponse>, Error> {
let mut builder = postgrest
.from(USER_PROFILE_VIEW)
.select("uid, email, name, latest_workspace_id");
match params {
GetUserProfileParams::Uid(uid) => builder = builder.eq("uid", uid.to_string()),
GetUserProfileParams::Uuid(uuid) => builder = builder.eq("uuid", uuid.to_string()),
}
let mut profiles = builder
.execute()
.await?
.error_for_status()?
.get_value::<Vec<UserProfileResponse>>()
.await?;
match profiles.len() {
0 => Ok(None),
1 => Ok(Some(profiles.swap_remove(0))),
// uid and uuid are unique keys, so more than one matching row is impossible.
_ => unreachable!(),
}
}
async fn get_user_workspaces(
postgrest: Arc<PostgresWrapper>,
uid: i64,
) -> Result<Vec<UserWorkspace>, Error> {
postgrest
.from(WORKSPACE_TABLE)
.select("id:workspace_id, name:workspace_name, created_at, database_storage_id")
.eq("owner_uid", uid.to_string())
.execute()
.await?
.error_for_status()?
.get_value::<Vec<UserWorkspace>>()
.await
}
async fn update_user_profile(
postgrest: Arc<PostgresWrapper>,
params: UpdateUserProfileParams,
) -> Result<(), Error> {
if params.is_empty() {
anyhow::bail!("no params to update");
}
// check if user exists
let exists = !postgrest
.from(USER_TABLE)
.select("uid")
.eq("uid", params.id.to_string())
.execute()
.await?
.error_for_status()?
.get_value::<Vec<UidResponse>>()
.await?
.is_empty();
if !exists {
anyhow::bail!("user uid {} does not exist", params.id);
}
let mut update_params = serde_json::Map::new();
if let Some(name) = params.name {
update_params.insert("name".to_string(), serde_json::json!(name));
}
if let Some(email) = params.email {
update_params.insert("email".to_string(), serde_json::json!(email));
}
let update_payload = serde_json::to_string(&update_params)?;
let resp = postgrest
.from(USER_TABLE)
.update(update_payload)
.eq("uid", params.id.to_string())
.execute()
.await?
.success_with_body()
.await?;
tracing::debug!("update user profile resp: {:?}", resp);
Ok(())
}
async fn check_user(
postgrest: Arc<PostgresWrapper>,
uid: Option<i64>,
uuid: Option<Uuid>,
) -> Result<(), Error> {
let mut builder = postgrest.from(USER_TABLE);
if let Some(uid) = uid {
builder = builder.eq("uid", uid.to_string());
} else if let Some(uuid) = uuid {
builder = builder.eq("uuid", uuid.to_string());
} else {
anyhow::bail!("uid or uuid is required");
}
let exists = !builder
.execute()
.await?
.error_for_status()?
.get_value::<Vec<UidResponse>>()
.await?
.is_empty();
if !exists {
anyhow::bail!("user does not exist, uid: {:?}, uuid: {:?}", uid, uuid);
}
Ok(())
}


@ -0,0 +1,128 @@
use anyhow::Error;
use reqwest::{Response, StatusCode};
use serde_json::Value;
use flowy_error::{ErrorCode, FlowyError};
use lib_infra::future::{to_fut, Fut};
pub struct InsertParamsBuilder {
map: serde_json::Map<String, Value>,
}
impl InsertParamsBuilder {
pub fn new() -> Self {
Self {
map: serde_json::Map::new(),
}
}
pub fn insert<T: serde::Serialize>(mut self, key: &str, value: T) -> Self {
self
.map
.insert(key.to_string(), serde_json::to_value(value).unwrap());
self
}
pub fn build(self) -> String {
serde_json::to_string(&self.map).unwrap()
}
}
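The builder accumulates a JSON object and serializes it into the request body string PostgREST expects; a minimal sketch:

```rust
let body = InsertParamsBuilder::new()
  .insert("uuid", "abc-123")
  .insert("email", "user@example.com")
  .build();
// body is a JSON object string such as {"email":"user@example.com","uuid":"abc-123"}
// (key order depends on serde_json's map implementation).
```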
/// Trait `ExtendedResponse` provides extension methods to handle and transform the response data.
///
/// The primary method is:
///
/// - `get_value`: extracts the value from the response and returns it as an instance of type `T`.
/// It returns an error if the status code of the response signifies a failure (not success).
/// Otherwise, it attempts to parse the response body into an instance of type `T`, which must implement
/// `serde::de::DeserializeOwned`, `Send`, `Sync`, and have a `'static` lifetime.
pub trait ExtendedResponse {
/// Returns the value of the response as a Future of `Result<T, Error>`.
///
/// If the status code of the response is not a success, returns an `Error`.
/// Otherwise, attempts to parse the response into an instance of type `T`.
///
/// # Type Parameters
///
/// * `T`: The type of the value to be returned. Must implement `serde::de::DeserializeOwned`,
/// `Send`, `Sync`, and have a static lifetime ('static).
fn get_value<T>(self) -> Fut<Result<T, Error>>
where
T: serde::de::DeserializeOwned + Send + Sync + 'static;
fn get_json(self) -> Fut<Result<Value, Error>>;
fn success(self) -> Fut<Result<(), Error>>;
fn success_with_body(self) -> Fut<Result<String, Error>>;
}
impl ExtendedResponse for Response {
fn get_value<T>(self) -> Fut<Result<T, Error>>
where
T: serde::de::DeserializeOwned + Send + Sync + 'static,
{
to_fut(async move {
let status_code = self.status();
if !status_code.is_success() {
return Err(parse_response_as_error(self).await.into());
}
let bytes = self.bytes().await?;
let value = serde_json::from_slice(&bytes).map_err(|e| {
FlowyError::new(
ErrorCode::Serde,
format!(
"failed to parse json: {}, body: {}",
e,
String::from_utf8_lossy(&bytes)
),
)
})?;
Ok(value)
})
}
fn get_json(self) -> Fut<Result<Value, Error>> {
to_fut(async move {
if !self.status().is_success() {
return Err(parse_response_as_error(self).await.into());
}
let bytes = self.bytes().await?;
let value = serde_json::from_slice::<Value>(&bytes)?;
Ok(value)
})
}
fn success(self) -> Fut<Result<(), Error>> {
to_fut(async move {
if !self.status().is_success() {
return Err(parse_response_as_error(self).await.into());
}
Ok(())
})
}
fn success_with_body(self) -> Fut<Result<String, Error>> {
to_fut(async move {
if !self.status().is_success() {
return Err(parse_response_as_error(self).await.into());
}
Ok(self.text().await?)
})
}
}
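A sketch of the call-site pattern this trait enables throughout the commit: validate the status code, then deserialize straight into a typed value:

```rust
// Hypothetical call site; `postgrest` and `uuid` are in scope elsewhere.
let users = postgrest
  .from(USER_TABLE)
  .select("uid")
  .eq("uuid", uuid.to_string())
  .execute()
  .await?
  .get_value::<Vec<UidResponse>>()
  .await?;
```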
async fn parse_response_as_error(response: Response) -> FlowyError {
let status_code = response.status();
let msg = response.text().await.unwrap_or_default();
if status_code == StatusCode::CONFLICT {
return FlowyError::new(ErrorCode::Conflict, msg);
}
FlowyError::new(
ErrorCode::HttpError,
format!(
"expected status code 2XX, but got {}, body: {}",
status_code, msg
),
)
}


@ -0,0 +1,36 @@
use collab_plugins::cloud_storage::CollabType;
pub const AF_COLLAB_UPDATE_TABLE: &str = "af_collab_update";
pub const AF_COLLAB_KEY_COLUMN: &str = "key";
pub const AF_COLLAB_SNAPSHOT_OID_COLUMN: &str = "oid";
pub const AF_COLLAB_SNAPSHOT_ID_COLUMN: &str = "sid";
pub const AF_COLLAB_SNAPSHOT_BLOB_COLUMN: &str = "blob";
pub const AF_COLLAB_SNAPSHOT_BLOB_SIZE_COLUMN: &str = "blob_size";
pub const AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN: &str = "created_at";
pub const AF_COLLAB_SNAPSHOT_TABLE: &str = "af_collab_snapshot";
pub const USER_UUID: &str = "uuid";
pub const USER_UID: &str = "uid";
pub const OWNER_USER_UID: &str = "owner_uid";
pub const USER_EMAIL: &str = "email";
pub const USER_TABLE: &str = "af_user";
pub const WORKSPACE_TABLE: &str = "af_workspace";
pub const USER_PROFILE_VIEW: &str = "af_user_profile_view";
pub(crate) const WORKSPACE_ID: &str = "workspace_id";
pub(crate) const WORKSPACE_NAME: &str = "workspace_name";
pub(crate) const CREATED_AT: &str = "created_at";
pub fn table_name(ty: &CollabType) -> String {
match ty {
CollabType::DatabaseRow => format!("{}_database_row", AF_COLLAB_UPDATE_TABLE),
CollabType::Document => format!("{}_document", AF_COLLAB_UPDATE_TABLE),
CollabType::Database => format!("{}_database", AF_COLLAB_UPDATE_TABLE),
CollabType::WorkspaceDatabase => format!("{}_w_database", AF_COLLAB_UPDATE_TABLE),
CollabType::Folder => format!("{}_folder", AF_COLLAB_UPDATE_TABLE),
}
}
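The mapping can be sanity-checked directly against the match arms above:

```rust
assert_eq!(table_name(&CollabType::Folder), "af_collab_update_folder");
assert_eq!(table_name(&CollabType::WorkspaceDatabase), "af_collab_update_w_database");
```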
pub fn partition_key(ty: &CollabType) -> i32 {
ty.value()
}


@ -1,7 +1,6 @@
use serde::Deserialize;
use uuid::Uuid;
use crate::supabase::impls::WORKSPACE_ID;
use crate::util::deserialize_null_or_default;
pub enum GetUserProfileParams {
@ -20,20 +19,14 @@ pub(crate) struct UserProfileResponse {
pub email: String,
#[serde(deserialize_with = "deserialize_null_or_default")]
pub workspace_id: String,
}
impl From<tokio_postgres::Row> for UserProfileResponse {
fn from(row: tokio_postgres::Row) -> Self {
let workspace_id: Uuid = row.get(WORKSPACE_ID);
Self {
uid: row.get("uid"),
name: row.try_get("name").unwrap_or_default(),
email: row.try_get("email").unwrap_or_default(),
workspace_id: workspace_id.to_string(),
}
}
pub latest_workspace_id: String,
}
#[derive(Debug, Deserialize)]
pub(crate) struct UserProfileResponseList(pub Vec<UserProfileResponse>);
#[derive(Deserialize, Clone)]
pub(crate) struct UidResponse {
#[allow(dead_code)]
pub uid: i64,
}


@ -1,506 +0,0 @@
use std::future::Future;
use std::iter::Take;
use std::pin::Pin;
use std::sync::Weak;
use std::time::Duration;
use anyhow::Error;
use appflowy_integrate::{
merge_updates_v1, CollabObject, MsgId, RemoteCollabSnapshot, RemoteCollabState,
RemoteCollabStorage, RemoteUpdateReceiver,
};
use chrono::{DateTime, Utc};
use deadpool_postgres::GenericClient;
use futures::pin_mut;
use futures_util::{StreamExt, TryStreamExt};
use tokio::task::spawn_blocking;
use tokio_postgres::types::ToSql;
use tokio_postgres::Row;
use tokio_retry::strategy::FixedInterval;
use tokio_retry::{Action, Retry};
use flowy_database2::deps::{CollabObjectUpdate, CollabObjectUpdateByOid};
use lib_infra::async_trait::async_trait;
use lib_infra::util::md5;
use crate::supabase::postgres_db::PostgresObject;
use crate::supabase::sql_builder::{
DeleteSqlBuilder, InsertSqlBuilder, SelectSqlBuilder, WhereCondition,
};
use crate::supabase::{PostgresServer, SupabaseServerService};
pub struct PgCollabStorageImpl<T> {
server: T,
}
const AF_COLLAB_KEY_COLUMN: &str = "key";
const AF_COLLAB_SNAPSHOT_OID_COLUMN: &str = "oid";
const AF_COLLAB_SNAPSHOT_ID_COLUMN: &str = "sid";
const AF_COLLAB_SNAPSHOT_BLOB_COLUMN: &str = "blob";
const AF_COLLAB_SNAPSHOT_BLOB_SIZE_COLUMN: &str = "blob_size";
const AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN: &str = "created_at";
const AF_COLLAB_SNAPSHOT_TABLE: &str = "af_collab_snapshot";
impl<T> PgCollabStorageImpl<T>
where
T: SupabaseServerService,
{
pub fn new(server: T) -> Self {
Self { server }
}
pub async fn get_client(&self) -> Option<PostgresObject> {
self
.server
.get_pg_server()?
.upgrade()?
.get_pg_client()
.await
.recv()
.await
.ok()
}
}
#[async_trait]
impl<T> RemoteCollabStorage for PgCollabStorageImpl<T>
where
T: SupabaseServerService,
{
fn is_enable(&self) -> bool {
self
.server
.get_pg_server()
.and_then(|server| server.upgrade())
.is_some()
}
async fn get_all_updates(&self, object_id: &str) -> Result<Vec<Vec<u8>>, Error> {
let pg_server = self.server.try_get_pg_server()?;
let action = FetchObjectUpdateAction::new(object_id, pg_server);
let updates = action.run().await?;
Ok(updates)
}
async fn get_latest_snapshot(
&self,
object_id: &str,
) -> Result<Option<RemoteCollabSnapshot>, Error> {
match self.server.get_pg_server() {
None => Ok(None),
Some(weak_server) => get_latest_snapshot_from_server(object_id, weak_server).await,
}
}
async fn get_collab_state(&self, object_id: &str) -> Result<Option<RemoteCollabState>, Error> {
let client = self.get_client().await;
if client.is_none() {
return Ok(None);
}
let client = client.unwrap();
let (sql, params) = SelectSqlBuilder::new("af_collab_state")
.column("*")
.where_clause("oid", object_id.to_string())
.order_by("snapshot_created_at", false)
.limit(1)
.build();
let stmt = client.prepare_cached(&sql).await?;
if let Some(row) = client
.query_raw(&stmt, params)
.await?
.try_collect::<Vec<_>>()
.await?
.first()
{
let created_at = row.try_get::<&str, DateTime<Utc>>("snapshot_created_at")?;
let current_edit_count = row.try_get::<_, i64>("current_edit_count")?;
let last_snapshot_edit_count = row.try_get::<_, i64>("snapshot_edit_count")?;
let state = RemoteCollabState {
current_edit_count,
last_snapshot_edit_count,
last_snapshot_created_at: created_at.timestamp(),
};
return Ok(Some(state));
}
Ok(None)
}
async fn create_snapshot(&self, object: &CollabObject, snapshot: Vec<u8>) -> Result<i64, Error> {
let client = self
.get_client()
.await
.ok_or_else(|| anyhow::anyhow!("Create snapshot failed. No client available"))?;
let value_size = snapshot.len() as i32;
let (sql, params) = InsertSqlBuilder::new("af_collab_snapshot")
.value(AF_COLLAB_SNAPSHOT_OID_COLUMN, object.id.clone())
.value("name", object.name.clone())
.value(AF_COLLAB_SNAPSHOT_BLOB_COLUMN, snapshot)
.value(AF_COLLAB_SNAPSHOT_BLOB_SIZE_COLUMN, value_size)
.returning(AF_COLLAB_SNAPSHOT_ID_COLUMN)
.build();
let stmt = client.prepare_cached(&sql).await?;
let all_rows = client
.query_raw(&stmt, params)
.await?
.try_collect::<Vec<_>>()
.await?;
let row = all_rows
.first()
.ok_or(anyhow::anyhow!("Create snapshot failed. No row returned"))?;
let sid = row.try_get::<&str, i64>(AF_COLLAB_SNAPSHOT_ID_COLUMN)?;
return Ok(sid);
}
async fn send_update(
&self,
object: &CollabObject,
_id: MsgId,
update: Vec<u8>,
) -> Result<(), Error> {
if let Some(client) = self.get_client().await {
let value_size = update.len() as i32;
let md5 = md5(&update);
let (sql, params) = InsertSqlBuilder::new("af_collab")
.value("oid", object.id.clone())
.value("name", object.name.clone())
.value("value", update)
.value("uid", object.uid)
.value("md5", md5)
.value("value_size", value_size)
.build();
let stmt = client.prepare_cached(&sql).await?;
client.execute_raw(&stmt, params).await?;
}
Ok(())
}
async fn send_init_sync(
&self,
object: &CollabObject,
_id: MsgId,
init_update: Vec<u8>,
) -> Result<(), Error> {
let client = self.get_client().await;
if client.is_none() {
return Ok(());
}
let mut client = client.unwrap();
let txn = client.transaction().await?;
// 1. Get all updates and lock the rows. A subsequent UPDATE, DELETE, or SELECT
// FOR UPDATE by this transaction will not result in a lock wait; other transactions that try
// to update or lock these specific rows will be blocked until the current transaction ends.
let (sql, params) = SelectSqlBuilder::new("af_collab")
.column(AF_COLLAB_KEY_COLUMN)
.column("value")
.order_by(AF_COLLAB_KEY_COLUMN, true)
.where_clause("oid", object.id.clone())
.lock()
.build();
let get_all_update_stmt = txn.prepare_cached(&sql).await?;
let row_stream = txn.query_raw(&get_all_update_stmt, params).await?;
let pg_rows = row_stream.try_collect::<Vec<_>>().await?;
let insert_builder = InsertSqlBuilder::new("af_collab")
.value("oid", object.id.clone())
.value("uid", object.uid)
.value("name", object.name.clone());
let (sql, params) = if !pg_rows.is_empty() {
let last_row_key = pg_rows
.last()
.map(|row| row.get::<_, i64>(AF_COLLAB_KEY_COLUMN))
.unwrap();
// 2. Merge the updates into one and then delete the merged updates.
let merge_result =
spawn_blocking(move || merge_update_from_rows(pg_rows, init_update)).await??;
tracing::trace!("Merged updates count: {}", merge_result.merged_keys.len());
// 3. Delete merged updates
let (sql, params) = DeleteSqlBuilder::new("af_collab")
.where_condition(WhereCondition::Equals(
"oid".to_string(),
Box::new(object.id.clone()),
))
.where_condition(WhereCondition::In(
AF_COLLAB_KEY_COLUMN.to_string(),
merge_result
.merged_keys
.into_iter()
.map(|key| Box::new(key) as Box<dyn ToSql + Send + Sync>)
.collect::<Vec<_>>(),
))
.build();
let delete_stmt = txn.prepare_cached(&sql).await?;
txn.execute_raw(&delete_stmt, params).await?;
// 4. Insert the merged update. The new_update contains the merged update and the
// init_update.
let new_update = merge_result.new_update;
let value_size = new_update.len() as i32;
let md5 = md5(&new_update);
insert_builder
.value("value", new_update)
.value("value_size", value_size)
.value("md5", md5)
.value(AF_COLLAB_KEY_COLUMN, last_row_key)
.overriding_system_value()
.build()
} else {
let value_size = init_update.len() as i32;
let md5 = md5(&init_update);
insert_builder
.value("value", init_update)
.value("md5", md5)
.value("value_size", value_size)
.build()
};
// 5. Insert the merged (or initial) update.
let stmt = txn.prepare_cached(&sql).await?;
txn.execute_raw(&stmt, params).await?;
// 6. Commit the transaction.
txn.commit().await?;
tracing::trace!("{} init sync done", object.id);
Ok(())
}
async fn subscribe_remote_updates(&self, _object: &CollabObject) -> Option<RemoteUpdateReceiver> {
// using pg_notify to subscribe to updates
None
}
}
pub async fn get_updates_from_server(
object_id: &str,
server: Weak<PostgresServer>,
) -> Result<Vec<Vec<u8>>, Error> {
match server.upgrade() {
None => Ok(vec![]),
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
let (sql, params) = SelectSqlBuilder::new("af_collab")
.column("value")
.order_by(AF_COLLAB_KEY_COLUMN, true)
.where_clause("oid", object_id.to_string())
.build();
let stmt = client.prepare_cached(&sql).await?;
let row_stream = client.query_raw(&stmt, params).await?;
Ok(
row_stream
.try_collect::<Vec<_>>()
.await?
.into_iter()
.flat_map(|row| update_from_row(&row).ok())
.collect(),
)
},
}
}
pub async fn get_latest_snapshot_from_server(
object_id: &str,
server: Weak<PostgresServer>,
) -> Result<Option<RemoteCollabSnapshot>, Error> {
match server.upgrade() {
None => Ok(None),
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
let (sql, params) = SelectSqlBuilder::new(AF_COLLAB_SNAPSHOT_TABLE)
.column(AF_COLLAB_SNAPSHOT_ID_COLUMN)
.column(AF_COLLAB_SNAPSHOT_BLOB_COLUMN)
.column(AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN)
.order_by(AF_COLLAB_SNAPSHOT_ID_COLUMN, false)
.limit(1)
.where_clause(AF_COLLAB_SNAPSHOT_OID_COLUMN, object_id.to_string())
.build();
let stmt = client.prepare_cached(&sql).await?;
let all_rows = client
.query_raw(&stmt, params)
.await?
.try_collect::<Vec<_>>()
.await?;
let row = all_rows.first().ok_or(anyhow::anyhow!(
"Get latest snapshot failed. No row returned"
))?;
let snapshot_id = row.try_get::<_, i64>(AF_COLLAB_SNAPSHOT_ID_COLUMN)?;
let update = row.try_get::<_, Vec<u8>>(AF_COLLAB_SNAPSHOT_BLOB_COLUMN)?;
let created_at = row
.try_get::<_, DateTime<Utc>>(AF_COLLAB_SNAPSHOT_CREATED_AT_COLUMN)?
.timestamp();
Ok(Some(RemoteCollabSnapshot {
snapshot_id,
oid: object_id.to_string(),
data: update,
created_at,
}))
},
}
}
fn update_from_row(row: &Row) -> Result<Vec<u8>, anyhow::Error> {
let update = row.try_get::<_, Vec<u8>>("value")?;
Ok(update)
}
fn merge_update_from_rows(
rows: Vec<Row>,
new_update: Vec<u8>,
) -> Result<MergeResult, anyhow::Error> {
let mut updates = vec![];
let mut merged_keys = vec![];
for row in rows {
merged_keys.push(row.try_get::<_, i64>(AF_COLLAB_KEY_COLUMN)?);
let update = update_from_row(&row)?;
updates.push(update);
}
updates.push(new_update);
let updates = updates
.iter()
.map(|update| update.as_ref())
.collect::<Vec<&[u8]>>();
let new_update = merge_updates_v1(&updates)?;
Ok(MergeResult {
merged_keys,
new_update,
})
}
struct MergeResult {
merged_keys: Vec<i64>,
new_update: Vec<u8>,
}
pub struct FetchObjectUpdateAction {
object_id: String,
pg_server: Weak<PostgresServer>,
}
impl FetchObjectUpdateAction {
pub fn new(object_id: &str, pg_server: Weak<PostgresServer>) -> Self {
Self {
pg_server,
object_id: object_id.to_string(),
}
}
pub fn run(self) -> Retry<Take<FixedInterval>, FetchObjectUpdateAction> {
let retry_strategy = FixedInterval::new(Duration::from_secs(5)).take(3);
Retry::spawn(retry_strategy, self)
}
pub fn run_with_fix_interval(
self,
secs: u64,
times: usize,
) -> Retry<Take<FixedInterval>, FetchObjectUpdateAction> {
let retry_strategy = FixedInterval::new(Duration::from_secs(secs)).take(times);
Retry::spawn(retry_strategy, self)
}
}
impl Action for FetchObjectUpdateAction {
type Future = Pin<Box<dyn Future<Output = Result<Self::Item, Self::Error>> + Send>>;
type Item = CollabObjectUpdate;
type Error = anyhow::Error;
fn run(&mut self) -> Self::Future {
let weak_pb_server = self.pg_server.clone();
let object_id = self.object_id.clone();
Box::pin(async move {
match weak_pb_server.upgrade() {
None => Ok(vec![]),
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
let (sql, params) = SelectSqlBuilder::new("af_collab")
.column("value")
.order_by(AF_COLLAB_KEY_COLUMN, true)
.where_clause("oid", object_id)
.build();
let stmt = client.prepare_cached(&sql).await?;
let row_stream = client.query_raw(&stmt, params).await?;
Ok(
row_stream
.try_collect::<Vec<_>>()
.await?
.into_iter()
.flat_map(|row| update_from_row(&row).ok())
.collect(),
)
},
}
})
}
}
pub struct BatchFetchObjectUpdateAction {
object_ids: Vec<String>,
pg_server: Weak<PostgresServer>,
}
impl BatchFetchObjectUpdateAction {
pub fn new(object_ids: Vec<String>, pg_server: Weak<PostgresServer>) -> Self {
Self {
pg_server,
object_ids,
}
}
pub fn run(self) -> Retry<Take<FixedInterval>, BatchFetchObjectUpdateAction> {
let retry_strategy = FixedInterval::new(Duration::from_secs(5)).take(3);
Retry::spawn(retry_strategy, self)
}
}
impl Action for BatchFetchObjectUpdateAction {
type Future = Pin<Box<dyn Future<Output = Result<Self::Item, Self::Error>> + Send>>;
type Item = CollabObjectUpdateByOid;
type Error = anyhow::Error;
fn run(&mut self) -> Self::Future {
let weak_pb_server = self.pg_server.clone();
let object_ids = self.object_ids.clone();
Box::pin(async move {
match weak_pb_server.upgrade() {
None => Ok(CollabObjectUpdateByOid::default()),
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
let mut updates_by_oid = CollabObjectUpdateByOid::new();
// Group the updates by oid
let (sql, params) = SelectSqlBuilder::new("af_collab")
.column("oid")
.array_agg("value")
.group_by("oid")
.where_clause_in("oid", object_ids)
.build();
let stmt = client.prepare_cached(&sql).await?;
// Poll the rows
let rows = Box::pin(client.query_raw(&stmt, params).await?);
pin_mut!(rows);
while let Some(Ok(row)) = rows.next().await {
let oid = row.try_get::<_, String>("oid")?;
let updates = row.try_get::<_, Vec<Vec<u8>>>("value")?;
updates_by_oid.insert(oid, updates);
}
Ok(updates_by_oid)
},
}
})
}
}


@ -1,107 +0,0 @@
use tokio::sync::oneshot::channel;
use flowy_database2::deps::{
CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCloudService, DatabaseSnapshot,
};
use flowy_error::{internal_error, FlowyError};
use lib_infra::future::FutureResult;
use crate::supabase::impls::{
get_latest_snapshot_from_server, BatchFetchObjectUpdateAction, FetchObjectUpdateAction,
};
use crate::supabase::SupabaseServerService;
pub struct SupabaseDatabaseCloudServiceImpl<T> {
server: T,
}
impl<T> SupabaseDatabaseCloudServiceImpl<T> {
pub fn new(server: T) -> Self {
Self { server }
}
}
impl<T> DatabaseCloudService for SupabaseDatabaseCloudServiceImpl<T>
where
T: SupabaseServerService,
{
fn get_collab_update(&self, object_id: &str) -> FutureResult<CollabObjectUpdate, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
let database_id = object_id.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(CollabObjectUpdate::default()),
Some(weak_server) => {
FetchObjectUpdateAction::new(&database_id, weak_server)
.run()
.await
},
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error) })
}
fn batch_get_collab_updates(
&self,
object_ids: Vec<String>,
) -> FutureResult<CollabObjectUpdateByOid, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(CollabObjectUpdateByOid::default()),
Some(weak_server) => {
BatchFetchObjectUpdateAction::new(object_ids, weak_server)
.run()
.await
},
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error) })
}
fn get_collab_latest_snapshot(
&self,
object_id: &str,
) -> FutureResult<Option<DatabaseSnapshot>, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
let database_id = object_id.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(None),
Some(weak_server) => get_latest_snapshot_from_server(&database_id, weak_server)
.await
.map_err(internal_error),
}
}
.await,
)
});
FutureResult::new(async {
Ok(
rx.await
.map_err(internal_error)??
.map(|snapshot| DatabaseSnapshot {
snapshot_id: snapshot.snapshot_id,
database_id: snapshot.oid,
data: snapshot.data,
created_at: snapshot.created_at,
}),
)
})
}
}


@ -1,108 +0,0 @@
use collab_document::document::Document;
use collab_folder::core::CollabOrigin;
use tokio::sync::oneshot::channel;
use flowy_document2::deps::{DocumentCloudService, DocumentData, DocumentSnapshot};
use flowy_error::{internal_error, FlowyError};
use lib_infra::future::FutureResult;
use crate::supabase::impls::{get_latest_snapshot_from_server, FetchObjectUpdateAction};
use crate::supabase::SupabaseServerService;
pub struct SupabaseDocumentCloudServiceImpl<T> {
server: T,
}
impl<T> SupabaseDocumentCloudServiceImpl<T> {
pub fn new(server: T) -> Self {
Self { server }
}
}
impl<T> DocumentCloudService for SupabaseDocumentCloudServiceImpl<T>
where
T: SupabaseServerService,
{
fn get_document_updates(&self, document_id: &str) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
let document_id = document_id.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(vec![]),
Some(weak_server) => FetchObjectUpdateAction::new(&document_id, weak_server)
.run_with_fix_interval(5, 5)
.await
.map_err(internal_error),
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)? })
}
fn get_document_latest_snapshot(
&self,
document_id: &str,
) -> FutureResult<Option<DocumentSnapshot>, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
let document_id = document_id.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(None),
Some(weak_server) => get_latest_snapshot_from_server(&document_id, weak_server)
.await
.map_err(internal_error),
}
}
.await,
)
});
FutureResult::new(async {
Ok(
rx.await
.map_err(internal_error)??
.map(|snapshot| DocumentSnapshot {
snapshot_id: snapshot.snapshot_id,
document_id: snapshot.oid,
data: snapshot.data,
created_at: snapshot.created_at,
}),
)
})
}
fn get_document_data(&self, document_id: &str) -> FutureResult<Option<DocumentData>, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
let document_id = document_id.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(Ok(None)),
Some(weak_server) => {
let action = FetchObjectUpdateAction::new(&document_id, weak_server);
action.run().await.map(|updates| {
let document =
Document::from_updates(CollabOrigin::Empty, updates, &document_id, vec![])?;
Ok(document.get_document_data().ok())
})
},
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error)? })
}
}


@ -1,257 +0,0 @@
use chrono::{DateTime, Utc};
use collab_folder::core::{CollabOrigin, Folder};
use futures_util::{pin_mut, StreamExt};
use tokio::sync::oneshot::channel;
use uuid::Uuid;
use flowy_error::{internal_error, ErrorCode, FlowyError};
use flowy_folder2::deps::{FolderCloudService, FolderData, FolderSnapshot, Workspace};
use lib_infra::future::FutureResult;
use crate::supabase::impls::{
get_latest_snapshot_from_server, get_updates_from_server, FetchObjectUpdateAction,
};
use crate::supabase::postgres_db::PostgresObject;
use crate::supabase::sql_builder::{InsertSqlBuilder, SelectSqlBuilder};
use crate::supabase::SupabaseServerService;
pub(crate) const WORKSPACE_TABLE: &str = "af_workspace";
pub(crate) const WORKSPACE_ID: &str = "workspace_id";
const WORKSPACE_NAME: &str = "workspace_name";
const CREATED_AT: &str = "created_at";
pub struct SupabaseFolderCloudServiceImpl<T> {
server: T,
}
impl<T> SupabaseFolderCloudServiceImpl<T> {
pub fn new(server: T) -> Self {
Self { server }
}
}
impl<T> FolderCloudService for SupabaseFolderCloudServiceImpl<T>
where
T: SupabaseServerService,
{
fn create_workspace(&self, uid: i64, name: &str) -> FutureResult<Workspace, FlowyError> {
let weak_server = self.server.try_get_pg_server();
let (tx, rx) = channel();
let name = name.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server?.upgrade() {
None => Err(FlowyError::new(
ErrorCode::PgDatabaseError,
"Server is close",
)),
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
create_workspace(&client, uid, &name).await
},
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)? })
}
fn get_folder_data(&self, workspace_id: &str) -> FutureResult<Option<FolderData>, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
let workspace_id = workspace_id.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(Ok(None)),
Some(weak_server) => get_updates_from_server(&workspace_id, weak_server)
.await
.map(|updates| {
let folder = Folder::from_collab_raw_data(
CollabOrigin::Empty,
updates,
&workspace_id,
vec![],
)?;
Ok(folder.get_folder_data())
}),
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error)? })
}
fn get_folder_latest_snapshot(
&self,
workspace_id: &str,
) -> FutureResult<Option<FolderSnapshot>, FlowyError> {
let weak_server = self.server.get_pg_server();
let workspace_id = workspace_id.to_string();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async {
match weak_server {
None => Ok(None),
Some(weak_server) => get_latest_snapshot_from_server(&workspace_id, weak_server)
.await
.map_err(internal_error),
}
}
.await,
)
});
FutureResult::new(async {
Ok(
rx.await
.map_err(internal_error)??
.map(|snapshot| FolderSnapshot {
snapshot_id: snapshot.snapshot_id,
database_id: snapshot.oid,
data: snapshot.data,
created_at: snapshot.created_at,
}),
)
})
}
fn get_folder_updates(
&self,
workspace_id: &str,
_uid: i64,
) -> FutureResult<Vec<Vec<u8>>, FlowyError> {
let weak_server = self.server.get_pg_server();
let (tx, rx) = channel();
let workspace_id = workspace_id.to_string();
tokio::spawn(async move {
tx.send(
async move {
match weak_server {
None => Ok(vec![]),
Some(weak_server) => {
let action = FetchObjectUpdateAction::new(&workspace_id, weak_server);
action.run_with_fix_interval(5, 10).await
},
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)?.map_err(internal_error) })
}
fn service_name(&self) -> String {
"Supabase".to_string()
}
}
async fn create_workspace(
client: &PostgresObject,
uid: i64,
name: &str,
) -> Result<Workspace, FlowyError> {
let new_workspace_id = Uuid::new_v4();
// Create workspace
let (sql, params) = InsertSqlBuilder::new(WORKSPACE_TABLE)
.value("uid", uid)
.value(WORKSPACE_ID, new_workspace_id)
.value(WORKSPACE_NAME, name.to_string())
.build();
let stmt = client
.prepare_cached(&sql)
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
client
.execute_raw(&stmt, params)
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
// Read the workspace
let (sql, params) = SelectSqlBuilder::new(WORKSPACE_TABLE)
.column(WORKSPACE_ID)
.column(WORKSPACE_NAME)
.column(CREATED_AT)
.where_clause(WORKSPACE_ID, new_workspace_id)
.build();
let stmt = client
.prepare_cached(&sql)
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
let rows = Box::pin(
client
.query_raw(&stmt, params)
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?,
);
pin_mut!(rows);
if let Some(Ok(row)) = rows.next().await {
let created_at = row
.try_get::<&str, DateTime<Utc>>(CREATED_AT)
.unwrap_or_default();
let workspace_id: Uuid = row.get(WORKSPACE_ID);
Ok(Workspace {
id: workspace_id.to_string(),
name: row.get(WORKSPACE_NAME),
child_views: Default::default(),
created_at: created_at.timestamp(),
})
} else {
Err(FlowyError::new(
ErrorCode::PgDatabaseError,
"Create workspace failed",
))
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use uuid::Uuid;
use flowy_folder2::deps::FolderCloudService;
use flowy_server_config::supabase_config::PostgresConfiguration;
use flowy_user::event_map::UserAuthService;
use lib_infra::box_any::BoxAny;
use crate::supabase::impls::folder::SupabaseFolderCloudServiceImpl;
use crate::supabase::impls::SupabaseUserAuthServiceImpl;
use crate::supabase::{PostgresServer, SupabaseServerServiceImpl};
#[tokio::test]
async fn create_user_workspace() {
if dotenv::from_filename("./.env.workspace.test").is_err() {
return;
}
let server = Arc::new(PostgresServer::new(
PostgresConfiguration::from_env().unwrap(),
));
let weak_server = SupabaseServerServiceImpl(Arc::new(RwLock::new(Some(server.clone()))));
let user_service = SupabaseUserAuthServiceImpl::new(weak_server.clone());
// create user
let mut params = HashMap::new();
params.insert("uuid".to_string(), Uuid::new_v4().to_string());
let user = user_service.sign_up(BoxAny::new(params)).await.unwrap();
// create workspace
let folder_service = SupabaseFolderCloudServiceImpl::new(weak_server);
let workspace = folder_service
.create_workspace(user.user_id, "my test workspace")
.await
.unwrap();
assert_eq!(workspace.name, "my test workspace");
}
}


@ -1,336 +0,0 @@
use std::str::FromStr;
use deadpool_postgres::GenericClient;
use futures::pin_mut;
use futures_util::StreamExt;
use tokio::sync::oneshot::channel;
use tokio_postgres::error::SqlState;
use uuid::Uuid;
use flowy_error::{internal_error, ErrorCode, FlowyError};
use flowy_user::entities::{SignInResponse, SignUpResponse, UpdateUserProfileParams, UserProfile};
use flowy_user::event_map::{UserAuthService, UserCredentials};
use flowy_user::services::{uuid_from_box_any, AuthType};
use lib_infra::box_any::BoxAny;
use lib_infra::future::FutureResult;
use crate::supabase::entities::{GetUserProfileParams, UserProfileResponse};
use crate::supabase::postgres_db::PostgresObject;
use crate::supabase::sql_builder::{SelectSqlBuilder, UpdateSqlBuilder};
use crate::supabase::SupabaseServerService;
pub(crate) const USER_TABLE: &str = "af_user";
pub(crate) const USER_PROFILE_TABLE: &str = "af_user_profile";
pub const USER_UUID: &str = "uuid";
pub struct SupabaseUserAuthServiceImpl<T> {
server: T,
}
impl<T> SupabaseUserAuthServiceImpl<T> {
pub fn new(server: T) -> Self {
Self { server }
}
}
impl<T> UserAuthService for SupabaseUserAuthServiceImpl<T>
where
T: SupabaseServerService,
{
fn sign_up(&self, params: BoxAny) -> FutureResult<SignUpResponse, FlowyError> {
let weak_server = self.server.try_get_pg_server();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
match weak_server?.upgrade() {
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
let params = uuid_from_box_any(params)?;
create_user_with_uuid(&client, params.uuid, params.email).await
},
None => Err(FlowyError::new(
ErrorCode::PgDatabaseError,
"Server is close",
)),
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)? })
}
fn sign_in(&self, params: BoxAny) -> FutureResult<SignInResponse, FlowyError> {
let server = self.server.try_get_pg_server();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async {
match server?.upgrade() {
None => Err(FlowyError::new(
ErrorCode::PgDatabaseError,
"Server is close",
)),
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
let uuid = uuid_from_box_any(params)?.uuid;
let user_profile =
get_user_profile(&client, GetUserProfileParams::Uuid(uuid)).await?;
Ok(SignInResponse {
user_id: user_profile.uid,
workspace_id: user_profile.workspace_id,
..Default::default()
})
},
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)? })
}
fn sign_out(&self, _token: Option<String>) -> FutureResult<(), FlowyError> {
FutureResult::new(async { Ok(()) })
}
fn update_user(
&self,
_credential: UserCredentials,
params: UpdateUserProfileParams,
) -> FutureResult<(), FlowyError> {
let weak_server = self.server.try_get_pg_server();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
if let Some(server) = weak_server?.upgrade() {
let client = server.get_pg_client().await.recv().await?;
update_user_profile(&client, params).await
} else {
Ok(())
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)? })
}
fn get_user_profile(
&self,
credential: UserCredentials,
) -> FutureResult<Option<UserProfile>, FlowyError> {
let weak_server = self.server.try_get_pg_server();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
if let Some(server) = weak_server?.upgrade() {
let client = server.get_pg_client().await.recv().await?;
let uid = credential
.uid
.ok_or(FlowyError::new(ErrorCode::InvalidParams, "uid is required"))?;
let user_profile = get_user_profile(&client, GetUserProfileParams::Uid(uid))
.await
.ok()
.map(|user_profile| UserProfile {
id: user_profile.uid,
email: user_profile.email,
name: user_profile.name,
token: "".to_string(),
icon_url: "".to_string(),
openai_key: "".to_string(),
workspace_id: user_profile.workspace_id,
auth_type: AuthType::Supabase,
});
Ok(user_profile)
} else {
Ok(None)
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)? })
}
fn check_user(&self, credential: UserCredentials) -> FutureResult<(), FlowyError> {
let uuid = credential.uuid.and_then(|uuid| Uuid::from_str(&uuid).ok());
let weak_server = self.server.try_get_pg_server();
let (tx, rx) = channel();
tokio::spawn(async move {
tx.send(
async move {
match weak_server?.upgrade() {
None => Err(FlowyError::new(
ErrorCode::PgDatabaseError,
"Server is close",
)),
Some(server) => {
let client = server.get_pg_client().await.recv().await?;
check_user(&client, credential.uid, uuid).await
},
}
}
.await,
)
});
FutureResult::new(async { rx.await.map_err(internal_error)? })
}
}
async fn create_user_with_uuid(
client: &PostgresObject,
uuid: Uuid,
email: String,
) -> Result<SignUpResponse, FlowyError> {
let mut is_new = true;
if let Err(e) = client
.execute(
&format!("INSERT INTO {} (uuid, email) VALUES ($1,$2);", USER_TABLE),
&[&uuid, &email],
)
.await
{
if let Some(code) = e.code() {
if code == &SqlState::UNIQUE_VIOLATION {
is_new = false;
} else {
return Err(FlowyError::new(ErrorCode::PgDatabaseError, e));
}
}
};
let user_profile = get_user_profile(client, GetUserProfileParams::Uuid(uuid)).await?;
Ok(SignUpResponse {
user_id: user_profile.uid,
name: user_profile.name,
workspace_id: user_profile.workspace_id,
is_new,
email: Some(user_profile.email),
token: None,
})
}
async fn get_user_profile(
client: &PostgresObject,
params: GetUserProfileParams,
) -> Result<UserProfileResponse, FlowyError> {
let rows = match params {
GetUserProfileParams::Uid(uid) => {
let stmt = client
.prepare_cached(&format!(
"SELECT * FROM {} WHERE uid = $1",
USER_PROFILE_TABLE
))
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
client
.query(&stmt, &[&uid])
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?
},
GetUserProfileParams::Uuid(uuid) => {
let stmt = client
.prepare_cached(&format!(
"SELECT * FROM {} WHERE uuid = $1",
USER_PROFILE_TABLE
))
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
client
.query(&stmt, &[&uuid])
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?
},
};
let mut user_profiles = rows
.into_iter()
.map(UserProfileResponse::from)
.collect::<Vec<_>>();
if user_profiles.is_empty() {
Err(FlowyError::record_not_found())
} else {
Ok(user_profiles.remove(0))
}
}
async fn update_user_profile(
client: &PostgresObject,
params: UpdateUserProfileParams,
) -> Result<(), FlowyError> {
if params.is_empty() {
return Err(FlowyError::new(
ErrorCode::InvalidParams,
format!("Update user profile params is empty: {:?}", params),
));
}
let (sql, pg_params) = UpdateSqlBuilder::new(USER_PROFILE_TABLE)
.set("name", params.name)
.set("email", params.email)
.where_clause("uid", params.id)
.build();
let stmt = client.prepare_cached(&sql).await.map_err(|e| {
FlowyError::new(
ErrorCode::PgDatabaseError,
format!("Prepare update user profile sql error: {}", e),
)
})?;
let affect_rows = client
.execute_raw(&stmt, pg_params)
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
tracing::trace!("Update user profile affect rows: {}", affect_rows);
Ok(())
}
async fn check_user(
client: &PostgresObject,
uid: Option<i64>,
uuid: Option<Uuid>,
) -> Result<(), FlowyError> {
if uid.is_none() && uuid.is_none() {
return Err(FlowyError::new(
ErrorCode::InvalidParams,
"uid and uuid can't be both empty",
));
}
let (sql, params) = match uid {
None => SelectSqlBuilder::new(USER_TABLE)
.where_clause("uuid", uuid.unwrap())
.build(),
Some(uid) => SelectSqlBuilder::new(USER_TABLE)
.where_clause("uid", uid)
.build(),
};
let stmt = client
.prepare_cached(&sql)
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?;
let rows = Box::pin(
client
.query_raw(&stmt, params)
.await
.map_err(|e| FlowyError::new(ErrorCode::PgDatabaseError, e))?,
);
pin_mut!(rows);
// TODO(nathan): would it be better to use token.
if rows.next().await.is_some() {
Ok(())
} else {
Err(FlowyError::new(
ErrorCode::UserNotExist,
"Can't find the user in pg database",
))
}
}


@ -1,101 +0,0 @@
use refinery::embed_migrations;
use tokio_postgres::Client;
embed_migrations!("./src/supabase/migrations");
const AF_MIGRATION_HISTORY: &str = "af_migration_history";
pub(crate) async fn run_migrations(client: &mut Client) -> Result<(), anyhow::Error> {
match migrations::runner()
.set_migration_table_name(AF_MIGRATION_HISTORY)
.run_async(client)
.await
{
Ok(report) => {
if !report.applied_migrations().is_empty() {
tracing::info!("Run postgres db migration: {:?}", report);
}
Ok(())
},
Err(e) => {
tracing::error!("postgres db migration error: {}", e);
Err(anyhow::anyhow!("postgres db migration error: {}", e))
},
}
}
/// Drops all tables and dependencies defined in v1_initial_up.sql.
/// Use with care: this is destructive and intended mostly for testing.
#[allow(dead_code)]
#[cfg(debug_assertions)]
pub(crate) async fn run_initial_drop(client: &Client) {
// let sql = include_str!("migrations/initial/initial_down.sql");
let sql = r#"DROP TABLE IF EXISTS af_user;
DROP TABLE IF EXISTS af_workspace;
DROP TABLE IF EXISTS af_user_profile;
DROP TABLE IF EXISTS af_collab;
DROP VIEW IF EXISTS af_collab_state;
DROP TABLE IF EXISTS af_collab_snapshot;
DROP TABLE IF EXISTS af_collab_statistics;
DROP TRIGGER IF EXISTS create_af_user_profile_trigger ON af_user_profile CASCADE;
DROP FUNCTION IF EXISTS create_af_user_profile_trigger_func;
DROP TRIGGER IF EXISTS create_af_workspace_trigger ON af_workspace CASCADE;
DROP FUNCTION IF EXISTS create_af_workspace_trigger_func;
DROP TRIGGER IF EXISTS af_collab_insert_trigger ON af_collab CASCADE;
DROP FUNCTION IF EXISTS increment_af_collab_update_count;
DROP TRIGGER IF EXISTS af_collab_snapshot_update_edit_count_trigger ON af_collab_snapshot;
DROP FUNCTION IF EXISTS af_collab_snapshot_update_edit_count;
DROP TRIGGER IF EXISTS check_and_delete_snapshots_trigger ON af_collab_snapshot CASCADE;
DROP FUNCTION IF EXISTS check_and_delete_snapshots;
"#;
client.batch_execute(sql).await.unwrap();
client
.batch_execute("DROP TABLE IF EXISTS af_migration_history")
.await
.unwrap();
}
#[cfg(test)]
mod tests {
use tokio_postgres::NoTls;
use flowy_server_config::supabase_config::PostgresConfiguration;
use crate::supabase::migration::run_initial_drop;
// ‼️‼️‼️ Warning: this test will create a table in the database
#[tokio::test]
async fn test_postgres_db() -> Result<(), anyhow::Error> {
if dotenv::from_filename(".env.test").is_err() {
return Ok(());
}
let configuration = PostgresConfiguration::from_env().unwrap();
let mut config = tokio_postgres::Config::new();
config
.host(&configuration.url)
.user(&configuration.user_name)
.password(&configuration.password)
.port(configuration.port);
// Use https://docs.rs/postgres-openssl/latest/postgres_openssl/ to enable a TLS connection.
let (client, connection) = config.connect(NoTls).await?;
tokio::spawn(async move {
if let Err(e) = connection.await {
tracing::error!("postgres db connection error: {}", e);
}
});
#[cfg(debug_assertions)]
{
run_initial_drop(&client).await;
}
Ok(())
}
}

View File

@ -1,26 +0,0 @@
DROP TABLE IF EXISTS af_user;
DROP TABLE IF EXISTS af_workspace;
DROP TABLE IF EXISTS af_user_profile;
DROP TABLE IF EXISTS af_collab;
DROP VIEW IF EXISTS af_collab_state;
DROP TABLE IF EXISTS af_collab_snapshot;
DROP TABLE IF EXISTS af_collab_statistics;
DROP TRIGGER IF EXISTS create_af_user_profile_trigger ON af_user_profile CASCADE;
DROP FUNCTION IF EXISTS create_af_user_profile_trigger_func;
DROP TRIGGER IF EXISTS create_af_workspace_trigger ON af_workspace CASCADE;
DROP FUNCTION IF EXISTS create_af_workspace_trigger_func;
DROP TRIGGER IF EXISTS af_collab_insert_trigger ON af_collab CASCADE;
DROP FUNCTION IF EXISTS increment_af_collab_update_count;
DROP TRIGGER IF EXISTS af_collab_snapshot_update_edit_count_trigger ON af_collab_snapshot;
DROP FUNCTION IF EXISTS af_collab_snapshot_update_edit_count;
DROP TRIGGER IF EXISTS check_and_delete_snapshots_trigger ON af_collab_snapshot CASCADE;
DROP FUNCTION IF EXISTS check_and_delete_snapshots;
DROP TRIGGER IF EXISTS new_af_collab_row_trigger ON af_collab CASCADE;
DROP FUNCTION IF EXISTS notify_on_insert_af_collab;

View File

@ -1,141 +0,0 @@
-- user table
CREATE TABLE IF NOT EXISTS af_user (
uuid UUID PRIMARY KEY,
email TEXT DEFAULT '',
uid BIGINT GENERATED ALWAYS AS IDENTITY,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- user profile table
CREATE TABLE IF NOT EXISTS af_user_profile (
uid BIGINT PRIMARY KEY,
uuid UUID,
name TEXT,
email TEXT,
workspace_id UUID DEFAULT uuid_generate_v4()
);
-- user_profile trigger
CREATE OR REPLACE FUNCTION create_af_user_profile_trigger_func() RETURNS TRIGGER AS $$ BEGIN
INSERT INTO af_user_profile (uid, uuid, email)
VALUES (NEW.uid, NEW.uuid, NEW.email);
RETURN NEW;
END $$ LANGUAGE plpgsql;
CREATE TRIGGER create_af_user_profile_trigger BEFORE
INSERT ON af_user FOR EACH ROW EXECUTE FUNCTION create_af_user_profile_trigger_func();
-- workspace table
CREATE TABLE IF NOT EXISTS af_workspace (
workspace_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
uid BIGINT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
workspace_name TEXT DEFAULT 'My Workspace'
);
-- workspace trigger
CREATE OR REPLACE FUNCTION create_af_workspace_trigger_func() RETURNS TRIGGER AS $$ BEGIN
INSERT INTO af_workspace (uid, workspace_id)
VALUES (NEW.uid, NEW.workspace_id);
RETURN NEW;
END $$ LANGUAGE plpgsql;
CREATE TRIGGER create_af_workspace_trigger BEFORE
INSERT ON af_user_profile FOR EACH ROW EXECUTE FUNCTION create_af_workspace_trigger_func();
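-- Net effect of the two triggers above (illustrative): a single
--   INSERT INTO af_user (uuid) VALUES (uuid_generate_v4());
-- fires create_af_user_profile_trigger, whose INSERT INTO af_user_profile in
-- turn fires create_af_workspace_trigger, so one statement creates the user,
-- its profile, and a default workspace.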
-- collab table.
CREATE TABLE IF NOT EXISTS af_collab (
oid TEXT NOT NULL,
name TEXT DEFAULT '',
key BIGINT GENERATED ALWAYS AS IDENTITY,
value BYTEA NOT NULL,
value_size INTEGER,
uid BIGINT NOT NULL,
md5 TEXT DEFAULT '',
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (oid, key)
);
-- collab pg notify trigger. It will notify the frontend when a new row is inserted in the af_collab table.
CREATE OR REPLACE FUNCTION notify_on_insert_af_collab() RETURNS trigger AS $$
BEGIN
-- use pg_notify to send a notification
PERFORM pg_notify('new_row_in_af_collab', NEW.oid::text);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER new_af_collab_row_trigger
AFTER INSERT ON af_collab
FOR EACH ROW EXECUTE PROCEDURE notify_on_insert_af_collab();
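-- Illustrative subscription: any session can receive these notifications with
--   LISTEN new_row_in_af_collab;
-- after which every INSERT into af_collab delivers the new row's oid as the
-- notification payload.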
-- collab statistics. It will be used to store the edit_count of the collab.
CREATE TABLE IF NOT EXISTS af_collab_statistics (
oid TEXT PRIMARY KEY,
edit_count BIGINT NOT NULL DEFAULT 0
);
-- collab statistics trigger. It will increment the edit_count of the collab when a new row is inserted in the af_collab table.
CREATE OR REPLACE FUNCTION increment_af_collab_edit_count() RETURNS TRIGGER AS $$ BEGIN IF EXISTS(
SELECT 1
FROM af_collab_statistics
WHERE oid = NEW.oid
) THEN
UPDATE af_collab_statistics
SET edit_count = edit_count + 1
WHERE oid = NEW.oid;
ELSE
INSERT INTO af_collab_statistics (oid, edit_count)
VALUES (NEW.oid, 1);
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_collab_insert_trigger
AFTER
INSERT ON af_collab FOR EACH ROW EXECUTE FUNCTION increment_af_collab_edit_count();
-- collab snapshot. It will be used to store the snapshots of the collab.
CREATE TABLE IF NOT EXISTS af_collab_snapshot (
sid BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
oid TEXT NOT NULL,
name TEXT DEFAULT '',
blob BYTEA NOT NULL,
blob_size INTEGER NOT NULL,
edit_count BIGINT NOT NULL DEFAULT 0,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- auto insert edit_count in the snapshot table.
CREATE OR REPLACE FUNCTION af_collab_snapshot_update_edit_count() RETURNS TRIGGER AS $$ BEGIN NEW.edit_count := (
SELECT COALESCE(edit_count, 0)
FROM af_collab_statistics
WHERE oid = NEW.oid
);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_collab_snapshot_update_edit_count_trigger AFTER
INSERT ON af_collab_snapshot FOR EACH ROW EXECUTE FUNCTION af_collab_snapshot_update_edit_count();
-- collab snapshot trigger. It deletes the oldest snapshots when an object has more than 20 of them.
-- Alternatively, this cleanup could run periodically via the PG_CRON extension.
CREATE OR REPLACE FUNCTION check_and_delete_snapshots() RETURNS TRIGGER AS $$
DECLARE row_count INT;
BEGIN
SELECT COUNT(*) INTO row_count
FROM af_collab_snapshot
WHERE oid = NEW.oid;
IF row_count > 20 THEN
DELETE FROM af_collab_snapshot
WHERE sid IN (
SELECT sid
FROM af_collab_snapshot
WHERE created_at < NOW() - INTERVAL '10 days'
AND oid = NEW.oid
ORDER BY created_at ASC
LIMIT row_count - 20
);
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER check_and_delete_snapshots_trigger
AFTER
INSERT
OR
UPDATE ON af_collab_snapshot FOR EACH ROW EXECUTE FUNCTION check_and_delete_snapshots();
-- collab state view. It will be used to get the current state of the collab.
CREATE VIEW af_collab_state AS
SELECT a.oid,
a.created_at AS snapshot_created_at,
a.edit_count AS snapshot_edit_count,
b.edit_count AS current_edit_count
FROM af_collab_snapshot AS a
JOIN af_collab_statistics AS b ON a.oid = b.oid;
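-- Example query (illustrative): compare the latest snapshot of an object
-- against its live edit count:
--   SELECT * FROM af_collab_state
--   WHERE oid = 'some-oid'
--   ORDER BY snapshot_created_at DESC
--   LIMIT 1;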

View File

@ -1,10 +1,8 @@
pub use server::*;
mod entities;
pub mod impls;
mod postgres_db;
mod sql_builder;
// mod postgres_http;
mod migration;
mod queue;
// pub mod storage_impls;
pub mod define;
// mod queue;
pub mod api;
mod server;

View File

@ -1,126 +0,0 @@
use std::cmp::Ordering;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use deadpool_postgres::{Manager, ManagerConfig, Object, Pool, RecyclingMethod};
use tokio_postgres::NoTls;
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_server_config::supabase_config::PostgresConfiguration;
use crate::supabase::migration::run_migrations;
use crate::supabase::queue::RequestPayload;
pub type PostgresObject = Object;
pub struct PostgresDB {
pub configuration: PostgresConfiguration,
pub client: Arc<Pool>,
}
impl PostgresDB {
#[allow(dead_code)]
pub async fn from_env() -> Result<Self, anyhow::Error> {
let configuration = PostgresConfiguration::from_env()?;
Self::new(configuration).await
}
pub async fn new(configuration: PostgresConfiguration) -> Result<Self, anyhow::Error> {
// TODO(nathan): Handling connection surges using
// https://supabase.com/blog/supabase-pgbouncer
// https://supabase.com/docs/guides/database/connecting-to-postgres
let mut pg_config = tokio_postgres::Config::new();
pg_config
.host(&configuration.url)
.user(&configuration.user_name)
.password(&configuration.password)
.port(configuration.port);
let mgr_config = ManagerConfig {
recycling_method: RecyclingMethod::Fast,
};
// Use https://docs.rs/postgres-openssl/latest/postgres_openssl/ to enable a TLS connection.
let mgr = Manager::from_config(pg_config, NoTls, mgr_config);
let pool = Pool::builder(mgr).max_size(16).build()?;
let mut client = pool.get().await?;
// Run migrations
run_migrations(&mut client).await?;
Ok(Self {
configuration,
client: Arc::new(pool),
})
}
}
pub type PgClientSender = tokio::sync::mpsc::Sender<PostgresObject>;
pub struct PgClientReceiver(pub tokio::sync::mpsc::Receiver<PostgresObject>);
impl PgClientReceiver {
pub async fn recv(&mut self) -> FlowyResult<PostgresObject> {
match self.0.recv().await {
None => Err(FlowyError::new(
ErrorCode::PgConnectError,
"Can't connect to the postgres db".to_string(),
)),
Some(object) => Ok(object),
}
}
}
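// A minimal usage sketch (assuming a `server` value that exposes the
// `get_pg_client` queue defined later in this crate): request a client, then
// await the hand-over.
//
//   let mut receiver: PgClientReceiver = server.get_pg_client().await;
//   let client: PostgresObject = receiver.recv().await?;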
#[derive(Clone)]
pub enum PostgresEvent {
ConnectDB,
/// The ID is used to order the events within the priority queue.
/// The sender is used to return the PostgresObject to the requester.
/// Currently, the sender is invoked only after the previous PostgresObject has been processed.
/// As a future optimization, the [GetPgClient] events could be batched using the [Pool].
GetPgClient {
id: u32,
sender: PgClientSender,
},
}
impl Debug for PostgresEvent {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
PostgresEvent::ConnectDB => f.write_str("ConnectDB"),
PostgresEvent::GetPgClient { id, .. } => f.write_fmt(format_args!("GetPgClient({})", id)),
}
}
}
impl Ord for PostgresEvent {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(PostgresEvent::ConnectDB, PostgresEvent::ConnectDB) => Ordering::Equal,
(PostgresEvent::ConnectDB, PostgresEvent::GetPgClient { .. }) => Ordering::Greater,
(PostgresEvent::GetPgClient { .. }, PostgresEvent::ConnectDB) => Ordering::Less,
(PostgresEvent::GetPgClient { id: id1, .. }, PostgresEvent::GetPgClient { id: id2, .. }) => {
id1.cmp(id2).reverse()
},
}
}
}
impl Eq for PostgresEvent {}
impl PartialEq<Self> for PostgresEvent {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(PostgresEvent::ConnectDB, PostgresEvent::ConnectDB) => true,
(PostgresEvent::GetPgClient { id: id1, .. }, PostgresEvent::GetPgClient { id: id2, .. }) => {
id1 == id2
},
_ => false,
}
}
}
impl PartialOrd<Self> for PostgresEvent {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl RequestPayload for PostgresEvent {}
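// A sketch of what the ordering above implies, assuming the request queue is
// backed by a max-heap such as std::collections::BinaryHeap: ConnectDB pops
// before any GetPgClient, and GetPgClient events pop in ascending `id` order
// (i.e. FIFO).
//
//   let (tx, _rx) = tokio::sync::mpsc::channel(1);
//   let mut heap = std::collections::BinaryHeap::new();
//   heap.push(PostgresEvent::GetPgClient { id: 1, sender: tx.clone() });
//   heap.push(PostgresEvent::ConnectDB);
//   heap.push(PostgresEvent::GetPgClient { id: 0, sender: tx });
//   // pop order: ConnectDB, GetPgClient(0), GetPgClient(1)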

View File

@ -1,3 +0,0 @@
mod postgres_conn;
mod request;
mod response;

View File

@ -1,21 +0,0 @@
use std::sync::Arc;
use postgrest::Postgrest;
use crate::supabase::SupabaseConfiguration;
pub struct PostgresHttp {
pub postgres: Arc<Postgrest>,
}
impl PostgresHttp {
pub fn new(config: SupabaseConfiguration) -> Self {
let url = format!("{}/rest/v1/", config.url);
let auth = format!("Bearer {}", config.key);
let postgrest = Postgrest::new(url)
.insert_header("apikey", config.key)
.insert_header("Authorization", auth);
let postgres = Arc::new(postgrest);
Self { postgres }
}
}

View File

@ -1,248 +0,0 @@
use std::sync::Arc;
use postgrest::Postgrest;
use serde_json::json;
use uuid::Uuid;
use flowy_error::{ErrorCode, FlowyError};
use flowy_folder2::deps::Workspace;
use flowy_user::entities::UpdateUserProfileParams;
use crate::supabase::entities::{
GetUserProfileParams, UserProfileResponse, UserProfileResponseList,
};
use crate::supabase::impls::{
USER_PROFILE_TABLE, USER_TABLE, WORKSPACE_NAME_COLUMN, WORKSPACE_TABLE,
};
use crate::supabase::postgres_http::response::{InsertResponse, PostgrestError, UserWorkspaceList};
const USER_ID: &str = "uid";
const USER_UUID: &str = "uuid";
pub(crate) async fn create_user_with_uuid(
postgrest: Arc<Postgrest>,
uuid: Uuid,
) -> Result<UserProfileResponse, FlowyError> {
let mut insert = serde_json::Map::new();
insert.insert(USER_UUID.to_string(), json!(&uuid.to_string()));
let insert_query = serde_json::to_string(&insert).unwrap();
// Create a new user with uuid.
let resp = postgrest
.from(USER_TABLE)
.insert(insert_query)
.execute()
.await
.map_err(|e| FlowyError::new(ErrorCode::HttpError, e))?;
// Check if the request is successful. If it is, get the user id from the
// response. Otherwise, if the error is a unique violation, try to get the
// user id with the uuid.
let is_success = resp.status().is_success();
let content = resp
.text()
.await
.map_err(|e| FlowyError::new(ErrorCode::UnexpectedEmpty, e))?;
if is_success {
let record = serde_json::from_str::<InsertResponse>(&content)
.map_err(|e| FlowyError::serde().context(e))?
.first_or_error()?;
get_user_profile(postgrest, GetUserProfileParams::Uid(record.uid)).await
} else {
let err = serde_json::from_str::<PostgrestError>(&content)
.map_err(|e| FlowyError::serde().context(e))?;
// If there is a unique violation, try to get the user id with uuid. At this point, the user
// should exist.
if err.is_unique_violation() {
match get_user_profile(postgrest, GetUserProfileParams::Uuid(uuid)).await {
Ok(user) => Ok(user),
_ => Err(FlowyError::new(
ErrorCode::Internal,
"Failed to get user workspace",
)),
}
} else {
Err(FlowyError::new(ErrorCode::Internal, err))
}
}
}
#[allow(dead_code)]
pub(crate) async fn get_user_id_with_uuid(
postgrest: Arc<Postgrest>,
uuid: String,
) -> Result<Option<i64>, FlowyError> {
let resp = postgrest
.from(USER_TABLE)
.eq(USER_UUID, uuid)
.select("*")
.execute()
.await
.map_err(|e| FlowyError::new(ErrorCode::HttpError, e))?;
let is_success = resp.status().is_success();
if !is_success {
return Err(FlowyError::new(
ErrorCode::Internal,
"Failed to get user id with uuid",
));
}
let content = resp
.text()
.await
.map_err(|e| FlowyError::new(ErrorCode::UnexpectedEmpty, e))?;
let resp = serde_json::from_str::<InsertResponse>(&content).unwrap();
if resp.0.is_empty() {
Ok(None)
} else {
Ok(Some(resp.0[0].uid))
}
}
pub(crate) async fn get_user_profile(
postgrest: Arc<Postgrest>,
params: GetUserProfileParams,
) -> Result<UserProfileResponse, FlowyError> {
let mut builder = postgrest.from(USER_PROFILE_TABLE);
match params {
GetUserProfileParams::Uid(uid) => builder = builder.eq(USER_ID, uid.to_string()),
GetUserProfileParams::Uuid(uuid) => builder = builder.eq(USER_UUID, uuid.to_string()),
}
let resp = builder
.select("*")
.execute()
.await
.map_err(|e| FlowyError::new(ErrorCode::HttpError, e))?;
let content = resp
.text()
.await
.map_err(|e| FlowyError::new(ErrorCode::UnexpectedEmpty, e))?;
let mut user_profiles =
serde_json::from_str::<UserProfileResponseList>(&content).map_err(|_e| {
FlowyError::new(
ErrorCode::Serde,
"Deserialize UserProfileResponseList failed",
)
})?;
if user_profiles.0.is_empty() {
return Err(FlowyError::new(
ErrorCode::Internal,
"Failed to get user profile",
));
}
Ok(user_profiles.0.remove(0))
}
pub(crate) async fn create_workspace_with_uid(
postgrest: Arc<Postgrest>,
uid: i64,
name: &str,
) -> Result<Workspace, FlowyError> {
let mut insert = serde_json::Map::new();
insert.insert(USER_ID.to_string(), json!(uid));
insert.insert(WORKSPACE_NAME_COLUMN.to_string(), json!(name));
let insert_query = serde_json::to_string(&insert).unwrap();
let resp = postgrest
.from(WORKSPACE_TABLE)
.insert(insert_query)
.execute()
.await
.map_err(|e| FlowyError::new(ErrorCode::HttpError, e))?;
let content = resp
.text()
.await
.map_err(|e| FlowyError::new(ErrorCode::UnexpectedEmpty, e))?;
let mut workspace_list = serde_json::from_str::<UserWorkspaceList>(&content)
.map_err(|_e| FlowyError::new(ErrorCode::Serde, "Deserialize UserWorkspaceList failed"))?
.into_inner();
debug_assert!(workspace_list.len() == 1);
if workspace_list.is_empty() {
return Err(FlowyError::new(
ErrorCode::Internal,
"Failed to create workspace",
));
}
let user_workspace = workspace_list.remove(0);
Ok(Workspace {
id: user_workspace.workspace_id,
name: user_workspace.workspace_name,
child_views: Default::default(),
created_at: user_workspace.created_at.timestamp(),
})
}
#[allow(dead_code)]
pub(crate) async fn get_user_workspace_with_uid(
postgrest: Arc<Postgrest>,
uid: i64,
) -> Result<Vec<Workspace>, FlowyError> {
let resp = postgrest
.from(WORKSPACE_TABLE)
.eq(USER_ID, uid.to_string())
.select("*")
.execute()
.await
.map_err(|e| FlowyError::new(ErrorCode::HttpError, e))?;
let content = resp
.text()
.await
.map_err(|e| FlowyError::new(ErrorCode::UnexpectedEmpty, e))?;
let user_workspaces = serde_json::from_str::<UserWorkspaceList>(&content)
.map_err(|_e| FlowyError::new(ErrorCode::Serde, "Deserialize UserWorkspaceList failed"))?
.0;
Ok(
user_workspaces
.into_iter()
.map(|user_workspace| Workspace {
id: user_workspace.workspace_id,
name: user_workspace.workspace_name,
child_views: Default::default(),
created_at: user_workspace.created_at.timestamp(),
})
.collect(),
)
}
#[allow(dead_code)]
pub(crate) async fn update_user_profile(
postgrest: Arc<Postgrest>,
params: UpdateUserProfileParams,
) -> Result<Option<UserProfileResponse>, FlowyError> {
if params.is_empty() {
return Err(FlowyError::new(
ErrorCode::UnexpectedEmpty,
"Empty update params",
));
}
let mut update = serde_json::Map::new();
if let Some(name) = params.name {
update.insert("name".to_string(), json!(name));
}
let update_str = serde_json::to_string(&update).unwrap();
let resp = postgrest
.from(USER_PROFILE_TABLE)
.eq(USER_ID, params.id.to_string())
.update(update_str)
.execute()
.await
.map_err(|e| FlowyError::new(ErrorCode::HttpError, e))?;
let content = resp
.text()
.await
.map_err(|e| FlowyError::new(ErrorCode::UnexpectedEmpty, e))?;
let resp = serde_json::from_str::<UserProfileResponseList>(&content)
.map_err(|_e| FlowyError::new(ErrorCode::Serde, "Deserialize UserProfileResponseList failed"))?;
Ok(resp.0.first().cloned())
}

View File

@ -1,77 +0,0 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use thiserror::Error;
use flowy_error::{ErrorCode, FlowyError};
use crate::util::deserialize_null_or_default;
#[derive(Debug, Error, Serialize, Deserialize)]
#[error(
"PostgrestException(message: {message}, code: {code:?}, details: {details:?}, hint: {hint:?})"
)]
pub struct PostgrestError {
message: String,
code: String,
details: Value,
hint: Option<String>,
}
impl PostgrestError {
/// Error code 23505 is a PostgreSQL error code. It signifies a "unique_violation", which occurs
/// when a certain unique constraint has been violated.
pub fn is_unique_violation(&self) -> bool {
self.code == "23505"
}
}
#[allow(dead_code)]
#[derive(Debug, Deserialize)]
pub struct PostgrestResponse {
data: Option<String>,
status: i32,
count: Option<i32>,
}
#[derive(Debug, Deserialize)]
pub(crate) struct InsertResponse(pub Vec<InsertRecord>);
impl InsertResponse {
pub(crate) fn first_or_error(&self) -> Result<InsertRecord, FlowyError> {
if self.0.is_empty() {
Err(FlowyError::new(
ErrorCode::UnexpectedEmpty,
"Insert response contains no records",
))
} else {
Ok(self.0[0].clone())
}
}
}
#[derive(Debug, Deserialize, Clone)]
pub(crate) struct InsertRecord {
pub(crate) uid: i64,
#[allow(dead_code)]
pub(crate) uuid: String,
}
#[derive(Debug, Deserialize, Clone)]
pub(crate) struct UserWorkspace {
#[allow(dead_code)]
pub uid: i64,
#[serde(deserialize_with = "deserialize_null_or_default")]
pub workspace_name: String,
pub created_at: DateTime<Utc>,
pub workspace_id: String,
}
#[derive(Debug, Deserialize)]
pub(crate) struct UserWorkspaceList(pub(crate) Vec<UserWorkspace>);
impl UserWorkspaceList {
pub(crate) fn into_inner(self) -> Vec<UserWorkspace> {
self.0
}
}

View File

@ -1,65 +1,84 @@
use std::ops::Deref;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Weak};
use std::time::Duration;
use std::sync::Arc;
use appflowy_integrate::RemoteCollabStorage;
use collab_plugins::cloud_storage::RemoteCollabStorage;
use parking_lot::RwLock;
use tokio::spawn;
use tokio::sync::{watch, Mutex};
use tokio::time::interval;
use flowy_database2::deps::DatabaseCloudService;
use flowy_document2::deps::DocumentCloudService;
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_folder2::deps::FolderCloudService;
use flowy_server_config::supabase_config::{PostgresConfiguration, SupabaseConfiguration};
use flowy_user::event_map::UserAuthService;
use lib_infra::async_trait::async_trait;
use flowy_database_deps::cloud::DatabaseCloudService;
use flowy_document_deps::cloud::DocumentCloudService;
use flowy_folder_deps::cloud::FolderCloudService;
use flowy_server_config::supabase_config::SupabaseConfiguration;
use flowy_user_deps::cloud::UserService;
use crate::supabase::impls::{
PgCollabStorageImpl, SupabaseDatabaseCloudServiceImpl, SupabaseDocumentCloudServiceImpl,
SupabaseFolderCloudServiceImpl, SupabaseUserAuthServiceImpl,
};
use crate::supabase::postgres_db::{PgClientReceiver, PostgresDB, PostgresEvent};
use crate::supabase::queue::{
PendingRequest, RequestHandler, RequestQueue, RequestRunner, RequestState,
use crate::supabase::api::{
RESTfulPostgresServer, RESTfulSupabaseCollabStorageImpl, RESTfulSupabaseDatabaseServiceImpl,
RESTfulSupabaseDocumentServiceImpl, RESTfulSupabaseFolderServiceImpl,
RESTfulSupabaseUserAuthServiceImpl, SupabaseServerServiceImpl,
};
use crate::AppFlowyServer;
/// https://www.pgbouncer.org/features.html
/// Only session mode is supported.
///
/// Session mode:
/// When a new client connects, a connection is assigned to the client until it disconnects. Afterward,
/// the connection is returned to the pool. All PostgreSQL features can be used with this option.
/// For the moment, the default pool size of pgbouncer in supabase is 15 in session mode, which means
/// we can have at most 15 concurrent connections to the database.
///
/// Transaction mode:
/// This is the suggested option for serverless functions. With this, the connection is only assigned
/// to the client for the duration of a transaction. Once done, the connection is returned to the pool.
/// Two consecutive transactions from the same client could be done over two different connections.
/// Some session-based PostgreSQL features, such as prepared statements, are not available with this option.
/// A more comprehensive list of incompatible features can be found here.
///
/// In most cases, session mode is faster than transaction mode, since transaction mode has no
/// statement cache (https://github.com/supabase/supavisor/issues/69) and has to queue transactions.
/// Transaction mode, however, is more suitable for serverless functions, as it reduces the number of
/// concurrent connections to the database.
/// TODO(nathan): fix prepared statement error when using transaction mode. https://github.com/prisma/prisma/issues/11643
///
#[derive(Clone, Debug, Default)]
pub enum PgPoolMode {
#[default]
Session,
Transaction,
}
impl PgPoolMode {
pub fn support_prepare_cached(&self) -> bool {
matches!(self, PgPoolMode::Session)
}
}
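// A minimal sketch (hypothetical call site) of how this flag can gate
// statement caching, since transaction-mode pooling breaks prepared statements:
//
//   let stmt = if mode.support_prepare_cached() {
//     client.prepare_cached(&sql).await?
//   } else {
//     client.prepare(&sql).await?
//   };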
/// Supabase server is used to provide the implementation of the [AppFlowyServer] trait.
/// It contains the configuration of the supabase server and the postgres server.
pub struct SupabaseServer {
#[allow(dead_code)]
config: SupabaseConfiguration,
postgres: Arc<RwLock<Option<Arc<PostgresServer>>>>,
restful_postgres: Arc<RwLock<Option<Arc<RESTfulPostgresServer>>>>,
}
impl SupabaseServer {
pub fn new(config: SupabaseConfiguration) -> Self {
let postgres = if config.enable_sync {
Some(Arc::new(PostgresServer::new(
config.postgres_config.clone(),
)))
let restful_postgres = if config.enable_sync {
Some(Arc::new(RESTfulPostgresServer::new(config.clone())))
} else {
None
};
Self {
config,
postgres: Arc::new(RwLock::new(postgres)),
restful_postgres: Arc::new(RwLock::new(restful_postgres)),
}
}
pub fn set_enable_sync(&self, enable: bool) {
if enable {
if self.postgres.read().is_some() {
if self.restful_postgres.read().is_some() {
return;
}
*self.postgres.write() = Some(Arc::new(PostgresServer::new(
self.config.postgres_config.clone(),
)));
*self.restful_postgres.write() =
Some(Arc::new(RESTfulPostgresServer::new(self.config.clone())));
} else {
*self.postgres.write() = None;
*self.restful_postgres.write() = None;
}
}
}
@ -70,194 +89,33 @@ impl AppFlowyServer for SupabaseServer {
self.set_enable_sync(enable);
}
fn user_service(&self) -> Arc<dyn UserAuthService> {
Arc::new(SupabaseUserAuthServiceImpl::new(SupabaseServerServiceImpl(
self.postgres.clone(),
)))
fn user_service(&self) -> Arc<dyn UserService> {
Arc::new(RESTfulSupabaseUserAuthServiceImpl::new(
SupabaseServerServiceImpl(self.restful_postgres.clone()),
))
}
fn folder_service(&self) -> Arc<dyn FolderCloudService> {
Arc::new(SupabaseFolderCloudServiceImpl::new(
SupabaseServerServiceImpl(self.postgres.clone()),
Arc::new(RESTfulSupabaseFolderServiceImpl::new(
SupabaseServerServiceImpl(self.restful_postgres.clone()),
))
}
fn database_service(&self) -> Arc<dyn DatabaseCloudService> {
Arc::new(SupabaseDatabaseCloudServiceImpl::new(
SupabaseServerServiceImpl(self.postgres.clone()),
Arc::new(RESTfulSupabaseDatabaseServiceImpl::new(
SupabaseServerServiceImpl(self.restful_postgres.clone()),
))
}
fn document_service(&self) -> Arc<dyn DocumentCloudService> {
Arc::new(SupabaseDocumentCloudServiceImpl::new(
SupabaseServerServiceImpl(self.postgres.clone()),
Arc::new(RESTfulSupabaseDocumentServiceImpl::new(
SupabaseServerServiceImpl(self.restful_postgres.clone()),
))
}
fn collab_storage(&self) -> Option<Arc<dyn RemoteCollabStorage>> {
Some(Arc::new(PgCollabStorageImpl::new(
SupabaseServerServiceImpl(self.postgres.clone()),
Some(Arc::new(RESTfulSupabaseCollabStorageImpl::new(
SupabaseServerServiceImpl(self.restful_postgres.clone()),
)))
}
}
/// [SupabaseServerService] is used to provide supabase services. The caller can use this trait
/// to get the services, and it might need to handle the situation where the services are
/// unavailable, for example, when the user stops syncing or logs out.
pub trait SupabaseServerService: Send + Sync + 'static {
fn get_pg_server(&self) -> Option<Weak<PostgresServer>>;
fn try_get_pg_server(&self) -> FlowyResult<Weak<PostgresServer>>;
}
#[derive(Clone)]
pub struct SupabaseServerServiceImpl(pub Arc<RwLock<Option<Arc<PostgresServer>>>>);
impl SupabaseServerService for SupabaseServerServiceImpl {
/// Get the postgres server; if it is not available, return None.
fn get_pg_server(&self) -> Option<Weak<PostgresServer>> {
self.0.read().as_ref().map(Arc::downgrade)
}
/// Try to get the postgres server; if it is not available, return an error.
fn try_get_pg_server(&self) -> FlowyResult<Weak<PostgresServer>> {
self.0.read().as_ref().map(Arc::downgrade).ok_or_else(|| {
FlowyError::new(
ErrorCode::SupabaseSyncRequired,
"Supabase sync is disabled, please enable it first",
)
})
}
}
pub struct PostgresServer {
request_handler: Arc<PostgresRequestHandler>,
}
impl Deref for PostgresServer {
type Target = Arc<PostgresRequestHandler>;
fn deref(&self) -> &Self::Target {
&self.request_handler
}
}
impl PostgresServer {
pub fn new(config: PostgresConfiguration) -> Self {
let (runner_notifier_tx, runner_notifier) = watch::channel(false);
let request_handler = Arc::new(PostgresRequestHandler::new(runner_notifier_tx, config));
// Initialize the connection to the database
let conn = PendingRequest::new(PostgresEvent::ConnectDB);
request_handler.queue.lock().push(conn);
let handler = Arc::downgrade(&request_handler) as Weak<dyn RequestHandler<PostgresEvent>>;
spawn(RequestRunner::run(runner_notifier, handler));
Self { request_handler }
}
}
pub struct PostgresRequestHandler {
config: PostgresConfiguration,
db: Arc<Mutex<Option<Arc<PostgresDB>>>>,
queue: parking_lot::Mutex<RequestQueue<PostgresEvent>>,
runner_notifier: Arc<watch::Sender<bool>>,
sequence: AtomicU32,
}
impl PostgresRequestHandler {
pub fn new(runner_notifier: watch::Sender<bool>, config: PostgresConfiguration) -> Self {
let db = Arc::new(Default::default());
let queue = parking_lot::Mutex::new(RequestQueue::new());
let runner_notifier = Arc::new(runner_notifier);
Self {
db,
queue,
runner_notifier,
config,
sequence: Default::default(),
}
}
pub async fn get_pg_client(&self) -> PgClientReceiver {
let (tx, rx) = tokio::sync::mpsc::channel(1);
let mut queue = self.queue.lock();
let event = PostgresEvent::GetPgClient {
id: self.sequence.fetch_add(1, Ordering::SeqCst),
sender: tx,
};
let request = PendingRequest::new(event);
queue.push(request);
self.notify();
PgClientReceiver(rx)
}
}
#[async_trait]
impl RequestHandler<PostgresEvent> for PostgresRequestHandler {
async fn prepare_request(&self) -> Option<PendingRequest<PostgresEvent>> {
match self.queue.try_lock() {
None => {
// If acquiring the lock fails, retry after 300ms
let weak_notifier = Arc::downgrade(&self.runner_notifier);
spawn(async move {
tokio::time::sleep(Duration::from_millis(300)).await;
if let Some(notifier) = weak_notifier.upgrade() {
let _ = notifier.send(false);
}
});
None
},
Some(queue) => queue.peek().cloned(),
}
}
async fn handle_request(&self, request: PendingRequest<PostgresEvent>) -> Option<()> {
debug_assert!(Some(&request) == self.queue.lock().peek());
match request.payload {
PostgresEvent::ConnectDB => {
let is_connected = self.db.lock().await.is_some();
if is_connected {
tracing::warn!("Already connect to postgres db");
} else {
tracing::info!("Start connecting to postgres db");
match PostgresDB::new(self.config.clone()).await {
Ok(db) => {
*self.db.lock().await = Some(Arc::new(db));
if let Some(mut request) = self.queue.lock().pop() {
request.set_state(RequestState::Done);
}
},
Err(e) => tracing::error!("Error connecting to the postgres db: {}", e),
}
}
},
PostgresEvent::GetPgClient { id: _, sender } => {
match self.db.lock().await.as_ref().map(|db| db.client.clone()) {
None => tracing::error!("Can't get the postgres client"),
Some(pool) => {
match pool.get().await {
Ok(object) => {
if let Err(e) = sender.send(object).await {
tracing::error!("Error sending the postgres client: {}", e);
}
},
Err(e) => tracing::error!("Get postgres client failed: {}", e),
}
if let Some(mut request) = self.queue.lock().pop() {
request.set_state(RequestState::Done);
}
},
}
},
}
None
}
fn notify(&self) {
let _ = self.runner_notifier.send(false);
}
}

View File

@ -1,308 +0,0 @@
use tokio_postgres::types::ToSql;
pub struct UpdateSqlBuilder {
table: String,
sets: Vec<(String, Box<dyn ToSql + Sync + Send>)>,
where_clause: Option<(String, Box<dyn ToSql + Sync + Send>)>,
}
impl UpdateSqlBuilder {
pub fn new(table: &str) -> Self {
Self {
table: table.to_string(),
sets: Vec::new(),
where_clause: None,
}
}
pub fn set<T: 'static + ToSql + Sync + Send>(mut self, column: &str, value: Option<T>) -> Self {
if let Some(value) = value {
self.sets.push((column.to_string(), Box::new(value)));
}
self
}
pub fn where_clause<T: 'static + ToSql + Sync + Send>(mut self, clause: &str, value: T) -> Self {
self.where_clause = Some((clause.to_string(), Box::new(value)));
self
}
pub fn build(self) -> (String, Vec<Box<dyn ToSql + Sync + Send>>) {
let mut sql = format!("UPDATE {} SET ", self.table);
for i in 0..self.sets.len() {
if i > 0 {
sql.push_str(", ");
}
sql.push_str(&format!("{} = ${}", self.sets[i].0, i + 1));
}
let mut params: Vec<_> = self.sets.into_iter().map(|(_, value)| value).collect();
if let Some((clause, value)) = self.where_clause {
sql.push_str(&format!(" WHERE {} = ${}", clause, params.len() + 1));
params.push(value);
}
(sql, params)
}
}
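// Illustrative output of the builder above:
//   UpdateSqlBuilder::new("af_user_profile")
//     .set("name", Some("alice"))
//     .set("email", None::<String>)
//     .where_clause("uid", 42i64)
//     .build()
// yields "UPDATE af_user_profile SET name = $1 WHERE uid = $2" plus the two
// parameters; the unset email column is skipped.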
pub struct SelectSqlBuilder {
table: String,
columns: Vec<String>,
where_clause: Option<(String, Box<dyn ToSql + Sync + Send>)>,
where_clause_in: Option<(String, Vec<Box<dyn ToSql + Sync + Send>>)>,
group_by_column: Option<String>,
order_by: Option<(String, bool)>,
limit: Option<i64>,
lock: bool,
array_agg_columns: Vec<String>,
}
impl SelectSqlBuilder {
pub fn new(table: &str) -> Self {
Self {
table: table.to_string(),
columns: Vec::new(),
where_clause: None,
where_clause_in: None,
group_by_column: None,
order_by: None,
limit: None,
lock: false,
array_agg_columns: vec![],
}
}
pub fn lock(mut self) -> Self {
self.lock = true;
self
}
pub fn column(mut self, column: &str) -> Self {
self.columns.push(column.to_string());
self
}
pub fn group_by(mut self, column: &str) -> Self {
self.group_by_column = Some(column.to_string());
self
}
pub fn array_agg(mut self, column: &str) -> Self {
self.array_agg_columns.push(column.to_string());
self
}
pub fn order_by(mut self, column: &str, asc: bool) -> Self {
self.order_by = Some((column.to_string(), asc));
self
}
pub fn where_clause<T: 'static + ToSql + Sync + Send>(mut self, clause: &str, value: T) -> Self {
self.where_clause = Some((clause.to_string(), Box::new(value)));
self
}
pub fn where_clause_in<T: 'static + ToSql + Sync + Send>(
mut self,
clause: &str,
values: Vec<T>,
) -> Self {
let boxed_values: Vec<_> = values
.into_iter()
.map(|value| Box::new(value) as Box<dyn ToSql + Send + Sync>)
.collect();
self.where_clause_in = Some((clause.to_string(), boxed_values));
self
}
pub fn limit(mut self, limit: i64) -> Self {
self.limit = Some(limit);
self
}
pub fn build(self) -> (String, Vec<Box<dyn ToSql + Sync + Send>>) {
let all_columns = self
.columns
.iter()
.chain(self.array_agg_columns.iter())
.cloned()
.collect::<Vec<_>>()
.join(", ");
let mut sql = format!("SELECT {} FROM {}", all_columns, self.table);
let mut params: Vec<_> = Vec::new();
if let Some((clause, value)) = self.where_clause {
sql.push_str(&format!(" WHERE {} = ${}", clause, params.len() + 1));
params.push(value);
}
if let Some((clause, values)) = self.where_clause_in {
let placeholders: Vec<String> = values
.iter()
.enumerate()
.map(|(i, _)| format!("${}", i + 1))
.collect();
sql.push_str(&format!(
" WHERE {} IN ({})",
clause,
placeholders.join(",")
));
params.extend(values);
}
if let Some(group_by_column) = self.group_by_column {
sql.push_str(&format!(" GROUP BY {}", group_by_column));
}
if let Some((order_by_column, asc)) = self.order_by {
let order = if asc { "ASC" } else { "DESC" };
sql.push_str(&format!(" ORDER BY {} {}", order_by_column, order));
}
// ARRAY_AGG is an aggregate function that collects the values of a column
// into an array.
for array_agg_column in self.array_agg_columns {
sql = sql.replace(
&array_agg_column,
&format!("ARRAY_AGG({}) as {}", array_agg_column, array_agg_column),
);
}
if let Some(limit) = self.limit {
sql.push_str(&format!(" LIMIT {}", limit));
}
if self.lock {
sql.push_str(" FOR UPDATE");
}
(sql, params)
}
}
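// Illustrative output of the builder above:
//   SelectSqlBuilder::new("af_collab")
//     .column("oid")
//     .array_agg("value")
//     .where_clause("oid", "doc0")
//     .group_by("oid")
//     .build()
// yields "SELECT oid, ARRAY_AGG(value) as value FROM af_collab
//         WHERE oid = $1 GROUP BY oid".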
pub struct InsertSqlBuilder {
table: String,
columns: Vec<String>,
values: Vec<Box<(dyn ToSql + Sync + Send + 'static)>>,
override_system_value: bool,
returning: Vec<String>, // Vec for returning multiple columns
}
impl InsertSqlBuilder {
pub fn new(table: &str) -> Self {
Self {
table: table.to_string(),
columns: Vec::new(),
values: Vec::new(),
override_system_value: false,
returning: vec![],
}
}
pub fn value<T: ToSql + Sync + Send + 'static>(mut self, column: &str, value: T) -> Self {
self.columns.push(column.to_string());
self.values.push(Box::new(value));
self
}
pub fn overriding_system_value(mut self) -> Self {
self.override_system_value = true;
self
}
pub fn returning(mut self, column: &str) -> Self {
// add column to return
self.returning.push(column.to_string());
self
}
pub fn build(self) -> (String, Vec<Box<(dyn ToSql + Sync + Send)>>) {
let mut query = format!("INSERT INTO {} (", self.table);
query.push_str(&self.columns.join(", "));
query.push(')');
if self.override_system_value {
query.push_str(" OVERRIDING SYSTEM VALUE");
}
query.push_str(" VALUES (");
query.push_str(
&(0..self.columns.len())
.map(|i| format!("${}", i + 1))
.collect::<Vec<_>>()
.join(", "),
);
query.push(')');
if !self.returning.is_empty() {
// add RETURNING clause if there are columns to return
query.push_str(&format!(" RETURNING {}", self.returning.join(", ")));
}
(query, self.values)
}
}
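// Illustrative output of the builder above (uuid and email are hypothetical
// local values):
//   InsertSqlBuilder::new("af_user")
//     .value("uuid", uuid)
//     .value("email", email)
//     .returning("uid")
//     .build()
// yields "INSERT INTO af_user (uuid, email) VALUES ($1, $2) RETURNING uid".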
pub enum WhereCondition {
Equals(String, Box<dyn ToSql + Sync + Send>),
In(String, Vec<Box<dyn ToSql + Sync + Send>>),
}
pub struct DeleteSqlBuilder {
table: String,
conditions: Vec<WhereCondition>,
}
impl DeleteSqlBuilder {
pub fn new(table: &str) -> Self {
Self {
table: table.to_string(),
conditions: Vec::new(),
}
}
pub fn where_condition(mut self, condition: WhereCondition) -> Self {
self.conditions.push(condition);
self
}
pub fn build(self) -> (String, Vec<Box<dyn ToSql + Sync + Send>>) {
let mut sql = format!("DELETE FROM {}", self.table);
let mut params: Vec<Box<dyn ToSql + Sync + Send>> = Vec::new();
if !self.conditions.is_empty() {
sql.push_str(" WHERE ");
let condition_len = self.conditions.len();
for (i, condition) in self.conditions.into_iter().enumerate() {
match condition {
WhereCondition::Equals(column, value) => {
sql.push_str(&format!(
"{} = ${}{}",
column,
params.len() + 1,
if i < condition_len - 1 { " AND " } else { "" },
));
params.push(value);
},
WhereCondition::In(column, values) => {
let placeholders: Vec<String> = (1..=values.len())
.map(|i| format!("${}", i + params.len()))
.collect();
sql.push_str(&format!(
"{} IN ({}){}",
column,
placeholders.join(", "),
if i < condition_len - 1 { " AND " } else { "" },
));
params.extend(values);
},
}
}
}
(sql, params)
}
}
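// Illustrative output of the builder above; note how the IN placeholders
// continue the numbering after the preceding condition:
//   DeleteSqlBuilder::new("af_collab")
//     .where_condition(WhereCondition::Equals("oid".into(), Box::new("doc0")))
//     .where_condition(WhereCondition::In("key".into(), vec![Box::new(1i64), Box::new(2i64)]))
//     .build()
// yields "DELETE FROM af_collab WHERE oid = $1 AND key IN ($2, $3)".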

View File

@ -9,7 +9,7 @@ mod supabase_test;
pub fn setup_log() {
static START: Once = Once::new();
START.call_once(|| {
let level = "trace";
let level = "debug";
let mut filters = vec![];
filters.push(format!("flowy_server={}", level));
std::env::set_var("RUST_LOG", filters.join(","));

View File

@ -0,0 +1,53 @@
use crate::supabase_test::util::{
collab_service, database_service, get_supabase_config, sign_up_param, user_auth_service,
};
use collab_plugins::cloud_storage::{CollabObject, CollabType};
use flowy_user_deps::entities::SignUpResponse;
use lib_infra::box_any::BoxAny;
use uuid::Uuid;
#[tokio::test]
async fn supabase_create_workspace_test() {
if get_supabase_config().is_none() {
return;
}
let user_service = user_auth_service();
let uuid = Uuid::new_v4().to_string();
let params = sign_up_param(uuid);
let user: SignUpResponse = user_service.sign_up(BoxAny::new(params)).await.unwrap();
let collab_service = collab_service();
let database_service = database_service();
let mut row_ids = vec![];
for _i in 0..3 {
let row_id = uuid::Uuid::new_v4().to_string();
row_ids.push(row_id.clone());
let collab_object = CollabObject {
id: row_id,
uid: user.user_id,
ty: CollabType::DatabaseRow,
meta: Default::default(),
}
.with_workspace_id(user.latest_workspace.id.clone());
collab_service
.send_update(&collab_object, 0, vec![1, 2, 3])
.await
.unwrap();
collab_service
.send_update(&collab_object, 0, vec![4, 5, 6])
.await
.unwrap();
}
let updates_by_oid = database_service
.batch_get_collab_updates(row_ids, CollabType::DatabaseRow)
.await
.unwrap();
assert_eq!(updates_by_oid.len(), 3);
for (_, update) in updates_by_oid {
assert_eq!(update.len(), 2);
}
}

View File

@ -0,0 +1,100 @@
use crate::supabase_test::util::{
collab_service, folder_service, get_supabase_config, sign_up_param, user_auth_service,
};
use collab_plugins::cloud_storage::{CollabObject, CollabType};
use flowy_user_deps::entities::SignUpResponse;
use futures::future::join_all;
use lib_infra::box_any::BoxAny;
use tokio::task;
use uuid::Uuid;
use yrs::{Doc, Map, ReadTxn, StateVector, Transact};
#[tokio::test]
async fn supabase_create_workspace_test() {
if get_supabase_config().is_none() {
return;
}
let service = folder_service();
// TODO: replace the hard-coded uid with the real uid
let workspace = service.create_workspace(1, "test").await.unwrap();
dbg!(workspace);
}
#[tokio::test]
async fn supabase_get_folder_test() {
if get_supabase_config().is_none() {
return;
}
let folder_service = folder_service();
let user_service = user_auth_service();
let collab_service = collab_service();
let uuid = Uuid::new_v4().to_string();
let params = sign_up_param(uuid);
let user: SignUpResponse = user_service.sign_up(BoxAny::new(params)).await.unwrap();
let collab_object = CollabObject {
id: user.latest_workspace.id.clone(),
uid: user.user_id,
ty: CollabType::Folder,
meta: Default::default(),
}
.with_workspace_id(user.latest_workspace.id.clone());
let doc = Doc::with_client_id(1);
let map = { doc.get_or_insert_map("map") };
{
let mut txn = doc.transact_mut();
map.insert(&mut txn, "1", "a");
collab_service
.send_update(&collab_object, 0, txn.encode_update_v1())
.await
.unwrap();
};
{
let mut txn = doc.transact_mut();
map.insert(&mut txn, "2", "b");
collab_service
.send_update(&collab_object, 1, txn.encode_update_v1())
.await
.unwrap();
};
// let updates = collab_service.get_all_updates(&collab_object).await.unwrap();
let updates = folder_service
.get_folder_updates(&user.latest_workspace.id, user.user_id)
.await
.unwrap();
assert_eq!(updates.len(), 2);
// The init sync will try to merge the updates into one.
let mut handles = Vec::new();
for _ in 0..5 {
let cloned_collab_service = collab_service.clone();
let cloned_collab_object = collab_object.clone();
let handle = task::spawn(async move {
cloned_collab_service
.send_init_sync(&cloned_collab_object, 3, vec![])
.await
.unwrap();
});
handles.push(handle);
}
let _results: Vec<_> = join_all(handles).await;
let remote_update = folder_service
.get_folder_updates(&user.latest_workspace.id, user.user_id)
.await
.unwrap()
.first()
.unwrap()
.clone();
let expected_update = doc
.transact_mut()
.encode_state_as_update_v1(&StateVector::default());
// check the update is the same as local document update.
assert_eq!(remote_update, expected_update);
}

View File

@ -1 +1,4 @@
mod database_test;
mod folder_test;
mod user_test;
mod util;

View File

@ -1,67 +1,51 @@
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use uuid::Uuid;
use flowy_server::supabase::impls::{SupabaseUserAuthServiceImpl, USER_UUID};
use flowy_server::supabase::{PostgresServer, SupabaseServerServiceImpl};
use flowy_server_config::supabase_config::PostgresConfiguration;
use flowy_user::entities::{SignUpResponse, UpdateUserProfileParams};
use flowy_user::event_map::{UserAuthService, UserCredentials};
use flowy_user_deps::entities::*;
use lib_infra::box_any::BoxAny;
use crate::setup_log;
use crate::supabase_test::util::{get_supabase_config, sign_up_param, user_auth_service};
// ‼️‼️‼️ Warning: this test will create a table in the database
#[tokio::test]
async fn user_sign_up_test() {
if dotenv::from_filename("./.env.test").is_err() {
async fn supabase_user_sign_up_test() {
if get_supabase_config().is_none() {
return;
}
let user_service = user_auth_service_impl();
let mut params = HashMap::new();
params.insert(USER_UUID.to_string(), Uuid::new_v4().to_string());
let user_service = user_auth_service();
let uuid = Uuid::new_v4().to_string();
let params = sign_up_param(uuid);
let user: SignUpResponse = user_service.sign_up(BoxAny::new(params)).await.unwrap();
assert!(!user.workspace_id.is_empty());
}
fn user_auth_service_impl() -> SupabaseUserAuthServiceImpl<SupabaseServerServiceImpl> {
let server = Arc::new(PostgresServer::new(
PostgresConfiguration::from_env().unwrap(),
));
let weak_server = SupabaseServerServiceImpl(Arc::new(RwLock::new(Some(server))));
SupabaseUserAuthServiceImpl::new(weak_server)
assert!(!user.latest_workspace.id.is_empty());
assert!(!user.user_workspaces.is_empty());
assert!(!user.latest_workspace.database_storage_id.is_empty());
}
#[tokio::test]
async fn user_sign_up_with_existing_uuid_test() {
if dotenv::from_filename("./.env.test").is_err() {
async fn supabase_user_sign_up_with_existing_uuid_test() {
if get_supabase_config().is_none() {
return;
}
let user_service = user_auth_service_impl();
let uuid = Uuid::new_v4();
let mut params = HashMap::new();
params.insert(USER_UUID.to_string(), uuid.to_string());
let user_service = user_auth_service();
let uuid = Uuid::new_v4().to_string();
let params = sign_up_param(uuid);
let _user: SignUpResponse = user_service
.sign_up(BoxAny::new(params.clone()))
.await
.unwrap();
let user: SignUpResponse = user_service.sign_up(BoxAny::new(params)).await.unwrap();
assert!(!user.workspace_id.is_empty());
assert!(!user.latest_workspace.id.is_empty());
assert!(!user.latest_workspace.database_storage_id.is_empty());
assert!(!user.user_workspaces.is_empty());
}
#[tokio::test]
async fn update_user_profile_test() {
if dotenv::from_filename("./.env.test").is_err() {
async fn supabase_update_user_profile_test() {
if get_supabase_config().is_none() {
return;
}
let user_service = user_auth_service_impl();
let uuid = Uuid::new_v4();
let mut params = HashMap::new();
params.insert(USER_UUID.to_string(), uuid.to_string());
let user_service = user_auth_service();
let uuid = Uuid::new_v4().to_string();
let params = sign_up_param(uuid);
let user: SignUpResponse = user_service
.sign_up(BoxAny::new(params.clone()))
.await
@ -74,7 +58,7 @@ async fn update_user_profile_test() {
id: user.user_id,
auth_type: Default::default(),
name: Some("123".to_string()),
email: Some("123@appflowy.io".to_string()),
email: Some(format!("{}@test.com", Uuid::new_v4())),
password: None,
icon_url: None,
openai_key: None,
@ -93,57 +77,33 @@ async fn update_user_profile_test() {
}
#[tokio::test]
async fn get_user_profile_test() {
if dotenv::from_filename("./.env.test").is_err() {
async fn supabase_get_user_profile_test() {
if get_supabase_config().is_none() {
return;
}
setup_log();
let user_service = user_auth_service_impl();
let uuid = Uuid::new_v4();
let mut params = HashMap::new();
params.insert(USER_UUID.to_string(), uuid.to_string());
let user_service = user_auth_service();
let uuid = Uuid::new_v4().to_string();
let params = sign_up_param(uuid);
let user: SignUpResponse = user_service
.sign_up(BoxAny::new(params.clone()))
.await
.unwrap();
let credential = UserCredentials::from_uid(user.user_id);
user_service
.get_user_profile(credential.clone())
.await
.unwrap()
.unwrap();
user_service
.get_user_profile(credential.clone())
.await
.unwrap()
.unwrap();
user_service
.get_user_profile(credential.clone())
.await
.unwrap()
.unwrap();
user_service
.get_user_profile(credential.clone())
.await
.unwrap()
.unwrap();
user_service
.get_user_profile(credential)
.await
.unwrap()
.unwrap();
}
#[tokio::test]
async fn get_not_exist_user_profile_test() {
if dotenv::from_filename("./.env.test").is_err() {
async fn supabase_get_not_exist_user_profile_test() {
if get_supabase_config().is_none() {
return;
}
setup_log();
let user_service = user_auth_service_impl();
let user_service = user_auth_service();
let result = user_service
.get_user_profile(UserCredentials::from_uid(i64::MAX))
.await

View File

@ -0,0 +1,65 @@
use std::collections::HashMap;
use std::sync::Arc;
use collab_plugins::cloud_storage::RemoteCollabStorage;
use uuid::Uuid;
use flowy_database_deps::cloud::DatabaseCloudService;
use flowy_folder_deps::cloud::FolderCloudService;
use flowy_server::supabase::api::{
RESTfulPostgresServer, RESTfulSupabaseCollabStorageImpl, RESTfulSupabaseDatabaseServiceImpl,
RESTfulSupabaseFolderServiceImpl, RESTfulSupabaseUserAuthServiceImpl, SupabaseServerServiceImpl,
};
use flowy_server::supabase::define::{USER_EMAIL, USER_UUID};
use flowy_server_config::supabase_config::SupabaseConfiguration;
use flowy_user_deps::cloud::UserService;
use crate::setup_log;
pub fn get_supabase_config() -> Option<SupabaseConfiguration> {
dotenv::from_filename("./.env.test").ok()?;
setup_log();
SupabaseConfiguration::from_env().ok()
}
pub fn collab_service() -> Arc<dyn RemoteCollabStorage> {
let config = SupabaseConfiguration::from_env().unwrap();
let server = Arc::new(RESTfulPostgresServer::new(config));
Arc::new(RESTfulSupabaseCollabStorageImpl::new(
SupabaseServerServiceImpl::new(server),
))
}
pub fn database_service() -> Arc<dyn DatabaseCloudService> {
let config = SupabaseConfiguration::from_env().unwrap();
let server = Arc::new(RESTfulPostgresServer::new(config));
Arc::new(RESTfulSupabaseDatabaseServiceImpl::new(
SupabaseServerServiceImpl::new(server),
))
}
pub fn user_auth_service() -> Arc<dyn UserService> {
let config = SupabaseConfiguration::from_env().unwrap();
let server = Arc::new(RESTfulPostgresServer::new(config));
Arc::new(RESTfulSupabaseUserAuthServiceImpl::new(
SupabaseServerServiceImpl::new(server),
))
}
pub fn folder_service() -> Arc<dyn FolderCloudService> {
let config = SupabaseConfiguration::from_env().unwrap();
let server = Arc::new(RESTfulPostgresServer::new(config));
Arc::new(RESTfulSupabaseFolderServiceImpl::new(
SupabaseServerServiceImpl::new(server),
))
}
pub fn sign_up_param(uuid: String) -> HashMap<String, String> {
let mut params = HashMap::new();
params.insert(USER_UUID.to_string(), uuid);
params.insert(
USER_EMAIL.to_string(),
format!("{}@test.com", Uuid::new_v4()),
);
params
}