Merge pull request #310 from AppFlowy-IO/remove_backend
feat: remove backend
This commit is contained in: commit 248473dddf
@@ -1,5 +1,4 @@
frontend/app_flowy/
frontend/scripts/
frontend/rust-lib/target
backend/target/
shared-lib/target/
156 .github/workflows/backend_general.yml vendored
@@ -1,156 +0,0 @@
name: Backend

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:12
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: password
          POSTGRES_DB: postgres
        ports:
          - 5433:5432
    env:
      SQLX_VERSION: 0.5.7
      SQLX_FEATURES: postgres
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Cache dependencies
        id: cache-dependencies
        uses: actions/cache@v2
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}

      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      - name: Cache sqlx-cli
        uses: actions/cache@v2
        id: cache-sqlx
        with:
          path: |
            ~/.cargo/bin/sqlx
            ~/.cargo/bin/cargo-sqlx
          key: ${{ runner.os }}-sqlx-${{ env.SQLX_VERSION }}-${{ env.SQLX_FEATURES }}

      - name: Install sqlx-cli
        uses: actions-rs/cargo@v1
        if: steps.cache-sqlx.outputs.cache-hit == false
        with:
          command: install
          args: >
            sqlx-cli
            --force
            --version=${{ env.SQLX_VERSION }}
            --features=${{ env.SQLX_FEATURES }}
            --no-default-features
            --locked

      - name: Migrate database
        working-directory: backend/
        run: |
          sudo apt-get install libpq-dev -y
          SKIP_DOCKER=true POSTGRES_PORT=5433 ./scripts/init_database.sh

      - name: Check sqlx-data.json is up-to-date
        working-directory: backend/
        run: |
          cargo sqlx prepare --check -- --bin backend

      - name: Run cargo test
        working-directory: backend/
        run: cargo test

  fmt:
    name: Rustfmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - run: rustup component add rustfmt
        working-directory: backend/
      - run: cargo fmt --all -- --check
        working-directory: backend/

  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:12
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: password
          POSTGRES_DB: postgres
        ports:
          - 5433:5432
    env:
      SQLX_VERSION: 0.5.7
      SQLX_FEATURES: postgres
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          components: clippy
          override: true

      - name: Cache sqlx-cli
        uses: actions/cache@v2
        id: cache-sqlx
        with:
          path: |
            ~/.cargo/bin/sqlx
          key: ${{ runner.os }}-sqlx-${{ env.SQLX_VERSION }}-${{ env.SQLX_FEATURES }}

      - name: Install sqlx-cli
        uses: actions-rs/cargo@v1
        if: steps.cache-sqlx.outputs.cache-hit == false
        with:
          command: install
          args: >
            sqlx-cli
            --force
            --version=${{ env.SQLX_VERSION }}
            --features=${{ env.SQLX_FEATURES }}
            --no-default-features
            --locked

      - name: Migrate database
        working-directory: backend/
        run: |
          sudo apt-get install libpq-dev -y
          SKIP_DOCKER=true POSTGRES_PORT=5433 ./scripts/init_database.sh

      - run: rustup component add clippy
        working-directory: backend/
      - run: cargo clippy -- -D warnings
        working-directory: backend/
3 .gitignore vendored
@@ -4,9 +4,6 @@
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
# backend
/target/
./backend/.env
./backend/configuration/base.yaml
Cargo.lock

# These are backup files generated by rustfmt
**/*.rs.bk
@@ -1,9 +0,0 @@
.env
.dockerignore
spec.yaml
target/
deploy/
tests/
Dockerfile
scripts/
migrations/
@@ -1 +0,0 @@
DATABASE_URL="postgres://postgres:password@localhost:5433/flowy"
4131 backend/Cargo.lock generated
File diff suppressed because it is too large.
@@ -1,109 +0,0 @@
[package]
name = "backend"
version = "0.1.0"
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
actix = "0.12"
#actix-web = "3"
#actix-http = "2.2.1"
#actix-web-actors = "3"
actix-codec = "0.4"
actix-web = "4.0.0-beta.11"
actix-http = "3.0.0-beta.12"
actix-rt = "2"
actix-web-actors = { version = "4.0.0-beta.7" }
actix-service = "2.0.1"
actix-identity = "0.4.0-beta.3"
actix-cors = "0.6.0-beta.3"

futures = "0.3.15"
bytes = "1"
toml = "0.5.8"
dashmap = "4.0"
log = "0.4.14"
async-trait = "0.1.52"

# tracing
tracing = { version = "0.1", features = ["log"] }
tracing-futures = "0.2.4"
tracing-subscriber = { version = "0.2.12", features = ["registry", "env-filter", "ansi", "json"] }
tracing-bunyan-formatter = "0.2.2"
tracing-appender = "0.1"
tracing-core = "0.1"
tracing-log = { version = "0.1.1" }

# serde
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
serde_repr = "0.1"
serde-aux = "1.0.1"

derive_more = { version = "0.99" }
protobuf = { version = "2.20.0" }
uuid = { version = "0.8", features = ["serde", "v4"] }
config = { version = "0.10.1", default-features = false, features = ["yaml"] }
chrono = { version = "0.4", features = ["serde"] }
anyhow = "1.0.40"
thiserror = "1.0.24"
bcrypt = "0.10"
jsonwebtoken = "7.2"
sql-builder = "3.1.1"
lazy_static = "1.4"
tokio = { version = "1", features = ["full"] }
parking_lot = "0.11"
md5 = "0.7.0"
futures-core = { version = "0.3", default-features = false }
pin-project = "1.0.0"
byteorder = { version = "1.3.4" }
async-stream = "0.3.2"

flowy-user-data-model = { path = "../shared-lib/flowy-user-data-model" }
flowy-folder-data-model = { path = "../shared-lib/flowy-folder-data-model" }
flowy-collaboration = { path = "../shared-lib/flowy-collaboration" }
lib-ws = { path = "../shared-lib/lib-ws" }
lib-ot = { path = "../shared-lib/lib-ot" }
lib-infra = { path = "../shared-lib/lib-infra" }
backend-service = { path = "../shared-lib/backend-service", features = ["http_server"] }

ormx = { version = "0.7", features = ["postgres"] }

[dependencies.sqlx]
version = "0.5.7"
default-features = false
features = [
    "runtime-actix-rustls",
    "macros",
    "postgres",
    "uuid",
    "chrono",
    "migrate",
    "offline",
]

[lib]
path = "src/lib.rs"

[[bin]]
name = "backend"
path = "src/main.rs"

[features]
flowy_test = []
ignore_auth = []

[dev-dependencies]
parking_lot = "0.11"
once_cell = "1.7.2"
linkify = "0.5.0"
futures-util = "0.3.15"
backend = { path = ".", features = ["flowy_test"] }
flowy-sdk = { path = "../frontend/rust-lib/flowy-sdk", features = ["http_server"] }
flowy-user = { path = "../frontend/rust-lib/flowy-user", features = ["http_server"] }
flowy-document = { path = "../frontend/rust-lib/flowy-document", features = ["flowy_unit_test", "http_server"] }
flowy-test = { path = "../frontend/rust-lib/flowy-test" }
flowy-net = { path = "../frontend/rust-lib/flowy-net", features = ["http_server"] }
@@ -1,23 +0,0 @@
FROM rust:1.56.1 as builder
WORKDIR /app

COPY . .
WORKDIR /app/backend
ENV SQLX_OFFLINE true
RUN RUSTFLAGS="-C opt-level=2" cargo build --release --bin backend
# Size optimization
#RUN strip ./target/release/backend

FROM debian:bullseye-slim AS runtime
WORKDIR /app
RUN apt-get update -y \
    && apt-get install -y --no-install-recommends openssl \
    # Clean up
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /var/lib/apt/lists/*

COPY --from=builder /app/backend/target/release/backend /usr/local/bin/backend
COPY --from=builder /app/backend/configuration configuration
ENV APP_ENVIRONMENT production
CMD ["backend"]
@@ -1,23 +0,0 @@
ROOT = "./scripts"
SEMVER_VERSION=$(shell grep version Cargo.toml | awk -F"\"" '{print $$2}' | head -n 1)

.PHONY: init_database run_docker run_test

init_database:
	POSTGRES_PORT=5433 ${ROOT}/init_database.sh

docker_image:
	source $(ROOT)/docker_env.sh && docker-compose up -d db
	source $(ROOT)/docker_env.sh && docker-compose up -d backend

local_server:
	cargo run

docker_test:
	sh $(ROOT)/docker_test.sh

local_test:
	# 🔥 Must run init_database first
	SQLX_OFFLINE=true cargo test
@@ -1,9 +0,0 @@
application:
  port: 8000
  host: 0.0.0.0
database:
  host: "localhost"
  port: 5433
  username: "postgres"
  password: "password"
  database_name: "flowy"
@@ -1,5 +0,0 @@
application:
  host: 127.0.0.1
  base_url: "http://127.0.0.1"
database:
  require_ssl: false
@@ -1,6 +0,0 @@
application:
  host: 0.0.0.0
database:
  host: "db"
  port: 5432
  require_ssl: false
@@ -1,70 +0,0 @@

### Docker

1. Follow the [instructions](https://docs.docker.com/desktop/mac/install/) to install Docker.
2. Open a terminal and run: `docker pull postgres`
3. Run `make init_database`. It creates the database schema on the remote specified by DATABASE_URL. You can connect to your database using pgAdmin.

![img_2.png](img_2.png)

The information you enter must match the values used by `make init_database`, e.g.
```
export DB_USER=postgres
export DB_PASSWORD=password
export DB_NAME=flowy
export DB_PORT=5432
```

![img_1.png](img_1.png)

[Docker command](https://docs.docker.com/engine/reference/commandline/builder_prune/)

### Run
By default, Docker images do not expose their ports to the underlying host machine, so we need to do it explicitly using the -p flag:
`docker run -p 8000:8000 backend`

### Sqlx

**sqlx-cli**
* [install sqlx-cli](https://github.com/launchbadge/sqlx/tree/master/sqlx-cli)

**Sqlx and Diesel commands**
* create migration
    * sqlx: sqlx migrate add $(table)
    * diesel: diesel migration generate $(table)

* run migration
    * sqlx: sqlx migrate run
    * diesel: diesel migration run

* reset database
    * sqlx: sqlx database reset
    * diesel: diesel database reset

**offline mode**

`cargo sqlx prepare -- --bin backend`

**Type mapping**
* [postgres type map](https://docs.rs/sqlx/0.5.7/sqlx/postgres/types/index.html)
* [postgres and diesel type map](https://kotiri.com/2018/01/31/postgresql-diesel-rust-types.html)

## Q&A
1. Receiving `{ code: 24, kind: Other, message: "Too many open files" } on arbiter` after running cargo test on the backend.
> This is due to a limit enforced by the operating system on the maximum number of open file descriptors (including sockets) for each process.
> Raise the file descriptor limit using `ulimit -n 2048` to solve this issue. The value does not persist across reboots, so look up how to make it permanent if you need to.
>
> Alternatively, on macOS you can try:
> `launchctl limit maxfiles 2048 2048`
> `launchctl limit maxfiles`
>
> Don't forget to relaunch your terminal.

## More
* [11-database-drivers](https://blog.logrocket.com/11-database-drivers-and-orms-for-rust-that-are-ready-for-production/)
@@ -1,200 +0,0 @@

# Table Struct

## Table: user_table

- `Name`: UserTable
- `Comment`: UserTable

### `Primary Key`

- `Columns`: id

### `Indexes[]`

| `Columns` | `Unique` |
| --------- | -------- |
| email     | `true`   |

### `Foreign Keys[]`

| `Columns` | `Ref Table` | `Ref Columns` | `Options` |
| --------- | ----------- | ------------- | --------- |

### `Columns[]`

| `Label`     | `Name`      | `Type`      | `Nullable` | `Default` | `Comment` |
| ----------- | ----------- | ----------- | ---------- | --------- | --------- |
| id          | id          | uuid        | `false`    |           |           |
| email       | email       | text        | `false`    |           |           |
| name        | name        | text        | `false`    |           |           |
| password    | password    | text        | `false`    |           |           |
| create_time | create_time | timestamptz | `false`    |           |           |


## Table: workspace_table

- `Name`: WorkspaceTable
- `Comment`: WorkspaceTable

### `Primary Key`

- `Columns`: id

### `Indexes[]`

| `Columns` | `Unique` |
| --------- | -------- |

### `Foreign Keys[]`

| `Columns` | `Ref Table` | `Ref Columns` | `Options` |
| --------- | ----------- | ------------- | --------- |
| user_id   | user_table  | id            |           |

### `Columns[]`

| `Label`       | `Name`        | `Type`      | `Nullable` | `Default` | `Comment` |
| ------------- | ------------- | ----------- | ---------- | --------- | --------- |
| id            | id            | uuid        | `false`    |           |           |
| user_id       | user_id       | text        | `false`    |           |           |
| name          | name          | text        | `false`    |           |           |
| description   | description   | text        | `false`    |           |           |
| create_time   | create_time   | timestamptz | `false`    |           |           |
| modified_time | modified_time | timestamptz | `false`    |           |           |


## Table: app_table

- `Name`: AppTable
- `Comment`: AppTable

### `Primary Key`

- `Columns`: id

### `Indexes[]`

| `Columns` | `Unique` |
| --------- | -------- |

### `Foreign Keys[]`

| `Columns`    | `Ref Table`     | `Ref Columns` | `Options` |
| ------------ | --------------- | ------------- | --------- |
| user_id      | user_table      | id            |           |
| workspace_id | workspace_table | id            |           |
| last_view_id | view_table      | id            |           |

### `Columns[]`

| `Label`       | `Name`        | `Type`      | `Nullable` | `Default` | `Comment` |
| ------------- | ------------- | ----------- | ---------- | --------- | --------- |
| id            | id            | uuid        | `false`    |           |           |
| user_id       | user_id       | text        | `false`    |           |           |
| workspace_id  | workspace_id  | text        | `false`    |           |           |
| last_view_id  | last_view_id  | text        | `false`    |           |           |
| name          | name          | text        | `false`    |           |           |
| description   | description   | text        | `false`    |           |           |
| color_style   | color_style   | text        | `false`    |           |           |
| is_trash      | is_trash      | bool        | `false`    | `false`   |           |
| create_time   | create_time   | timestamptz | `false`    |           |           |
| modified_time | modified_time | timestamptz | `false`    |           |           |


## Table: view_table

- `Name`: ViewTable
- `Comment`: ViewTable

### `Primary Key`

- `Columns`: id

### `Indexes[]`

| `Columns` | `Unique` |
| --------- | -------- |

### `Foreign Keys[]`

| `Columns`    | `Ref Table` | `Ref Columns` | `Options` |
| ------------ | ----------- | ------------- | --------- |
| user_id      | user_table  | id            |           |
| belong_to_id | app_table   | id            |           |

### `Columns[]`

| `Label`       | `Name`        | `Type`      | `Nullable` | `Default` | `Comment` |
| ------------- | ------------- | ----------- | ---------- | --------- | --------- |
| id            | id            | uuid        | `false`    |           |           |
| belong_to_id  | belong_to_id  | text        | `false`    |           |           |
| name          | name          | text        | `false`    |           |           |
| description   | description   | text        | `false`    |           |           |
| thumbnail     | thumbnail     | text        | `false`    |           |           |
| view_type     | view_type     | int         | `false`    |           |           |
| create_time   | create_time   | timestamptz | `false`    |           |           |
| modified_time | modified_time | timestamptz | `false`    |           |           |


## Table: doc_table

- `Name`: DocTable
- `Comment`: DocTable

### `Primary Key`

- `Columns`: id

### `Indexes[]`

| `Columns` | `Unique` |
| --------- | -------- |

### `Foreign Keys[]`

| `Columns` | `Ref Table` | `Ref Columns` | `Options` |
| --------- | ----------- | ------------- | --------- |
| rev_id    | doc_table   | id            |           |

### `Columns[]`

| `Label` | `Name` | `Type` | `Nullable` | `Default` | `Comment` |
| ------- | ------ | ------ | ---------- | --------- | --------- |
| id      | id     | uuid   | `false`    |           |           |
| rev_id  | rev_id | text   | `false`    |           |           |
| data    | data   | text   | `false`    |           |           |


## Table: trash_table

- `Name`: TrashTable
- `Comment`: TrashTable

### `Primary Key`

- `Columns`: id

### `Indexes[]`

| `Columns` | `Unique` |
| --------- | -------- |

### `Foreign Keys[]`

| `Columns` | `Ref Table` | `Ref Columns` | `Options` |
| --------- | ----------- | ------------- | --------- |
| user_id   | user_table  | id            |           |

### `Columns[]`

| `Label` | `Name`  | `Type` | `Nullable` | `Default` | `Comment` |
| ------- | ------- | ------ | ---------- | --------- | --------- |
| id      | id      | uuid   | `false`    |           |           |
| user_id | user_id | text   | `false`    |           |           |
| ty      | ty      | int4   | `false`    | 0         |           |
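These tables map onto sqlx models in the Rust sources later in this diff. As a hedged illustration only (the only model of this style actually visible in the diff is `UserTable`; this `WorkspaceTable` struct is hypothetical), `workspace_table` would translate to:

```rust
use chrono::{DateTime, Utc};

// Hypothetical sketch: workspace_table as a sqlx model, following the
// documented column list above. Not part of the removed code.
#[derive(Debug, Clone, sqlx::FromRow)]
pub struct WorkspaceTable {
    pub id: uuid::Uuid,                // uuid, primary key
    pub user_id: String,               // text, FK -> user_table.id
    pub name: String,                  // text
    pub description: String,           // text
    pub create_time: DateTime<Utc>,    // timestamptz
    pub modified_time: DateTime<Utc>,  // timestamptz
}
```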
Binary file not shown. (Before: 214 KiB)
Binary file not shown. (Before: 48 KiB)
@@ -1,23 +0,0 @@
version: '3'
services:
  db:
    image: 'postgres:9.6-alpine'
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_DB=${POSTGRES_DB}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    ports:
      - "5434:5432"
  backend:
    restart: on-failure
    environment:
      - APP_ENVIRONMENT=production
      - DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db/${POSTGRES_DB}"
    build:
      context: ../
      dockerfile: ./backend/Dockerfile
    image: flowy_backend:${BACKEND_VERSION}
    depends_on:
      - db
    ports:
      - 8000:8000
@@ -1,9 +0,0 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS user_table(
    id uuid NOT NULL,
    PRIMARY KEY (id),
    email TEXT NOT NULL UNIQUE,
    name TEXT NOT NULL,
    create_time timestamptz NOT NULL,
    password TEXT NOT NULL
);
@@ -1,10 +0,0 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS workspace_table(
    id uuid NOT NULL,
    PRIMARY KEY (id),
    name TEXT NOT NULL,
    description TEXT NOT NULL,
    modified_time timestamptz NOT NULL,
    create_time timestamptz NOT NULL,
    user_id TEXT NOT NULL
);
@@ -1,14 +0,0 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS app_table(
    id uuid NOT NULL,
    PRIMARY KEY (id),
    workspace_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT NOT NULL,
    color_style BYTEA NOT NULL,
    last_view_id TEXT DEFAULT '',
    modified_time timestamptz NOT NULL,
    create_time timestamptz NOT NULL,
    user_id TEXT NOT NULL,
    is_trash BOOL NOT NULL DEFAULT false
);
@@ -1,12 +0,0 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS view_table(
    id uuid NOT NULL,
    PRIMARY KEY (id),
    belong_to_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT NOT NULL,
    modified_time timestamptz NOT NULL,
    create_time timestamptz NOT NULL,
    thumbnail TEXT NOT NULL,
    view_type INTEGER NOT NULL
);
@@ -1,6 +0,0 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS doc_table(
    id uuid NOT NULL,
    PRIMARY KEY (id),
    rev_id bigint NOT NULL DEFAULT 0
);
@@ -1,7 +0,0 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS trash_table(
    id uuid NOT NULL,
    PRIMARY KEY (id),
    user_id TEXT NOT NULL,
    ty INTEGER NOT NULL DEFAULT 0
);
@@ -1,6 +0,0 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS kv_table(
    id TEXT NOT NULL,
    PRIMARY KEY (id),
    blob bytea
);
@@ -1,2 +0,0 @@
[toolchain]
channel = "stable-2022-01-20"
@@ -1,18 +0,0 @@
# https://rust-lang.github.io/rustfmt/?version=master&search=
max_width = 120
tab_spaces = 4
# fn_single_line = true
# match_block_trailing_comma = true
# normalize_comments = true
# wrap_comments = true
# use_field_init_shorthand = true
# use_try_shorthand = true
# normalize_doc_attributes = true
# report_todo = "Never"
# report_fixme = "Always"
# imports_layout = "HorizontalVertical"
# imports_granularity = "Crate"
# reorder_modules = true
# reorder_imports = true
# enum_discrim_align_threshold = 20
edition = "2018"
@@ -1,10 +0,0 @@
#!/bin/bash
export BACKEND_VERSION="v0.0.1"

export POSTGRES_USER=postgres
export POSTGRES_PASSWORD=password
export POSTGRES_PORT=5432
export POSTGRES_HOST=db
export POSTGRES_DB=flowy

export DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
@@ -1,2 +0,0 @@
#!/bin/bash
curl -i --request GET --url http://0.0.0.0:8000/api/user --header 'content-type: application/json' --data '{"token":"123"}'
@@ -1,61 +0,0 @@
#!/usr/bin/env bash
set -x
set -eo pipefail

if ! [ -x "$(command -v psql)" ]; then
    echo >&2 "Error: psql is not installed."
    echo >&2 "install using brew: brew install libpq."
    echo >&2 "link to /usr/local/bin: brew link --force libpq"

    exit 1
fi

if ! [ -x "$(command -v sqlx)" ]; then
    echo >&2 "Error: sqlx is not installed."
    echo >&2 "Use:"
    echo >&2 "    cargo install --version=^0.5.7 sqlx-cli --no-default-features --features postgres"
    echo >&2 "to install it."
    exit 1
fi

DB_USER="${POSTGRES_USER:=postgres}"
DB_PASSWORD="${POSTGRES_PASSWORD:=password}"
DB_PORT="${POSTGRES_PORT:=5432}"
DB_HOST="${POSTGRES_HOST:=localhost}"
DB_NAME="${POSTGRES_DB:=flowy}"

if [[ -z "${SKIP_DOCKER}" ]]
then
    RUNNING_POSTGRES_CONTAINER=$(docker ps --filter 'name=postgres' --format '{{.ID}}')
    if [[ -n $RUNNING_POSTGRES_CONTAINER ]]; then
        echo >&2 "there is a postgres container already running, kill it with"
        echo >&2 "    docker kill ${RUNNING_POSTGRES_CONTAINER}"
        exit 1
    fi

    docker run \
        -e POSTGRES_USER=${DB_USER} \
        -e POSTGRES_PASSWORD=${DB_PASSWORD} \
        -e POSTGRES_DB="${DB_NAME}" \
        -p "${DB_PORT}":5432 \
        -d \
        --name "flowy_postgres_$(date '+%s')" \
        postgres -N 1000
fi

# Keep pinging Postgres until it's ready to accept commands
until PGPASSWORD="${DB_PASSWORD}" psql -h "${DB_HOST}" -U "${DB_USER}" -p "${DB_PORT}" -d "postgres" -c '\q'; do
    >&2 echo "Postgres is still unavailable - sleeping"
    sleep 1
done

>&2 echo "Postgres is up and running on port ${DB_PORT} - running migrations now!"

export DATABASE_URL=postgres://${DB_USER}:${DB_PASSWORD}@localhost:${DB_PORT}/${DB_NAME}
sqlx database create
sqlx migrate run

>&2 echo "Postgres has been migrated, ready to go!"
@@ -1,19 +0,0 @@
{
    "db": "PostgreSQL",
    "e8c487b4314c267f6da2667b95f6c8003fabc2461c10df2d6d39d081e74e167f": {
        "query": "\n            INSERT INTO user_table (id, email, name, create_time, password)\n            VALUES ($1, $2, $3, $4, $5)\n            ",
        "describe": {
            "columns": [],
            "parameters": {
                "Left": [
                    "Uuid",
                    "Text",
                    "Text",
                    "Timestamptz",
                    "Text"
                ]
            },
            "nullable": []
        }
    }
}
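The checksummed entry above is the offline metadata that `cargo sqlx prepare` records for a compile-time-checked query, and it is what the CI step `cargo sqlx prepare --check -- --bin backend` validates. A hedged sketch of the kind of call that produces it (`insert_user` is a hypothetical wrapper; the SQL and the parameter types Uuid, Text, Text, Timestamptz, Text match the entry):

```rust
use chrono::Utc;
use sqlx::PgPool;
use uuid::Uuid;

// Hypothetical helper: a sqlx::query! invocation like this is what the
// offline metadata above describes.
async fn insert_user(pool: &PgPool, email: &str, name: &str, password: &str) -> Result<(), sqlx::Error> {
    sqlx::query!(
        r#"
        INSERT INTO user_table (id, email, name, create_time, password)
        VALUES ($1, $2, $3, $4, $5)
        "#,
        Uuid::new_v4(), // Uuid
        email,          // Text
        name,           // Text
        Utc::now(),     // Timestamptz
        password,       // Text
    )
    .execute(pool)
    .await?;
    Ok(())
}
```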
@@ -1,166 +0,0 @@
use actix::Actor;
use actix_identity::{CookieIdentityPolicy, IdentityService};
use actix_web::{dev::Server, middleware, web, web::Data, App, HttpServer, Scope};
use sqlx::{postgres::PgPoolOptions, PgPool};
use std::{net::TcpListener, time::Duration};
use tokio::time::interval;

use crate::{
    config::{
        env::{domain, secret, use_https},
        DatabaseSettings, Settings,
    },
    context::AppContext,
    services::{
        document::router as doc,
        folder::{app::router as app, trash::router as trash, view::router as view, workspace::router as workspace},
        user::router as user,
        web_socket::WSServer,
    },
};

pub struct Application {
    port: u16,
    server: Server,
}

impl Application {
    pub async fn build(configuration: Settings, app_ctx: AppContext) -> Result<Self, std::io::Error> {
        let address = format!("{}:{}", configuration.application.host, configuration.application.port);
        let listener = TcpListener::bind(&address)?;
        let port = listener.local_addr().unwrap().port();
        let server = run(listener, app_ctx)?;
        Ok(Self { port, server })
    }

    pub async fn run_until_stopped(self) -> Result<(), std::io::Error> {
        self.server.await
    }

    pub fn port(&self) -> u16 {
        self.port
    }
}

pub fn run(listener: TcpListener, app_ctx: AppContext) -> Result<Server, std::io::Error> {
    let domain = domain();
    let secret: String = secret();
    actix_rt::spawn(period_check(app_ctx.persistence.pg_pool()));

    let server = HttpServer::new(move || {
        App::new()
            .wrap(middleware::Logger::default())
            .wrap(identify_service(&domain, &secret))
            .wrap(crate::middleware::default_cors())
            .wrap(crate::middleware::AuthenticationService)
            .app_data(web::JsonConfig::default().limit(4096))
            .service(ws_scope())
            .service(user_scope())
            .app_data(app_ctx.ws_server.clone())
            .app_data(app_ctx.persistence.clone())
            .app_data(Data::new(app_ctx.persistence.pg_pool()))
            .app_data(app_ctx.ws_receivers.clone())
            .app_data(app_ctx.document_manager.clone())
    })
    .listen(listener)?
    .run();
    Ok(server)
}

#[allow(dead_code)]
async fn period_check(_pool: PgPool) {
    let mut i = interval(Duration::from_secs(60));
    loop {
        i.tick().await;
    }
}

fn ws_scope() -> Scope {
    web::scope("/ws").service(crate::services::web_socket::router::establish_ws_connection)
}

fn user_scope() -> Scope {
    // https://developer.mozilla.org/en-US/docs/Web/HTTP
    // TODO: replace GET body with query params
    web::scope("/api")
        // authentication
        .service(
            web::resource("/auth")
                .route(web::post().to(user::sign_in_handler))
                .route(web::delete().to(user::sign_out_handler)),
        )
        .service(
            web::resource("/user")
                .route(web::patch().to(user::set_user_profile_handler))
                .route(web::get().to(user::get_user_profile_handler)),
        )
        .service(web::resource("/register").route(web::post().to(user::register_handler)))
        .service(
            web::resource("/workspace")
                .route(web::post().to(workspace::create_handler))
                .route(web::delete().to(workspace::delete_handler))
                .route(web::get().to(workspace::read_handler))
                .route(web::patch().to(workspace::update_handler)),
        )
        .service(web::resource("/workspace_list/{user_id}").route(web::get().to(workspace::workspace_list)))
        .service(
            web::resource("/app")
                .route(web::post().to(app::create_handler))
                .route(web::get().to(app::read_handler))
                .route(web::delete().to(app::delete_handler))
                .route(web::patch().to(app::update_handler)),
        )
        .service(
            web::resource("/view")
                .route(web::post().to(view::create_handler))
                .route(web::delete().to(view::delete_handler))
                .route(web::get().to(view::read_handler))
                .route(web::patch().to(view::update_handler)),
        )
        .service(
            web::resource("/doc")
                .route(web::post().to(doc::create_document_handler))
                .route(web::get().to(doc::read_document_handler))
                .route(web::patch().to(doc::reset_document_handler)),
        )
        .service(
            web::resource("/trash")
                .route(web::post().to(trash::create_handler))
                .route(web::delete().to(trash::delete_handler))
                .route(web::get().to(trash::read_handler)),
        )
        .service(web::resource("/sync").route(web::post().to(trash::create_handler)))
        // password
        .service(web::resource("/password_change").route(web::post().to(user::change_password)))
}

pub async fn init_app_context(configuration: &Settings) -> AppContext {
    let level = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_owned());
    let _ = crate::services::log::Builder::new("flowy-server")
        .env_filter(&level)
        .build();
    let pg_pool = get_connection_pool(&configuration.database)
        .await
        .unwrap_or_else(|_| panic!("Failed to connect to Postgres at {:?}.", configuration.database));

    let ws_server = WSServer::new().start();
    AppContext::new(ws_server, pg_pool)
}

pub fn identify_service(domain: &str, secret: &str) -> IdentityService<CookieIdentityPolicy> {
    IdentityService::new(
        CookieIdentityPolicy::new(secret.as_bytes())
            .name("auth")
            .path("/")
            .domain(domain)
            .max_age_secs(24 * 3600)
            .secure(use_https()),
    )
}

pub async fn get_connection_pool(configuration: &DatabaseSettings) -> Result<PgPool, sqlx::Error> {
    PgPoolOptions::new()
        .connect_timeout(std::time::Duration::from_secs(5))
        .connect_with(configuration.with_db())
        .await
}
@@ -1,106 +0,0 @@
use serde_aux::field_attributes::deserialize_number_from_string;
use sqlx::postgres::{PgConnectOptions, PgSslMode};
use std::convert::{TryFrom, TryInto};

#[derive(serde::Deserialize, Clone, Debug)]
pub struct Settings {
    pub database: DatabaseSettings,
    pub application: ApplicationSettings,
}

// Using 127.0.0.1 as the host instructs the application to accept only
// connections coming from the same machine. However, requests from the host
// machine are not seen as local by our Docker image.
//
// Using 0.0.0.0 as the host instructs the application to accept connections
// from any network interface. So we use 127.0.0.1 for local development and
// set it to 0.0.0.0 in our Docker images.
//
#[derive(serde::Deserialize, Clone, Debug)]
pub struct ApplicationSettings {
    #[serde(deserialize_with = "deserialize_number_from_string")]
    pub port: u16,
    pub host: String,
}

#[derive(serde::Deserialize, Clone, Debug)]
pub struct DatabaseSettings {
    pub username: String,
    pub password: String,
    #[serde(deserialize_with = "deserialize_number_from_string")]
    pub port: u16,
    pub host: String,
    pub database_name: String,
    pub require_ssl: bool,
}

impl DatabaseSettings {
    pub fn without_db(&self) -> PgConnectOptions {
        let ssl_mode = if self.require_ssl {
            PgSslMode::Require
        } else {
            PgSslMode::Prefer
        };
        PgConnectOptions::new()
            .host(&self.host)
            .username(&self.username)
            .password(&self.password)
            .port(self.port)
            .ssl_mode(ssl_mode)
    }

    pub fn with_db(&self) -> PgConnectOptions {
        self.without_db().database(&self.database_name)
    }
}

pub fn get_configuration() -> Result<Settings, config::ConfigError> {
    let mut settings = config::Config::default();
    let base_path = std::env::current_dir().expect("Failed to determine the current directory");
    let configuration_dir = base_path.join("configuration");
    settings.merge(config::File::from(configuration_dir.join("base")).required(true))?;

    let environment: Environment = std::env::var("APP_ENVIRONMENT")
        .unwrap_or_else(|_| "local".into())
        .try_into()
        .expect("Failed to parse APP_ENVIRONMENT.");

    settings.merge(config::File::from(configuration_dir.join(environment.as_str())).required(true))?;

    // Add in settings from environment variables (with a prefix of APP and
    // '__' as separator). E.g. `APP_APPLICATION__PORT=5001` would set
    // `Settings.application.port`.
    settings.merge(config::Environment::with_prefix("app").separator("__"))?;

    settings.try_into()
}

/// The possible runtime environments for our application.
pub enum Environment {
    Local,
    Production,
}

impl Environment {
    pub fn as_str(&self) -> &'static str {
        match self {
            Environment::Local => "local",
            Environment::Production => "production",
        }
    }
}

impl TryFrom<String> for Environment {
    type Error = String;

    fn try_from(s: String) -> Result<Self, Self::Error> {
        match s.to_lowercase().as_str() {
            "local" => Ok(Self::Local),
            "production" => Ok(Self::Production),
            other => Err(format!(
                "{} is not a supported environment. Use either `local` or `production`.",
                other
            )),
        }
    }
}
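The loader above layers `base.yaml`, an environment-specific file, and `APP_*` environment variables. A minimal sketch of the override behavior (illustrative; it assumes the process starts in a directory containing the `configuration/` folder):

```rust
// APP_APPLICATION__PORT maps to Settings.application.port via the "app"
// prefix and "__" separator configured in get_configuration().
fn configuration_override_sketch() {
    std::env::set_var("APP_ENVIRONMENT", "local");
    std::env::set_var("APP_APPLICATION__PORT", "5001");
    let settings = get_configuration().expect("Failed to read configuration.");
    assert_eq!(settings.application.port, 5001); // overrides the 8000 from base.yaml
}
```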
@@ -1,7 +0,0 @@
use std::time::Duration;

pub const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(8);
pub const PING_TIMEOUT: Duration = Duration::from_secs(60);
pub const MAX_PAYLOAD_SIZE: usize = 262_144; // max payload size is 256k

pub const IGNORE_ROUTES: [&str; 3] = ["/api/register", "/api/auth", "/ws"];
@@ -1,17 +0,0 @@
use std::env;

pub fn domain() -> String {
    env::var("DOMAIN").unwrap_or_else(|_| "localhost".to_string())
}

pub fn jwt_secret() -> String {
    env::var("JWT_SECRET").unwrap_or_else(|_| "my secret".into())
}

pub fn secret() -> String {
    env::var("SECRET_KEY").unwrap_or_else(|_| "0123".repeat(8))
}

pub fn use_https() -> bool {
    false
}
@@ -1,6 +0,0 @@
mod configuration;
mod const_define;
pub mod env;

pub use configuration::*;
pub use const_define::*;
@@ -1,92 +0,0 @@
use crate::services::{
    kv::PostgresKV,
    web_socket::{WSServer, WebSocketReceivers},
};
use actix::Addr;
use actix_web::web::Data;

use crate::services::{
    document::ws_receiver::{make_document_ws_receiver, HttpDocumentCloudPersistence},
    folder::ws_receiver::{make_folder_ws_receiver, HttpFolderCloudPersistence},
    kv::revision_kv::RevisionKVPersistence,
};
use flowy_collaboration::{server_document::ServerDocumentManager, server_folder::ServerFolderManager};
use lib_ws::WSChannel;
use sqlx::PgPool;
use std::sync::Arc;

#[derive(Clone)]
pub struct AppContext {
    pub ws_server: Data<Addr<WSServer>>,
    pub persistence: Data<Arc<FlowyPersistence>>,
    pub ws_receivers: Data<WebSocketReceivers>,
    pub document_manager: Data<Arc<ServerDocumentManager>>,
    pub folder_manager: Data<Arc<ServerFolderManager>>,
}

impl AppContext {
    pub fn new(ws_server: Addr<WSServer>, pg_pool: PgPool) -> Self {
        let ws_server = Data::new(ws_server);
        let mut ws_receivers = WebSocketReceivers::new();

        let document_store = make_document_kv_store(pg_pool.clone());
        let folder_store = make_folder_kv_store(pg_pool.clone());
        let flowy_persistence = Arc::new(FlowyPersistence {
            pg_pool,
            document_store,
            folder_store,
        });

        let document_persistence = Arc::new(HttpDocumentCloudPersistence(flowy_persistence.document_kv_store()));
        let document_manager = Arc::new(ServerDocumentManager::new(document_persistence));
        let document_ws_receiver = make_document_ws_receiver(flowy_persistence.clone(), document_manager.clone());
        ws_receivers.set(WSChannel::Document, document_ws_receiver);

        let folder_persistence = Arc::new(HttpFolderCloudPersistence(flowy_persistence.folder_kv_store()));
        let folder_manager = Arc::new(ServerFolderManager::new(folder_persistence));
        let folder_ws_receiver = make_folder_ws_receiver(flowy_persistence.clone(), folder_manager.clone());
        ws_receivers.set(WSChannel::Folder, folder_ws_receiver);

        AppContext {
            ws_server,
            persistence: Data::new(flowy_persistence),
            ws_receivers: Data::new(ws_receivers),
            document_manager: Data::new(document_manager),
            folder_manager: Data::new(folder_manager),
        }
    }
}

pub type DocumentRevisionKV = RevisionKVPersistence;
pub type FolderRevisionKV = RevisionKVPersistence;

fn make_document_kv_store(pg_pool: PgPool) -> Arc<DocumentRevisionKV> {
    let kv_impl = Arc::new(PostgresKV { pg_pool });
    Arc::new(DocumentRevisionKV::new(kv_impl))
}

fn make_folder_kv_store(pg_pool: PgPool) -> Arc<FolderRevisionKV> {
    let kv_impl = Arc::new(PostgresKV { pg_pool });
    Arc::new(FolderRevisionKV::new(kv_impl))
}

#[derive(Clone)]
pub struct FlowyPersistence {
    pg_pool: PgPool,
    document_store: Arc<DocumentRevisionKV>,
    folder_store: Arc<FolderRevisionKV>,
}

impl FlowyPersistence {
    pub fn pg_pool(&self) -> PgPool {
        self.pg_pool.clone()
    }

    pub fn document_kv_store(&self) -> Arc<DocumentRevisionKV> {
        self.document_store.clone()
    }

    pub fn folder_kv_store(&self) -> Arc<FolderRevisionKV> {
        self.folder_store.clone()
    }
}
@@ -1,118 +0,0 @@
use crate::entities::token::{Claim, Token};
use actix_web::http::HeaderValue;
use backend_service::errors::ServerError;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use lazy_static::lazy_static;

lazy_static! {
    pub static ref AUTHORIZED_USERS: AuthorizedUsers = AuthorizedUsers::new();
}

#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct LoggedUser {
    pub user_id: String,
}

impl std::convert::From<Claim> for LoggedUser {
    fn from(c: Claim) -> Self {
        Self { user_id: c.user_id() }
    }
}

impl LoggedUser {
    pub fn new(user_id: &str) -> Self {
        Self {
            user_id: user_id.to_owned(),
        }
    }

    pub fn from_token(token: String) -> Result<Self, ServerError> {
        let user: LoggedUser = Token::decode_token(&token.into())?.into();
        Ok(user)
    }

    pub fn as_uuid(&self) -> Result<uuid::Uuid, ServerError> {
        let id = uuid::Uuid::parse_str(&self.user_id)?;
        Ok(id)
    }
}

use actix_web::{dev::Payload, FromRequest, HttpRequest};

use futures::future::{ready, Ready};

impl FromRequest for LoggedUser {
    type Error = ServerError;
    type Future = Ready<Result<Self, Self::Error>>;

    fn from_request(request: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        match Token::parser_from_request(request) {
            Ok(token) => ready(LoggedUser::from_token(token.0)),
            Err(err) => ready(Err(err)),
        }
    }
}

impl std::convert::TryFrom<&HeaderValue> for LoggedUser {
    type Error = ServerError;

    fn try_from(header: &HeaderValue) -> Result<Self, Self::Error> {
        match header.to_str() {
            Ok(val) => LoggedUser::from_token(val.to_owned()),
            Err(e) => {
                log::error!("Header to string failed: {:?}", e);
                Err(ServerError::unauthorized())
            }
        }
    }
}

#[derive(Clone, Debug, Copy)]
enum AuthStatus {
    Authorized(DateTime<Utc>),
    NotAuthorized,
}

pub const EXPIRED_DURATION_DAYS: i64 = 30;

pub struct AuthorizedUsers(DashMap<LoggedUser, AuthStatus>);
impl std::default::Default for AuthorizedUsers {
    fn default() -> Self {
        Self(DashMap::new())
    }
}
impl AuthorizedUsers {
    pub fn new() -> Self {
        AuthorizedUsers::default()
    }

    pub fn is_authorized(&self, user: &LoggedUser) -> bool {
        match self.0.get(user) {
            None => {
                tracing::debug!("user has not logged in yet, or the server was rebooted");
                false
            }
            Some(status) => match *status {
                AuthStatus::Authorized(last_time) => {
                    let current_time = Utc::now();
                    let days = (current_time - last_time).num_days();
                    days < EXPIRED_DURATION_DAYS
                }
                AuthStatus::NotAuthorized => {
                    tracing::debug!("user has already logged out");
                    false
                }
            },
        }
    }

    pub fn store_auth(&self, user: LoggedUser, is_auth: bool) {
        let status = if is_auth {
            AuthStatus::Authorized(Utc::now())
        } else {
            AuthStatus::NotAuthorized
        };
        self.0.insert(user, status);
    }
}
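A short sketch of the session bookkeeping above (illustrative; as written it would need to live inside this module, e.g. as a test, to reach `AUTHORIZED_USERS`): a user counts as authorized for up to `EXPIRED_DURATION_DAYS` after a successful login and is rejected immediately after logout.

```rust
// Illustrative flow for the AuthorizedUsers map above.
fn auth_flow_sketch() {
    let user = LoggedUser::new("some-user-id");

    AUTHORIZED_USERS.store_auth(user.clone(), true); // sign in
    assert!(AUTHORIZED_USERS.is_authorized(&user));  // valid for up to 30 days

    AUTHORIZED_USERS.store_auth(user.clone(), false); // sign out
    assert!(!AUTHORIZED_USERS.is_authorized(&user));
}
```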
@@ -1,3 +0,0 @@
pub mod logged_user;
pub mod token;
pub mod user;
@@ -1,94 +0,0 @@
use crate::{
    config::env::{domain, jwt_secret},
    entities::logged_user::EXPIRED_DURATION_DAYS,
};
use actix_web::{dev::Payload, FromRequest, HttpRequest};
use backend_service::{configuration::HEADER_TOKEN, errors::ServerError};
use chrono::{Duration, Local};
use derive_more::{From, Into};
use futures::future::{ready, Ready};
use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

const DEFAULT_ALGORITHM: Algorithm = Algorithm::HS256;

#[derive(Debug, Serialize, Deserialize)]
pub struct Claim {
    // issuer
    iss: String,
    // subject
    sub: String,
    // issued at
    iat: i64,
    // expiry
    exp: i64,
    user_id: String,
}

impl Claim {
    pub fn with_user_id(user_id: &str) -> Self {
        let domain = domain();
        Self {
            iss: domain,
            sub: "auth".to_string(),
            user_id: user_id.to_string(),
            iat: Local::now().timestamp(),
            exp: (Local::now() + Duration::days(EXPIRED_DURATION_DAYS)).timestamp(),
        }
    }

    pub fn user_id(self) -> String {
        self.user_id
    }
}

// impl From<Claim> for User {
//     fn from(claim: Claim) -> Self { Self { email: claim.email } }
// }

#[derive(From, Into, Clone)]
pub struct Token(pub String);
impl Token {
    pub fn create_token(user_id: &str) -> Result<Self, ServerError> {
        let claims = Claim::with_user_id(user_id);
        encode(
            &Header::new(DEFAULT_ALGORITHM),
            &claims,
            &EncodingKey::from_secret(jwt_secret().as_ref()),
        )
        .map(Into::into)
        .map_err(|err| ServerError::internal().context(err))
    }

    pub fn decode_token(token: &Self) -> Result<Claim, ServerError> {
        decode::<Claim>(
            &token.0,
            &DecodingKey::from_secret(jwt_secret().as_ref()),
            &Validation::new(DEFAULT_ALGORITHM),
        )
        .map(|data| Ok(data.claims))
        .map_err(|err| ServerError::unauthorized().context(err))?
    }

    pub fn parser_from_request(request: &HttpRequest) -> Result<Self, ServerError> {
        match request.headers().get(HEADER_TOKEN) {
            Some(header) => match header.to_str() {
                Ok(val) => Ok(Token(val.to_owned())),
                Err(_) => Err(ServerError::unauthorized()),
            },
            None => Err(ServerError::unauthorized()),
        }
    }
}

impl FromRequest for Token {
    type Error = ServerError;
    type Future = Ready<Result<Self, Self::Error>>;

    fn from_request(request: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        match Token::parser_from_request(request) {
            Ok(token) => ready(Ok(token)),
            Err(err) => ready(Err(err)),
        }
    }
}
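The `Token` type above round-trips a user id through an HS256 JWT signed with `jwt_secret()`. A hedged sketch of that round trip as an in-module test (illustrative, not part of the removed code):

```rust
#[cfg(test)]
mod tests {
    use super::{Claim, Token};

    // Encode a token for a user id, then decode it back into a Claim.
    #[test]
    fn token_round_trip() {
        let token = Token::create_token("some-user-id").unwrap();
        let claim: Claim = Token::decode_token(&token).unwrap();
        assert_eq!(claim.user_id(), "some-user-id");
    }
}
```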
@@ -1,13 +0,0 @@
// type mapping: https://kotiri.com/2018/01/31/postgresql-diesel-rust-types.html

use chrono::Utc;

#[derive(Debug, Clone, sqlx::FromRow)]
pub struct UserTable {
    pub(crate) id: uuid::Uuid,
    pub(crate) email: String,
    pub(crate) name: String,
    #[allow(dead_code)]
    pub(crate) create_time: chrono::DateTime<Utc>,
    pub(crate) password: String,
}
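Because `UserTable` derives `sqlx::FromRow`, rows can be fetched directly into it. A hedged sketch (the `find_user` helper is hypothetical; `query_as` with a bound parameter is standard sqlx 0.5 usage):

```rust
// Hypothetical lookup built on the FromRow derive above.
async fn find_user(pool: &sqlx::PgPool, email: &str) -> Result<Option<UserTable>, sqlx::Error> {
    sqlx::query_as::<_, UserTable>("SELECT * FROM user_table WHERE email = $1")
        .bind(email)
        .fetch_optional(pool)
        .await
}
```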
@@ -1,7 +0,0 @@
pub mod application;
pub mod config;
pub mod context;
mod entities;
pub mod middleware;
pub mod services;
pub mod util;
@@ -1,14 +0,0 @@
use backend::{
    application::{init_app_context, Application},
    config::get_configuration,
};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let configuration = get_configuration().expect("Failed to read configuration.");
    let app_ctx = init_app_context(&configuration).await;
    let application = Application::build(configuration, app_ctx).await?;
    application.run_until_stopped().await?;

    Ok(())
}
@@ -1,105 +0,0 @@
use actix_service::{Service, Transform};
use actix_web::{
    dev::{ServiceRequest, ServiceResponse},
    Error, HttpResponse, ResponseError,
};

use crate::{
    config::IGNORE_ROUTES,
    entities::logged_user::{LoggedUser, AUTHORIZED_USERS},
};
use actix_web::{body::AnyBody, dev::MessageBody};
use backend_service::{configuration::HEADER_TOKEN, errors::ServerError};
use futures::future::{ok, LocalBoxFuture, Ready};
use std::{
    convert::TryInto,
    error::Error as StdError,
    task::{Context, Poll},
};

pub struct AuthenticationService;

impl<S, B> Transform<S, ServiceRequest> for AuthenticationService
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error> + 'static,
    S::Future: 'static,
    B: MessageBody + 'static,
    B::Error: StdError,
{
    type Response = ServiceResponse;
    type Error = Error;
    type Transform = AuthenticationMiddleware<S>;
    type InitError = ();
    type Future = Ready<Result<Self::Transform, Self::InitError>>;

    fn new_transform(&self, service: S) -> Self::Future {
        ok(AuthenticationMiddleware { service })
    }
}
pub struct AuthenticationMiddleware<S> {
    service: S,
}

impl<S, B> Service<ServiceRequest> for AuthenticationMiddleware<S>
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error> + 'static,
    S::Future: 'static,
    B: MessageBody + 'static,
    B::Error: StdError,
{
    type Response = ServiceResponse;
    type Error = Error;
    type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    fn call(&self, req: ServiceRequest) -> Self::Future {
        let mut authenticate_pass: bool = false;
        for ignore_route in IGNORE_ROUTES.iter() {
            // tracing::info!("ignore: {}, path: {}", ignore_route, req.path());
            if req.path().starts_with(ignore_route) {
                authenticate_pass = true;
                break;
            }
        }

        if !authenticate_pass {
            if let Some(header) = req.headers().get(HEADER_TOKEN) {
                let result: Result<LoggedUser, ServerError> = header.try_into();
                match result {
                    Ok(logged_user) => {
                        if cfg!(feature = "ignore_auth") {
                            authenticate_pass = true;
                            AUTHORIZED_USERS.store_auth(logged_user, true);
                        } else {
                            authenticate_pass = AUTHORIZED_USERS.is_authorized(&logged_user);
                            if authenticate_pass {
                                AUTHORIZED_USERS.store_auth(logged_user, true);
                            }
                        }
                    }
                    Err(e) => log::error!("{:?}", e),
                }
            } else {
                tracing::debug!("Can't find any token from request: {:?}", req);
            }
        }

        if authenticate_pass {
            let fut = self.service.call(req);
            Box::pin(async move {
                let res = fut.await?;
                Ok(res.map_body(|_, body| AnyBody::from_message(body)))
            })
        } else {
            Box::pin(async move { Ok(req.into_response(unauthorized_response())) })
        }
    }
}

fn unauthorized_response() -> HttpResponse {
    let error = ServerError::unauthorized();
    error.error_response()
}
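The pass-through at the top of `call` is a plain prefix match against `IGNORE_ROUTES`, so `/api/register`, `/api/auth`, and `/ws` skip token authentication. A minimal sketch of the check in isolation (illustrative test; the constant is inlined here to keep it self-contained):

```rust
// Any path that starts with one of IGNORE_ROUTES bypasses authentication.
#[test]
fn ignore_route_prefix_check() {
    const IGNORE_ROUTES: [&str; 3] = ["/api/register", "/api/auth", "/ws"];
    assert!(IGNORE_ROUTES.iter().any(|r| "/api/register".starts_with(r)));
    assert!(!IGNORE_ROUTES.iter().any(|r| "/api/workspace".starts_with(r)));
}
```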
@@ -1,16 +0,0 @@
use actix_cors::Cors;
use actix_web::http;

// https://javascript.info/fetch-crossorigin#cors-for-safe-requests
// https://docs.rs/actix-cors/0.5.4/actix_cors/index.html
// http://www.ruanyifeng.com/blog/2016/04/cors.html
// CORS is short for Cross-Origin Resource Sharing.
pub fn default_cors() -> Cors {
    Cors::default() // allowed_origin returns access-control-allow-origin: * by default
        // .allowed_origin("http://127.0.0.1:8080")
        .send_wildcard()
        .allowed_methods(vec!["GET", "POST", "PUT", "DELETE"])
        .allowed_headers(vec![http::header::ACCEPT])
        .allowed_header(http::header::CONTENT_TYPE)
        .max_age(3600)
}
@@ -1,5 +0,0 @@
mod auth_middleware;
mod cors_middleware;

pub use auth_middleware::*;
pub use cors_middleware::*;
@@ -1,6 +0,0 @@
#![allow(clippy::module_inception)]

pub mod persistence;
pub(crate) mod router;
pub(crate) mod ws_actor;
pub(crate) mod ws_receiver;
@@ -1,63 +0,0 @@
use anyhow::Context;
use backend_service::errors::{internal_error, ServerError};

use flowy_collaboration::{
    protobuf::{CreateDocParams, DocumentId, DocumentInfo, ResetDocumentParams},
    server_document::ServerDocumentManager,
    util::make_document_info_pb_from_revisions_pb,
};

use crate::services::kv::revision_kv::RevisionKVPersistence;
use std::sync::Arc;
use uuid::Uuid;

#[tracing::instrument(level = "trace", skip(document_store, params), err)]
pub(crate) async fn create_document(
    document_store: &Arc<RevisionKVPersistence>,
    mut params: CreateDocParams,
) -> Result<(), ServerError> {
    let revisions = params.take_revisions().take_items();
    let _ = document_store.set_revision(revisions.into()).await?;
    Ok(())
}

#[tracing::instrument(level = "trace", skip(document_store), err)]
pub async fn read_document(
    document_store: &Arc<RevisionKVPersistence>,
    params: DocumentId,
) -> Result<DocumentInfo, ServerError> {
    let _ = Uuid::parse_str(&params.doc_id).context("Parse document id to uuid failed")?;
    let revisions = document_store.get_revisions(&params.doc_id, None).await?;
    match make_document_info_pb_from_revisions_pb(&params.doc_id, revisions) {
        Ok(Some(document_info)) => Ok(document_info),
        Ok(None) => Err(ServerError::record_not_found().context(format!("{} not exist", params.doc_id))),
        Err(e) => Err(ServerError::internal().context(e)),
    }
}

#[tracing::instrument(level = "debug", skip(document_manager, params), err)]
pub async fn reset_document(
    document_manager: &Arc<ServerDocumentManager>,
    mut params: ResetDocumentParams,
) -> Result<(), ServerError> {
    let repeated_revision = params.take_revisions();
    if repeated_revision.get_items().is_empty() {
        return Err(ServerError::payload_none().context("Revisions should not be empty when resetting the document"));
    }
    let doc_id = params.doc_id.clone();
    let _ = document_manager
        .handle_document_reset(&doc_id, repeated_revision)
        .await
        .map_err(internal_error)?;
    Ok(())
}

#[tracing::instrument(level = "trace", skip(document_store), err)]
pub(crate) async fn delete_document(
    document_store: &Arc<RevisionKVPersistence>,
    doc_id: Uuid,
) -> Result<(), ServerError> {
    // TODO: deleting revisions may cause timing issues. Maybe delete asynchronously?
    let _ = document_store.delete_revisions(&doc_id.to_string(), None).await?;
    Ok(())
}
@ -1,48 +0,0 @@
use crate::{
    context::FlowyPersistence,
    services::document::persistence::{create_document, read_document, reset_document},
    util::serde_ext::parse_from_payload,
};
use actix_web::{
    web::{Data, Payload},
    HttpResponse,
};
use backend_service::{errors::ServerError, response::FlowyResponse};
use flowy_collaboration::{
    protobuf::{
        CreateDocParams as CreateDocParamsPB, DocumentId as DocumentIdPB, ResetDocumentParams as ResetDocumentParamsPB,
    },
    server_document::ServerDocumentManager,
};
use std::sync::Arc;

pub async fn create_document_handler(
    payload: Payload,
    persistence: Data<Arc<FlowyPersistence>>,
) -> Result<HttpResponse, ServerError> {
    let params: CreateDocParamsPB = parse_from_payload(payload).await?;
    let kv_store = persistence.document_kv_store();
    let _ = create_document(&kv_store, params).await?;
    Ok(FlowyResponse::success().into())
}

#[tracing::instrument(level = "debug", skip(payload, persistence), err)]
pub async fn read_document_handler(
    payload: Payload,
    persistence: Data<Arc<FlowyPersistence>>,
) -> Result<HttpResponse, ServerError> {
    let params: DocumentIdPB = parse_from_payload(payload).await?;
    let kv_store = persistence.document_kv_store();
    let doc = read_document(&kv_store, params).await?;
    let response = FlowyResponse::success().pb(doc)?;
    Ok(response.into())
}

pub async fn reset_document_handler(
    payload: Payload,
    document_manager: Data<Arc<ServerDocumentManager>>,
) -> Result<HttpResponse, ServerError> {
    let params: ResetDocumentParamsPB = parse_from_payload(payload).await?;
    let _ = reset_document(document_manager.get_ref(), params).await?;
    Ok(FlowyResponse::success().into())
}
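Note: every handler in this router has the same shape: deserialize the request body, call the persistence function, wrap the result in a success response. A self-contained sketch of that shape with plain actix-web and JSON (parse_from_payload and FlowyResponse are this repo's own protobuf helpers, so serde stands in for them here):

use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use serde::{Deserialize, Serialize};

#[derive(Deserialize, Serialize)]
struct DocumentId {
    doc_id: String,
}

// Mirrors read_document_handler: parse the payload, look the document up
// (elided here), and return it wrapped in a success response.
async fn read_document(params: web::Json<DocumentId>) -> impl Responder {
    HttpResponse::Ok().json(params.into_inner())
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().route("/api/doc", web::post().to(read_document)))
        .bind(("127.0.0.1", 8088))?
        .run()
        .await
}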
@ -1,159 +0,0 @@
use crate::{
    context::FlowyPersistence,
    services::web_socket::{entities::Socket, WSClientData, WSUser, WebSocketMessage},
    util::serde_ext::{md5, parse_from_bytes},
};
use actix_rt::task::spawn_blocking;
use async_stream::stream;
use backend_service::errors::{internal_error, Result, ServerError};

use crate::services::web_socket::revision_data_to_ws_message;
use flowy_collaboration::{
    protobuf::{
        ClientRevisionWSData as ClientRevisionWSDataPB, ClientRevisionWSDataType as ClientRevisionWSDataTypePB,
        Revision as RevisionPB,
    },
    server_document::ServerDocumentManager,
    synchronizer::{RevisionSyncResponse, RevisionUser},
};
use futures::stream::StreamExt;
use lib_ws::WSChannel;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};

pub enum DocumentWSActorMessage {
    ClientData {
        client_data: WSClientData,
        persistence: Arc<FlowyPersistence>,
        ret: oneshot::Sender<Result<()>>,
    },
}

pub struct DocumentWebSocketActor {
    actor_msg_receiver: Option<mpsc::Receiver<DocumentWSActorMessage>>,
    doc_manager: Arc<ServerDocumentManager>,
}

impl DocumentWebSocketActor {
    pub fn new(receiver: mpsc::Receiver<DocumentWSActorMessage>, manager: Arc<ServerDocumentManager>) -> Self {
        Self {
            actor_msg_receiver: Some(receiver),
            doc_manager: manager,
        }
    }

    pub async fn run(mut self) {
        let mut actor_msg_receiver = self
            .actor_msg_receiver
            .take()
            .expect("DocumentWebSocketActor's receiver should only be taken once");

        let stream = stream! {
            loop {
                match actor_msg_receiver.recv().await {
                    Some(msg) => yield msg,
                    None => break,
                }
            }
        };

        stream.for_each(|msg| self.handle_message(msg)).await;
    }

    async fn handle_message(&self, msg: DocumentWSActorMessage) {
        match msg {
            DocumentWSActorMessage::ClientData {
                client_data,
                persistence: _,
                ret,
            } => {
                let _ = ret.send(self.handle_document_data(client_data).await);
            }
        }
    }

    async fn handle_document_data(&self, client_data: WSClientData) -> Result<()> {
        let WSClientData { user, socket, data } = client_data;
        let document_client_data = spawn_blocking(move || parse_from_bytes::<ClientRevisionWSDataPB>(&data))
            .await
            .map_err(internal_error)??;

        tracing::trace!(
            "[DocumentWebSocketActor]: receive: {}:{}, {:?}",
            document_client_data.object_id,
            document_client_data.data_id,
            document_client_data.ty
        );

        let user = Arc::new(DocumentRevisionUser { user, socket });
        match &document_client_data.ty {
            ClientRevisionWSDataTypePB::ClientPushRev => {
                let _ = self
                    .doc_manager
                    .handle_client_revisions(user, document_client_data)
                    .await
                    .map_err(internal_error)?;
            }
            ClientRevisionWSDataTypePB::ClientPing => {
                let _ = self
                    .doc_manager
                    .handle_client_ping(user, document_client_data)
                    .await
                    .map_err(internal_error)?;
            }
        }

        Ok(())
    }
}

#[allow(dead_code)]
fn verify_md5(revision: &RevisionPB) -> Result<()> {
    if md5(&revision.delta_data) != revision.md5 {
        return Err(ServerError::internal().context("RevisionPB md5 does not match"));
    }
    Ok(())
}

#[derive(Clone)]
pub struct DocumentRevisionUser {
    pub user: Arc<WSUser>,
    pub(crate) socket: Socket,
}

impl std::fmt::Debug for DocumentRevisionUser {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("DocumentRevisionUser")
            .field("user", &self.user)
            .field("socket", &self.socket)
            .finish()
    }
}

impl RevisionUser for DocumentRevisionUser {
    fn user_id(&self) -> String {
        self.user.id().to_string()
    }

    fn receive(&self, resp: RevisionSyncResponse) {
        let result = match resp {
            RevisionSyncResponse::Pull(data) => {
                let msg: WebSocketMessage = revision_data_to_ws_message(data, WSChannel::Document);
                self.socket.try_send(msg).map_err(internal_error)
            }
            RevisionSyncResponse::Push(data) => {
                let msg: WebSocketMessage = revision_data_to_ws_message(data, WSChannel::Document);
                self.socket.try_send(msg).map_err(internal_error)
            }
            RevisionSyncResponse::Ack(data) => {
                let msg: WebSocketMessage = revision_data_to_ws_message(data, WSChannel::Document);
                self.socket.try_send(msg).map_err(internal_error)
            }
        };

        match result {
            Ok(_) => {}
            Err(e) => log::error!("[DocumentRevisionUser]: {}", e),
        }
    }
}
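Note: DocumentWebSocketActor is the standard Tokio actor shape: one task owns the document manager, callers reach it through an mpsc channel, and each message carries a oneshot sender so the caller can await the result. A self-contained sketch of that pattern with plain tokio primitives (the payload handling is a placeholder):

use tokio::sync::{mpsc, oneshot};

enum ActorMessage {
    ClientData {
        payload: Vec<u8>,
        // Like `ret` in DocumentWSActorMessage::ClientData: the caller awaits
        // this end until the actor has finished handling the data.
        ret: oneshot::Sender<Result<(), String>>,
    },
}

async fn run_actor(mut receiver: mpsc::Receiver<ActorMessage>) {
    // The receive loop plays the role of DocumentWebSocketActor::run.
    while let Some(msg) = receiver.recv().await {
        match msg {
            ActorMessage::ClientData { payload, ret } => {
                let outcome = if payload.is_empty() { Err("empty".to_string()) } else { Ok(()) };
                let _ = ret.send(outcome);
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (sender, receiver) = mpsc::channel(1000);
    tokio::spawn(run_actor(receiver));

    let (ret, ret_rx) = oneshot::channel();
    sender
        .send(ActorMessage::ClientData { payload: b"revision bytes".to_vec(), ret })
        .await
        .unwrap();
    assert!(ret_rx.await.unwrap().is_ok());
}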
@ -1,176 +0,0 @@
use crate::{
    context::{DocumentRevisionKV, FlowyPersistence},
    services::{
        document::{
            persistence::{create_document, read_document},
            ws_actor::{DocumentWSActorMessage, DocumentWebSocketActor},
        },
        kv::revision_kv::revisions_to_key_value_items,
        web_socket::{WSClientData, WebSocketReceiver},
    },
};
use backend_service::errors::ServerError;
use flowy_collaboration::{
    entities::document_info::DocumentInfo,
    errors::CollaborateError,
    protobuf::{
        CreateDocParams as CreateDocParamsPB, DocumentId, RepeatedRevision as RepeatedRevisionPB,
        Revision as RevisionPB,
    },
    server_document::{DocumentCloudPersistence, ServerDocumentManager},
    util::make_document_info_from_revisions_pb,
};
use lib_infra::future::BoxResultFuture;
use std::{
    convert::TryInto,
    fmt::{Debug, Formatter},
    sync::Arc,
};
use tokio::sync::{mpsc, oneshot};

pub fn make_document_ws_receiver(
    persistence: Arc<FlowyPersistence>,
    document_manager: Arc<ServerDocumentManager>,
) -> Arc<DocumentWebSocketReceiver> {
    let (actor_msg_sender, rx) = tokio::sync::mpsc::channel(1000);
    let actor = DocumentWebSocketActor::new(rx, document_manager);
    tokio::task::spawn(actor.run());

    Arc::new(DocumentWebSocketReceiver::new(persistence, actor_msg_sender))
}

pub struct DocumentWebSocketReceiver {
    actor_msg_sender: mpsc::Sender<DocumentWSActorMessage>,
    persistence: Arc<FlowyPersistence>,
}

impl DocumentWebSocketReceiver {
    pub fn new(persistence: Arc<FlowyPersistence>, actor_msg_sender: mpsc::Sender<DocumentWSActorMessage>) -> Self {
        Self {
            actor_msg_sender,
            persistence,
        }
    }
}

impl WebSocketReceiver for DocumentWebSocketReceiver {
    fn receive(&self, data: WSClientData) {
        let (ret, rx) = oneshot::channel();
        let actor_msg_sender = self.actor_msg_sender.clone();
        let persistence = self.persistence.clone();

        actix_rt::spawn(async move {
            let msg = DocumentWSActorMessage::ClientData {
                client_data: data,
                persistence,
                ret,
            };

            match actor_msg_sender.send(msg).await {
                Ok(_) => {}
                Err(e) => tracing::error!("[DocumentWebSocketReceiver]: send message to actor failed: {}", e),
            }
            match rx.await {
                Ok(_) => {}
                Err(e) => tracing::error!("[DocumentWebSocketReceiver]: message ret failed {:?}", e),
            };
        });
    }
}

pub struct HttpDocumentCloudPersistence(pub Arc<DocumentRevisionKV>);
impl Debug for HttpDocumentCloudPersistence {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str("HttpDocumentCloudPersistence")
    }
}

impl DocumentCloudPersistence for HttpDocumentCloudPersistence {
    fn read_document(&self, doc_id: &str) -> BoxResultFuture<DocumentInfo, CollaborateError> {
        let params = DocumentId {
            doc_id: doc_id.to_string(),
            ..Default::default()
        };
        let document_store = self.0.clone();
        Box::pin(async move {
            let mut pb_doc = read_document(&document_store, params)
                .await
                .map_err(|e| e.to_collaborate_error())?;
            let doc = (&mut pb_doc)
                .try_into()
                .map_err(|e| CollaborateError::internal().context(e))?;
            Ok(doc)
        })
    }

    fn create_document(
        &self,
        doc_id: &str,
        repeated_revision: RepeatedRevisionPB,
    ) -> BoxResultFuture<Option<DocumentInfo>, CollaborateError> {
        let document_store = self.0.clone();
        let doc_id = doc_id.to_owned();
        Box::pin(async move {
            let document_info = make_document_info_from_revisions_pb(&doc_id, repeated_revision.clone())?;
            let doc_id = doc_id.to_owned();
            let mut params = CreateDocParamsPB::new();
            params.set_id(doc_id);
            params.set_revisions(repeated_revision);
            let _ = create_document(&document_store, params)
                .await
                .map_err(|e| e.to_collaborate_error())?;
            Ok(document_info)
        })
    }

    fn read_document_revisions(
        &self,
        doc_id: &str,
        rev_ids: Option<Vec<i64>>,
    ) -> BoxResultFuture<Vec<RevisionPB>, CollaborateError> {
        let document_store = self.0.clone();
        let doc_id = doc_id.to_owned();
        let f = || async move {
            let mut repeated_revision = document_store.get_revisions(&doc_id, rev_ids).await?;
            Ok::<Vec<RevisionPB>, ServerError>(repeated_revision.take_items().into())
        };

        Box::pin(async move { f().await.map_err(|e| e.to_collaborate_error()) })
    }

    fn save_document_revisions(
        &self,
        mut repeated_revision: RepeatedRevisionPB,
    ) -> BoxResultFuture<(), CollaborateError> {
        let document_store = self.0.clone();
        let f = || async move {
            let revisions = repeated_revision.take_items().into();
            let _ = document_store.set_revision(revisions).await?;
            Ok::<(), ServerError>(())
        };

        Box::pin(async move { f().await.map_err(|e| e.to_collaborate_error()) })
    }

    fn reset_document(
        &self,
        doc_id: &str,
        mut repeated_revision: RepeatedRevisionPB,
    ) -> BoxResultFuture<(), CollaborateError> {
        let document_store = self.0.clone();
        let doc_id = doc_id.to_owned();
        let f = || async move {
            document_store
                .transaction(|mut transaction| {
                    Box::pin(async move {
                        let _ = transaction.batch_delete_key_start_with(&doc_id).await?;
                        let items = revisions_to_key_value_items(repeated_revision.take_items().into())?;
                        let _ = transaction.batch_set(items).await?;
                        Ok(())
                    })
                })
                .await
        };
        Box::pin(async move { f().await.map_err(|e| e.to_collaborate_error()) })
    }
}
@ -1,113 +0,0 @@
use crate::services::folder::view::read_view_belong_to_id;

use crate::{
    entities::logged_user::LoggedUser,
    services::folder::{app::persistence::*, trash::read_trash_ids},
    util::sqlx_ext::{map_sqlx_error, DBTransaction, SqlBuilder},
};
use backend_service::errors::{invalid_params, ServerError};
use chrono::Utc;
use flowy_folder_data_model::{
    parser::{
        app::{AppDesc, AppName},
        workspace::WorkspaceIdentify,
    },
    protobuf::{App as AppPB, CreateAppParams as CreateAppParamsPB, RepeatedView as RepeatedViewPB},
};
use sqlx::{postgres::PgArguments, Postgres};
use uuid::Uuid;

pub(crate) async fn create_app(
    transaction: &mut DBTransaction<'_>,
    mut params: CreateAppParamsPB,
    logged_user: LoggedUser,
) -> Result<AppPB, ServerError> {
    let name = AppName::parse(params.take_name()).map_err(invalid_params)?;
    let workspace_id = WorkspaceIdentify::parse(params.take_workspace_id()).map_err(invalid_params)?;
    let user_id = logged_user.as_uuid()?.to_string();
    let desc = AppDesc::parse(params.take_desc()).map_err(invalid_params)?;

    let (sql, args, app) = NewAppSqlBuilder::new(&user_id, workspace_id.as_ref())
        .name(name.as_ref())
        .desc(desc.as_ref())
        .color_style(params.take_color_style())
        .build()?;

    let _ = sqlx::query_with(&sql, args)
        .execute(transaction)
        .await
        .map_err(map_sqlx_error)?;
    Ok(app)
}

pub(crate) async fn read_app(
    transaction: &mut DBTransaction<'_>,
    app_id: Uuid,
    user: &LoggedUser,
) -> Result<AppPB, ServerError> {
    let table = read_app_table(app_id, transaction).await?;

    let read_trash_ids = read_trash_ids(user, transaction).await?;
    if read_trash_ids.contains(&table.id.to_string()) {
        return Err(ServerError::record_not_found());
    }

    let mut views = RepeatedViewPB::default();
    views.set_items(
        read_view_belong_to_id(&table.id.to_string(), user, transaction as &mut DBTransaction<'_>)
            .await?
            .into(),
    );

    let mut app: AppPB = table.into();
    app.set_belongings(views);
    Ok(app)
}

pub(crate) async fn read_app_table(app_id: Uuid, transaction: &mut DBTransaction<'_>) -> Result<AppTable, ServerError> {
    let (sql, args) = SqlBuilder::select(APP_TABLE)
        .add_field("*")
        .and_where_eq("id", app_id)
        .build()?;

    let table = sqlx::query_as_with::<Postgres, AppTable, PgArguments>(&sql, args)
        .fetch_one(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;

    Ok(table)
}

pub(crate) async fn update_app(
    transaction: &mut DBTransaction<'_>,
    app_id: Uuid,
    name: Option<String>,
    desc: Option<String>,
    color_style: Option<Vec<u8>>,
) -> Result<(), ServerError> {
    let (sql, args) = SqlBuilder::update(APP_TABLE)
        .add_some_arg("name", name)
        .add_some_arg("color_style", color_style)
        .add_some_arg("description", desc)
        .add_some_arg("modified_time", Some(Utc::now()))
        .and_where_eq("id", app_id)
        .build()?;

    sqlx::query_with(&sql, args)
        .execute(transaction)
        .await
        .map_err(map_sqlx_error)?;

    Ok(())
}

#[tracing::instrument(skip(transaction), err)]
pub(crate) async fn delete_app(transaction: &mut DBTransaction<'_>, app_id: Uuid) -> Result<(), ServerError> {
    let (sql, args) = SqlBuilder::delete(APP_TABLE).and_where_eq("id", app_id).build()?;
    let _ = sqlx::query_with(&sql, args)
        .execute(transaction)
        .await
        .map_err(map_sqlx_error)?;

    Ok(())
}
@ -1,5 +0,0 @@
#![allow(clippy::module_inception)]
pub mod controller;
pub mod router;

pub mod persistence;
@ -1,140 +0,0 @@
use crate::util::sqlx_ext::SqlBuilder;
use backend_service::errors::{invalid_params, ServerError};
use chrono::{DateTime, NaiveDateTime, Utc};
use flowy_folder_data_model::{
    parser::app::AppIdentify,
    protobuf::{App as AppPB, ColorStyle as ColorStylePB, RepeatedView as RepeatedViewPB},
};
use protobuf::Message;
use sqlx::postgres::PgArguments;
use uuid::Uuid;

pub(crate) const APP_TABLE: &str = "app_table";

pub struct NewAppSqlBuilder {
    table: AppTable,
}

impl NewAppSqlBuilder {
    pub fn new(user_id: &str, workspace_id: &str) -> Self {
        let uuid = uuid::Uuid::new_v4();
        let time = Utc::now();

        let table = AppTable {
            id: uuid,
            workspace_id: workspace_id.to_string(),
            name: "".to_string(),
            description: "".to_string(),
            color_style: default_color_style(),
            last_view_id: "".to_string(),
            modified_time: time,
            create_time: time,
            user_id: user_id.to_string(),
        };

        Self { table }
    }

    pub fn from_app(user_id: &str, app: AppPB) -> Result<Self, ServerError> {
        let app_id = check_app_id(app.id)?;
        let create_time = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(app.create_time, 0), Utc);
        let modified_time = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(app.modified_time, 0), Utc);

        let table = AppTable {
            id: app_id,
            workspace_id: app.workspace_id,
            name: app.name,
            description: app.desc,
            color_style: default_color_style(),
            last_view_id: "".to_string(),
            modified_time,
            create_time,
            user_id: user_id.to_string(),
        };

        Ok(Self { table })
    }

    pub fn name(mut self, name: &str) -> Self {
        self.table.name = name.to_string();
        self
    }

    #[allow(dead_code)]
    pub fn last_view_id(mut self, view_id: &str) -> Self {
        self.table.last_view_id = view_id.to_string();
        self
    }

    pub fn desc(mut self, desc: &str) -> Self {
        self.table.description = desc.to_owned();
        self
    }

    pub fn color_style(mut self, color_style: ColorStylePB) -> Self {
        self.table.color_style = color_style.write_to_bytes().unwrap_or_else(|_| default_color_style());
        self
    }

    pub fn build(self) -> Result<(String, PgArguments, AppPB), ServerError> {
        let app: AppPB = self.table.clone().into();

        let (sql, args) = SqlBuilder::create(APP_TABLE)
            .add_field_with_arg("id", self.table.id)
            .add_field_with_arg("workspace_id", self.table.workspace_id)
            .add_field_with_arg("name", self.table.name)
            .add_field_with_arg("description", self.table.description)
            .add_field_with_arg("color_style", self.table.color_style)
            .add_field_with_arg("modified_time", self.table.modified_time)
            .add_field_with_arg("create_time", self.table.create_time)
            .add_field_with_arg("user_id", self.table.user_id)
            .build()?;

        Ok((sql, args, app))
    }
}

fn default_color_style() -> Vec<u8> {
    let style = ColorStylePB::default();
    match style.write_to_bytes() {
        Ok(bytes) => bytes,
        Err(e) => {
            log::error!("Serialize color style failed: {:?}", e);
            vec![]
        }
    }
}

pub(crate) fn check_app_id(id: String) -> Result<Uuid, ServerError> {
    let app_id = AppIdentify::parse(id).map_err(invalid_params)?;
    let app_id = Uuid::parse_str(app_id.as_ref())?;
    Ok(app_id)
}

#[derive(Debug, Clone, sqlx::FromRow)]
pub struct AppTable {
    pub(crate) id: uuid::Uuid,
    pub(crate) workspace_id: String,
    pub(crate) name: String,
    pub(crate) description: String,
    pub(crate) color_style: Vec<u8>,
    pub(crate) last_view_id: String,
    pub(crate) modified_time: chrono::DateTime<Utc>,
    pub(crate) create_time: chrono::DateTime<Utc>,
    #[allow(dead_code)]
    pub(crate) user_id: String,
}
impl std::convert::From<AppTable> for AppPB {
    fn from(table: AppTable) -> Self {
        let mut app = AppPB::default();
        app.set_id(table.id.to_string());
        app.set_workspace_id(table.workspace_id.to_string());
        app.set_name(table.name.clone());
        app.set_desc(table.description.clone());
        app.set_belongings(RepeatedViewPB::default());
        app.set_modified_time(table.modified_time.timestamp());
        app.set_create_time(table.create_time.timestamp());

        app
    }
}
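Note: from_app above rebuilds DateTime<Utc> columns from the second-precision timestamps carried in AppPB. A standalone sketch of that round trip, written against the chrono 0.4 API this file uses (sub-second precision is intentionally dropped):

use chrono::{DateTime, NaiveDateTime, Utc};

fn main() {
    let now: DateTime<Utc> = Utc::now();

    // Protobuf side: AppPB stores seconds since the epoch.
    let secs = now.timestamp();

    // Table side: rebuild a DateTime<Utc> from those seconds, as from_app does.
    let restored = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(secs, 0), Utc);

    assert_eq!(restored.timestamp(), secs);
    println!("restored = {}", restored);
}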
@ -1,114 +0,0 @@
use crate::{
    entities::logged_user::LoggedUser,
    services::folder::app::{
        controller::{create_app, delete_app, read_app, update_app},
        persistence::check_app_id,
    },
    util::serde_ext::parse_from_payload,
};
use actix_web::{
    web::{Data, Payload},
    HttpResponse,
};
use anyhow::Context;
use backend_service::{
    errors::{invalid_params, ServerError},
    response::FlowyResponse,
};
use flowy_folder_data_model::{
    parser::app::{AppDesc, AppName},
    protobuf::{AppId as AppIdPB, CreateAppParams as CreateAppParamsPB, UpdateAppParams as UpdateAppParamsPB},
};
use protobuf::Message;
use sqlx::PgPool;

pub async fn create_handler(
    payload: Payload,
    pool: Data<PgPool>,
    logged_user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let params: CreateAppParamsPB = parse_from_payload(payload).await?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to create app")?;

    let app = create_app(&mut transaction, params, logged_user).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to create app.")?;

    Ok(FlowyResponse::success().pb(app)?.into())
}

pub async fn read_handler(payload: Payload, pool: Data<PgPool>, user: LoggedUser) -> Result<HttpResponse, ServerError> {
    let params: AppIdPB = parse_from_payload(payload).await?;
    let app_id = check_app_id(params.app_id)?;

    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to read app")?;
    let app = read_app(&mut transaction, app_id, &user).await?;
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to read app.")?;

    Ok(FlowyResponse::success().pb(app)?.into())
}

pub async fn update_handler(payload: Payload, pool: Data<PgPool>) -> Result<HttpResponse, ServerError> {
    let params: UpdateAppParamsPB = parse_from_payload(payload).await?;
    let app_id = check_app_id(params.get_app_id().to_string())?;
    let name = match params.has_name() {
        false => None,
        true => Some(AppName::parse(params.get_name().to_owned()).map_err(invalid_params)?.0),
    };

    let color_style = match params.has_color_style() {
        false => None,
        true => {
            let color_bytes = params.get_color_style().write_to_bytes()?;
            Some(color_bytes)
        }
    };

    let desc = match params.has_desc() {
        false => None,
        true => Some(AppDesc::parse(params.get_desc().to_owned()).map_err(invalid_params)?.0),
    };

    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to update app")?;

    let _ = update_app(&mut transaction, app_id, name, desc, color_style).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to update app.")?;
    Ok(FlowyResponse::success().into())
}

pub async fn delete_handler(payload: Payload, pool: Data<PgPool>) -> Result<HttpResponse, ServerError> {
    let params: AppIdPB = parse_from_payload(payload).await?;
    let app_id = check_app_id(params.app_id.to_owned())?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to delete app")?;

    let _ = delete_app(&mut transaction, app_id).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to delete app.")?;

    Ok(FlowyResponse::success().into())
}
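Note: all four handlers above repeat the same begin/execute/commit pattern, attaching anyhow::Context at each step. A minimal sketch of that pattern against a plain sqlx PgPool, written against the sqlx 0.5 line this backend uses (DATABASE_URL and app_table are placeholders, and it needs a live Postgres plus sqlx's postgres and uuid features to run):

use anyhow::Context;
use sqlx::PgPool;

async fn delete_row(pool: &PgPool, id: uuid::Uuid) -> anyhow::Result<()> {
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to delete app")?;

    sqlx::query("DELETE FROM app_table WHERE id = $1")
        .bind(id)
        .execute(&mut transaction)
        .await
        .context("Failed to execute the delete statement")?;

    // Nothing is visible to other connections until the commit succeeds.
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to delete app.")?;
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let pool = PgPool::connect(&std::env::var("DATABASE_URL")?).await?;
    delete_row(&pool, uuid::Uuid::new_v4()).await
}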
@ -1,6 +0,0 @@
pub mod app;
pub mod trash;
pub mod view;
pub mod workspace;
pub(crate) mod ws_actor;
pub(crate) mod ws_receiver;
@ -1,6 +0,0 @@
#![allow(clippy::module_inception)]
mod persistence;
pub mod router;
mod trash;

pub(crate) use trash::*;
@ -1,40 +0,0 @@
use crate::services::folder::{app::persistence::AppTable, view::persistence::ViewTable};
use flowy_folder_data_model::protobuf::{Trash, TrashType};

pub(crate) const TRASH_TABLE: &str = "trash_table";

#[derive(Debug, Clone, sqlx::FromRow)]
pub struct TrashTable {
    pub(crate) id: uuid::Uuid,
    #[allow(dead_code)]
    pub(crate) user_id: String,
    pub(crate) ty: i32,
}

impl std::convert::From<AppTable> for Trash {
    fn from(table: AppTable) -> Self {
        Trash {
            id: table.id.to_string(),
            name: table.name,
            modified_time: table.modified_time.timestamp(),
            create_time: table.create_time.timestamp(),
            ty: TrashType::App,
            unknown_fields: Default::default(),
            cached_size: Default::default(),
        }
    }
}

impl std::convert::From<ViewTable> for Trash {
    fn from(table: ViewTable) -> Self {
        Trash {
            id: table.id.to_string(),
            name: table.name,
            modified_time: table.modified_time.timestamp(),
            create_time: table.create_time.timestamp(),
            ty: TrashType::View,
            unknown_fields: Default::default(),
            cached_size: Default::default(),
        }
    }
}
@ -1,106 +0,0 @@
use crate::{
    context::FlowyPersistence,
    entities::logged_user::LoggedUser,
    services::folder::trash::{create_trash, delete_all_trash, delete_trash, read_trash},
    util::serde_ext::parse_from_payload,
};
use ::protobuf::ProtobufEnum;
use actix_web::{
    web::{Data, Payload},
    HttpResponse,
};
use anyhow::Context;
use backend_service::{
    errors::{invalid_params, ServerError},
    response::FlowyResponse,
};
use flowy_folder_data_model::{parser::trash::TrashIdentify, protobuf::RepeatedTrashId};
use sqlx::PgPool;
use std::sync::Arc;
use uuid::Uuid;

#[tracing::instrument(skip(payload, pool, logged_user), err)]
pub async fn create_handler(
    payload: Payload,
    pool: Data<PgPool>,
    logged_user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let params: RepeatedTrashId = parse_from_payload(payload).await?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to create trash")?;

    let _ = create_trash(&mut transaction, make_records(params)?, logged_user).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to create trash.")?;

    Ok(FlowyResponse::success().into())
}

#[tracing::instrument(skip(payload, persistence, logged_user), fields(delete_trash), err)]
pub async fn delete_handler(
    payload: Payload,
    persistence: Data<Arc<FlowyPersistence>>,
    logged_user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let pool = persistence.pg_pool();
    let kv_store = persistence.document_kv_store();
    let params: RepeatedTrashId = parse_from_payload(payload).await?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to delete trash")?;

    if params.delete_all {
        tracing::Span::current().record("delete_trash", &"all");
        let _ = delete_all_trash(&mut transaction, &kv_store, &logged_user).await?;
    } else {
        let records = make_records(params)?;
        let _ = delete_trash(&mut transaction, &kv_store, records).await?;
    }

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to delete trash.")?;

    Ok(FlowyResponse::success().into())
}

pub async fn read_handler(pool: Data<PgPool>, logged_user: LoggedUser) -> Result<HttpResponse, ServerError> {
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to read trash")?;

    let repeated_trash = read_trash(&mut transaction, &logged_user).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to read trash.")?;

    Ok(FlowyResponse::success().pb(repeated_trash)?.into())
}

fn check_trash_id(id: String) -> Result<Uuid, ServerError> {
    let trash_id = TrashIdentify::parse(id).map_err(invalid_params)?;
    let trash_id = Uuid::parse_str(trash_id.as_ref())?;
    Ok(trash_id)
}

fn make_records(identifiers: RepeatedTrashId) -> Result<Vec<(Uuid, i32)>, ServerError> {
    let mut records = vec![];
    for identifier in identifiers.items {
        // match TrashType::from_i32(identifier.ty.value()) {
        //     None => {}
        //     Some(ty) => {}
        // }
        records.push((check_trash_id(identifier.id.to_owned())?, identifier.ty.value()));
    }
    Ok(records)
}
@ -1,180 +0,0 @@
use crate::{
    context::DocumentRevisionKV,
    entities::logged_user::LoggedUser,
    services::folder::{
        app::controller::{delete_app, read_app_table},
        trash::persistence::{TrashTable, TRASH_TABLE},
        view::{delete_view, read_view_table},
    },
    util::sqlx_ext::{map_sqlx_error, DBTransaction, SqlBuilder},
};
use ::protobuf::ProtobufEnum;
use backend_service::errors::ServerError;
use flowy_folder_data_model::protobuf::{RepeatedTrash, Trash, TrashType};
use sqlx::{postgres::PgArguments, Postgres, Row};
use std::sync::Arc;
use uuid::Uuid;

#[tracing::instrument(skip(transaction, user), err)]
pub(crate) async fn create_trash(
    transaction: &mut DBTransaction<'_>,
    records: Vec<(Uuid, i32)>,
    user: LoggedUser,
) -> Result<(), ServerError> {
    for (trash_id, ty) in records {
        let (sql, args) = SqlBuilder::create(TRASH_TABLE)
            .add_field_with_arg("id", trash_id)
            .add_field_with_arg("user_id", &user.user_id)
            .add_field_with_arg("ty", ty)
            .build()?;

        let _ = sqlx::query_with(&sql, args)
            .execute(transaction as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;
    }

    Ok(())
}

#[tracing::instrument(skip(transaction, document_store, user), fields(delete_rows), err)]
pub(crate) async fn delete_all_trash(
    transaction: &mut DBTransaction<'_>,
    document_store: &Arc<DocumentRevisionKV>,
    user: &LoggedUser,
) -> Result<(), ServerError> {
    let (sql, args) = SqlBuilder::select(TRASH_TABLE)
        .and_where_eq("user_id", &user.user_id)
        .build()?;
    let rows = sqlx::query_with(&sql, args)
        .fetch_all(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?
        .into_iter()
        .map(|row| (row.get("id"), row.get("ty")))
        .collect::<Vec<(Uuid, i32)>>();
    tracing::Span::current().record("delete_rows", &format!("{:?}", rows).as_str());
    let affected_row_count = rows.len();
    let _ = delete_trash_associate_targets(transaction as &mut DBTransaction<'_>, document_store, rows).await?;

    let (sql, args) = SqlBuilder::delete(TRASH_TABLE)
        .and_where_eq("user_id", &user.user_id)
        .build()?;
    let result = sqlx::query_with(&sql, args)
        .execute(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;
    tracing::Span::current().record("affected_row", &result.rows_affected());
    debug_assert_eq!(affected_row_count as u64, result.rows_affected());

    Ok(())
}

#[tracing::instrument(skip(transaction, document_store), err)]
pub(crate) async fn delete_trash(
    transaction: &mut DBTransaction<'_>,
    document_store: &Arc<DocumentRevisionKV>,
    records: Vec<(Uuid, i32)>,
) -> Result<(), ServerError> {
    for (trash_id, _) in records {
        // Read the trash_table row and delete the original record according to its TrashType
        let (sql, args) = SqlBuilder::select(TRASH_TABLE)
            .add_field("*")
            .and_where_eq("id", trash_id)
            .build()?;

        let trash_table = sqlx::query_as_with::<Postgres, TrashTable, PgArguments>(&sql, args)
            .fetch_one(transaction as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;

        let _ = delete_trash_associate_targets(
            transaction as &mut DBTransaction<'_>,
            document_store,
            vec![(trash_table.id, trash_table.ty)],
        )
        .await?;

        // Delete the trash_table row itself
        let (sql, args) = SqlBuilder::delete(TRASH_TABLE).and_where_eq("id", &trash_id).build()?;
        let _ = sqlx::query_with(&sql, args)
            .execute(transaction as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;
    }
    Ok(())
}

#[tracing::instrument(skip(transaction, document_store, targets), err)]
async fn delete_trash_associate_targets(
    transaction: &mut DBTransaction<'_>,
    document_store: &Arc<DocumentRevisionKV>,
    targets: Vec<(Uuid, i32)>,
) -> Result<(), ServerError> {
    for (id, ty) in targets {
        match TrashType::from_i32(ty) {
            None => log::error!("Parse trash type with value: {} failed", ty),
            Some(ty) => match ty {
                TrashType::Unknown => {}
                TrashType::View => {
                    let _ = delete_view(transaction as &mut DBTransaction<'_>, document_store, vec![id]).await;
                }
                TrashType::App => {
                    let _ = delete_app(transaction as &mut DBTransaction<'_>, id).await;
                }
            },
        }
    }

    Ok(())
}

pub(crate) async fn read_trash_ids(
    user: &LoggedUser,
    transaction: &mut DBTransaction<'_>,
) -> Result<Vec<String>, ServerError> {
    let repeated_trash = read_trash(transaction, user).await?.take_items().into_vec();
    let ids = repeated_trash
        .into_iter()
        .map(|trash| trash.id)
        .collect::<Vec<String>>();

    Ok(ids)
}

#[tracing::instrument(skip(transaction, user), err)]
pub(crate) async fn read_trash(
    transaction: &mut DBTransaction<'_>,
    user: &LoggedUser,
) -> Result<RepeatedTrash, ServerError> {
    let (sql, args) = SqlBuilder::select(TRASH_TABLE)
        .add_field("*")
        .and_where_eq("user_id", &user.user_id)
        .build()?;

    let tables = sqlx::query_as_with::<Postgres, TrashTable, PgArguments>(&sql, args)
        .fetch_all(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;

    let mut trash: Vec<Trash> = vec![];
    for table in tables {
        match TrashType::from_i32(table.ty) {
            None => log::error!("Parse trash type with value: {} failed", table.ty),
            Some(ty) => match ty {
                TrashType::Unknown => {}
                TrashType::View => {
                    trash.push(read_view_table(table.id, transaction).await?.into());
                }
                TrashType::App => {
                    trash.push(read_app_table(table.id, transaction).await?.into());
                }
            },
        }
    }

    let mut repeated_trash = RepeatedTrash::default();
    repeated_trash.set_items(trash.into());

    Ok(repeated_trash)
}
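Note: both delete_trash_associate_targets and read_trash above gate on TrashType::from_i32, logging and skipping rows whose stored integer no longer maps to a known variant instead of failing the whole operation. A standalone sketch of that tolerant decoding (the enum and its discriminant values are stand-ins for the generated protobuf one):

#[derive(Debug, Clone, Copy, PartialEq)]
enum TrashType { Unknown, View, App }

// Mirrors protobuf's generated from_i32: an unknown discriminant maps to None
// rather than panicking, so stale rows can be logged and skipped.
fn trash_type_from_i32(ty: i32) -> Option<TrashType> {
    match ty {
        0 => Some(TrashType::Unknown),
        1 => Some(TrashType::View),
        2 => Some(TrashType::App),
        _ => None,
    }
}

fn main() {
    for ty in [1, 2, 9] {
        match trash_type_from_i32(ty) {
            Some(known) => println!("row decodes to {:?}", known),
            None => eprintln!("Parse trash type with value: {} failed", ty),
        }
    }
}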
@ -1,170 +0,0 @@
use crate::{
    entities::logged_user::LoggedUser,
    services::{
        document::persistence::{create_document, delete_document},
        folder::{trash::read_trash_ids, view::persistence::*},
    },
    util::sqlx_ext::{map_sqlx_error, DBTransaction, SqlBuilder},
};
use backend_service::errors::{invalid_params, ServerError};

use crate::context::DocumentRevisionKV;
use chrono::Utc;
use flowy_collaboration::{
    client_document::default::initial_delta,
    entities::revision::{RepeatedRevision, Revision},
    protobuf::CreateDocParams as CreateDocParamsPB,
};
use flowy_folder_data_model::{
    parser::{
        app::AppIdentify,
        view::{ViewDesc, ViewName, ViewThumbnail},
    },
    protobuf::{CreateViewParams as CreateViewParamsPB, RepeatedView as RepeatedViewPB, View as ViewPB},
};
use sqlx::{postgres::PgArguments, Postgres};
use std::{convert::TryInto, sync::Arc};
use uuid::Uuid;

pub(crate) async fn update_view(
    transaction: &mut DBTransaction<'_>,
    view_id: Uuid,
    name: Option<String>,
    desc: Option<String>,
    thumbnail: Option<String>,
) -> Result<(), ServerError> {
    let (sql, args) = SqlBuilder::update(VIEW_TABLE)
        .add_some_arg("name", name)
        .add_some_arg("description", desc)
        .add_some_arg("thumbnail", thumbnail)
        .add_some_arg("modified_time", Some(Utc::now()))
        .and_where_eq("id", view_id)
        .build()?;

    sqlx::query_with(&sql, args)
        .execute(transaction)
        .await
        .map_err(map_sqlx_error)?;

    Ok(())
}

#[tracing::instrument(skip(transaction, document_store), err)]
pub(crate) async fn delete_view(
    transaction: &mut DBTransaction<'_>,
    document_store: &Arc<DocumentRevisionKV>,
    view_ids: Vec<Uuid>,
) -> Result<(), ServerError> {
    for view_id in view_ids {
        let (sql, args) = SqlBuilder::delete(VIEW_TABLE).and_where_eq("id", &view_id).build()?;
        let _ = sqlx::query_with(&sql, args)
            .execute(transaction as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;

        let _ = delete_document(document_store, view_id).await?;
    }
    Ok(())
}

#[tracing::instrument(name = "create_view", level = "debug", skip(transaction, document_store), err)]
pub(crate) async fn create_view(
    transaction: &mut DBTransaction<'_>,
    document_store: Arc<DocumentRevisionKV>,
    params: CreateViewParamsPB,
    user_id: &str,
) -> Result<ViewPB, ServerError> {
    let view_id = check_view_id(params.view_id.clone())?;
    let name = ViewName::parse(params.name).map_err(invalid_params)?;
    let belong_to_id = AppIdentify::parse(params.belong_to_id).map_err(invalid_params)?;
    let thumbnail = ViewThumbnail::parse(params.thumbnail).map_err(invalid_params)?;
    let desc = ViewDesc::parse(params.desc).map_err(invalid_params)?;

    let (sql, args, view) = NewViewSqlBuilder::new(view_id, belong_to_id.as_ref())
        .name(name.as_ref())
        .desc(desc.as_ref())
        .thumbnail(thumbnail.as_ref())
        .view_type(params.view_type)
        .build()?;

    let _ = sqlx::query_with(&sql, args)
        .execute(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;

    let initial_delta_data = initial_delta().to_bytes();
    let md5 = format!("{:x}", md5::compute(&initial_delta_data));
    let revision = Revision::new(&view.id, 0, 0, initial_delta_data, user_id, md5);
    let repeated_revision = RepeatedRevision::new(vec![revision]);
    let mut create_doc_params = CreateDocParamsPB::new();
    create_doc_params.set_revisions(repeated_revision.try_into().unwrap());
    create_doc_params.set_id(view.id.clone());
    let _ = create_document(&document_store, create_doc_params).await?;

    Ok(view)
}

pub(crate) async fn read_view(
    user: &LoggedUser,
    view_id: Uuid,
    transaction: &mut DBTransaction<'_>,
) -> Result<ViewPB, ServerError> {
    let table = read_view_table(view_id, transaction as &mut DBTransaction<'_>).await?;

    let read_trash_ids = read_trash_ids(user, transaction).await?;
    if read_trash_ids.contains(&table.id.to_string()) {
        return Err(ServerError::record_not_found());
    }

    let mut views = RepeatedViewPB::default();
    views.set_items(
        read_view_belong_to_id(&table.id.to_string(), user, transaction)
            .await?
            .into(),
    );
    let mut view: ViewPB = table.into();
    view.set_belongings(views);
    Ok(view)
}

pub(crate) async fn read_view_table(
    view_id: Uuid,
    transaction: &mut DBTransaction<'_>,
) -> Result<ViewTable, ServerError> {
    let (sql, args) = SqlBuilder::select(VIEW_TABLE)
        .add_field("*")
        .and_where_eq("id", view_id)
        .build()?;

    let table = sqlx::query_as_with::<Postgres, ViewTable, PgArguments>(&sql, args)
        .fetch_one(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;

    Ok(table)
}

// The transaction must be committed by the caller.
pub(crate) async fn read_view_belong_to_id<'c>(
    id: &str,
    user: &LoggedUser,
    transaction: &mut DBTransaction<'_>,
) -> Result<Vec<ViewPB>, ServerError> {
    // TODO: add index for app_table
    let (sql, args) = SqlBuilder::select(VIEW_TABLE)
        .add_field("*")
        .and_where_eq("belong_to_id", id)
        .build()?;

    let mut tables = sqlx::query_as_with::<Postgres, ViewTable, PgArguments>(&sql, args)
        .fetch_all(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;

    let read_trash_ids = read_trash_ids(user, transaction).await?;
    tables.retain(|table| !read_trash_ids.contains(&table.id.to_string()));

    let views = tables.into_iter().map(|table| table.into()).collect::<Vec<ViewPB>>();

    Ok(views)
}
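Note: create_view above seeds each new view with a revision-0 document: serialize the initial delta, md5 it, and stamp the hex digest on the revision so verify_md5 in ws_actor.rs can check later pushes against it. A standalone sketch of the hashing step with the md5 crate (the delta bytes are a placeholder for initial_delta().to_bytes()):

fn main() {
    // Stand-in for the serialized initial delta of an empty document.
    let initial_delta_data: Vec<u8> = br#"[{"insert":"\n"}]"#.to_vec();

    // Same hashing as create_view: the hex-encoded md5 of the delta bytes.
    let md5 = format!("{:x}", md5::compute(&initial_delta_data));
    assert_eq!(md5.len(), 32);
    println!("revision 0 md5 = {}", md5);
}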
@ -1,6 +0,0 @@
#![allow(clippy::module_inception)]
mod controller;
pub mod persistence;
pub mod router;

pub(crate) use controller::*;
@ -1,134 +0,0 @@
use crate::util::sqlx_ext::SqlBuilder;
use backend_service::errors::{invalid_params, ServerError};
use chrono::{DateTime, NaiveDateTime, Utc};
use flowy_folder_data_model::{
    parser::view::ViewIdentify,
    protobuf::{RepeatedView as RepeatedViewPB, View as ViewPB, ViewType as ViewTypePB},
};
use protobuf::ProtobufEnum;
use sqlx::postgres::PgArguments;
use uuid::Uuid;

pub(crate) const VIEW_TABLE: &str = "view_table";

pub struct NewViewSqlBuilder {
    table: ViewTable,
}

impl NewViewSqlBuilder {
    pub fn new(view_id: Uuid, belong_to_id: &str) -> Self {
        let time = Utc::now();

        let table = ViewTable {
            id: view_id,
            belong_to_id: belong_to_id.to_string(),
            name: "".to_string(),
            description: "".to_string(),
            modified_time: time,
            create_time: time,
            thumbnail: "".to_string(),
            view_type: ViewTypePB::Doc.value(),
        };

        Self { table }
    }

    pub fn from_view(view: ViewPB) -> Result<Self, ServerError> {
        let view_id = ViewIdentify::parse(view.id).map_err(invalid_params)?;
        let view_id = Uuid::parse_str(view_id.as_ref())?;
        let create_time = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(view.create_time, 0), Utc);
        let modified_time = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(view.modified_time, 0), Utc);

        let table = ViewTable {
            id: view_id,
            belong_to_id: view.belong_to_id,
            name: view.name,
            description: view.desc,
            modified_time,
            create_time,
            thumbnail: "".to_string(),
            view_type: view.view_type.value(),
        };
        Ok(Self { table })
    }

    pub fn name(mut self, name: &str) -> Self {
        self.table.name = name.to_string();
        self
    }

    pub fn desc(mut self, desc: &str) -> Self {
        self.table.description = desc.to_owned();
        self
    }

    pub fn thumbnail(mut self, thumbnail: &str) -> Self {
        self.table.thumbnail = thumbnail.to_owned();
        self
    }

    pub fn view_type(mut self, view_type: ViewTypePB) -> Self {
        self.table.view_type = view_type.value();
        self
    }

    pub fn build(self) -> Result<(String, PgArguments, ViewPB), ServerError> {
        let view: ViewPB = self.table.clone().into();

        let (sql, args) = SqlBuilder::create(VIEW_TABLE)
            .add_field_with_arg("id", self.table.id)
            .add_field_with_arg("belong_to_id", self.table.belong_to_id)
            .add_field_with_arg("name", self.table.name)
            .add_field_with_arg("description", self.table.description)
            .add_field_with_arg("modified_time", self.table.modified_time)
            .add_field_with_arg("create_time", self.table.create_time)
            .add_field_with_arg("thumbnail", self.table.thumbnail)
            .add_field_with_arg("view_type", self.table.view_type)
            .build()?;

        Ok((sql, args, view))
    }
}

pub(crate) fn check_view_ids(ids: Vec<String>) -> Result<Vec<Uuid>, ServerError> {
    let mut view_ids = vec![];
    for id in ids {
        view_ids.push(check_view_id(id)?);
    }
    Ok(view_ids)
}

pub(crate) fn check_view_id(id: String) -> Result<Uuid, ServerError> {
    let view_id = ViewIdentify::parse(id).map_err(invalid_params)?;
    let view_id = Uuid::parse_str(view_id.as_ref())?;
    Ok(view_id)
}

#[derive(Debug, Clone, sqlx::FromRow)]
pub struct ViewTable {
    pub(crate) id: uuid::Uuid,
    pub(crate) belong_to_id: String,
    pub(crate) name: String,
    pub(crate) description: String,
    pub(crate) modified_time: chrono::DateTime<Utc>,
    pub(crate) create_time: chrono::DateTime<Utc>,
    pub(crate) thumbnail: String,
    pub(crate) view_type: i32,
}
impl std::convert::From<ViewTable> for ViewPB {
    fn from(table: ViewTable) -> Self {
        let view_type = ViewTypePB::from_i32(table.view_type).unwrap_or(ViewTypePB::Doc);

        let mut view = ViewPB::default();
        view.set_id(table.id.to_string());
        view.set_belong_to_id(table.belong_to_id);
        view.set_name(table.name);
        view.set_desc(table.description);
        view.set_view_type(view_type);
        view.set_belongings(RepeatedViewPB::default());
        view.set_create_time(table.create_time.timestamp());
        view.set_modified_time(table.modified_time.timestamp());

        view
    }
}
@ -1,128 +0,0 @@
use crate::{
    context::FlowyPersistence,
    entities::logged_user::LoggedUser,
    services::folder::view::{
        create_view, delete_view,
        persistence::{check_view_id, check_view_ids},
        read_view, update_view,
    },
    util::serde_ext::parse_from_payload,
};
use actix_web::{
    web::{Data, Payload},
    HttpResponse,
};
use anyhow::Context;
use backend_service::{
    errors::{invalid_params, ServerError},
    response::FlowyResponse,
};
use flowy_folder_data_model::{
    parser::view::{ViewDesc, ViewName, ViewThumbnail},
    protobuf::{
        CreateViewParams as CreateViewParamsPB, QueryViewRequest as QueryViewRequestPB,
        UpdateViewParams as UpdateViewParamsPB, ViewId as ViewIdPB,
    },
};
use sqlx::PgPool;
use std::sync::Arc;

pub async fn create_handler(
    payload: Payload,
    persistence: Data<Arc<FlowyPersistence>>,
    user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let params: CreateViewParamsPB = parse_from_payload(payload).await?;
    let kv_store = persistence.document_kv_store();
    let pool = persistence.pg_pool();
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to create view")?;

    let view = create_view(&mut transaction, kv_store, params, &user.user_id).await?;
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to create view.")?;

    let resp = FlowyResponse::success().pb(view)?;
    Ok(resp.into())
}

pub async fn read_handler(payload: Payload, pool: Data<PgPool>, user: LoggedUser) -> Result<HttpResponse, ServerError> {
    let params: ViewIdPB = parse_from_payload(payload).await?;
    let view_id = check_view_ids(vec![params.view_id])?.pop().unwrap();
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to read view")?;
    let view = read_view(&user, view_id, &mut transaction).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to read view.")?;

    Ok(FlowyResponse::success().pb(view)?.into())
}

pub async fn update_handler(payload: Payload, pool: Data<PgPool>) -> Result<HttpResponse, ServerError> {
    let params: UpdateViewParamsPB = parse_from_payload(payload).await?;
    let view_id = check_view_id(params.view_id.clone())?;
    let name = match params.has_name() {
        false => None,
        true => Some(ViewName::parse(params.get_name().to_owned()).map_err(invalid_params)?.0),
    };

    let desc = match params.has_desc() {
        false => None,
        true => Some(ViewDesc::parse(params.get_desc().to_owned()).map_err(invalid_params)?.0),
    };

    let thumbnail = match params.has_thumbnail() {
        false => None,
        true => Some(
            ViewThumbnail::parse(params.get_thumbnail().to_owned())
                .map_err(invalid_params)?
                .0,
        ),
    };

    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to update view")?;

    let _ = update_view(&mut transaction, view_id, name, desc, thumbnail).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to update view.")?;

    Ok(FlowyResponse::success().into())
}

pub async fn delete_handler(
    payload: Payload,
    persistence: Data<Arc<FlowyPersistence>>,
) -> Result<HttpResponse, ServerError> {
    let params: QueryViewRequestPB = parse_from_payload(payload).await?;
    let pool = persistence.pg_pool();
    let kv_store = persistence.document_kv_store();
    let view_ids = check_view_ids(params.view_ids.to_vec())?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to delete view")?;

    let _ = delete_view(&mut transaction, &kv_store, view_ids).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to delete view.")?;

    Ok(FlowyResponse::success().into())
}
@ -1,144 +0,0 @@
|
||||
use super::persistence::NewWorkspaceBuilder;
|
||||
use crate::{
|
||||
entities::logged_user::LoggedUser,
|
||||
services::folder::{
|
||||
app::{controller::read_app, persistence::AppTable},
|
||||
workspace::persistence::*,
|
||||
},
|
||||
util::sqlx_ext::*,
|
||||
};
|
||||
use anyhow::Context;
|
||||
use backend_service::errors::{invalid_params, ServerError};
|
||||
use flowy_folder_data_model::{
|
||||
parser::workspace::WorkspaceIdentify,
|
||||
protobuf::{RepeatedApp as RepeatedAppPB, RepeatedWorkspace as RepeatedWorkspacePB, Workspace as WorkspacePB},
|
||||
};
|
||||
use sqlx::{postgres::PgArguments, Postgres};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub(crate) async fn create_workspace(
|
||||
transaction: &mut DBTransaction<'_>,
|
||||
name: &str,
|
||||
desc: &str,
|
||||
logged_user: LoggedUser,
|
||||
) -> Result<WorkspacePB, ServerError> {
|
||||
    let user_id = logged_user.as_uuid()?.to_string();
    let (sql, args, workspace) = NewWorkspaceBuilder::new(&user_id).name(name).desc(desc).build()?;

    let _ = sqlx::query_with(&sql, args)
        .execute(transaction)
        .await
        .map_err(map_sqlx_error)?;

    Ok(workspace)
}

pub(crate) async fn update_workspace(
    transaction: &mut DBTransaction<'_>,
    workspace_id: Uuid,
    name: Option<String>,
    desc: Option<String>,
) -> Result<(), ServerError> {
    let (sql, args) = SqlBuilder::update(WORKSPACE_TABLE)
        .add_some_arg("name", name)
        .add_some_arg("description", desc)
        .and_where_eq("id", workspace_id)
        .build()?;

    sqlx::query_with(&sql, args)
        .execute(transaction)
        .await
        .map_err(map_sqlx_error)?;

    Ok(())
}

pub(crate) async fn delete_workspace(
    transaction: &mut DBTransaction<'_>,
    workspace_id: Uuid,
) -> Result<(), ServerError> {
    let (sql, args) = SqlBuilder::delete(WORKSPACE_TABLE)
        .and_where_eq("id", workspace_id)
        .build()?;

    let _ = sqlx::query_with(&sql, args)
        .execute(transaction)
        .await
        .map_err(map_sqlx_error)?;

    Ok(())
}

#[tracing::instrument(skip(transaction, logged_user), err)]
pub async fn read_workspaces(
    transaction: &mut DBTransaction<'_>,
    workspace_id: Option<String>,
    logged_user: LoggedUser,
) -> Result<RepeatedWorkspacePB, ServerError> {
    let user_id = logged_user.as_uuid()?.to_string();

    let mut builder = SqlBuilder::select(WORKSPACE_TABLE)
        .add_field("*")
        .and_where_eq("user_id", &user_id);

    if let Some(workspace_id) = workspace_id {
        let workspace_id = check_workspace_id(workspace_id)?;
        builder = builder.and_where_eq("id", workspace_id);
    }

    let (sql, args) = builder.build()?;
    let tables = sqlx::query_as_with::<Postgres, WorkspaceTable, PgArguments>(&sql, args)
        .fetch_all(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;

    let mut repeated_workspace = RepeatedWorkspacePB::default();
    let mut workspaces = vec![];
    // TODO: combine these queries to avoid a per-workspace app lookup
    for table in tables {
        let apps = read_workspace_apps(
            &logged_user,
            transaction as &mut DBTransaction<'_>,
            &table.id.to_string(),
        )
        .await
        .context("Get workspace app")
        .unwrap_or_default();

        let mut workspace: WorkspacePB = table.into();
        workspace.set_apps(apps);
        workspaces.push(workspace);
    }

    repeated_workspace.set_items(workspaces.into());
    Ok(repeated_workspace)
}

#[tracing::instrument(skip(transaction, user), fields(app_count), err)]
async fn read_workspace_apps<'c>(
    user: &LoggedUser,
    transaction: &mut DBTransaction<'_>,
    workspace_id: &str,
) -> Result<RepeatedAppPB, ServerError> {
    let workspace_id = WorkspaceIdentify::parse(workspace_id.to_owned()).map_err(invalid_params)?;
    let (sql, args) = SqlBuilder::select("app_table")
        .add_field("*")
        .and_where_eq("workspace_id", workspace_id.0)
        .build()?;

    let app_tables = sqlx::query_as_with::<Postgres, AppTable, PgArguments>(&sql, args)
        .fetch_all(transaction as &mut DBTransaction<'_>)
        .await
        .map_err(map_sqlx_error)?;

    tracing::Span::current().record("app_count", &app_tables.len());
    let mut apps = vec![];
    for table in app_tables {
        let app = read_app(transaction, table.id, user).await?;
        apps.push(app);
    }

    let mut repeated_app = RepeatedAppPB::default();
    repeated_app.set_items(apps.into());
    Ok(repeated_app)
}
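
// Usage sketch (added, not part of the original file): exercising
// update_workspace inside its own transaction. `pool` is an assumed
// sqlx::PgPool handle; only the name is set, so the builder is expected to
// skip the None description.
async fn rename_workspace_example(pool: &sqlx::PgPool, workspace_id: Uuid) -> Result<(), ServerError> {
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to rename workspace")?;
    update_workspace(&mut transaction, workspace_id, Some("Renamed".to_owned()), None).await?;
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to rename workspace.")?;
    Ok(())
}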
@ -1,6 +0,0 @@
#![allow(clippy::module_inception)]
mod controller;
pub mod persistence;
pub mod router;

pub use controller::*;
@ -1,98 +0,0 @@
use crate::util::sqlx_ext::SqlBuilder;
use backend_service::errors::{invalid_params, ServerError};
use chrono::{DateTime, NaiveDateTime, Utc};
use flowy_folder_data_model::{parser::workspace::WorkspaceIdentify, protobuf::Workspace as WorkspacePB};
use sqlx::postgres::PgArguments;
use uuid::Uuid;

pub struct NewWorkspaceBuilder {
    table: WorkspaceTable,
}

impl NewWorkspaceBuilder {
    pub fn new(user_id: &str) -> Self {
        let uuid = uuid::Uuid::new_v4();
        let time = Utc::now();

        let table = WorkspaceTable {
            id: uuid,
            name: "".to_string(),
            description: "".to_string(),
            modified_time: time,
            create_time: time,
            user_id: user_id.to_string(),
        };
        Self { table }
    }

    pub fn from_workspace(user_id: &str, workspace: WorkspacePB) -> Result<Self, ServerError> {
        let workspace_id = check_workspace_id(workspace.id)?;
        let create_time = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(workspace.create_time, 0), Utc);
        let modified_time = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(workspace.modified_time, 0), Utc);

        let table = WorkspaceTable {
            id: workspace_id,
            name: workspace.name,
            description: workspace.desc,
            modified_time,
            create_time,
            user_id: user_id.to_string(),
        };

        Ok(Self { table })
    }

    pub fn name(mut self, name: &str) -> Self {
        self.table.name = name.to_string();
        self
    }

    pub fn desc(mut self, desc: &str) -> Self {
        self.table.description = desc.to_owned();
        self
    }

    pub fn build(self) -> Result<(String, PgArguments, WorkspacePB), ServerError> {
        let workspace: WorkspacePB = self.table.clone().into();
        // TODO: use macro to fetch each field from struct
        let (sql, args) = SqlBuilder::create(WORKSPACE_TABLE)
            .add_field_with_arg("id", self.table.id)
            .add_field_with_arg("name", self.table.name)
            .add_field_with_arg("description", self.table.description)
            .add_field_with_arg("modified_time", self.table.modified_time)
            .add_field_with_arg("create_time", self.table.create_time)
            .add_field_with_arg("user_id", self.table.user_id)
            .build()?;

        Ok((sql, args, workspace))
    }
}

pub(crate) fn check_workspace_id(id: String) -> Result<Uuid, ServerError> {
    let workspace_id = WorkspaceIdentify::parse(id).map_err(invalid_params)?;
    let workspace_id = Uuid::parse_str(workspace_id.as_ref())?;
    Ok(workspace_id)
}

pub(crate) const WORKSPACE_TABLE: &str = "workspace_table";

#[derive(Debug, Clone, sqlx::FromRow)]
pub struct WorkspaceTable {
    pub(crate) id: uuid::Uuid,
    pub(crate) name: String,
    pub(crate) description: String,
    pub(crate) modified_time: chrono::DateTime<Utc>,
    pub(crate) create_time: chrono::DateTime<Utc>,
    pub(crate) user_id: String,
}
impl std::convert::From<WorkspaceTable> for WorkspacePB {
    fn from(table: WorkspaceTable) -> Self {
        let mut workspace = WorkspacePB::default();
        workspace.set_id(table.id.to_string());
        workspace.set_name(table.name.clone());
        workspace.set_desc(table.description.clone());
        workspace.set_modified_time(table.modified_time.timestamp());
        workspace.set_create_time(table.create_time.timestamp());
        workspace
    }
}
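
// Illustration (added, not from the source): building the INSERT for a fresh
// workspace. The builder mints the UUID and timestamps itself, so the returned
// WorkspacePB already carries the generated id. The assertions only encode
// assumptions about SqlBuilder's output, not its exact SQL text.
fn new_workspace_sql_example(user_id: &str) -> Result<(), ServerError> {
    let (sql, _args, workspace) = NewWorkspaceBuilder::new(user_id)
        .name("Getting started")
        .desc("Default workspace")
        .build()?;
    debug_assert!(sql.contains(WORKSPACE_TABLE));
    debug_assert!(!workspace.get_id().is_empty());
    Ok(())
}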
@ -1,149 +0,0 @@
use crate::{
    entities::logged_user::LoggedUser,
    services::folder::workspace::{
        create_workspace, delete_workspace, persistence::check_workspace_id, read_workspaces, update_workspace,
    },
    util::serde_ext::parse_from_payload,
};
use actix_web::{
    web::{Data, Payload},
    HttpResponse,
};
use anyhow::Context;
use backend_service::{
    errors::{invalid_params, ServerError},
    response::FlowyResponse,
};
use flowy_folder_data_model::{
    parser::workspace::{WorkspaceDesc, WorkspaceName},
    protobuf::{
        CreateWorkspaceParams as CreateWorkspaceParamsPB, UpdateWorkspaceParams as UpdateWorkspaceParamsPB,
        WorkspaceId as WorkspaceIdPB,
    },
};
use sqlx::PgPool;

pub async fn create_handler(
    payload: Payload,
    pool: Data<PgPool>,
    logged_user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let params: CreateWorkspaceParamsPB = parse_from_payload(payload).await?;
    let name = WorkspaceName::parse(params.get_name().to_owned()).map_err(invalid_params)?;
    let desc = WorkspaceDesc::parse(params.get_desc().to_owned()).map_err(invalid_params)?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to create workspace")?;
    let workspace = create_workspace(&mut transaction, name.as_ref(), desc.as_ref(), logged_user).await?;
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to create workspace.")?;

    Ok(FlowyResponse::success().pb(workspace)?.into())
}

pub async fn read_handler(
    payload: Payload,
    pool: Data<PgPool>,
    logged_user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let params: WorkspaceIdPB = parse_from_payload(payload).await?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to read workspace")?;

    let workspace_id = if params.has_workspace_id() {
        Some(params.get_workspace_id().to_owned())
    } else {
        None
    };
    let repeated_workspace = read_workspaces(&mut transaction, workspace_id, logged_user).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to read workspace.")?;

    Ok(FlowyResponse::success().pb(repeated_workspace)?.into())
}

pub async fn delete_handler(
    payload: Payload,
    pool: Data<PgPool>,
    _logged_user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let params: WorkspaceIdPB = parse_from_payload(payload).await?;
    let workspace_id = check_workspace_id(params.get_workspace_id().to_owned())?;
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to delete workspace")?;

    let _ = delete_workspace(&mut transaction, workspace_id).await?;
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to delete workspace.")?;

    Ok(FlowyResponse::success().into())
}

pub async fn update_handler(
    payload: Payload,
    pool: Data<PgPool>,
    _logged_user: LoggedUser,
) -> Result<HttpResponse, ServerError> {
    let params: UpdateWorkspaceParamsPB = parse_from_payload(payload).await?;
    let workspace_id = check_workspace_id(params.get_id().to_owned())?;
    let name = match params.has_name() {
        false => None,
        true => {
            let name = WorkspaceName::parse(params.get_name().to_owned())
                .map_err(invalid_params)?
                .0;
            Some(name)
        }
    };

    let desc = match params.has_desc() {
        false => None,
        true => {
            let desc = WorkspaceDesc::parse(params.get_desc().to_owned())
                .map_err(invalid_params)?
                .0;
            Some(desc)
        }
    };

    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to update workspace")?;

    let _ = update_workspace(&mut transaction, workspace_id, name, desc).await?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to update workspace.")?;

    Ok(FlowyResponse::success().into())
}

pub async fn workspace_list(pool: Data<PgPool>, logged_user: LoggedUser) -> Result<HttpResponse, ServerError> {
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to read workspaces")?;

    let repeated_workspace = read_workspaces(&mut transaction, None, logged_user).await?;
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to read workspace.")?;

    Ok(FlowyResponse::success().pb(repeated_workspace)?.into())
}
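
// How these handlers are mounted is not shown in this diff; the sketch below
// is a hedged guess at an actix-web scope. The "/api/workspace" path and the
// verb-to-handler mapping are both assumptions for illustration only.
fn workspace_scope_example() -> actix_web::Scope {
    use actix_web::web;
    web::scope("/api/workspace").service(
        web::resource("")
            .route(web::post().to(create_handler))
            .route(web::get().to(read_handler))
            .route(web::patch().to(update_handler))
            .route(web::delete().to(delete_handler)),
    )
}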
@ -1,148 +0,0 @@
use crate::{
    context::FlowyPersistence,
    services::web_socket::{entities::Socket, revision_data_to_ws_message, WSClientData, WSUser, WebSocketMessage},
    util::serde_ext::parse_from_bytes,
};
use actix_rt::task::spawn_blocking;
use async_stream::stream;
use backend_service::errors::{internal_error, Result};

use flowy_collaboration::{
    protobuf::{
        ClientRevisionWSData as ClientRevisionWSDataPB, ClientRevisionWSDataType as ClientRevisionWSDataTypePB,
    },
    server_folder::ServerFolderManager,
    synchronizer::{RevisionSyncResponse, RevisionUser},
};
use futures::stream::StreamExt;
use lib_ws::WSChannel;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};

pub enum FolderWSActorMessage {
    ClientData {
        client_data: WSClientData,
        persistence: Arc<FlowyPersistence>,
        ret: oneshot::Sender<Result<()>>,
    },
}

pub struct FolderWebSocketActor {
    actor_msg_receiver: Option<mpsc::Receiver<FolderWSActorMessage>>,
    folder_manager: Arc<ServerFolderManager>,
}

impl FolderWebSocketActor {
    pub fn new(receiver: mpsc::Receiver<FolderWSActorMessage>, folder_manager: Arc<ServerFolderManager>) -> Self {
        Self {
            actor_msg_receiver: Some(receiver),
            folder_manager,
        }
    }

    pub async fn run(mut self) {
        let mut actor_msg_receiver = self
            .actor_msg_receiver
            .take()
            .expect("FolderWebSocketActor's receiver should only be taken once");
        let stream = stream! {
            loop {
                match actor_msg_receiver.recv().await {
                    Some(msg) => yield msg,
                    None => {
                        break
                    },
                }
            }
        };
        stream.for_each(|msg| self.handle_message(msg)).await;
    }

    async fn handle_message(&self, msg: FolderWSActorMessage) {
        match msg {
            FolderWSActorMessage::ClientData {
                client_data,
                persistence: _,
                ret,
            } => {
                let _ = ret.send(self.handle_folder_data(client_data).await);
            }
        }
    }

    async fn handle_folder_data(&self, client_data: WSClientData) -> Result<()> {
        let WSClientData { user, socket, data } = client_data;
        let folder_client_data = spawn_blocking(move || parse_from_bytes::<ClientRevisionWSDataPB>(&data))
            .await
            .map_err(internal_error)??;

        tracing::debug!(
            "[FolderWebSocketActor]: receive: {}:{}, {:?}",
            folder_client_data.object_id,
            folder_client_data.data_id,
            folder_client_data.ty
        );

        let user = Arc::new(FolderRevisionUser { user, socket });
        match &folder_client_data.ty {
            ClientRevisionWSDataTypePB::ClientPushRev => {
                let _ = self
                    .folder_manager
                    .handle_client_revisions(user, folder_client_data)
                    .await
                    .map_err(internal_error)?;
            }
            ClientRevisionWSDataTypePB::ClientPing => {
                let _ = self
                    .folder_manager
                    .handle_client_ping(user, folder_client_data)
                    .await
                    .map_err(internal_error)?;
            }
        }
        Ok(())
    }
}

#[derive(Clone)]
pub struct FolderRevisionUser {
    pub user: Arc<WSUser>,
    pub(crate) socket: Socket,
}

impl std::fmt::Debug for FolderRevisionUser {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("FolderRevisionUser")
            .field("user", &self.user)
            .field("socket", &self.socket)
            .finish()
    }
}

impl RevisionUser for FolderRevisionUser {
    fn user_id(&self) -> String {
        self.user.id().to_string()
    }

    fn receive(&self, resp: RevisionSyncResponse) {
        let result = match resp {
            RevisionSyncResponse::Pull(data) => {
                let msg: WebSocketMessage = revision_data_to_ws_message(data, WSChannel::Folder);
                self.socket.try_send(msg).map_err(internal_error)
            }
            RevisionSyncResponse::Push(data) => {
                let msg: WebSocketMessage = revision_data_to_ws_message(data, WSChannel::Folder);
                self.socket.try_send(msg).map_err(internal_error)
            }
            RevisionSyncResponse::Ack(data) => {
                let msg: WebSocketMessage = revision_data_to_ws_message(data, WSChannel::Folder);
                self.socket.try_send(msg).map_err(internal_error)
            }
        };

        match result {
            Ok(_) => {}
            Err(e) => log::error!("[FolderRevisionUser]: {}", e),
        }
    }
}
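
// Sketch of the request/response round trip the actor above implements: the
// caller packs a oneshot sender into the message and awaits the actor's reply
// through it. All names come from this file; only this helper is added.
async fn send_client_data_example(
    sender: &mpsc::Sender<FolderWSActorMessage>,
    client_data: WSClientData,
    persistence: Arc<FlowyPersistence>,
) -> Result<()> {
    let (ret, rx) = oneshot::channel();
    let msg = FolderWSActorMessage::ClientData {
        client_data,
        persistence,
        ret,
    };
    sender.send(msg).await.map_err(internal_error)?;
    // The outer error is a closed-channel failure; the inner one is the
    // actor's own processing result.
    rx.await.map_err(internal_error)?
}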
@ -1,169 +0,0 @@
use crate::{
    context::FlowyPersistence,
    services::{
        folder::ws_actor::{FolderWSActorMessage, FolderWebSocketActor},
        web_socket::{WSClientData, WebSocketReceiver},
    },
};
use std::fmt::{Debug, Formatter};

use crate::{context::FolderRevisionKV, services::kv::revision_kv::revisions_to_key_value_items};
use flowy_collaboration::{
    entities::folder_info::FolderInfo,
    errors::CollaborateError,
    protobuf::{RepeatedRevision as RepeatedRevisionPB, Revision as RevisionPB},
    server_folder::{FolderCloudPersistence, ServerFolderManager},
    util::make_folder_from_revisions_pb,
};
use lib_infra::future::BoxResultFuture;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};

pub fn make_folder_ws_receiver(
    persistence: Arc<FlowyPersistence>,
    folder_manager: Arc<ServerFolderManager>,
) -> Arc<FolderWebSocketReceiver> {
    let (actor_msg_sender, rx) = tokio::sync::mpsc::channel(1000);
    let actor = FolderWebSocketActor::new(rx, folder_manager);
    tokio::task::spawn(actor.run());
    Arc::new(FolderWebSocketReceiver::new(persistence, actor_msg_sender))
}

pub struct FolderWebSocketReceiver {
    actor_msg_sender: mpsc::Sender<FolderWSActorMessage>,
    persistence: Arc<FlowyPersistence>,
}

impl FolderWebSocketReceiver {
    pub fn new(persistence: Arc<FlowyPersistence>, actor_msg_sender: mpsc::Sender<FolderWSActorMessage>) -> Self {
        Self {
            actor_msg_sender,
            persistence,
        }
    }
}

impl WebSocketReceiver for FolderWebSocketReceiver {
    fn receive(&self, data: WSClientData) {
        let (ret, rx) = oneshot::channel();
        let actor_msg_sender = self.actor_msg_sender.clone();
        let persistence = self.persistence.clone();

        actix_rt::spawn(async move {
            let msg = FolderWSActorMessage::ClientData {
                client_data: data,
                persistence,
                ret,
            };

            match actor_msg_sender.send(msg).await {
                Ok(_) => {}
                Err(e) => {
                    log::error!("[FolderWebSocketReceiver]: send message to actor failed: {}", e);
                }
            }
            match rx.await {
                Ok(_) => {}
                Err(e) => log::error!("[FolderWebSocketReceiver]: message ret failed {:?}", e),
            };
        });
    }
}

pub struct HttpFolderCloudPersistence(pub Arc<FolderRevisionKV>);
impl Debug for HttpFolderCloudPersistence {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str("HttpFolderCloudPersistence")
    }
}

impl FolderCloudPersistence for HttpFolderCloudPersistence {
    fn read_folder(&self, _user_id: &str, folder_id: &str) -> BoxResultFuture<FolderInfo, CollaborateError> {
        let folder_store = self.0.clone();
        let folder_id = folder_id.to_owned();
        Box::pin(async move {
            let revisions = folder_store
                .get_revisions(&folder_id, None)
                .await
                .map_err(|e| e.to_collaborate_error())?;
            match make_folder_from_revisions_pb(&folder_id, revisions)? {
                Some(folder_info) => Ok(folder_info),
                None => Err(CollaborateError::record_not_found().context(format!("{} not exist", folder_id))),
            }
        })
    }

    fn create_folder(
        &self,
        _user_id: &str,
        folder_id: &str,
        mut repeated_revision: RepeatedRevisionPB,
    ) -> BoxResultFuture<Option<FolderInfo>, CollaborateError> {
        let folder_store = self.0.clone();
        let folder_id = folder_id.to_owned();
        Box::pin(async move {
            let folder_info = make_folder_from_revisions_pb(&folder_id, repeated_revision.clone())?;
            let revisions: Vec<RevisionPB> = repeated_revision.take_items().into();
            let _ = folder_store
                .set_revision(revisions)
                .await
                .map_err(|e| e.to_collaborate_error())?;
            Ok(folder_info)
        })
    }

    fn save_folder_revisions(
        &self,
        mut repeated_revision: RepeatedRevisionPB,
    ) -> BoxResultFuture<(), CollaborateError> {
        let folder_store = self.0.clone();
        Box::pin(async move {
            let revisions: Vec<RevisionPB> = repeated_revision.take_items().into();
            let _ = folder_store
                .set_revision(revisions)
                .await
                .map_err(|e| e.to_collaborate_error())?;
            Ok(())
        })
    }

    fn read_folder_revisions(
        &self,
        folder_id: &str,
        rev_ids: Option<Vec<i64>>,
    ) -> BoxResultFuture<Vec<RevisionPB>, CollaborateError> {
        let folder_store = self.0.clone();
        let folder_id = folder_id.to_owned();
        Box::pin(async move {
            let mut repeated_revision = folder_store
                .get_revisions(&folder_id, rev_ids)
                .await
                .map_err(|e| e.to_collaborate_error())?;
            let revisions: Vec<RevisionPB> = repeated_revision.take_items().into();
            Ok(revisions)
        })
    }

    fn reset_folder(
        &self,
        folder_id: &str,
        mut repeated_revision: RepeatedRevisionPB,
    ) -> BoxResultFuture<(), CollaborateError> {
        let folder_store = self.0.clone();
        let folder_id = folder_id.to_owned();
        Box::pin(async move {
            let _ = folder_store
                .transaction(|mut transaction| {
                    Box::pin(async move {
                        let _ = transaction.batch_delete_key_start_with(&folder_id).await?;
                        let items = revisions_to_key_value_items(repeated_revision.take_items().into())?;
                        let _ = transaction.batch_set(items).await?;
                        Ok(())
                    })
                })
                .await
                .map_err(|e| e.to_collaborate_error())?;
            Ok(())
        })
    }
}
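
// Hedged usage sketch (added for illustration only): BoxResultFuture is just a
// pinned, boxed future, so a caller simply awaits the trait method.
async fn load_folder_example(
    persistence: &HttpFolderCloudPersistence,
    user_id: &str,
    folder_id: &str,
) -> Result<FolderInfo, CollaborateError> {
    persistence.read_folder(user_id, folder_id).await
}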
@ -1,210 +0,0 @@
use crate::{
    services::kv::{KVTransaction, KeyValue},
    util::sqlx_ext::{map_sqlx_error, DBTransaction, SqlBuilder},
};
use anyhow::Context;
use async_trait::async_trait;
use backend_service::errors::ServerError;
use bytes::Bytes;

use lib_infra::future::BoxResultFuture;
use sql_builder::SqlBuilder as RawSqlBuilder;
use sqlx::{
    postgres::{PgArguments, PgRow},
    Arguments, Error, PgPool, Postgres, Row,
};

const KV_TABLE: &str = "kv_table";

pub struct PostgresKV {
    pub(crate) pg_pool: PgPool,
}

impl PostgresKV {
    pub async fn get(&self, key: &str) -> Result<Option<Bytes>, ServerError> {
        let key = key.to_owned();
        self.transaction(|mut transaction| Box::pin(async move { transaction.get(&key).await }))
            .await
    }

    pub async fn set(&self, key: &str, value: Bytes) -> Result<(), ServerError> {
        let key = key.to_owned();
        self.transaction(|mut transaction| Box::pin(async move { transaction.set(&key, value).await }))
            .await
    }

    pub async fn remove(&self, key: &str) -> Result<(), ServerError> {
        let key = key.to_owned();
        self.transaction(|mut transaction| Box::pin(async move { transaction.remove(&key).await }))
            .await
    }

    pub async fn batch_set(&self, kvs: Vec<KeyValue>) -> Result<(), ServerError> {
        self.transaction(|mut transaction| Box::pin(async move { transaction.batch_set(kvs).await }))
            .await
    }

    pub async fn batch_get(&self, keys: Vec<String>) -> Result<Vec<KeyValue>, ServerError> {
        self.transaction(|mut transaction| Box::pin(async move { transaction.batch_get(keys).await }))
            .await
    }

    pub async fn transaction<F, O>(&self, f: F) -> Result<O, ServerError>
    where
        F: for<'a> FnOnce(Box<dyn KVTransaction + 'a>) -> BoxResultFuture<O, ServerError>,
    {
        let mut transaction = self
            .pg_pool
            .begin()
            .await
            .context("[KV]: Failed to acquire a Postgres connection")?;
        let postgres_transaction = PostgresTransaction(&mut transaction);
        let result = f(Box::new(postgres_transaction)).await;
        transaction
            .commit()
            .await
            .context("[KV]: Failed to commit SQL transaction.")?;

        result
    }
}

pub(crate) struct PostgresTransaction<'a, 'b>(&'a mut DBTransaction<'b>);

#[async_trait]
impl<'a, 'b> KVTransaction for PostgresTransaction<'a, 'b> {
    async fn get(&mut self, key: &str) -> Result<Option<Bytes>, ServerError> {
        let id = key.to_string();
        let (sql, args) = SqlBuilder::select(KV_TABLE)
            .add_field("*")
            .and_where_eq("id", &id)
            .build()?;

        let result = sqlx::query_as_with::<Postgres, KVTable, PgArguments>(&sql, args)
            .fetch_one(self.0 as &mut DBTransaction<'b>)
            .await;

        match result {
            Ok(val) => Ok(Some(Bytes::from(val.blob))),
            Err(error) => match error {
                Error::RowNotFound => Ok(None),
                _ => Err(map_sqlx_error(error)),
            },
        }
    }

    async fn set(&mut self, key: &str, bytes: Bytes) -> Result<(), ServerError> {
        self.batch_set(vec![KeyValue {
            key: key.to_string(),
            value: bytes,
        }])
        .await
    }

    async fn remove(&mut self, key: &str) -> Result<(), ServerError> {
        let id = key.to_string();
        let (sql, args) = SqlBuilder::delete(KV_TABLE).and_where_eq("id", &id).build()?;
        let _ = sqlx::query_with(&sql, args)
            .execute(self.0 as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;
        Ok(())
    }

    async fn batch_set(&mut self, kvs: Vec<KeyValue>) -> Result<(), ServerError> {
        let mut builder = RawSqlBuilder::insert_into(KV_TABLE);
        let m_builder = builder.field("id").field("blob");

        let mut args = PgArguments::default();
        kvs.iter().enumerate().for_each(|(index, _)| {
            let index = index * 2 + 1;
            m_builder.values(&[format!("${}", index), format!("${}", index + 1)]);
        });

        for kv in kvs {
            args.add(kv.key);
            args.add(kv.value.to_vec());
        }

        let sql = m_builder.sql()?;
        let _ = sqlx::query_with(&sql, args)
            .execute(self.0 as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;

        Ok::<(), ServerError>(())
    }

    async fn batch_get(&mut self, keys: Vec<String>) -> Result<Vec<KeyValue>, ServerError> {
        let sql = RawSqlBuilder::select_from(KV_TABLE)
            .field("id")
            .field("blob")
            .and_where_in_quoted("id", &keys)
            .sql()?;

        let rows = sqlx::query(&sql)
            .fetch_all(self.0 as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;
        let kvs = rows_to_key_values(rows);
        Ok::<Vec<KeyValue>, ServerError>(kvs)
    }

    async fn batch_delete(&mut self, keys: Vec<String>) -> Result<(), ServerError> {
        let sql = RawSqlBuilder::delete_from(KV_TABLE).and_where_in("id", &keys).sql()?;
        let _ = sqlx::query(&sql)
            .execute(self.0 as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;

        Ok::<(), ServerError>(())
    }

    async fn batch_get_start_with(&mut self, key: &str) -> Result<Vec<KeyValue>, ServerError> {
        let prefix = key.to_owned();
        let sql = RawSqlBuilder::select_from(KV_TABLE)
            .field("id")
            .field("blob")
            .and_where_like_left("id", &prefix)
            .sql()?;

        let rows = sqlx::query(&sql)
            .fetch_all(self.0 as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;

        let kvs = rows_to_key_values(rows);

        Ok::<Vec<KeyValue>, ServerError>(kvs)
    }

    async fn batch_delete_key_start_with(&mut self, keyword: &str) -> Result<(), ServerError> {
        let keyword = keyword.to_owned();
        let sql = RawSqlBuilder::delete_from(KV_TABLE)
            .and_where_like_left("id", &keyword)
            .sql()?;

        let _ = sqlx::query(&sql)
            .execute(self.0 as &mut DBTransaction<'_>)
            .await
            .map_err(map_sqlx_error)?;
        Ok::<(), ServerError>(())
    }
}

fn rows_to_key_values(rows: Vec<PgRow>) -> Vec<KeyValue> {
    rows.into_iter()
        .map(|row| {
            let bytes: Vec<u8> = row.get("blob");
            KeyValue {
                key: row.get("id"),
                value: Bytes::from(bytes),
            }
        })
        .collect::<Vec<KeyValue>>()
}

#[derive(Debug, Clone, sqlx::FromRow)]
struct KVTable {
    #[allow(dead_code)]
    pub(crate) id: String,
    pub(crate) blob: Vec<u8>,
}
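
// Usage sketch for the closure-based transaction API above: every step runs on
// the same Postgres transaction, which is committed after the closure's future
// resolves. The key names are placeholders.
async fn move_value_example(kv: &PostgresKV) -> Result<(), ServerError> {
    kv.transaction(|mut t| {
        Box::pin(async move {
            if let Some(value) = t.get("old_key").await? {
                t.set("new_key", value).await?;
                t.remove("old_key").await?;
            }
            Ok(())
        })
    })
    .await
}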
@ -1,41 +0,0 @@
#![allow(clippy::module_inception)]
mod kv;
pub mod revision_kv;

use async_trait::async_trait;
use bytes::Bytes;

pub(crate) use kv::*;

use backend_service::errors::ServerError;

// TODO: make the KVStore generic so the Postgres implementation can be
// swapped for another backend.
pub type KVStore = PostgresKV;

#[rustfmt::skip]
// https://rust-lang.github.io/async-book/07_workarounds/05_async_in_traits.html
// Note that using these trait methods will result in a heap allocation
// per-function-call. This is not a significant cost for the vast majority of
// applications, but should be considered when deciding whether to use this
// functionality in the public API of a low-level function that is expected to
// be called millions of times a second.
#[async_trait]
pub trait KVTransaction: Send + Sync {
    async fn get(&mut self, key: &str) -> Result<Option<Bytes>, ServerError>;
    async fn set(&mut self, key: &str, value: Bytes) -> Result<(), ServerError>;
    async fn remove(&mut self, key: &str) -> Result<(), ServerError>;

    async fn batch_set(&mut self, kvs: Vec<KeyValue>) -> Result<(), ServerError>;
    async fn batch_get(&mut self, keys: Vec<String>) -> Result<Vec<KeyValue>, ServerError>;
    async fn batch_delete(&mut self, keys: Vec<String>) -> Result<(), ServerError>;

    async fn batch_get_start_with(&mut self, key: &str) -> Result<Vec<KeyValue>, ServerError>;
    async fn batch_delete_key_start_with(&mut self, keyword: &str) -> Result<(), ServerError>;
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct KeyValue {
    pub key: String,
    pub value: Bytes,
}
@ -1,128 +0,0 @@
use crate::{
    services::kv::{KVStore, KeyValue},
    util::serde_ext::parse_from_bytes,
};
use backend_service::errors::ServerError;
use bytes::Bytes;
use flowy_collaboration::protobuf::{RepeatedRevision as RepeatedRevisionPB, Revision as RevisionPB};

use protobuf::Message;
use std::sync::Arc;

pub struct RevisionKVPersistence {
    inner: Arc<KVStore>,
}

impl std::ops::Deref for RevisionKVPersistence {
    type Target = Arc<KVStore>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl std::ops::DerefMut for RevisionKVPersistence {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

impl RevisionKVPersistence {
    pub(crate) fn new(kv_store: Arc<KVStore>) -> Self {
        RevisionKVPersistence { inner: kv_store }
    }

    pub(crate) async fn set_revision(&self, revisions: Vec<RevisionPB>) -> Result<(), ServerError> {
        let items = revisions_to_key_value_items(revisions)?;
        self.inner
            .transaction(|mut t| Box::pin(async move { t.batch_set(items).await }))
            .await
    }

    pub(crate) async fn get_revisions<T: Into<Option<Vec<i64>>>>(
        &self,
        object_id: &str,
        rev_ids: T,
    ) -> Result<RepeatedRevisionPB, ServerError> {
        let rev_ids = rev_ids.into();
        let items = match rev_ids {
            None => {
                let object_id = object_id.to_owned();
                self.inner
                    .transaction(|mut t| Box::pin(async move { t.batch_get_start_with(&object_id).await }))
                    .await?
            }
            Some(rev_ids) => {
                let keys = rev_ids
                    .into_iter()
                    .map(|rev_id| make_revision_key(object_id, rev_id))
                    .collect::<Vec<String>>();

                self.inner
                    .transaction(|mut t| Box::pin(async move { t.batch_get(keys).await }))
                    .await?
            }
        };

        Ok(key_value_items_to_revisions(items))
    }

    pub(crate) async fn delete_revisions<T: Into<Option<Vec<i64>>>>(
        &self,
        object_id: &str,
        rev_ids: T,
    ) -> Result<(), ServerError> {
        match rev_ids.into() {
            None => {
                let object_id = object_id.to_owned();
                self.inner
                    .transaction(|mut t| Box::pin(async move { t.batch_delete_key_start_with(&object_id).await }))
                    .await
            }
            Some(rev_ids) => {
                let keys = rev_ids
                    .into_iter()
                    .map(|rev_id| make_revision_key(object_id, rev_id))
                    .collect::<Vec<String>>();

                self.inner
                    .transaction(|mut t| Box::pin(async move { t.batch_delete(keys).await }))
                    .await
            }
        }
    }
}

#[inline]
pub fn revisions_to_key_value_items(revisions: Vec<RevisionPB>) -> Result<Vec<KeyValue>, ServerError> {
    let mut items = vec![];
    for revision in revisions {
        let key = make_revision_key(&revision.object_id, revision.rev_id);

        if revision.delta_data.is_empty() {
            return Err(ServerError::internal().context("The delta_data of RevisionPB should not be empty"));
        }

        let value = Bytes::from(revision.write_to_bytes().unwrap());
        items.push(KeyValue { key, value });
    }
    Ok(items)
}

#[inline]
fn key_value_items_to_revisions(items: Vec<KeyValue>) -> RepeatedRevisionPB {
    let mut revisions = items
        .into_iter()
        .filter_map(|kv| parse_from_bytes::<RevisionPB>(&kv.value).ok())
        .collect::<Vec<RevisionPB>>();

    revisions.sort_by(|a, b| a.rev_id.cmp(&b.rev_id));
    let mut repeated_revision = RepeatedRevisionPB::new();
    repeated_revision.set_items(revisions.into());
    repeated_revision
}

#[inline]
fn make_revision_key(object_id: &str, rev_id: i64) -> String {
    format!("{}:{}", object_id, rev_id)
}
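
// Added test sketch: the key layout above is what makes per-object prefix
// scans work. Every revision of an object shares the "{object_id}:" prefix
// that batch_get_start_with and batch_delete_key_start_with match on.
#[cfg(test)]
mod revision_key_tests {
    use super::make_revision_key;

    #[test]
    fn revision_keys_share_the_object_prefix() {
        assert_eq!(make_revision_key("doc_1", 7), "doc_1:7");
        assert!(make_revision_key("doc_1", 42).starts_with("doc_1:"));
    }
}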
@ -1,50 +0,0 @@
use log::LevelFilter;

use tracing::subscriber::set_global_default;

use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
use tracing_log::LogTracer;
use tracing_subscriber::{layer::SubscriberExt, EnvFilter};

pub struct Builder {
    name: String,
    env_filter: String,
}

impl Builder {
    pub fn new(name: &str) -> Self {
        Builder {
            name: name.to_owned(),
            env_filter: "Info".to_owned(),
        }
    }

    pub fn env_filter(mut self, env_filter: &str) -> Self {
        self.env_filter = env_filter.to_owned();
        self
    }

    pub fn build(self) -> std::result::Result<(), String> {
        let env_filter = EnvFilter::new(self.env_filter);
        let subscriber = tracing_subscriber::fmt()
            .with_target(true)
            .with_max_level(tracing::Level::TRACE)
            .with_writer(std::io::stderr)
            .with_thread_ids(true)
            .compact()
            .finish()
            .with(env_filter);

        let formatting_layer = BunyanFormattingLayer::new(self.name, std::io::stdout);
        let _ = set_global_default(subscriber.with(JsonStorageLayer).with(formatting_layer))
            .map_err(|e| format!("{:?}", e))?;

        LogTracer::builder()
            .with_max_level(LevelFilter::Debug)
            .init()
            .map_err(|e| format!("{:?}", e))?;

        Ok(())
    }
}
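
// Usage sketch (added): initialize logging once at startup. The "info" filter
// string is an example; EnvFilter accepts the usual RUST_LOG-style directives.
fn init_logging_example() -> std::result::Result<(), String> {
    Builder::new("backend").env_filter("info").build()
}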
@ -1,6 +0,0 @@
pub mod document;
pub mod folder;
pub mod kv;
pub(crate) mod log;
pub mod user;
pub mod web_socket;
@ -1,236 +0,0 @@
use crate::{
    entities::{
        logged_user::{LoggedUser, AUTHORIZED_USERS},
        token::Token,
        user::UserTable,
    },
    util::{
        sqlx_ext::{map_sqlx_error, DBTransaction, SqlBuilder},
        user_ext::{hash_password, verify_password},
    },
};
use anyhow::Context;
use backend_service::{
    errors::{invalid_params, ErrorCode, ServerError},
    response::FlowyResponse,
};
use chrono::Utc;
use flowy_user_data_model::{
    parser::{UserEmail, UserName, UserPassword},
    protobuf::{
        SignInParams as SignInParamsPB, SignInResponse as SignInResponsePB, SignUpParams as SignUpParamsPB,
        SignUpResponse as SignUpResponsePB, UpdateUserParams as UpdateUserParamsPB, UserProfile as UserProfilePB,
    },
};
use sqlx::{PgPool, Postgres};

pub async fn sign_in(pool: &PgPool, params: SignInParamsPB) -> Result<SignInResponsePB, ServerError> {
    let email = UserEmail::parse(params.email).map_err(|e| ServerError::params_invalid().context(e))?;
    let password = UserPassword::parse(params.password).map_err(|e| ServerError::params_invalid().context(e))?;

    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to sign in")?;

    let user = check_user_password(&mut transaction, email.as_ref(), password.as_ref()).await?;
    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to sign in.")?;

    let token = Token::create_token(&user.id.to_string())?;
    let logged_user = LoggedUser::new(&user.id.to_string());

    AUTHORIZED_USERS.store_auth(logged_user, true);
    let mut response_data = SignInResponsePB::default();
    response_data.set_user_id(user.id.to_string());
    response_data.set_name(user.name);
    response_data.set_email(user.email);
    response_data.set_token(token.into());

    Ok(response_data)
}

pub async fn sign_out(logged_user: LoggedUser) -> Result<FlowyResponse, ServerError> {
    AUTHORIZED_USERS.store_auth(logged_user, false);
    Ok(FlowyResponse::success())
}

pub async fn register_user(pool: &PgPool, params: SignUpParamsPB) -> Result<FlowyResponse, ServerError> {
    let name = UserName::parse(params.name).map_err(|e| ServerError::params_invalid().context(e))?;
    let email = UserEmail::parse(params.email).map_err(|e| ServerError::params_invalid().context(e))?;
    let password = UserPassword::parse(params.password).map_err(|e| ServerError::params_invalid().context(e))?;

    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to register user")?;

    let _ = is_email_exist(&mut transaction, email.as_ref()).await?;
    let response_data = insert_new_user(&mut transaction, name.as_ref(), email.as_ref(), password.as_ref())
        .await
        .context("Failed to insert user")?;

    let logged_user = LoggedUser::new(&response_data.user_id);
    AUTHORIZED_USERS.store_auth(logged_user, true);

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to register user.")?;

    FlowyResponse::success().pb(response_data)
}

pub(crate) async fn get_user_profile(
    pool: &PgPool,
    token: Token,
    logged_user: LoggedUser,
) -> Result<FlowyResponse, ServerError> {
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to get user detail")?;

    let id = logged_user.as_uuid()?;
    let user_table = sqlx::query_as::<Postgres, UserTable>("SELECT * FROM user_table WHERE id = $1")
        .bind(id)
        .fetch_one(&mut transaction)
        .await
        .map_err(|err| ServerError::internal().context(err))?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to get user detail.")?;

    // Update the user's last-active time.
    AUTHORIZED_USERS.store_auth(logged_user, true);

    let mut user_profile = UserProfilePB::default();
    user_profile.set_id(user_table.id.to_string());
    user_profile.set_email(user_table.email);
    user_profile.set_name(user_table.name);
    user_profile.set_token(token.0);
    FlowyResponse::success().pb(user_profile)
}

pub(crate) async fn set_user_profile(
    pool: &PgPool,
    logged_user: LoggedUser,
    params: UpdateUserParamsPB,
) -> Result<FlowyResponse, ServerError> {
    let mut transaction = pool
        .begin()
        .await
        .context("Failed to acquire a Postgres connection to update user profile")?;

    let name = match params.has_name() {
        false => None,
        true => Some(UserName::parse(params.get_name().to_owned()).map_err(invalid_params)?.0),
    };

    let email = match params.has_email() {
        false => None,
        true => Some(
            UserEmail::parse(params.get_email().to_owned())
                .map_err(invalid_params)?
                .0,
        ),
    };

    let password = match params.has_password() {
        false => None,
        true => {
            let password = UserPassword::parse(params.get_password().to_owned()).map_err(invalid_params)?;
            let password = hash_password(password.as_ref())?;
            Some(password)
        }
    };

    let (sql, args) = SqlBuilder::update("user_table")
        .add_some_arg("name", name)
        .add_some_arg("email", email)
        .add_some_arg("password", password)
        .and_where_eq("id", &logged_user.as_uuid()?)
        .build()?;

    sqlx::query_with(&sql, args)
        .execute(&mut transaction)
        .await
        .map_err(map_sqlx_error)?;

    transaction
        .commit()
        .await
        .context("Failed to commit SQL transaction to update user profile.")?;

    Ok(FlowyResponse::success())
}

async fn is_email_exist(transaction: &mut DBTransaction<'_>, email: &str) -> Result<(), ServerError> {
    let result = sqlx::query(r#"SELECT email FROM user_table WHERE email = $1"#)
        .bind(email)
        .fetch_optional(transaction)
        .await
        .map_err(|err| ServerError::internal().context(err))?;

    match result {
        Some(_) => Err(ServerError {
            code: ErrorCode::EmailAlreadyExists,
            msg: format!("{} already exists", email),
        }),
        None => Ok(()),
    }
}

async fn check_user_password(
    transaction: &mut DBTransaction<'_>,
    email: &str,
    password: &str,
) -> Result<UserTable, ServerError> {
    let user = sqlx::query_as::<Postgres, UserTable>("SELECT * FROM user_table WHERE email = $1")
        .bind(email)
        .fetch_one(transaction)
        .await
        .map_err(|err| ServerError::internal().context(err))?;

    match verify_password(password, &user.password) {
        Ok(true) => Ok(user),
        _ => Err(ServerError::password_not_match()),
    }
}

async fn insert_new_user(
    transaction: &mut DBTransaction<'_>,
    name: &str,
    email: &str,
    password: &str,
) -> Result<SignUpResponsePB, ServerError> {
    let uuid = uuid::Uuid::new_v4();
    let token = Token::create_token(&uuid.to_string())?;
    let password = hash_password(password)?;
    let _ = sqlx::query!(
        r#"
            INSERT INTO user_table (id, email, name, create_time, password)
            VALUES ($1, $2, $3, $4, $5)
        "#,
        uuid,
        email,
        name,
        Utc::now(),
        password,
    )
    .execute(transaction)
    .await
    .map_err(|e| ServerError::internal().context(e))?;

    let mut response = SignUpResponsePB::default();
    response.set_user_id(uuid.to_string());
    response.set_name(name.to_string());
    response.set_email(email.to_string());
    response.set_token(token.into());

    Ok(response)
}
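
// Round-trip sketch for the helpers used above; hash_password is assumed to
// return the encoded hash string that verify_password later checks against.
fn password_round_trip_example() -> Result<(), ServerError> {
    let hashed = hash_password("HelloWorld!123")?;
    assert!(matches!(verify_password("HelloWorld!123", &hashed), Ok(true)));
    Ok(())
}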
@ -1,4 +0,0 @@
pub use controller::*;

mod controller;
pub mod router;
@ -1,64 +0,0 @@
use crate::{
    entities::{logged_user::LoggedUser, token::Token},
    services::user::{get_user_profile, register_user, set_user_profile, sign_in, sign_out},
    util::serde_ext::parse_from_payload,
};
use actix_identity::Identity;
use actix_web::{
    web::{Data, Payload},
    HttpRequest, HttpResponse,
};
use backend_service::{errors::ServerError, response::FlowyResponse};
use flowy_user_data_model::protobuf::{
    SignInParams as SignInParamsPB, SignUpParams as SignUpParamsPB, UpdateUserParams as UpdateUserParamsPB,
};
use sqlx::PgPool;

pub async fn sign_in_handler(payload: Payload, id: Identity, pool: Data<PgPool>) -> Result<HttpResponse, ServerError> {
    let params: SignInParamsPB = parse_from_payload(payload).await?;
    let data = sign_in(pool.get_ref(), params).await?;
    id.remember(data.token.clone());
    let response = FlowyResponse::success().pb(data)?;
    Ok(response.into())
}

pub async fn sign_out_handler(logged_user: LoggedUser, id: Identity) -> Result<HttpResponse, ServerError> {
    id.forget();

    let response = sign_out(logged_user).await?;
    Ok(response.into())
}

pub async fn get_user_profile_handler(
    token: Token,
    logged_user: LoggedUser,
    pool: Data<PgPool>,
) -> Result<HttpResponse, ServerError> {
    let response = get_user_profile(pool.get_ref(), token, logged_user).await?;
    Ok(response.into())
}

pub async fn set_user_profile_handler(
    logged_user: LoggedUser,
    pool: Data<PgPool>,
    payload: Payload,
) -> Result<HttpResponse, ServerError> {
    let params: UpdateUserParamsPB = parse_from_payload(payload).await?;
    let response = set_user_profile(pool.get_ref(), logged_user, params).await?;
    Ok(response.into())
}

pub async fn register_handler(payload: Payload, pool: Data<PgPool>) -> Result<HttpResponse, ServerError> {
    let params: SignUpParamsPB = parse_from_payload(payload).await?;
    let resp = register_user(pool.get_ref(), params).await?;

    Ok(resp.into())
}

pub async fn change_password(
    _request: HttpRequest,
    _payload: Payload,
    _pool: Data<PgPool>,
) -> Result<HttpResponse, ServerError> {
    unimplemented!()
}
@ -1,50 +0,0 @@
use crate::services::web_socket::WebSocketMessage;
use actix::{Message, Recipient};
use backend_service::errors::ServerError;
use serde::{Deserialize, Serialize};
use std::fmt::Formatter;

pub type Socket = Recipient<WebSocketMessage>;

#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq)]
pub struct SessionId(pub String);

impl<T: AsRef<str>> std::convert::From<T> for SessionId {
    fn from(s: T) -> Self {
        SessionId(s.as_ref().to_owned())
    }
}

impl std::fmt::Display for SessionId {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let desc = &self.0.to_string();
        f.write_str(desc)
    }
}

pub struct Session {
    pub id: SessionId,
    pub socket: Socket,
}

impl std::convert::From<Connect> for Session {
    fn from(c: Connect) -> Self {
        Self {
            id: c.sid,
            socket: c.socket,
        }
    }
}

#[derive(Debug, Message, Clone)]
#[rtype(result = "Result<(), ServerError>")]
pub struct Connect {
    pub socket: Socket,
    pub sid: SessionId,
}

#[derive(Debug, Message, Clone)]
#[rtype(result = "Result<(), ServerError>")]
pub struct Disconnect {
    pub sid: SessionId,
}
@ -1,27 +0,0 @@
use actix::Message;
use bytes::Bytes;
use flowy_collaboration::entities::ws_data::ServerRevisionWSData;
use lib_ws::{WSChannel, WebSocketRawMessage};
use std::convert::TryInto;

#[derive(Debug, Message, Clone)]
#[rtype(result = "()")]
pub struct WebSocketMessage(pub Bytes);

impl std::ops::Deref for WebSocketMessage {
    type Target = Bytes;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

pub fn revision_data_to_ws_message(data: ServerRevisionWSData, channel: WSChannel) -> WebSocketMessage {
    let bytes: Bytes = data.try_into().unwrap();
    let msg = WebSocketRawMessage {
        channel,
        data: bytes.to_vec(),
    };
    let bytes: Bytes = msg.try_into().unwrap();
    WebSocketMessage(bytes)
}
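
// Round-trip sketch (added): the inverse of the helper above lives on the
// receive side (see WSClient::handle_binary_message). It assumes the Bytes
// conversions on WebSocketRawMessage are symmetric.
fn ws_message_round_trip_example(data: ServerRevisionWSData) {
    use std::convert::TryFrom;
    let msg = revision_data_to_ws_message(data, WSChannel::Folder);
    let raw = WebSocketRawMessage::try_from(msg.0.clone()).unwrap();
    assert_eq!(raw.channel, WSChannel::Folder);
}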
@ -1,5 +0,0 @@
pub use connect::*;
pub use message::*;

mod connect;
pub mod message;
@ -1,8 +0,0 @@
pub use entities::message::*;
pub use ws_client::*;
pub use ws_server::*;

pub(crate) mod entities;
pub mod router;
mod ws_client;
mod ws_server;
@ -1,61 +0,0 @@
use crate::{
    entities::logged_user::LoggedUser,
    services::web_socket::{WSClient, WSServer, WSUser, WebSocketReceivers},
};
use actix::Addr;
use actix_web::{
    get,
    web::{Data, Path, Payload},
    Error, HttpRequest, HttpResponse,
};
use actix_web_actors::ws;

#[rustfmt::skip]
//                    WsClient
//                 ┌─────────────┐
//                 │  ┌────────┐ │
// wss://xxx ─────▶│  │ WsUser │ │───┐
//                 │  └────────┘ │   │
//                 └─────────────┘   │
//                                   │
//                                   │    ┌──────────────────┐ 1   n ┌──────────────────┐
//                                   ├───▶│WebSocketReceivers│◆────│WebSocketReceiver │
//                                   │    └──────────────────┘     └──────────────────┘
//                    WsClient       │                                      △
//                 ┌─────────────┐   │                                      │
//                 │  ┌────────┐ │   │                                      │
// wss://xxx ─────▶│  │ WsUser │ │───┘                              ┌───────────────┐
//                 │  └────────┘ │                                  │DocumentManager│
//                 └─────────────┘                                  └───────────────┘
#[get("/{token}")]
pub async fn establish_ws_connection(
    request: HttpRequest,
    payload: Payload,
    token: Path<String>,
    server: Data<Addr<WSServer>>,
    ws_receivers: Data<WebSocketReceivers>,
) -> Result<HttpResponse, Error> {
    tracing::info!("establish_ws_connection");
    match LoggedUser::from_token(token.clone()) {
        Ok(user) => {
            let ws_user = WSUser::new(user);
            let client = WSClient::new(ws_user, server.get_ref().clone(), ws_receivers);
            let result = ws::start(client, &request, payload);
            match result {
                Ok(response) => Ok(response),
                Err(e) => {
                    log::error!("ws connection error: {:?}", e);
                    Err(e)
                },
            }
        },
        Err(e) => {
            if e.is_unauthorized() {
                Ok(HttpResponse::Unauthorized().json(e))
            } else {
                Ok(HttpResponse::BadRequest().json(e))
            }
        },
    }
}
@ -1,181 +0,0 @@
use crate::{
    config::{HEARTBEAT_INTERVAL, PING_TIMEOUT},
    entities::logged_user::LoggedUser,
    services::web_socket::{
        entities::{Connect, Disconnect, Socket},
        WSServer, WebSocketMessage,
    },
};
use actix::*;
use actix_web::web::Data;
use actix_web_actors::{ws, ws::Message::Text};
use bytes::Bytes;
use lib_ws::{WSChannel, WebSocketRawMessage};
use std::{collections::HashMap, convert::TryFrom, sync::Arc, time::Instant};

pub trait WebSocketReceiver: Send + Sync {
    fn receive(&self, data: WSClientData);
}

#[derive(Default)]
pub struct WebSocketReceivers {
    inner: HashMap<WSChannel, Arc<dyn WebSocketReceiver>>,
}

impl WebSocketReceivers {
    pub fn new() -> Self {
        WebSocketReceivers::default()
    }

    pub fn set(&mut self, channel: WSChannel, receiver: Arc<dyn WebSocketReceiver>) {
        tracing::trace!("Add {:?} receiver", channel);
        self.inner.insert(channel, receiver);
    }

    pub fn get(&self, source: &WSChannel) -> Option<Arc<dyn WebSocketReceiver>> {
        self.inner.get(source).cloned()
    }
}

#[derive(Debug)]
pub struct WSUser {
    inner: LoggedUser,
}

impl WSUser {
    pub fn new(inner: LoggedUser) -> Self {
        Self { inner }
    }

    pub fn id(&self) -> &str {
        &self.inner.user_id
    }
}

pub struct WSClientData {
    pub(crate) user: Arc<WSUser>,
    pub(crate) socket: Socket,
    pub(crate) data: Bytes,
}

pub struct WSClient {
    user: Arc<WSUser>,
    server: Addr<WSServer>,
    ws_receivers: Data<WebSocketReceivers>,
    hb: Instant,
}

impl WSClient {
    pub fn new(user: WSUser, server: Addr<WSServer>, ws_receivers: Data<WebSocketReceivers>) -> Self {
        Self {
            user: Arc::new(user),
            server,
            ws_receivers,
            hb: Instant::now(),
        }
    }

    fn hb(&self, ctx: &mut ws::WebsocketContext<Self>) {
        ctx.run_interval(HEARTBEAT_INTERVAL, |client, ctx| {
            if Instant::now().duration_since(client.hb) > PING_TIMEOUT {
                client.server.do_send(Disconnect {
                    sid: client.user.id().into(),
                });
                ctx.stop();
            } else {
                ctx.ping(b"");
            }
        });
    }

    fn handle_binary_message(&self, bytes: Bytes, socket: Socket) {
        // TODO: ok to unwrap?
        let message: WebSocketRawMessage = WebSocketRawMessage::try_from(bytes).unwrap();
        match self.ws_receivers.get(&message.channel) {
            None => {
                log::error!("Can't find the receiver for {:?}", message.channel);
            }
            Some(handler) => {
                let client_data = WSClientData {
                    user: self.user.clone(),
                    socket,
                    data: Bytes::from(message.data),
                };
                handler.receive(client_data);
            }
        }
    }
}

impl StreamHandler<Result<ws::Message, ws::ProtocolError>> for WSClient {
    fn handle(&mut self, msg: Result<ws::Message, ws::ProtocolError>, ctx: &mut Self::Context) {
        match msg {
            Ok(ws::Message::Ping(msg)) => {
                self.hb = Instant::now();
                ctx.pong(&msg);
            }
            Ok(ws::Message::Pong(_msg)) => {
                // tracing::debug!("Receive {} pong {:?}", &self.session_id, &msg);
                self.hb = Instant::now();
            }
            Ok(ws::Message::Binary(bytes)) => {
                let socket = ctx.address().recipient();
                self.handle_binary_message(bytes, socket);
            }
            Ok(Text(_)) => {
                log::warn!("Receive unexpected text message");
            }
            Ok(ws::Message::Close(reason)) => {
                ctx.close(reason);
                ctx.stop();
            }
            Ok(ws::Message::Continuation(_)) => {}
            Ok(ws::Message::Nop) => {}
            Err(e) => {
                log::error!("[{}]: WebSocketStream protocol error {:?}", self.user.id(), e);
                ctx.stop();
            }
        }
    }
}

impl Handler<WebSocketMessage> for WSClient {
    type Result = ();

    fn handle(&mut self, msg: WebSocketMessage, ctx: &mut Self::Context) {
        ctx.binary(msg.0);
    }
}

impl Actor for WSClient {
    type Context = ws::WebsocketContext<Self>;

    fn started(&mut self, ctx: &mut Self::Context) {
        self.hb(ctx);
        let socket = ctx.address().recipient();
        let connect = Connect {
            socket,
            sid: self.user.id().into(),
        };
        self.server
            .send(connect)
            .into_actor(self)
            .then(|res, _client, _ctx| {
                match res {
                    Ok(Ok(_)) => tracing::trace!("Send connect message to server success"),
                    Ok(Err(e)) => log::error!("Send connect message to server failed: {:?}", e),
                    Err(e) => log::error!("Send connect message to server failed: {:?}", e),
                }
                fut::ready(())
            })
            .wait(ctx);
    }

    fn stopping(&mut self, _: &mut Self::Context) -> Running {
        self.server.do_send(Disconnect {
            sid: self.user.id().into(),
        });

        Running::Stop
    }
}
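
// The liveness rule implemented by WSClient::hb above, pulled out for clarity:
// a connection counts as alive while the gap since the last Ping/Pong is at
// most PING_TIMEOUT. Added for illustration only.
fn is_connection_alive(last_heartbeat: Instant, now: Instant) -> bool {
    now.duration_since(last_heartbeat) <= PING_TIMEOUT
}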
@ -1,65 +0,0 @@
use crate::services::web_socket::{
    entities::{Connect, Disconnect, Session, SessionId},
    WebSocketMessage,
};
use actix::{Actor, Context, Handler};
use backend_service::errors::ServerError;
use dashmap::DashMap;

pub struct WSServer {
    sessions: DashMap<SessionId, Session>,
}

impl std::default::Default for WSServer {
    fn default() -> Self {
        Self {
            sessions: DashMap::new(),
        }
    }
}
impl WSServer {
    pub fn new() -> Self {
        WSServer::default()
    }

    pub fn send(&self, _msg: WebSocketMessage) {
        unimplemented!()
    }
}

impl Actor for WSServer {
    type Context = Context<Self>;
    fn started(&mut self, _ctx: &mut Self::Context) {}
}

impl Handler<Connect> for WSServer {
    type Result = Result<(), ServerError>;
    fn handle(&mut self, msg: Connect, _ctx: &mut Context<Self>) -> Self::Result {
        let session: Session = msg.into();
        self.sessions.insert(session.id.clone(), session);

        Ok(())
    }
}

impl Handler<Disconnect> for WSServer {
    type Result = Result<(), ServerError>;
    fn handle(&mut self, msg: Disconnect, _: &mut Context<Self>) -> Self::Result {
        self.sessions.remove(&msg.sid);
        Ok(())
    }
}

impl Handler<WebSocketMessage> for WSServer {
    type Result = ();

    fn handle(&mut self, _msg: WebSocketMessage, _ctx: &mut Context<Self>) -> Self::Result {
        unimplemented!()
    }
}

impl actix::Supervised for WSServer {
    fn restarting(&mut self, _ctx: &mut Context<WSServer>) {
        log::warn!("restarting");
    }
}
@ -1,3 +0,0 @@
pub mod serde_ext;
pub mod sqlx_ext;
pub mod user_ext;
@ -1,46 +0,0 @@
use crate::config::MAX_PAYLOAD_SIZE;
use actix_web::web;
use backend_service::errors::{ErrorCode, ServerError};
use futures::StreamExt;
use protobuf::{Message, ProtobufResult};

pub async fn parse_from_payload<T: Message>(payload: web::Payload) -> Result<T, ServerError> {
    let bytes = poll_payload(&mut payload.into_inner()).await?;
    parse_from_bytes(&bytes)
}

#[allow(dead_code)]
pub async fn parse_from_dev_payload<T: Message>(payload: &mut actix_web::dev::Payload) -> Result<T, ServerError> {
    let bytes = poll_payload(payload).await?;
    parse_from_bytes(&bytes)
}

#[inline]
pub fn md5<T: AsRef<[u8]>>(data: T) -> String {
    let md5 = format!("{:x}", md5::compute(data));
    md5
}

pub fn parse_from_bytes<T: Message>(bytes: &[u8]) -> Result<T, ServerError> {
    let result: ProtobufResult<T> = Message::parse_from_bytes(bytes);
    match result {
        Ok(data) => Ok(data),
        Err(e) => Err(e.into()),
    }
}

pub async fn poll_payload(payload: &mut actix_web::dev::Payload) -> Result<web::BytesMut, ServerError> {
    let mut body = web::BytesMut::new();
    while let Some(chunk) = payload.next().await {
        let chunk = chunk.map_err(|err| ServerError::internal().context(err))?;

        if (body.len() + chunk.len()) > MAX_PAYLOAD_SIZE {
            return Err(ServerError::new(
                "Payload overflow".to_string(),
                ErrorCode::PayloadOverflow,
            ));
        }
        body.extend_from_slice(&chunk);
    }
    Ok(body)
}
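
A hedged sketch of how these helpers were typically wired into an actix-web route; the handler name, the route, and the SignUpParams message are illustrative stand-ins, not taken from the removed backend:

use actix_web::{web, HttpResponse};

// Hypothetical handler: decode a protobuf-encoded request body, bounded by
// MAX_PAYLOAD_SIZE, into a generated message type.
pub async fn register_handler(payload: web::Payload) -> Result<HttpResponse, ServerError> {
    let _params: SignUpParams = parse_from_payload(payload).await?;
    // ... handle the decoded request ...
    Ok(HttpResponse::Ok().finish())
}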
@ -1,6 +0,0 @@
mod query;
mod utils;

pub use utils::*;
pub use query::*;
@ -1,159 +0,0 @@
use backend_service::errors::ServerError;
use sql_builder::SqlBuilder as InnerBuilder;
use sqlx::{postgres::PgArguments, Arguments, Encode, Postgres, Type};

enum BuilderType {
    Create,
    Select,
    Update,
    Delete,
}

pub struct SqlBuilder {
    table: String,
    fields: Vec<String>,
    filters: Vec<String>,
    fields_args: PgArguments,
    ty: BuilderType,
}

impl SqlBuilder {
    fn new(table: &str) -> Self {
        Self {
            table: table.to_owned(),
            fields: vec![],
            filters: vec![],
            fields_args: PgArguments::default(),
            ty: BuilderType::Select,
        }
    }

    pub fn create(table: &str) -> Self {
        let mut builder = Self::new(table);
        builder.ty = BuilderType::Create;
        builder
    }

    pub fn select(table: &str) -> Self {
        let mut builder = Self::new(table);
        builder.ty = BuilderType::Select;
        builder
    }

    pub fn update(table: &str) -> Self {
        let mut builder = Self::new(table);
        builder.ty = BuilderType::Update;
        builder
    }

    pub fn delete(table: &str) -> Self {
        let mut builder = Self::new(table);
        builder.ty = BuilderType::Delete;
        builder
    }

    pub fn add_field_with_arg<'a, T>(mut self, field: &str, arg: T) -> Self
    where
        T: 'a + Send + Encode<'a, Postgres> + Type<Postgres>,
    {
        self.fields.push(field.to_owned());
        self.fields_args.add(arg);
        self
    }

    #[allow(dead_code)]
    pub fn add_arg_if<'a, T>(self, add: bool, field: &str, arg: T) -> Self
    where
        T: 'a + Send + Encode<'a, Postgres> + Type<Postgres>,
    {
        if add {
            self.add_field_with_arg(field, arg)
        } else {
            self
        }
    }

    pub fn add_some_arg<'a, T>(self, field: &str, arg: Option<T>) -> Self
    where
        T: 'a + Send + Encode<'a, Postgres> + Type<Postgres>,
    {
        if let Some(arg) = arg {
            self.add_field_with_arg(field, arg)
        } else {
            self
        }
    }

    pub fn add_field(mut self, field: &str) -> Self {
        self.fields.push(field.to_owned());
        self
    }

    pub fn and_where_eq<'a, T>(mut self, field: &str, arg: T) -> Self
    where
        T: 'a + Send + Encode<'a, Postgres> + Type<Postgres>,
    {
        self.filters.push(field.to_owned());
        self.fields_args.add(arg);
        self
    }

    pub fn build(self) -> Result<(String, PgArguments), ServerError> {
        match self.ty {
            BuilderType::Create => {
                let mut inner = InnerBuilder::insert_into(&self.table);
                self.fields.iter().for_each(|field| {
                    inner.field(field);
                });

                let values = self
                    .fields
                    .iter()
                    .enumerate()
                    .map(|(index, _)| format!("${}", index + 1))
                    .collect::<Vec<String>>();

                inner.values(&values);

                let sql = inner.sql()?;
                Ok((sql, self.fields_args))
            }
            BuilderType::Select => {
                let mut inner = InnerBuilder::select_from(&self.table);
                self.fields.into_iter().for_each(|field| {
                    inner.field(field);
                });

                self.filters.into_iter().enumerate().for_each(|(index, filter)| {
                    inner.and_where_eq(filter, format!("${}", index + 1));
                });

                let sql = inner.sql()?;
                Ok((sql, self.fields_args))
            }
            BuilderType::Update => {
                let mut inner = InnerBuilder::update_table(&self.table);
                let field_len = self.fields.len();
                self.fields.into_iter().enumerate().for_each(|(index, field)| {
                    inner.set(&field, format!("${}", index + 1));
                });

                self.filters.into_iter().enumerate().for_each(|(index, filter)| {
                    let index = index + field_len;
                    inner.and_where_eq(filter, format!("${}", index + 1));
                });

                let sql = inner.sql()?;
                Ok((sql, self.fields_args))
            }
            BuilderType::Delete => {
                let mut inner = InnerBuilder::delete_from(&self.table);
                self.filters.into_iter().enumerate().for_each(|(index, filter)| {
                    inner.and_where_eq(filter, format!("${}", index + 1));
                });
                let sql = inner.sql()?;
                Ok((sql, self.fields_args))
            }
        }
    }
}
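
A hedged usage sketch of the builder above; the table and column names are illustrative, not taken from the backend's schema. Note that fields are bound before filters, which is why build() offsets the filter placeholders by field_len:

// Hypothetical: UPDATE user_table SET name = $1, email = $2 WHERE id = $3;
let (sql, args) = SqlBuilder::update("user_table")
    .add_field_with_arg("name", "annie")
    .add_some_arg("email", Some("annie@appflowy.io"))
    .and_where_eq("id", user_id)
    .build()?;
sqlx::query_with(&sql, args).execute(&pool).await?;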
@ -1,11 +0,0 @@
use backend_service::errors::{ErrorCode, ServerError};
use sqlx::{Error, Postgres, Transaction};

pub type DBTransaction<'a> = Transaction<'a, Postgres>;

pub fn map_sqlx_error(error: sqlx::Error) -> ServerError {
    match error {
        Error::RowNotFound => ServerError::new("".to_string(), ErrorCode::RecordNotFound),
        _ => ServerError::internal().context(error),
    }
}
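
A hedged sketch of how map_sqlx_error was typically applied at a query call site; the query and table name are illustrative:

// Hypothetical: a missing row surfaces as ErrorCode::RecordNotFound instead
// of an opaque internal error.
let row = sqlx::query("SELECT * FROM user_table WHERE id = $1")
    .bind(user_id)
    .fetch_one(&pool)
    .await
    .map_err(map_sqlx_error)?;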
@ -1,31 +0,0 @@
use backend_service::errors::{ErrorCode, ServerError};
use bcrypt::{hash, verify, DEFAULT_COST};

#[allow(dead_code)]
pub fn uuid() -> String {
    uuid::Uuid::new_v4().to_string()
}

pub fn hash_password(plain: &str) -> Result<String, ServerError> {
    let hashing_cost = std::env::var("HASH_COST")
        .ok()
        .and_then(|c| c.parse().ok())
        .unwrap_or(DEFAULT_COST);

    hash(plain, hashing_cost).map_err(|e| ServerError::internal().context(e))
}

// The source is the password the user entered; the hash is the bcrypt digest
// of that source. For example:
//   let source = "123";
//   let hash = hash_password(source).unwrap();
//
//   verify_password(source, hash)
pub fn verify_password(source: &str, hash: &str) -> Result<bool, ServerError> {
    match verify(source, hash) {
        Ok(true) => Ok(true),
        _ => Err(ServerError::new(
            "Username and password don't match".to_string(),
            ErrorCode::PasswordNotMatch,
        )),
    }
}
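
A hedged round-trip sketch of the two helpers above; the HASH_COST value is an illustrative override:

// Hypothetical: raise the bcrypt cost via the environment, then verify.
std::env::set_var("HASH_COST", "12");
let digest = hash_password("HelloWorld123!").unwrap();
assert!(verify_password("HelloWorld123!", &digest).unwrap());
assert!(verify_password("wrong-password", &digest).is_err());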
@ -1,126 +0,0 @@
use crate::util::helper::{spawn_user_server, TestUserServer};
use backend_service::errors::ErrorCode;
use flowy_user_data_model::entities::{SignInParams, SignUpParams, SignUpResponse, UpdateUserParams};

#[actix_rt::test]
async fn user_register() {
    let app = spawn_user_server().await;
    let response = register_user(&app, "annie@appflowy.io", "HelloWorld123!").await;
    tracing::info!("{:?}", response);
}

#[actix_rt::test]
#[should_panic]
async fn user_sign_in_with_invalid_password() {
    let app = spawn_user_server().await;
    let email = "annie@appflowy.io";
    let password = "123";
    let _ = register_user(&app, email, password).await;
}

#[actix_rt::test]
#[should_panic]
async fn user_sign_in_with_invalid_email() {
    let app = spawn_user_server().await;
    let email = "annie@gmail@";
    let password = "HelloWorld123!";
    let _ = register_user(&app, email, password).await;
}

#[actix_rt::test]
async fn user_sign_in() {
    let app = spawn_user_server().await;
    let email = "annie@appflowy.io";
    let password = "HelloWorld123!";
    let _ = register_user(&app, email, password).await;
    let params = SignInParams {
        email: email.to_string(),
        password: password.to_string(),
        name: "rust".to_string(),
    };
    let _ = app.sign_in(params).await.unwrap();
}

#[actix_rt::test]
#[should_panic]
async fn user_sign_out() {
    let server = TestUserServer::new().await;
    server.sign_out().await;

    // user_detail will be empty because the user was signed out.
    server.get_user_profile().await;
}

#[actix_rt::test]
async fn user_get_detail() {
    let server = TestUserServer::new().await;
    tracing::info!("{:?}", server.get_user_profile().await);
}

#[actix_rt::test]
async fn user_update_password() {
    let mut server = spawn_user_server().await;
    let email = "annie@appflowy.io";
    let password = "HelloWorld123!";
    let sign_up_resp = register_user(&server, email, password).await;

    let params = UpdateUserParams::new(&sign_up_resp.user_id).password("Hello123!");
    server.user_token = Some(sign_up_resp.token);

    server.update_user_profile(params).await.unwrap();

    let sign_in_params = SignInParams {
        email: email.to_string(),
        password: password.to_string(),
        name: "rust".to_string(),
    };

    match server.sign_in(sign_in_params).await {
        Ok(_) => {}
        Err(e) => {
            assert_eq!(e.code, ErrorCode::PasswordNotMatch);
        }
    }
}

#[actix_rt::test]
async fn user_update_name() {
    let server = TestUserServer::new().await;

    let name = "tom".to_string();
    let params = UpdateUserParams::new(server.user_id()).name(&name);
    server.update_user_profile(params).await.unwrap();

    let user = server.get_user_profile().await;
    assert_eq!(user.name, name);
}

#[actix_rt::test]
async fn user_update_email() {
    let server = TestUserServer::new().await;
    let email = "123@gmail.com".to_string();
    let params = UpdateUserParams::new(server.user_id()).email(&email);
    server.update_user_profile(params).await.unwrap();

    let user = server.get_user_profile().await;
    assert_eq!(user.email, email);
}

#[allow(dead_code)]
async fn sign_up_user(server: &TestUserServer) -> SignUpResponse {
    let email = "annie@appflowy.io";
    let password = "HelloWorld123!";
    let response = register_user(server, email, password).await;
    response
}

async fn register_user(server: &TestUserServer, email: &str, password: &str) -> SignUpResponse {
    let params = SignUpParams {
        email: email.to_string(),
        name: "annie".to_string(),
        password: password.to_string(),
    };

    let response = server.register(params).await;
    response
}
@ -1,79 +0,0 @@
use crate::util::helper::spawn_server;
use backend::services::kv::KeyValue;
use std::str;

#[actix_rt::test]
async fn kv_set_test() {
    let server = spawn_server().await;
    let kv = server.app_ctx.persistence.document_kv_store();
    let s1 = "123".to_string();
    let key = "1";

    let _ = kv.set(key, s1.clone().into()).await.unwrap();
    let bytes = kv.get(key).await.unwrap().unwrap();
    let s2 = str::from_utf8(&bytes).unwrap();
    assert_eq!(s1, s2);
}

#[actix_rt::test]
async fn kv_delete_test() {
    let server = spawn_server().await;
    let kv = server.app_ctx.persistence.document_kv_store();
    let s1 = "123".to_string();
    let key = "1";

    let _ = kv.set(key, s1.clone().into()).await.unwrap();
    let _ = kv.remove(key).await.unwrap();
    assert_eq!(kv.get(key).await.unwrap(), None);
}

#[actix_rt::test]
async fn kv_batch_set_test() {
    let server = spawn_server().await;
    let kv = server.app_ctx.persistence.document_kv_store();
    let kvs = vec![
        KeyValue {
            key: "1".to_string(),
            value: "a".to_string().into(),
        },
        KeyValue {
            key: "2".to_string(),
            value: "b".to_string().into(),
        },
    ];

    kv.batch_set(kvs.clone()).await.unwrap();
    let kvs_from_db = kv
        .batch_get(kvs.clone().into_iter().map(|value| value.key).collect::<Vec<String>>())
        .await
        .unwrap();

    assert_eq!(kvs, kvs_from_db);
}

#[actix_rt::test]
async fn kv_batch_get_start_with_test() {
    let server = spawn_server().await;
    let kv = server.app_ctx.persistence.document_kv_store();
    let kvs = vec![
        KeyValue {
            key: "abc:1".to_string(),
            value: "a".to_string().into(),
        },
        KeyValue {
            key: "abc:2".to_string(),
            value: "b".to_string().into(),
        },
    ];

    kv.batch_set(kvs.clone()).await.unwrap();
    kv.transaction(|mut transaction| {
        Box::pin(async move {
            let kvs_from_db = transaction.batch_get_start_with("abc").await.unwrap();
            assert_eq!(kvs, kvs_from_db);
            Ok(())
        })
    })
    .await
    .unwrap();
}
@ -1,3 +0,0 @@
mod auth_test;
mod kv_test;
mod workspace_test;
@ -1,282 +0,0 @@
#![allow(clippy::all)]

use crate::util::helper::{BackendViewTest, *};
use flowy_collaboration::{
    client_document::{ClientDocument, PlainDoc},
    entities::{
        document_info::{CreateDocParams, DocumentId},
        revision::{md5, RepeatedRevision, Revision},
    },
};
use flowy_folder_data_model::entities::{
    app::{AppId, UpdateAppParams},
    trash::{RepeatedTrashId, TrashId, TrashType},
    view::{RepeatedViewId, UpdateViewParams, ViewId},
    workspace::{CreateWorkspaceParams, UpdateWorkspaceParams, WorkspaceId},
};

#[actix_rt::test]
async fn workspace_create() {
    let test = BackendWorkspaceTest::new().await;
    tracing::info!("{:?}", test.workspace);
}

#[actix_rt::test]
async fn workspace_read() {
    let test = BackendWorkspaceTest::new().await;
    let read_params = WorkspaceId::new(Some(test.workspace.id.clone()));
    let repeated_workspace = test.server.read_workspaces(read_params).await;
    tracing::info!("{:?}", repeated_workspace);
}

#[actix_rt::test]
async fn workspace_read_with_belongs() {
    let test = BackendWorkspaceTest::new().await;

    let _ = test.create_app().await;
    let _ = test.create_app().await;
    let _ = test.create_app().await;

    let read_params = WorkspaceId::new(Some(test.workspace.id.clone()));
    let workspaces = test.server.read_workspaces(read_params).await;
    let workspace = workspaces.items.first().unwrap();
    assert_eq!(workspace.apps.len(), 3);
}

#[actix_rt::test]
async fn workspace_update() {
    let test = BackendWorkspaceTest::new().await;
    let new_name = "rename workspace name";
    let new_desc = "rename workspace description";

    let update_params = UpdateWorkspaceParams {
        id: test.workspace.id.clone(),
        name: Some(new_name.to_string()),
        desc: Some(new_desc.to_string()),
    };
    test.server.update_workspace(update_params).await;
    let read_params = WorkspaceId::new(Some(test.workspace.id.clone()));
    let repeated_workspace = test.server.read_workspaces(read_params).await;

    let workspace = repeated_workspace.first().unwrap();
    assert_eq!(workspace.name, new_name);
    assert_eq!(workspace.desc, new_desc);
}

#[actix_rt::test]
async fn workspace_delete() {
    let test = BackendWorkspaceTest::new().await;
    let delete_params = WorkspaceId {
        workspace_id: Some(test.workspace.id.clone()),
    };

    let _ = test.server.delete_workspace(delete_params).await;
    let read_params = WorkspaceId::new(Some(test.workspace.id.clone()));
    let repeated_workspace = test.server.read_workspaces(read_params).await;
    assert_eq!(repeated_workspace.len(), 0);
}

#[actix_rt::test]
async fn app_create() {
    let test = BackendAppTest::new().await;
    tracing::info!("{:?}", test.app);
}

#[actix_rt::test]
async fn app_read() {
    let test = BackendAppTest::new().await;
    let read_params = AppId::new(&test.app.id);
    assert_eq!(test.server.read_app(read_params).await.is_some(), true);
}

#[actix_rt::test]
async fn app_read_with_belongs() {
    let test = BackendAppTest::new().await;

    let _ = create_test_view(&test.server, &test.app.id).await;
    let _ = create_test_view(&test.server, &test.app.id).await;

    let read_params = AppId::new(&test.app.id);
    let app = test.server.read_app(read_params).await.unwrap();
    assert_eq!(app.belongings.len(), 2);
}

#[actix_rt::test]
async fn app_read_with_belongs_in_trash() {
    let test = BackendAppTest::new().await;

    let _ = create_test_view(&test.server, &test.app.id).await;
    let view = create_test_view(&test.server, &test.app.id).await;

    test.server.create_view_trash(&view.id).await;

    let read_params = AppId::new(&test.app.id);
    let app = test.server.read_app(read_params).await.unwrap();
    assert_eq!(app.belongings.len(), 1);
}

#[actix_rt::test]
async fn app_update() {
    let test = BackendAppTest::new().await;

    let new_name = "flowy";

    let update_params = UpdateAppParams::new(&test.app.id).name(new_name);
    test.server.update_app(update_params).await;

    let read_params = AppId::new(&test.app.id);
    let app = test.server.read_app(read_params).await.unwrap();
    assert_eq!(&app.name, new_name);
}

#[actix_rt::test]
async fn app_delete() {
    let test = BackendAppTest::new().await;

    let delete_params = AppId {
        app_id: test.app.id.clone(),
    };
    test.server.delete_app(delete_params).await;
    let read_params = AppId::new(&test.app.id);
    assert_eq!(test.server.read_app(read_params).await.is_none(), true);
}

#[actix_rt::test]
async fn view_create() {
    let test = BackendViewTest::new().await;
    tracing::info!("{:?}", test.view);
}

#[actix_rt::test]
async fn view_update() {
    let test = BackendViewTest::new().await;
    let new_name = "name view name";

    // update
    let update_params = UpdateViewParams::new(&test.view.id).name(new_name);
    test.server.update_view(update_params).await;

    // read
    let read_params: ViewId = test.view.id.clone().into();
    let view = test.server.read_view(read_params).await.unwrap();
    assert_eq!(&view.name, new_name);
}

#[actix_rt::test]
async fn view_delete() {
    let test = BackendViewTest::new().await;
    test.server.create_view_trash(&test.view.id).await;

    let trash_ids = test
        .server
        .read_trash()
        .await
        .items
        .into_iter()
        .map(|item| item.id)
        .collect::<Vec<String>>();
    // read
    let read_params: ViewId = test.view.id.clone().into();

    // The view can no longer be read from the server; it should be in the trash.
    assert_eq!(test.server.read_view(read_params).await.is_none(), true);
    assert_eq!(trash_ids.contains(&test.view.id), true);
}

#[actix_rt::test]
async fn trash_delete() {
    let test = BackendViewTest::new().await;
    test.server.create_view_trash(&test.view.id).await;

    let identifier = TrashId {
        id: test.view.id.clone(),
        ty: TrashType::View,
    };
    test.server.delete_view_trash(vec![identifier].into()).await;

    assert_eq!(test.server.read_trash().await.is_empty(), true);
}

#[actix_rt::test]
async fn trash_delete_all() {
    let test = BackendViewTest::new().await;
    test.server.create_view_trash(&test.view.id).await;

    test.server.delete_view_trash(RepeatedTrashId::all()).await;
    assert_eq!(test.server.read_trash().await.is_empty(), true);
}

#[actix_rt::test]
async fn workspace_list_read() {
    let mut server = spawn_user_server().await;
    let token = server.register_user().await.token;
    server.user_token = Some(token);
    for i in 0..3 {
        let params = CreateWorkspaceParams {
            name: format!("{} workspace", i),
            desc: format!("This is my {} workspace", i),
        };
        let _ = server.create_workspace(params).await;
    }

    let read_params = WorkspaceId::new(None);
    let workspaces = server.read_workspaces(read_params).await;
    assert_eq!(workspaces.len(), 3);
}

#[actix_rt::test]
async fn doc_read() {
    let test = BackendViewTest::new().await;
    let params = DocumentId {
        doc_id: test.view.id.clone(),
    };
    let doc = test.server.read_doc(params).await;
    assert_eq!(doc.is_some(), true);
}

#[actix_rt::test]
async fn doc_create() {
    let mut revisions: Vec<Revision> = vec![];
    let server = TestUserServer::new().await;
    let doc_id = uuid::Uuid::new_v4().to_string();
    let user_id = "a".to_owned();
    let mut document = ClientDocument::new::<PlainDoc>();
    let mut offset = 0;
    for i in 0..5 {
        let content = i.to_string();
        let delta = document.insert(offset, content.clone()).unwrap();
        offset += content.len();
        let bytes = delta.to_bytes();
        let md5 = md5(&bytes);
        let revision = if i == 0 {
            Revision::new(&doc_id, i, i, bytes, &user_id, md5)
        } else {
            Revision::new(&doc_id, i - 1, i, bytes, &user_id, md5)
        };
        revisions.push(revision);
    }

    let params = CreateDocParams {
        id: doc_id.clone(),
        revisions: RepeatedRevision::new(revisions),
    };
    server.create_doc(params).await;

    let doc = server.read_doc(DocumentId { doc_id }).await;
    assert_eq!(doc.unwrap().text, document.to_json());
}

#[actix_rt::test]
async fn doc_delete() {
    let test = BackendViewTest::new().await;
    let delete_params = RepeatedViewId {
        items: vec![test.view.id.clone()],
    };
    test.server.delete_view(delete_params).await;

    let params = DocumentId {
        doc_id: test.view.id.clone(),
    };
    let doc = test.server.read_doc(params).await;
    assert_eq!(doc.is_none(), true);
}
@ -1,179 +0,0 @@
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt::skip)]
use std::convert::TryInto;
use actix_web::web::Data;
use flowy_document::core::ClientDocumentEditor;
use flowy_test::{helper::ViewTest, FlowySDKTest};
use flowy_user::services::UserSession;
use futures_util::{stream, stream::StreamExt};
use std::sync::Arc;
use bytes::Bytes;
use tokio::time::{sleep, Duration};
use crate::util::helper::{spawn_server, TestServer};
use flowy_collaboration::{entities::document_info::DocumentId, protobuf::ResetDocumentParams as ResetDocumentParamsPB};
use lib_ot::rich_text::{RichTextAttribute, RichTextDelta};
use parking_lot::RwLock;
use backend::services::document::persistence::{read_document, reset_document};
use flowy_collaboration::entities::revision::{RepeatedRevision, Revision};
use flowy_collaboration::protobuf::{RepeatedRevision as RepeatedRevisionPB, DocumentId as DocumentIdPB};
use flowy_collaboration::server_document::ServerDocumentManager;
use flowy_net::ws::connection::FlowyWebSocketConnect;
use lib_ot::core::Interval;

pub struct DocumentTest {
    server: TestServer,
    flowy_test: FlowySDKTest,
}
#[derive(Clone)]
pub enum DocScript {
    ClientInsertText(usize, &'static str),
    ClientFormatText(Interval, RichTextAttribute),
    ClientOpenDoc,
    AssertClient(&'static str),
    AssertServer(&'static str, i64),
    ServerResetDocument(String, i64), // delta_json, rev_id
}

impl DocumentTest {
    pub async fn new() -> Self {
        let server = spawn_server().await;
        let flowy_test = FlowySDKTest::new(server.client_server_config.clone());
        Self { server, flowy_test }
    }

    pub async fn run_scripts(self, scripts: Vec<DocScript>) {
        let _ = self.flowy_test.sign_up().await;
        let DocumentTest { server, flowy_test } = self;
        let script_context = Arc::new(RwLock::new(ScriptContext::new(flowy_test, server).await));
        run_scripts(script_context, scripts).await;
        sleep(Duration::from_secs(5)).await;
    }
}

#[derive(Clone)]
struct ScriptContext {
    client_editor: Option<Arc<ClientDocumentEditor>>,
    client_sdk: FlowySDKTest,
    client_user_session: Arc<UserSession>,
    #[allow(dead_code)]
    ws_conn: Arc<FlowyWebSocketConnect>,
    server: TestServer,
    doc_id: String,
}

impl ScriptContext {
    async fn new(client_sdk: FlowySDKTest, server: TestServer) -> Self {
        let user_session = client_sdk.user_session.clone();
        let ws_conn = client_sdk.ws_conn.clone();
        let doc_id = create_doc(&client_sdk).await;

        Self {
            client_editor: None,
            client_sdk,
            client_user_session: user_session,
            ws_conn,
            server,
            doc_id,
        }
    }

    async fn open_doc(&mut self) {
        let doc_id = self.doc_id.clone();
        let edit_context = self.client_sdk.document_manager.open_document(doc_id).await.unwrap();
        self.client_editor = Some(edit_context);
    }

    fn client_editor(&self) -> Arc<ClientDocumentEditor> { self.client_editor.as_ref().unwrap().clone() }
}

async fn run_scripts(context: Arc<RwLock<ScriptContext>>, scripts: Vec<DocScript>) {
    let mut fut_scripts = vec![];
    for script in scripts {
        let context = context.clone();
        let fut = async move {
            let doc_id = context.read().doc_id.clone();
            match script {
                DocScript::ClientOpenDoc => {
                    context.write().open_doc().await;
                },
                DocScript::ClientInsertText(index, s) => {
                    context.read().client_editor().insert(index, s).await.unwrap();
                },
                DocScript::ClientFormatText(interval, attribute) => {
                    context
                        .read()
                        .client_editor()
                        .format(interval, attribute)
                        .await
                        .unwrap();
                },
                DocScript::AssertClient(s) => {
                    sleep(Duration::from_millis(2000)).await;
                    let json = context.read().client_editor().doc_json().await.unwrap();
                    assert_eq(s, &json);
                },
                DocScript::AssertServer(s, rev_id) => {
                    sleep(Duration::from_millis(2000)).await;
                    let persistence = Data::new(context.read().server.app_ctx.persistence.document_kv_store());
                    let doc_identifier: DocumentIdPB = DocumentId {
                        doc_id
                    }.try_into().unwrap();

                    let document_info = read_document(persistence.get_ref(), doc_identifier).await.unwrap();
                    assert_eq(s, &document_info.text);
                    assert_eq!(document_info.rev_id, rev_id);
                },
                DocScript::ServerResetDocument(document_json, rev_id) => {
                    let delta_data = Bytes::from(document_json);
                    let user_id = context.read().client_user_session.user_id().unwrap();
                    let md5 = format!("{:x}", md5::compute(&delta_data));
                    let base_rev_id = if rev_id == 0 { rev_id } else { rev_id - 1 };
                    let revision = Revision::new(
                        &doc_id,
                        base_rev_id,
                        rev_id,
                        delta_data,
                        &user_id,
                        md5,
                    );

                    let document_manager = context.read().server.app_ctx.document_manager.clone();
                    reset_doc(&doc_id, RepeatedRevision::new(vec![revision]), document_manager.get_ref()).await;
                    sleep(Duration::from_millis(2000)).await;
                },
            }
        };
        fut_scripts.push(fut);
    }

    let mut stream = stream::iter(fut_scripts);
    while let Some(script) = stream.next().await {
        let _ = script.await;
    }

    std::mem::forget(context);
}

fn assert_eq(expect: &str, receive: &str) {
    let expected_delta: RichTextDelta = serde_json::from_str(expect).unwrap();
    let target_delta: RichTextDelta = serde_json::from_str(receive).unwrap();

    if expected_delta != target_delta {
        log::error!("✅ expect: {}", expect,);
        log::error!("❌ receive: {}", receive);
    }
    assert_eq!(target_delta, expected_delta);
}

async fn create_doc(flowy_test: &FlowySDKTest) -> String {
    let view_test = ViewTest::new(flowy_test).await;
    view_test.view.id
}

async fn reset_doc(doc_id: &str, repeated_revision: RepeatedRevision, document_manager: &Arc<ServerDocumentManager>) {
    let pb: RepeatedRevisionPB = repeated_revision.try_into().unwrap();
    let mut params = ResetDocumentParamsPB::new();
    params.set_doc_id(doc_id.to_owned());
    params.set_revisions(pb);
    let _ = reset_document(document_manager, params).await.unwrap();
}
@ -1,207 +0,0 @@
use crate::document_test::edit_script::{DocScript, DocumentTest};
use flowy_collaboration::client_document::{ClientDocument, NewlineDoc};
use lib_ot::{core::Interval, rich_text::RichTextAttribute};

#[rustfmt::skip]
//                              ┌─────────┐                 ┌─────────┐
//                              │ Server  │                 │ Client  │
//                              └─────────┘                 └─────────┘
// ┌────────────────┐                │                           │ ┌────────────────┐
// │ops: []  rev: 0 │◀───────────────┼────────── Ping ───────────┼─│ops: []  rev: 0 │
// └────────────────┘                │                           │ └────────────────┘
// ┌────────────────────┐            │                           │ ┌────────────────────┐
// │ops: ["abc"] rev: 1 │◀───────────┼─────── ClientPush ────────┼─│ops: ["abc"] rev: 1 │
// └────────────────────┘            │                           │ └────────────────────┘
// ┌──────────────────────────┐      │                           │ ┌────────────────────┐
// │ops: ["abc", "123"] rev: 2│◀─────┼─────── ClientPush ────────┼─│ops: ["123"] rev: 2 │
// └──────────────────────────┘      │                           │ └────────────────────┘
//                                   │                           │
#[actix_rt::test]
async fn delta_sync_while_editing() {
    let test = DocumentTest::new().await;
    test.run_scripts(vec![
        DocScript::ClientOpenDoc,
        DocScript::ClientInsertText(0, "abc"),
        DocScript::ClientInsertText(3, "123"),
        DocScript::AssertClient(r#"[{"insert":"abc123\n"}]"#),
        DocScript::AssertServer(r#"[{"insert":"abc123\n"}]"#, 1),
    ])
    .await;
}

#[actix_rt::test]
async fn delta_sync_multi_revs() {
    let test = DocumentTest::new().await;
    test.run_scripts(vec![
        DocScript::ClientOpenDoc,
        DocScript::ClientInsertText(0, "abc"),
        DocScript::ClientInsertText(3, "123"),
        DocScript::ClientInsertText(6, "efg"),
        DocScript::ClientInsertText(9, "456"),
    ])
    .await;
}

#[actix_rt::test]
async fn delta_sync_while_editing_with_attribute() {
    let test = DocumentTest::new().await;
    test.run_scripts(vec![
        DocScript::ClientOpenDoc,
        DocScript::ClientInsertText(0, "abc"),
        DocScript::ClientFormatText(Interval::new(0, 3), RichTextAttribute::Bold(true)),
        DocScript::AssertClient(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"\n"}]"#),
        DocScript::AssertServer(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"\n"}]"#, 1),
        DocScript::ClientInsertText(3, "efg"),
        DocScript::ClientFormatText(Interval::new(3, 5), RichTextAttribute::Italic(true)),
        DocScript::AssertClient(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"ef","attributes":{"bold":true,"italic":true}},{"insert":"g","attributes":{"bold":true}},{"insert":"\n"}]"#),
        DocScript::AssertServer(r#"[{"insert":"abc","attributes":{"bold":true}},{"insert":"ef","attributes":{"bold":true,"italic":true}},{"insert":"g","attributes":{"bold":true}},{"insert":"\n"}]"#, 3),
    ])
    .await;
}

#[rustfmt::skip]
//                                   ┌─────────┐                 ┌─────────┐
//                                   │ Server  │                 │ Client  │
//                                   └─────────┘                 └─────────┘
// ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─           │                           │
//  ops: ["123", "456"]  rev: 3│          │                           │
// └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─           │                           │
//                                       │                           │
//                                       ◀─────────── Ping ──────────┤ Open doc
//                                       │                           │
//                                       │                           │  ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
//                                       ├──────── ServerPush ───────┼─▶ ops: ["123", "456"]  rev: 3│
//                                       │                           │  └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
#[actix_rt::test]
async fn delta_sync_with_server_push() {
    let test = DocumentTest::new().await;
    let mut document = ClientDocument::new::<NewlineDoc>();
    document.insert(0, "123").unwrap();
    document.insert(3, "456").unwrap();
    let json = document.to_json();

    test.run_scripts(vec![
        DocScript::ServerResetDocument(json, 3),
        DocScript::ClientOpenDoc,
        DocScript::AssertClient(r#"[{"insert":"123456\n"}]"#),
        DocScript::AssertServer(r#"[{"insert":"123456\n"}]"#, 3),
    ])
    .await;
}

#[rustfmt::skip]
//                          ┌─────────┐                 ┌─────────┐
//                          │ Server  │                 │ Client  │
//                          └─────────┘                 └─────────┘
// ┌ ─ ─ ─ ─ ┐                  │                           │
//  ops: []                     │                           │
// └ ─ ─ ─ ─ ┘                  │                           │
//                              │                           │
//                              ◀─────────── Ping ──────────┤ Open doc
//                              ◀─────────── Ping ──────────┤
//                              ◀─────────── Ping ──────────┤
// ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐     │                           │
//  ops: ["123"], rev: 3 │     │                           │
// └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘     │                           │  ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐
//                              ├──────── ServerPush ───────▶  ops: ["123"] rev: 3
//                              │                           │  └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘
//                              │                           │
#[actix_rt::test]
async fn delta_sync_with_server_push_after_reset_document() {
    let test = DocumentTest::new().await;
    let mut document = ClientDocument::new::<NewlineDoc>();
    document.insert(0, "123").unwrap();
    let json = document.to_json();

    test.run_scripts(vec![
        DocScript::ClientOpenDoc,
        DocScript::ServerResetDocument(json, 3),
        DocScript::AssertClient(r#"[{"insert":"123\n"}]"#),
        DocScript::AssertServer(r#"[{"insert":"123\n"}]"#, 3),
    ])
    .await;
}

#[rustfmt::skip]
//                                ┌─────────┐                 ┌─────────┐
//                                │ Server  │                 │ Client  │
//                                └─────────┘                 └─────────┘
//                                     │                           │
//                                     │                           │
//                                     ◀─────────── Ping ──────────┤ Open doc
// ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐              │                           │
//  ops: ["123"] rev: 3                │                           │  ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
// └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘              │                           │   ops: ["abc"] rev: 1 │
//                                     │                           │  └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
//                                     │                           │  ┌────────────────────┐
//                                     ◀─────── ClientPush ────────┤  │ops: ["abc"] rev: 1 │
// ┌───────────────────┐               │                           │  └────────────────────┘
// │ops: ["123"] rev: 3│               ├──────── ServerPush ───────▶  transform
// └───────────────────┘               │                           │  ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
//                                     │                           │   ops: ["abc", "123"] rev: 4│
//                                     │                           │  └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
//                                     │                           │  ┌────────────────────────────────┐
//                                     ◀─────── ClientPush ────────┤  │ops: ["retain 3","abc"] rev: 4  │
// ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─        │                           │  └────────────────────────────────┘
//  ops: ["abc", "123"] rev: 4│        ├──────── ServerAck ────────▶
// └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─        │                           │
#[actix_rt::test]
async fn delta_sync_while_local_rev_less_than_server_rev() {
    let test = DocumentTest::new().await;
    let mut document = ClientDocument::new::<NewlineDoc>();
    document.insert(0, "123").unwrap();
    let json = document.to_json();

    test.run_scripts(vec![
        DocScript::ClientOpenDoc,
        DocScript::ServerResetDocument(json, 3),
        DocScript::ClientInsertText(0, "abc"),
        DocScript::AssertClient(r#"[{"insert":"abc123\n"}]"#),
        DocScript::AssertServer(r#"[{"insert":"abc123\n"}]"#, 4),
    ])
    .await;
}

#[rustfmt::skip]
//                                        ┌─────────┐                 ┌─────────┐
//                                        │ Server  │                 │ Client  │
//                                        └─────────┘                 └─────────┘
// ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐                      │                           │
//  ops: ["123"] rev: 1                        │                           │
// └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘                      │                           │
//                                             ◀─────────── Ping ──────────┤ Open doc
//                                             │                           │
//                                             │                           │  ┌──────────────────┐
//                                             ├──────── ServerPush ───────▶  │ops: [123] rev: 1 │
//                                             │                           │  └──────────────────┘
//                                             │                           │  ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
//                                             │                           │   ops: ["123","abc", "efg"] rev: 3 │
//                                             │                           │  └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
//                                             │                           │  ┌──────────────────────────────┐
//                                             ◀─────── ClientPush ────────┤  │ops: [retain 3, "abc"] rev: 2 │
// ┌──────────────────────────┐                │                           │  └──────────────────────────────┘
// │ops: ["123","abc"] rev: 2 │                ├──────── ServerAck ────────▶
// └──────────────────────────┘                │                           │
//                                             │                           │  ┌──────────────────────────────┐
//                                             ◀─────── ClientPush ────────┤  │ops: [retain 6, "efg"] rev: 3 │
// ┌──────────────────────────────────┐        │                           │  └──────────────────────────────┘
// │ops: ["123","abc", "efg"] rev: 3  │        ├──────── ServerAck ────────▶
// └──────────────────────────────────┘        │                           │
#[actix_rt::test]
async fn delta_sync_while_local_rev_greater_than_server_rev() {
    let test = DocumentTest::new().await;
    let mut document = ClientDocument::new::<NewlineDoc>();
    document.insert(0, "123").unwrap();
    let json = document.to_json();

    test.run_scripts(vec![
        DocScript::ServerResetDocument(json, 1),
        DocScript::ClientOpenDoc,
        DocScript::AssertClient(r#"[{"insert":"123\n"}]"#),
        DocScript::ClientInsertText(3, "abc"),
        DocScript::ClientInsertText(6, "efg"),
        DocScript::AssertClient(r#"[{"insert":"123abcefg\n"}]"#),
        DocScript::AssertServer(r#"[{"insert":"123abcefg\n"}]"#, 3),
    ])
    .await;
}
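
For orientation, the delta JSON asserted throughout these tests is the Quill-style format produced by lib_ot; a hedged sketch reconstructing the first assertion's document directly:

// Hypothetical: build the document asserted in delta_sync_while_editing
// without going through the sync machinery.
let mut document = ClientDocument::new::<NewlineDoc>();
document.insert(0, "abc").unwrap();
document.insert(3, "123").unwrap();
assert_eq!(document.to_json(), r#"[{"insert":"abc123\n"}]"#);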
@ -1,2 +0,0 @@
mod edit_script;
mod edit_test;
@ -1,3 +0,0 @@
mod api_test;
mod document_test;
pub mod util;
@ -1,388 +0,0 @@
use backend::{
    application::{init_app_context, Application},
    config::{get_configuration, DatabaseSettings},
    context::AppContext,
};
use backend_service::{
    configuration::{get_client_server_configuration, ClientServerConfiguration},
    errors::ServerError,
};
use flowy_collaboration::{
    client_document::default::initial_delta_string,
    entities::document_info::{CreateDocParams, DocumentId, DocumentInfo},
};
use flowy_folder_data_model::entities::{app::*, trash::*, view::*, workspace::*};
use flowy_net::http_server::{
    core::*,
    document::{create_document_request, read_document_request},
    user::*,
};
use flowy_user_data_model::entities::*;
use lib_infra::uuid_string;
use sqlx::{Connection, Executor, PgConnection, PgPool};
use uuid::Uuid;

pub struct TestUserServer {
    pub inner: TestServer,
    pub user_token: Option<String>,
    pub user_id: Option<String>,
}

impl TestUserServer {
    pub async fn new() -> Self {
        let mut server: TestUserServer = spawn_server().await.into();
        let response = server.register_user().await;
        server.user_token = Some(response.token);
        server.user_id = Some(response.user_id);
        server
    }

    pub async fn sign_in(&self, params: SignInParams) -> Result<SignInResponse, ServerError> {
        let url = format!("{}/api/auth", self.http_addr());
        let resp = user_sign_in_request(params, &url).await?;
        Ok(resp)
    }

    pub async fn sign_out(&self) {
        let url = format!("{}/api/auth", self.http_addr());
        let _ = user_sign_out_request(self.user_token(), &url).await.unwrap();
    }

    pub fn user_token(&self) -> &str {
        self.user_token.as_ref().expect("must call register_user first")
    }

    pub fn user_id(&self) -> &str {
        self.user_id.as_ref().expect("must call register_user first")
    }

    pub async fn get_user_profile(&self) -> UserProfile {
        let url = format!("{}/api/user", self.http_addr());
        let user_profile = get_user_profile_request(self.user_token(), &url).await.unwrap();
        user_profile
    }

    pub async fn update_user_profile(&self, params: UpdateUserParams) -> Result<(), ServerError> {
        let url = format!("{}/api/user", self.http_addr());
        let _ = update_user_profile_request(self.user_token(), params, &url).await?;
        Ok(())
    }

    pub async fn create_workspace(&self, params: CreateWorkspaceParams) -> Workspace {
        let url = format!("{}/api/workspace", self.http_addr());
        let workspace = create_workspace_request(self.user_token(), params, &url).await.unwrap();
        workspace
    }

    pub async fn read_workspaces(&self, params: WorkspaceId) -> RepeatedWorkspace {
        let url = format!("{}/api/workspace", self.http_addr());
        let workspaces = read_workspaces_request(self.user_token(), params, &url).await.unwrap();
        workspaces
    }

    pub async fn update_workspace(&self, params: UpdateWorkspaceParams) {
        let url = format!("{}/api/workspace", self.http_addr());
        update_workspace_request(self.user_token(), params, &url).await.unwrap();
    }

    pub async fn delete_workspace(&self, params: WorkspaceId) {
        let url = format!("{}/api/workspace", self.http_addr());
        delete_workspace_request(self.user_token(), params, &url).await.unwrap();
    }

    pub async fn create_app(&self, params: CreateAppParams) -> App {
        let url = format!("{}/api/app", self.http_addr());
        let app = create_app_request(self.user_token(), params, &url).await.unwrap();
        app
    }

    pub async fn read_app(&self, params: AppId) -> Option<App> {
        let url = format!("{}/api/app", self.http_addr());
        let app = read_app_request(self.user_token(), params, &url).await.unwrap();
        app
    }

    pub async fn update_app(&self, params: UpdateAppParams) {
        let url = format!("{}/api/app", self.http_addr());
        update_app_request(self.user_token(), params, &url).await.unwrap();
    }

    pub async fn delete_app(&self, params: AppId) {
        let url = format!("{}/api/app", self.http_addr());
        delete_app_request(self.user_token(), params, &url).await.unwrap();
    }

    pub async fn create_view(&self, params: CreateViewParams) -> View {
        let url = format!("{}/api/view", self.http_addr());
        let view = create_view_request(self.user_token(), params, &url).await.unwrap();
        view
    }

    pub async fn read_view(&self, params: ViewId) -> Option<View> {
        let url = format!("{}/api/view", self.http_addr());
        let view = read_view_request(self.user_token(), params, &url).await.unwrap();
        view
    }

    pub async fn update_view(&self, params: UpdateViewParams) {
        let url = format!("{}/api/view", self.http_addr());
        update_view_request(self.user_token(), params, &url).await.unwrap();
    }

    pub async fn delete_view(&self, params: RepeatedViewId) {
        let url = format!("{}/api/view", self.http_addr());
        delete_view_request(self.user_token(), params, &url).await.unwrap();
    }

    pub async fn create_view_trash(&self, view_id: &str) {
        let identifier = TrashId {
            id: view_id.to_string(),
            ty: TrashType::View,
        };
        let url = format!("{}/api/trash", self.http_addr());
        create_trash_request(self.user_token(), vec![identifier].into(), &url)
            .await
            .unwrap();
    }

    pub async fn delete_view_trash(&self, trash_identifiers: RepeatedTrashId) {
        let url = format!("{}/api/trash", self.http_addr());

        delete_trash_request(self.user_token(), trash_identifiers, &url)
            .await
            .unwrap();
    }

    pub async fn read_trash(&self) -> RepeatedTrash {
        let url = format!("{}/api/trash", self.http_addr());
        read_trash_request(self.user_token(), &url).await.unwrap()
    }

    pub async fn read_doc(&self, params: DocumentId) -> Option<DocumentInfo> {
        let url = format!("{}/api/doc", self.http_addr());
        let doc = read_document_request(self.user_token(), params, &url).await.unwrap();
        doc
    }

    pub async fn create_doc(&self, params: CreateDocParams) {
        let url = format!("{}/api/doc", self.http_addr());
        let _ = create_document_request(self.user_token(), params, &url).await.unwrap();
    }

    pub async fn register_user(&self) -> SignUpResponse {
        let params = SignUpParams {
            email: "annie@appflowy.io".to_string(),
            name: "annie".to_string(),
            password: "HelloAppFlowy123!".to_string(),
        };

        self.register(params).await
    }

    pub async fn register(&self, params: SignUpParams) -> SignUpResponse {
        let url = format!("{}/api/register", self.http_addr());
        let response = user_sign_up_request(params, &url).await.unwrap();
        response
    }

    pub fn http_addr(&self) -> String {
        self.inner.client_server_config.base_url()
    }

    pub fn ws_addr(&self) -> String {
        format!(
            "{}/{}",
            self.inner.client_server_config.ws_addr(),
            self.user_token.as_ref().unwrap()
        )
    }
}

impl std::convert::From<TestServer> for TestUserServer {
    fn from(server: TestServer) -> Self {
        TestUserServer {
            inner: server,
            user_token: None,
            user_id: None,
        }
    }
}

pub async fn spawn_user_server() -> TestUserServer {
    let server: TestUserServer = spawn_server().await.into();
    server
}

#[derive(Clone)]
pub struct TestServer {
    pub app_ctx: AppContext,
    pub client_server_config: ClientServerConfiguration,
}

pub async fn spawn_server() -> TestServer {
    let database_name = Uuid::new_v4().to_string();
    let configuration = {
        let mut c = get_configuration().expect("Failed to read configuration.");
        c.database.database_name = database_name.clone();
        // Use a random OS port
        c.application.port = 0;
        c
    };

    let _ = configure_database(&configuration.database).await;
    let app_ctx = init_app_context(&configuration).await;
    let application = Application::build(configuration.clone(), app_ctx.clone())
        .await
        .expect("Failed to build application.");
    let application_port = application.port();

    let _ = tokio::spawn(async {
        let _ = application.run_until_stopped();
        // drop_test_database(database_name).await;
    });

    let mut client_server_config = get_client_server_configuration().expect("Failed to read configuration.");
    client_server_config.reset_host_with_port("localhost", application_port);

    TestServer {
        app_ctx,
        client_server_config,
    }
}

async fn configure_database(config: &DatabaseSettings) -> PgPool {
    // Create database
    let mut connection = PgConnection::connect_with(&config.without_db())
        .await
        .expect("Failed to connect to Postgres");
    connection
        .execute(&*format!(r#"CREATE DATABASE "{}";"#, config.database_name))
        .await
        .expect("Failed to create database.");

    // Migrate database
    let connection_pool = PgPool::connect_with(config.with_db())
        .await
        .expect("Failed to connect to Postgres.");

    sqlx::migrate!("./migrations")
        .run(&connection_pool)
        .await
        .expect("Failed to migrate the database");

    connection_pool
}

#[allow(dead_code)]
async fn drop_test_database(database_name: String) {
    // https://stackoverflow.com/questions/36502401/postgres-drop-database-error-pq-cannot-drop-the-currently-open-database?rq=1
    let configuration = {
        let mut c = get_configuration().expect("Failed to read configuration.");
        c.database.database_name = "flowy".to_owned();
        c.application.port = 0;
        c
    };

    let mut connection = PgConnection::connect_with(&configuration.database.without_db())
        .await
        .expect("Failed to connect to Postgres");

    connection
        .execute(&*format!(r#"DROP DATABASE "{}";"#, database_name))
        .await
        .expect("Failed to drop database.");
}

pub async fn create_test_workspace(server: &TestUserServer) -> Workspace {
    let params = CreateWorkspaceParams {
        name: "My first workspace".to_string(),
        desc: "This is my first workspace".to_string(),
    };

    let workspace = server.create_workspace(params).await;
    workspace
}

pub async fn create_test_app(server: &TestUserServer, workspace_id: &str) -> App {
    let params = CreateAppParams {
        workspace_id: workspace_id.to_owned(),
        name: "My first app".to_string(),
        desc: "This is my first app".to_string(),
        color_style: ColorStyle::default(),
    };

    let app = server.create_app(params).await;
    app
}

pub async fn create_test_view(application: &TestUserServer, app_id: &str) -> View {
    let name = "My first view".to_string();
    let desc = "This is my first view".to_string();
    let thumbnail = "http://1.png".to_string();

    let params = CreateViewParams::new(
        app_id.to_owned(),
        name,
        desc,
        ViewType::Doc,
        thumbnail,
        initial_delta_string(),
        uuid_string(),
    );
    let app = application.create_view(params).await;
    app
}

pub struct BackendWorkspaceTest {
    pub server: TestUserServer,
    pub workspace: Workspace,
}

impl BackendWorkspaceTest {
    pub async fn new() -> Self {
        let server = TestUserServer::new().await;
        let workspace = create_test_workspace(&server).await;
        Self { server, workspace }
    }

    pub async fn create_app(&self) -> App {
        create_test_app(&self.server, &self.workspace.id).await
    }
}

pub struct BackendAppTest {
    pub server: TestUserServer,
    pub workspace: Workspace,
    pub app: App,
}

impl BackendAppTest {
    pub async fn new() -> Self {
        let server = TestUserServer::new().await;
        let workspace = create_test_workspace(&server).await;
        let app = create_test_app(&server, &workspace.id).await;
        Self { server, workspace, app }
    }
}

pub struct BackendViewTest {
    pub server: TestUserServer,
    pub workspace: Workspace,
    pub app: App,
    pub view: View,
}

impl BackendViewTest {
    pub async fn new() -> Self {
        let server = TestUserServer::new().await;
        let workspace = create_test_workspace(&server).await;
        let app = create_test_app(&server, &workspace.id).await;
        let view = create_test_view(&server, &app.id).await;
        Self {
            server,
            workspace,
            app,
            view,
        }
    }
}
@ -1 +0,0 @@
pub mod helper;