Feat/database view (#1765)

* chore: rename flowy-database to flowy-sqlite

* refactor: rename flowy-grid to flowy-database

* refactor: rename grid to database

* refactor: rename GridEvent to DatabaseEvent

* refactor: rename grid_id to database_id

* refactor: rename dart code
Authored by Nathan.fooo on 2023-01-31 08:28:31 +08:00; committed by GitHub
parent 5de3912fe3
commit 5b07656295
400 changed files with 2447 additions and 2435 deletions

View File

@ -0,0 +1 @@
DATABASE_URL=/tmp/database.sql

View File

@ -0,0 +1,23 @@
[package]
name = "flowy-sqlite"
version = "0.1.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
diesel = { version = "1.4.8", features = ["sqlite"] }
diesel_derives = { version = "1.4.1", features = ["sqlite"] }
diesel_migrations = { version = "1.4.0", features = ["sqlite"] }
tracing = { version = "0.1", features = ["log"] }
lazy_static = "1.4.0"
r2d2 = "0.8.9"
libsqlite3-sys = { version = ">=0.8.0, <0.24.0", features = ["bundled"] }
scheduled-thread-pool = "0.2.5"
error-chain = "=0.12.0"
openssl = { version = "0.10.38", optional = true, features = ["vendored"] }
openssl-sys = { version = "0.9.69", optional = true, features = ["vendored"] }
[features]
openssl_vendored = ["openssl", "openssl-sys"]

View File

@ -0,0 +1,5 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/schema.rs"

View File

@ -0,0 +1,5 @@
-- This file should undo anything in `up.sql`
DROP TABLE user_table;
DROP TABLE workspace_table;
DROP TABLE app_table;
DROP TABLE view_table;

View File

@ -0,0 +1,53 @@
-- Your SQL goes here
CREATE TABLE user_table (
id TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL DEFAULT '',
token TEXT NOT NULL DEFAULT '',
email TEXT NOT NULL DEFAULT ''
);
CREATE TABLE workspace_table (
id TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL DEFAULT '',
desc TEXT NOT NULL DEFAULT '',
modified_time BIGINT NOT NULL DEFAULT 0,
create_time BIGINT NOT NULL DEFAULT 0,
user_id TEXT NOT NULL DEFAULT '',
version BIGINT NOT NULL DEFAULT 0
);
CREATE TABLE app_table (
id TEXT NOT NULL PRIMARY KEY,
workspace_id TEXT NOT NULL DEFAULT '',
name TEXT NOT NULL DEFAULT '',
desc TEXT NOT NULL DEFAULT '',
color_style BLOB NOT NULL DEFAULT (x''),
last_view_id TEXT DEFAULT '',
modified_time BIGINT NOT NULL DEFAULT 0,
create_time BIGINT NOT NULL DEFAULT 0,
version BIGINT NOT NULL DEFAULT 0,
is_trash Boolean NOT NULL DEFAULT false
);
CREATE TABLE view_table (
id TEXT NOT NULL PRIMARY KEY,
belong_to_id TEXT NOT NULL DEFAULT '',
name TEXT NOT NULL DEFAULT '',
desc TEXT NOT NULL DEFAULT '',
modified_time BIGINT NOT NULL DEFAULT 0,
create_time BIGINT NOT NULL DEFAULT 0,
thumbnail TEXT NOT NULL DEFAULT '',
view_type INTEGER NOT NULL DEFAULT 0,
version BIGINT NOT NULL DEFAULT 0,
is_trash Boolean NOT NULL DEFAULT false
);
CREATE TABLE trash_table (
id TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL DEFAULT '',
desc TEXT NOT NULL DEFAULT '',
modified_time BIGINT NOT NULL DEFAULT 0,
create_time BIGINT NOT NULL DEFAULT 0,
ty INTEGER NOT NULL DEFAULT 0
);

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
ALTER TABLE user_table DROP COLUMN workspace;

View File

@ -0,0 +1,2 @@
-- Your SQL goes here
ALTER TABLE user_table ADD COLUMN workspace TEXT NOT NULL DEFAULT '';

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE rev_table;

View File

@ -0,0 +1,10 @@
-- Your SQL goes here
CREATE TABLE rev_table (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
doc_id TEXT NOT NULL DEFAULT '',
base_rev_id BIGINT NOT NULL DEFAULT 0,
rev_id BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x''),
state INTEGER NOT NULL DEFAULT 0,
ty INTEGER NOT NULL DEFAULT 0
);

View File

@ -0,0 +1,4 @@
-- This file should undo anything in `up.sql`
DROP TABLE kv_table;
DROP TABLE grid_rev_table;
DROP TABLE grid_meta_rev_table;

View File

@ -0,0 +1,22 @@
-- Your SQL goes here
CREATE TABLE kv_table (
key TEXT NOT NULL PRIMARY KEY,
value BLOB NOT NULL DEFAULT (x'')
);
CREATE TABLE grid_rev_table (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
object_id TEXT NOT NULL DEFAULT '',
base_rev_id BIGINT NOT NULL DEFAULT 0,
rev_id BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x''),
state INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE grid_meta_rev_table (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
object_id TEXT NOT NULL DEFAULT '',
base_rev_id BIGINT NOT NULL DEFAULT 0,
rev_id BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x''),
state INTEGER NOT NULL DEFAULT 0
);

View File

@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP TABLE grid_block_index_table;
-- DROP TABLE grid_block_fts_table;

View File

@ -0,0 +1,7 @@
-- Your SQL goes here
CREATE TABLE grid_block_index_table (
row_id TEXT NOT NULL PRIMARY KEY,
block_id TEXT NOT NULL
);
-- CREATE VIRTUAL TABLE grid_block_fts_table USING FTS5(content, grid_id, block_id, row_id);

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE rev_snapshot;

View File

@ -0,0 +1,7 @@
-- Your SQL goes here
CREATE TABLE rev_snapshot (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
object_id TEXT NOT NULL DEFAULT '',
rev_id BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x'')
);

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
ALTER TABLE view_table DROP COLUMN ext_data;

View File

@ -0,0 +1,2 @@
-- Your SQL goes here
ALTER TABLE view_table ADD COLUMN ext_data TEXT NOT NULL DEFAULT '';

View File

@ -0,0 +1 @@
ALTER TABLE user_table DROP COLUMN icon_url;

View File

@ -0,0 +1 @@
ALTER TABLE user_table ADD COLUMN icon_url TEXT NOT NULL DEFAULT '';

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE grid_view_rev_table;

View File

@ -0,0 +1,11 @@
-- Your SQL goes here
CREATE TABLE grid_view_rev_table (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
object_id TEXT NOT NULL DEFAULT '',
base_rev_id BIGINT NOT NULL DEFAULT 0,
rev_id BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x''),
state INTEGER NOT NULL DEFAULT 0
);

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE grid_view_rev_table;

View File

@ -0,0 +1,9 @@
-- Your SQL goes here
CREATE TABLE document_rev_table (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
document_id TEXT NOT NULL DEFAULT '',
base_rev_id BIGINT NOT NULL DEFAULT 0,
rev_id BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x''),
state INTEGER NOT NULL DEFAULT 0
);

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE grid_rev_snapshot;

View File

@ -0,0 +1,9 @@
-- Your SQL goes here
CREATE TABLE grid_rev_snapshot (
snapshot_id TEXT NOT NULL PRIMARY KEY DEFAULT '',
object_id TEXT NOT NULL DEFAULT '',
rev_id BIGINT NOT NULL DEFAULT 0,
base_rev_id BIGINT NOT NULL DEFAULT 0,
timestamp BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x'')
);

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE folder_rev_snapshot;

View File

@ -0,0 +1,9 @@
-- Your SQL goes here
CREATE TABLE folder_rev_snapshot (
snapshot_id TEXT NOT NULL PRIMARY KEY DEFAULT '',
object_id TEXT NOT NULL DEFAULT '',
rev_id BIGINT NOT NULL DEFAULT 0,
base_rev_id BIGINT NOT NULL DEFAULT 0,
timestamp BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x'')
);

View File

@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE document_rev_snapshot;

View File

@ -0,0 +1,9 @@
-- Your SQL goes here
CREATE TABLE document_rev_snapshot (
snapshot_id TEXT NOT NULL PRIMARY KEY DEFAULT '',
object_id TEXT NOT NULL DEFAULT '',
rev_id BIGINT NOT NULL DEFAULT 0,
base_rev_id BIGINT NOT NULL DEFAULT 0,
timestamp BIGINT NOT NULL DEFAULT 0,
data BLOB NOT NULL DEFAULT (x'')
);

View File

@ -0,0 +1,187 @@
use crate::kv::schema::{kv_table, kv_table::dsl, KV_SQL};
use crate::sqlite::{DBConnection, Database, PoolConfig};
use ::diesel::{query_dsl::*, ExpressionMethods};
use diesel::{Connection, SqliteConnection};
use lazy_static::lazy_static;
use std::{path::Path, sync::RwLock};
macro_rules! impl_get_func {
(
$func_name:ident,
$get_method:ident=>$target:ident
) => {
#[allow(dead_code)]
pub fn $func_name(k: &str) -> Option<$target> {
match KV::get(k) {
Ok(item) => item.$get_method,
Err(_) => None,
}
}
};
}
macro_rules! impl_set_func {
($func_name:ident,$set_method:ident,$key_type:ident) => {
#[allow(dead_code)]
pub fn $func_name(key: &str, value: $key_type) {
let mut item = KeyValue::new(key);
item.$set_method = Some(value);
match KV::set(item) {
Ok(_) => {}
Err(e) => {
tracing::error!("{:?}", e)
}
};
}
};
}
const DB_NAME: &str = "kv.db";
lazy_static! {
static ref KV_HOLDER: RwLock<KV> = RwLock::new(KV::new());
}
pub struct KV {
database: Option<Database>,
}
impl KV {
fn new() -> Self {
KV { database: None }
}
fn set(value: KeyValue) -> Result<(), String> {
// tracing::trace!("[KV]: set value: {:?}", value);
let _ = diesel::replace_into(kv_table::table)
.values(&value)
.execute(&*(get_connection()?))
.map_err(|e| format!("KV set error: {:?}", e))?;
Ok(())
}
fn get(key: &str) -> Result<KeyValue, String> {
let conn = get_connection()?;
let value = dsl::kv_table
.filter(kv_table::key.eq(key))
.first::<KeyValue>(&*conn)
.map_err(|e| format!("KV get error: {:?}", e))?;
Ok(value)
}
#[allow(dead_code)]
pub fn remove(key: &str) -> Result<(), String> {
// tracing::debug!("remove key: {}", key);
let conn = get_connection()?;
let sql = dsl::kv_table.filter(kv_table::key.eq(key));
let _ = diesel::delete(sql)
.execute(&*conn)
.map_err(|e| format!("KV remove error: {:?}", e))?;
Ok(())
}
#[tracing::instrument(level = "trace", err)]
pub fn init(root: &str) -> Result<(), String> {
if !Path::new(root).exists() {
return Err(format!("Failed to init KVStore: {} does not exist", root));
}
let pool_config = PoolConfig::default();
let database = Database::new(root, DB_NAME, pool_config).unwrap();
let conn = database.get_connection().unwrap();
SqliteConnection::execute(&*conn, KV_SQL).unwrap();
let mut store = KV_HOLDER
.write()
.map_err(|e| format!("KVStore write failed: {:?}", e))?;
tracing::trace!("Init kv with path: {}", root);
store.database = Some(database);
Ok(())
}
pub fn get_bool(key: &str) -> bool {
match KV::get(key) {
Ok(item) => item.bool_value.unwrap_or(false),
Err(_) => false,
}
}
impl_set_func!(set_str, str_value, String);
impl_set_func!(set_bool, bool_value, bool);
impl_set_func!(set_int, int_value, i64);
impl_set_func!(set_float, float_value, f64);
impl_get_func!(get_str,str_value=>String);
impl_get_func!(get_int,int_value=>i64);
impl_get_func!(get_float,float_value=>f64);
}
fn get_connection() -> Result<DBConnection, String> {
match KV_HOLDER.read() {
Ok(store) => {
let conn = store
.database
.as_ref()
.expect("KVStore is not init")
.get_connection()
.map_err(|e| format!("KVStore error: {:?}", e))?;
Ok(conn)
}
Err(e) => {
let msg = format!("KVStore get connection failed: {:?}", e);
tracing::error!("{:?}", msg);
Err(msg)
}
}
}
#[derive(Clone, Debug, Default, Queryable, Identifiable, Insertable, AsChangeset)]
#[table_name = "kv_table"]
#[primary_key(key)]
pub struct KeyValue {
pub key: String,
pub str_value: Option<String>,
pub int_value: Option<i64>,
pub float_value: Option<f64>,
pub bool_value: Option<bool>,
}
impl KeyValue {
pub fn new(key: &str) -> Self {
KeyValue {
key: key.to_string(),
..Default::default()
}
}
}
#[cfg(test)]
mod tests {
use crate::kv::KV;
#[test]
fn kv_store_test() {
let dir = "./temp/";
if !std::path::Path::new(dir).exists() {
std::fs::create_dir_all(dir).unwrap();
}
KV::init(dir).unwrap();
KV::set_str("1", "hello".to_string());
assert_eq!(KV::get_str("1").unwrap(), "hello");
assert_eq!(KV::get_str("2"), None);
KV::set_bool("1", true);
assert!(KV::get_bool("1"));
assert!(!KV::get_bool("2"));
}
}

View File

@ -0,0 +1,6 @@
#![allow(clippy::module_inception)]
mod kv;
mod schema;
pub use kv::*;

View File

@ -0,0 +1,20 @@
#[allow(dead_code)]
pub const KV_SQL: &str = r#"
CREATE TABLE IF NOT EXISTS kv_table (
key TEXT NOT NULL PRIMARY KEY,
str_value TEXT,
int_value BIGINT,
float_value DOUBLE,
bool_value BOOLEAN
);
"#;
table! {
kv_table (key) {
key -> Text,
str_value -> Nullable<Text>,
int_value -> Nullable<BigInt>,
float_value -> Nullable<Double>,
bool_value -> Nullable<Bool>,
}
}

View File

@ -0,0 +1,55 @@
pub use diesel::*;
pub use diesel_derives::*;
use diesel_migrations::*;
use std::{fmt::Debug, io, path::Path};
pub mod kv;
mod sqlite;
use crate::sqlite::PoolConfig;
pub use crate::sqlite::{ConnectionPool, DBConnection, Database};
pub mod schema;
#[macro_use]
pub mod macros;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_derives;
#[macro_use]
extern crate diesel_migrations;
pub type Error = diesel::result::Error;
pub mod prelude {
pub use super::UserDatabaseConnection;
pub use crate::*;
pub use diesel::SqliteConnection;
pub use diesel::{query_dsl::*, BelongingToDsl, ExpressionMethods, RunQueryDsl};
}
embed_migrations!("../flowy-sqlite/migrations/");
pub const DB_NAME: &str = "flowy-database.db";
pub fn init(storage_path: &str) -> Result<Database, io::Error> {
if !Path::new(storage_path).exists() {
std::fs::create_dir_all(storage_path)?;
}
let pool_config = PoolConfig::default();
let database = Database::new(storage_path, DB_NAME, pool_config).map_err(as_io_error)?;
let conn = database.get_connection().map_err(as_io_error)?;
embedded_migrations::run(&*conn).map_err(as_io_error)?;
Ok(database)
}
fn as_io_error<E>(e: E) -> io::Error
where
E: Into<crate::sqlite::Error> + Debug,
{
let msg = format!("{:?}", e);
io::Error::new(io::ErrorKind::NotConnected, msg)
}
pub trait UserDatabaseConnection: Send + Sync {
fn get_connection(&self) -> Result<DBConnection, String>;
}
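
A minimal caller sketch (not part of this commit): initializing the store through init() and borrowing a pooled connection, assuming a hypothetical storage path and mapping the pool error back to io::Error.

fn open_local_store() -> std::io::Result<()> {
    // "/tmp/appflowy" is only an illustrative path; any writable directory works.
    let database = flowy_sqlite::init("/tmp/appflowy")?;
    let _conn = database
        .get_connection()
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::NotConnected, format!("{:?}", e)))?;
    Ok(())
}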

View File

@ -0,0 +1,213 @@
#[rustfmt::skip]
/*
diesel master supports on_conflict for SQLite, but the 1.4.7 release does not. As a workaround, the explicit form
match dsl::workspace_table
.filter(workspace_table::id.eq(table.id.clone()))
.count()
.get_result(conn)
.unwrap_or(0)
{
0 => diesel::insert_into(workspace_table::table).values(table)
.on_conflict(workspace_table::id)
.do_update()
.set(WorkspaceTableChangeset::from_table(workspace_table))
.execute(conn)?,
_ => {
let changeset = WorkspaceTableChangeset::from_table(table);
let filter = dsl::workspace_table.filter(workspace_table::id.eq(changeset.id.clone()));
diesel::update(filter).set(changeset).execute(conn)?;
},
}
is equivalent to:
match diesel_record_count!(workspace_table, &table.id, conn) {
0 => diesel_insert_table!(workspace_table, table, conn),
_ => diesel_update_table!(workspace_table, WorkspaceTableChangeset::from_table(table), &*conn),
}
*/
#[macro_export]
macro_rules! diesel_insert_table {
(
$table_name:ident,
$table:expr,
$connection:expr
) => {
{
let _ = diesel::insert_into($table_name::table)
.values($table)
// .on_conflict($table_name::dsl::id)
// .do_update()
// .set(WorkspaceTableChangeset::from_table(workspace_table))
.execute($connection)?;
}
};
}
#[macro_export]
macro_rules! diesel_record_count {
(
$table_name:ident,
$id:expr,
$connection:expr
) => {
$table_name::dsl::$table_name
.filter($table_name::dsl::id.eq($id.clone()))
.count()
.get_result($connection)
.unwrap_or(0);
};
}
#[macro_export]
macro_rules! diesel_revision_record_count {
(
$table_name:expr,
$filter:expr,
$connection:expr
) => {
$table_name
.filter($filter)
.count()
.get_result($connection)
.unwrap_or(0);
};
}
#[macro_export]
macro_rules! diesel_update_table {
(
$table_name:ident,
$changeset:expr,
$connection:expr
) => {{
let filter = $table_name::dsl::$table_name.filter($table_name::dsl::id.eq($changeset.id.clone()));
let affected_row = diesel::update(filter).set($changeset).execute($connection)?;
debug_assert_eq!(affected_row, 1);
}};
}
#[macro_export]
macro_rules! diesel_delete_table {
(
$table_name:ident,
$id:ident,
$connection:ident
) => {
let filter = $table_name::dsl::$table_name.filter($table_name::dsl::id.eq($id));
let affected_row = diesel::delete(filter).execute(&*$connection)?;
debug_assert_eq!(affected_row, 1);
};
}
#[macro_export]
macro_rules! impl_sql_binary_expression {
($target:ident) => {
impl diesel::serialize::ToSql<diesel::sql_types::Binary, diesel::sqlite::Sqlite> for $target {
fn to_sql<W: std::io::Write>(
&self,
out: &mut diesel::serialize::Output<W, diesel::sqlite::Sqlite>,
) -> diesel::serialize::Result {
let bytes: Vec<u8> = self.try_into().map_err(|e| format!("{:?}", e))?;
diesel::serialize::ToSql::<diesel::sql_types::Binary, diesel::sqlite::Sqlite>::to_sql(&bytes, out)
}
}
// https://docs.diesel.rs/src/diesel/sqlite/types/mod.rs.html#30-33
// impl FromSql<sql_types::Binary, Sqlite> for *const [u8] {
// fn from_sql(bytes: Option<&SqliteValue>) -> deserialize::Result<Self> {
// let bytes = not_none!(bytes).read_blob();
// Ok(bytes as *const _)
// }
// }
impl<DB> diesel::deserialize::FromSql<diesel::sql_types::Binary, DB> for $target
where
DB: diesel::backend::Backend,
*const [u8]: diesel::deserialize::FromSql<diesel::sql_types::Binary, DB>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> diesel::deserialize::Result<Self> {
let slice_ptr =
<*const [u8] as diesel::deserialize::FromSql<diesel::sql_types::Binary, DB>>::from_sql(bytes)?;
let bytes = unsafe { &*slice_ptr };
match $target::try_from(bytes) {
Ok(object) => Ok(object),
Err(e) => {
log::error!(
"{:?} deserialize from bytes fail. {:?}",
std::any::type_name::<$target>(),
e
);
panic!();
}
}
}
}
};
}
#[macro_export]
macro_rules! impl_sql_integer_expression {
($target:ident) => {
impl<DB> diesel::serialize::ToSql<Integer, DB> for $target
where
DB: diesel::backend::Backend,
i32: diesel::serialize::ToSql<Integer, DB>,
{
fn to_sql<W: std::io::Write>(
&self,
out: &mut diesel::serialize::Output<W, DB>,
) -> diesel::serialize::Result {
(*self as i32).to_sql(out)
}
}
impl<DB> diesel::deserialize::FromSql<Integer, DB> for $target
where
DB: diesel::backend::Backend,
i32: diesel::deserialize::FromSql<Integer, DB>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> diesel::deserialize::Result<Self> {
let small_int = i32::from_sql(bytes)?;
Ok($target::from(small_int))
}
}
};
}
#[macro_export]
macro_rules! impl_rev_state_map {
($target:ident) => {
impl std::convert::From<i32> for $target {
fn from(value: i32) -> Self {
match value {
0 => $target::Sync,
1 => $target::Ack,
o => {
tracing::error!("Unsupported rev state {}, fallback to RevState::Local", o);
$target::Sync
}
}
}
}
impl std::convert::From<$target> for RevisionState {
fn from(s: $target) -> Self {
match s {
$target::Sync => RevisionState::Sync,
$target::Ack => RevisionState::Ack,
}
}
}
impl std::convert::From<RevisionState> for $target {
fn from(s: RevisionState) -> Self {
match s {
RevisionState::Sync => $target::Sync,
RevisionState::Ack => $target::Ack,
}
}
}
};
}
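
Illustrative only (not in the commit): wiring a hypothetical enum, stored as an INTEGER column, through impl_sql_integer_expression!. The enum name and variants are made up; the macro expects diesel::sql_types::Integer in scope and a From<i32> impl on the target.

use diesel::sql_types::Integer;

#[derive(Clone, Copy, Debug)]
pub enum ExampleState {
    Active = 0,
    Removed = 1,
}

impl std::convert::From<i32> for ExampleState {
    fn from(value: i32) -> Self {
        match value {
            0 => ExampleState::Active,
            _ => ExampleState::Removed,
        }
    }
}

impl_sql_integer_expression!(ExampleState);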

View File

@ -0,0 +1,197 @@
// @generated automatically by Diesel CLI.
diesel::table! {
app_table (id) {
id -> Text,
workspace_id -> Text,
name -> Text,
desc -> Text,
color_style -> Binary,
last_view_id -> Nullable<Text>,
modified_time -> BigInt,
create_time -> BigInt,
version -> BigInt,
is_trash -> Bool,
}
}
diesel::table! {
document_rev_snapshot (snapshot_id) {
snapshot_id -> Text,
object_id -> Text,
rev_id -> BigInt,
base_rev_id -> BigInt,
timestamp -> BigInt,
data -> Binary,
}
}
diesel::table! {
document_rev_table (id) {
id -> Integer,
document_id -> Text,
base_rev_id -> BigInt,
rev_id -> BigInt,
data -> Binary,
state -> Integer,
}
}
diesel::table! {
folder_rev_snapshot (snapshot_id) {
snapshot_id -> Text,
object_id -> Text,
rev_id -> BigInt,
base_rev_id -> BigInt,
timestamp -> BigInt,
data -> Binary,
}
}
diesel::table! {
grid_block_index_table (row_id) {
row_id -> Text,
block_id -> Text,
}
}
diesel::table! {
grid_meta_rev_table (id) {
id -> Integer,
object_id -> Text,
base_rev_id -> BigInt,
rev_id -> BigInt,
data -> Binary,
state -> Integer,
}
}
diesel::table! {
grid_rev_snapshot (snapshot_id) {
snapshot_id -> Text,
object_id -> Text,
rev_id -> BigInt,
base_rev_id -> BigInt,
timestamp -> BigInt,
data -> Binary,
}
}
diesel::table! {
grid_rev_table (id) {
id -> Integer,
object_id -> Text,
base_rev_id -> BigInt,
rev_id -> BigInt,
data -> Binary,
state -> Integer,
}
}
diesel::table! {
grid_view_rev_table (id) {
id -> Integer,
object_id -> Text,
base_rev_id -> BigInt,
rev_id -> BigInt,
data -> Binary,
state -> Integer,
}
}
diesel::table! {
kv_table (key) {
key -> Text,
value -> Binary,
}
}
diesel::table! {
rev_snapshot (id) {
id -> Integer,
object_id -> Text,
rev_id -> BigInt,
data -> Binary,
}
}
diesel::table! {
rev_table (id) {
id -> Integer,
doc_id -> Text,
base_rev_id -> BigInt,
rev_id -> BigInt,
data -> Binary,
state -> Integer,
ty -> Integer,
}
}
diesel::table! {
trash_table (id) {
id -> Text,
name -> Text,
desc -> Text,
modified_time -> BigInt,
create_time -> BigInt,
ty -> Integer,
}
}
diesel::table! {
user_table (id) {
id -> Text,
name -> Text,
token -> Text,
email -> Text,
workspace -> Text,
icon_url -> Text,
}
}
diesel::table! {
view_table (id) {
id -> Text,
belong_to_id -> Text,
name -> Text,
desc -> Text,
modified_time -> BigInt,
create_time -> BigInt,
thumbnail -> Text,
view_type -> Integer,
version -> BigInt,
is_trash -> Bool,
ext_data -> Text,
}
}
diesel::table! {
workspace_table (id) {
id -> Text,
name -> Text,
desc -> Text,
modified_time -> BigInt,
create_time -> BigInt,
user_id -> Text,
version -> BigInt,
}
}
diesel::allow_tables_to_appear_in_same_query!(
app_table,
document_rev_snapshot,
document_rev_table,
folder_rev_snapshot,
grid_block_index_table,
grid_meta_rev_table,
grid_rev_snapshot,
grid_rev_table,
grid_view_rev_table,
kv_table,
rev_snapshot,
rev_table,
trash_table,
user_table,
view_table,
workspace_table,
);
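
A read-path sketch against the generated schema (not part of the commit): loading revision rows for an object id. The function name is hypothetical; `conn` is assumed to come from the flowy-sqlite pool and the schema module to be in scope.

use diesel::prelude::*;
use diesel::SqliteConnection;

fn read_grid_revisions(
    object_id: &str,
    conn: &SqliteConnection,
) -> Result<Vec<(i32, i64, i64)>, diesel::result::Error> {
    grid_rev_table::dsl::grid_rev_table
        .filter(grid_rev_table::dsl::object_id.eq(object_id))
        .select((
            grid_rev_table::dsl::id,
            grid_rev_table::dsl::base_rev_id,
            grid_rev_table::dsl::rev_id,
        ))
        .load(conn)
}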

View File

@ -0,0 +1,23 @@
use crate::sqlite::errors::*;
use diesel::{dsl::sql, expression::SqlLiteral, query_dsl::LoadQuery, Connection, RunQueryDsl, SqliteConnection};
pub trait ConnectionExtension: Connection {
fn query<ST, T>(&self, query: &str) -> Result<T>
where
SqlLiteral<ST>: LoadQuery<SqliteConnection, T>;
fn exec(&self, query: impl AsRef<str>) -> Result<usize>;
}
impl ConnectionExtension for SqliteConnection {
fn query<ST, T>(&self, query: &str) -> Result<T>
where
SqlLiteral<ST>: LoadQuery<SqliteConnection, T>,
{
Ok(sql::<ST>(query).get_result(self)?)
}
fn exec(&self, query: impl AsRef<str>) -> Result<usize> {
Ok(SqliteConnection::execute(self, query.as_ref())?)
}
}
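
A small sketch (not in the commit) of the extension trait in use: issuing a raw statement through exec. The table name is illustrative; Result is the crate's sqlite error type.

fn ensure_example_table(conn: &SqliteConnection) -> Result<usize> {
    // Creates a throwaway table if it is missing; returns the affected row count.
    conn.exec("CREATE TABLE IF NOT EXISTS example_table (id TEXT NOT NULL PRIMARY KEY)")
}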

View File

@ -0,0 +1,53 @@
use crate::sqlite::{
errors::*,
pool::{ConnectionManager, ConnectionPool, PoolConfig},
};
use r2d2::PooledConnection;
use std::sync::Arc;
pub struct Database {
uri: String,
pool: Arc<ConnectionPool>,
}
pub type DBConnection = PooledConnection<ConnectionManager>;
impl Database {
pub fn new(dir: &str, name: &str, pool_config: PoolConfig) -> Result<Self> {
let uri = db_file_uri(dir, name);
if !std::path::PathBuf::from(dir).exists() {
tracing::error!("Create database failed. {} not exists", &dir);
}
let pool = ConnectionPool::new(pool_config, &uri)?;
Ok(Self {
uri,
pool: Arc::new(pool),
})
}
pub fn get_uri(&self) -> &str {
&self.uri
}
pub fn get_connection(&self) -> Result<DBConnection> {
let conn = self.pool.get()?;
Ok(conn)
}
pub fn get_pool(&self) -> Arc<ConnectionPool> {
self.pool.clone()
}
}
pub fn db_file_uri(dir: &str, name: &str) -> String {
use std::path::MAIN_SEPARATOR;
let mut uri = dir.to_owned();
if !uri.ends_with(MAIN_SEPARATOR) {
uri.push(MAIN_SEPARATOR);
}
uri.push_str(name);
uri
}
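
Usage sketch (not part of the commit): opening a database file under a caller-supplied directory and borrowing a pooled connection. The file name reuses DB_NAME from lib.rs only for illustration.

fn open_example(dir: &str) -> Result<DBConnection> {
    let database = Database::new(dir, "flowy-database.db", PoolConfig::default())?;
    database.get_connection()
}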

View File

@ -0,0 +1,18 @@
use error_chain::{
error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, impl_extract_backtrace,
};
error_chain! {
errors {
UnknownMigrationExists(v: String) {
display("unknown migration version: '{}'", v),
}
}
foreign_links {
R2D2(::r2d2::Error);
Migrations(::diesel_migrations::RunMigrationsError);
Diesel(::diesel::result::Error);
Connection(::diesel::ConnectionError);
Io(::std::io::Error);
}
}

View File

@ -0,0 +1,11 @@
mod conn_ext;
mod database;
#[allow(deprecated, clippy::large_enum_variant)]
mod errors;
mod pool;
mod pragma;
pub use database::*;
pub use pool::*;
pub use errors::{Error, ErrorKind, Result};

View File

@ -0,0 +1,154 @@
use crate::sqlite::{errors::*, pragma::*};
use diesel::{connection::Connection, SqliteConnection};
use r2d2::{CustomizeConnection, ManageConnection, Pool};
use scheduled_thread_pool::ScheduledThreadPool;
use std::{sync::Arc, time::Duration};
lazy_static::lazy_static! {
static ref DB_POOL: Arc<ScheduledThreadPool> = Arc::new(
ScheduledThreadPool::with_name("db-pool-{}:", 4)
);
}
pub struct ConnectionPool {
pub(crate) inner: Pool<ConnectionManager>,
}
impl std::ops::Deref for ConnectionPool {
type Target = Pool<ConnectionManager>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl ConnectionPool {
pub fn new<T>(config: PoolConfig, uri: T) -> Result<Self>
where
T: Into<String>,
{
let manager = ConnectionManager::new(uri);
let thread_pool = DB_POOL.clone();
let config = Arc::new(config);
let customizer_config = DatabaseCustomizerConfig::default();
let pool = r2d2::Pool::builder()
.thread_pool(thread_pool)
.min_idle(Some(config.min_idle))
.connection_customizer(Box::new(DatabaseCustomizer::new(customizer_config)))
.max_size(config.max_size)
.max_lifetime(None)
.connection_timeout(config.connection_timeout)
.idle_timeout(Some(config.idle_timeout))
.build_unchecked(manager);
Ok(ConnectionPool { inner: pool })
}
}
#[allow(dead_code)]
pub type OnExecFunc = Box<dyn Fn() -> Box<dyn Fn(&SqliteConnection, &str)> + Send + Sync>;
pub struct PoolConfig {
min_idle: u32,
max_size: u32,
connection_timeout: Duration,
idle_timeout: Duration,
}
impl Default for PoolConfig {
fn default() -> Self {
Self {
min_idle: 1,
max_size: 10,
connection_timeout: Duration::from_secs(10),
idle_timeout: Duration::from_secs(5 * 60),
}
}
}
impl PoolConfig {
#[allow(dead_code)]
pub fn min_idle(mut self, min_idle: u32) -> Self {
self.min_idle = min_idle;
self
}
#[allow(dead_code)]
pub fn max_size(mut self, max_size: u32) -> Self {
self.max_size = max_size;
self
}
}
pub struct ConnectionManager {
db_uri: String,
}
impl ManageConnection for ConnectionManager {
type Connection = SqliteConnection;
type Error = crate::sqlite::Error;
fn connect(&self) -> Result<Self::Connection> {
Ok(SqliteConnection::establish(&self.db_uri)?)
}
fn is_valid(&self, conn: &mut Self::Connection) -> Result<()> {
Ok(conn.execute("SELECT 1").map(|_| ())?)
}
fn has_broken(&self, _conn: &mut Self::Connection) -> bool {
false
}
}
impl ConnectionManager {
pub fn new<S: Into<String>>(uri: S) -> Self {
ConnectionManager { db_uri: uri.into() }
}
}
#[derive(Debug)]
pub struct DatabaseCustomizerConfig {
pub(crate) journal_mode: SQLiteJournalMode,
pub(crate) synchronous: SQLiteSynchronous,
pub(crate) busy_timeout: i32,
#[allow(dead_code)]
pub(crate) secure_delete: bool,
}
impl Default for DatabaseCustomizerConfig {
fn default() -> Self {
Self {
journal_mode: SQLiteJournalMode::WAL,
synchronous: SQLiteSynchronous::NORMAL,
busy_timeout: 5000,
secure_delete: true,
}
}
}
#[derive(Debug)]
struct DatabaseCustomizer {
config: DatabaseCustomizerConfig,
}
impl DatabaseCustomizer {
fn new(config: DatabaseCustomizerConfig) -> Self
where
Self: Sized,
{
Self { config }
}
}
impl CustomizeConnection<SqliteConnection, crate::sqlite::Error> for DatabaseCustomizer {
fn on_acquire(&self, conn: &mut SqliteConnection) -> Result<()> {
conn.pragma_set_busy_timeout(self.config.busy_timeout)?;
if self.config.journal_mode != SQLiteJournalMode::WAL {
conn.pragma_set_journal_mode(self.config.journal_mode, None)?;
}
conn.pragma_set_synchronous(self.config.synchronous, None)?;
Ok(())
}
}
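
A tuning sketch (not in the commit): shrinking the pool before opening it. The uri is expected to come from db_file_uri; the sizes are arbitrary examples.

fn build_small_pool(uri: &str) -> Result<ConnectionPool> {
    let config = PoolConfig::default().min_idle(2).max_size(4);
    ConnectionPool::new(config, uri)
}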

View File

@ -0,0 +1,170 @@
#![allow(clippy::upper_case_acronyms)]
use crate::sqlite::errors::{Error, Result};
use diesel::{
expression::SqlLiteral,
query_dsl::load_dsl::LoadQuery,
sql_types::{Integer, Text},
SqliteConnection,
};
use crate::sqlite::conn_ext::ConnectionExtension;
use std::{
convert::{TryFrom, TryInto},
fmt,
str::FromStr,
};
pub trait PragmaExtension: ConnectionExtension {
fn pragma<D: std::fmt::Display>(&self, key: &str, val: D, schema: Option<&str>) -> Result<()> {
let query = match schema {
Some(schema) => format!("PRAGMA {}.{} = '{}'", schema, key, val),
None => format!("PRAGMA {} = '{}'", key, val),
};
tracing::trace!("SQLITE {}", query);
self.exec(&query)?;
Ok(())
}
fn pragma_ret<ST, T, D: std::fmt::Display>(&self, key: &str, val: D, schema: Option<&str>) -> Result<T>
where
SqlLiteral<ST>: LoadQuery<SqliteConnection, T>,
{
let query = match schema {
Some(schema) => format!("PRAGMA {}.{} = '{}'", schema, key, val),
None => format!("PRAGMA {} = '{}'", key, val),
};
tracing::trace!("SQLITE {}", query);
self.query::<ST, T>(&query)
}
fn pragma_get<ST, T>(&self, key: &str, schema: Option<&str>) -> Result<T>
where
SqlLiteral<ST>: LoadQuery<SqliteConnection, T>,
{
let query = match schema {
Some(schema) => format!("PRAGMA {}.{}", schema, key),
None => format!("PRAGMA {}", key),
};
tracing::trace!("SQLITE {}", query);
self.query::<ST, T>(&query)
}
fn pragma_set_busy_timeout(&self, timeout_ms: i32) -> Result<i32> {
self.pragma_ret::<Integer, i32, i32>("busy_timeout", timeout_ms, None)
}
fn pragma_get_busy_timeout(&self) -> Result<i32> {
self.pragma_get::<Integer, i32>("busy_timeout", None)
}
fn pragma_set_journal_mode(&self, mode: SQLiteJournalMode, schema: Option<&str>) -> Result<i32> {
self.pragma_ret::<Integer, i32, SQLiteJournalMode>("journal_mode", mode, schema)
}
fn pragma_get_journal_mode(&self, schema: Option<&str>) -> Result<SQLiteJournalMode> {
self.pragma_get::<Text, String>("journal_mode", schema)?.parse()
}
fn pragma_set_synchronous(&self, synchronous: SQLiteSynchronous, schema: Option<&str>) -> Result<()> {
self.pragma("synchronous", synchronous as u8, schema)
}
fn pragma_get_synchronous(&self, schema: Option<&str>) -> Result<SQLiteSynchronous> {
self.pragma_get::<Integer, i32>("synchronous", schema)?.try_into()
}
}
impl PragmaExtension for SqliteConnection {}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SQLiteJournalMode {
DELETE,
TRUNCATE,
PERSIST,
MEMORY,
WAL,
OFF,
}
impl fmt::Display for SQLiteJournalMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match self {
Self::DELETE => "DELETE",
Self::TRUNCATE => "TRUNCATE",
Self::PERSIST => "PERSIST",
Self::MEMORY => "MEMORY",
Self::WAL => "WAL",
Self::OFF => "OFF",
}
)
}
}
impl FromStr for SQLiteJournalMode {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
match s.to_uppercase().as_ref() {
"DELETE" => Ok(Self::DELETE),
"TRUNCATE" => Ok(Self::TRUNCATE),
"PERSIST" => Ok(Self::PERSIST),
"MEMORY" => Ok(Self::MEMORY),
"WAL" => Ok(Self::WAL),
"OFF" => Ok(Self::OFF),
_ => Err(format!("Unknown value {} for JournalMode", s).into()),
}
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SQLiteSynchronous {
EXTRA = 3,
FULL = 2,
NORMAL = 1,
OFF = 0,
}
impl fmt::Display for SQLiteSynchronous {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match self {
Self::OFF => "OFF",
Self::NORMAL => "NORMAL",
Self::FULL => "FULL",
Self::EXTRA => "EXTRA",
}
)
}
}
impl TryFrom<i32> for SQLiteSynchronous {
type Error = Error;
fn try_from(v: i32) -> Result<Self> {
match v {
0 => Ok(Self::OFF),
1 => Ok(Self::NORMAL),
2 => Ok(Self::FULL),
3 => Ok(Self::EXTRA),
_ => Err(format!("Unknown value {} for Synchronous", v).into()),
}
}
}
impl FromStr for SQLiteSynchronous {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
match s.to_uppercase().as_ref() {
"0" | "OFF" => Ok(Self::OFF),
"1" | "NORMAL" => Ok(Self::NORMAL),
"2" | "FULL" => Ok(Self::FULL),
"3" | "EXTRA" => Ok(Self::EXTRA),
_ => Err(format!("Unknown value {} for Synchronous", s).into()),
}
}
}
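
A diagnostic sketch (not part of the commit): reading back the pragmas that the pool customizer sets. `conn` is assumed to be a SqliteConnection obtained from the pool; the function name is hypothetical.

fn log_connection_settings(conn: &SqliteConnection) -> Result<()> {
    let journal_mode = conn.pragma_get_journal_mode(None)?;
    let synchronous = conn.pragma_get_synchronous(None)?;
    let busy_timeout = conn.pragma_get_busy_timeout()?;
    tracing::trace!(
        "journal_mode={} synchronous={} busy_timeout={}ms",
        journal_mode,
        synchronous,
        busy_timeout
    );
    Ok(())
}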