use async_trait::async_trait;
use hbb_common::{log, ResultType};
use sqlx::{
sqlite::SqliteConnectOptions, ConnectOptions, Connection, Error as SqlxError, SqliteConnection,
};
use std::{ops::DerefMut, str::FromStr};
//use sqlx::postgres::PgPoolOptions;
//use sqlx::mysql::MySqlPoolOptions;
// Deadpool-managed pool of raw SQLite connections.
type Pool = deadpool::managed::Pool<DbPool>;
// Pool manager: holds the database URL used to open each new connection.
pub struct DbPool {
    url: String,
}
#[async_trait]
impl deadpool::managed::Manager for DbPool {
    type Type = SqliteConnection;
    type Error = SqlxError;
    /// Open a fresh SQLite connection to `self.url`, logging executed
    /// statements at DEBUG level.
    async fn create(&self) -> Result<SqliteConnection, SqlxError> {
        // Propagate a malformed URL as an SqlxError instead of panicking:
        // `FromStr for SqliteConnectOptions` already yields this error type.
        let mut opt = SqliteConnectOptions::from_str(&self.url)?;
        opt.log_statements(log::LevelFilter::Debug);
        SqliteConnection::connect_with(&opt).await
    }
    /// Verify a pooled connection is still alive before handing it out again.
    async fn recycle(
        &self,
        obj: &mut SqliteConnection,
    ) -> deadpool::managed::RecycleResult<SqlxError> {
        Ok(obj.ping().await?)
    }
}
// Cheaply-cloneable handle to the database; clones share the same pool.
#[derive(Clone)]
pub struct Database {
    pool: Pool,
}
// One row of the `peer` table, restricted to the columns selected by
// `get_peer`. Field names must stay in sync with that query's column list
// because `sqlx::query_as!` maps columns to fields by name.
#[derive(Default)]
pub struct Peer {
    pub guid: Vec<u8>,          // primary key; random bytes generated at insert
    pub id: String,             // public peer id (unique-indexed)
    pub uuid: Vec<u8>,
    pub pk: Vec<u8>,            // peer public key
    pub user: Option<Vec<u8>>,  // nullable `user` column
    pub info: String,
    pub status: Option<i64>,    // nullable `status` column (tinyint in schema)
}
impl Database {
    /// Open the SQLite database at `url`, creating the file if it does not
    /// exist, build the connection pool, verify connectivity, and ensure the
    /// schema is in place.
    ///
    /// Pool size is taken from `MAX_DATABASE_CONNECTIONS` (default 1).
    pub async fn new(url: &str) -> ResultType<Database> {
        if !std::path::Path::new(url).exists() {
            // Best effort: if creation fails the pool test below will surface
            // the real error, so log instead of bailing out here.
            if let Err(err) = std::fs::File::create(url) {
                log::warn!("Failed to create database file {}: {}", url, err);
            }
        }
        let n: usize = std::env::var("MAX_DATABASE_CONNECTIONS")
            .unwrap_or_else(|_| "1".to_owned())
            .parse()
            .unwrap_or(1);
        log::debug!("MAX_DATABASE_CONNECTIONS={}", n);
        let pool = Pool::new(
            DbPool {
                url: url.to_owned(),
            },
            n,
        );
        let _ = pool.get().await?; // fail fast if the database is unreachable
        let db = Database { pool };
        db.create_tables().await?;
        Ok(db)
    }

    /// Create the `peer` table and its indexes if they do not already exist.
    /// Idempotent; safe to run on every startup.
    async fn create_tables(&self) -> ResultType<()> {
        sqlx::query!(
            "
        create table if not exists peer (
            guid blob primary key not null,
            id varchar(100) not null,
            uuid blob not null,
            pk blob not null,
            created_at datetime not null default(current_timestamp),
            user blob,
            status tinyint,
            note varchar(300),
            info text not null
        ) without rowid;
        create unique index if not exists index_peer_id on peer (id);
        create index if not exists index_peer_user on peer (user);
        create index if not exists index_peer_created_at on peer (created_at);
        create index if not exists index_peer_status on peer (status);
        "
        )
        .execute(self.pool.get().await?.deref_mut())
        .await?;
        Ok(())
    }

    /// Look up a peer by its public `id`; returns `Ok(None)` when absent.
    pub async fn get_peer(&self, id: &str) -> ResultType<Option<Peer>> {
        Ok(sqlx::query_as!(
            Peer,
            "select guid, id, uuid, pk, user, status, info from peer where id = ?",
            id
        )
        .fetch_optional(self.pool.get().await?.deref_mut())
        .await?)
    }

    /// Insert a new peer row, generating a fresh random guid.
    ///
    /// Returns the generated guid on success. Fails (unique index on `id`)
    /// if a peer with the same `id` already exists.
    pub async fn insert_peer(
        &self,
        id: &str,
        uuid: &[u8],
        pk: &[u8],
        info: &str,
    ) -> ResultType<Vec<u8>> {
        let guid = uuid::Uuid::new_v4().as_bytes().to_vec();
        sqlx::query!(
            "insert into peer(guid, id, uuid, pk, info) values(?, ?, ?, ?, ?)",
            guid,
            id,
            uuid,
            pk,
            info
        )
        .execute(self.pool.get().await?.deref_mut())
        .await?;
        Ok(guid)
    }

    /// Update the `id`, `pk` and `info` of the peer identified by `guid`.
    ///
    /// Takes `&[u8]` rather than `&Vec<u8>` (existing `&vec` call sites
    /// coerce automatically). A no-op if no row matches `guid`.
    pub async fn update_pk(
        &self,
        guid: &[u8],
        id: &str,
        pk: &[u8],
        info: &str,
    ) -> ResultType<()> {
        sqlx::query!(
            "update peer set id=?, pk=?, info=? where guid=?",
            id,
            pk,
            info,
            guid
        )
        .execute(self.pool.get().await?.deref_mut())
        .await?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use hbb_common::tokio;

    #[test]
    fn test_insert() {
        insert();
    }

    /// Stress the pool: 10k concurrent inserts followed by 10k concurrent
    /// lookups against an on-disk test database.
    #[tokio::main(flavor = "multi_thread")]
    async fn insert() {
        let db = super::Database::new("test.sqlite3").await.unwrap();
        let mut handles = Vec::new();
        for n in 0..10000 {
            let handle = db.clone();
            handles.push(tokio::spawn(async move {
                let blank: Vec<u8> = Vec::new();
                handle
                    .insert_peer(&n.to_string(), &blank, &blank, "")
                    .await
                    .unwrap();
            }));
        }
        for n in 0..10000 {
            let handle = db.clone();
            handles.push(tokio::spawn(async move {
                handle.get_peer(&n.to_string()).await.unwrap();
            }));
        }
        // Wait for every spawned task; any panic inside a task is surfaced
        // by the unwrap()s above when join_all resolves.
        hbb_common::futures::future::join_all(handles).await;
    }
}