server: poll for new messages and update clients via WS
parent 0662e6230e
commit 6f93aa4f34
Cargo.lock (generated)
@@ -3090,6 +3090,7 @@ dependencies = [
  "build-info",
  "letterbox-notmuch",
  "serde",
+ "strum_macros 0.27.1",
 ]

 [[package]]

@@ -470,7 +470,7 @@ pub enum NotmuchError {
     MailParseError(#[from] mailparse::MailParseError),
 }

-#[derive(Default)]
+#[derive(Clone, Default)]
 pub struct Notmuch {
     config_path: Option<PathBuf>,
 }

@@ -1,7 +1,7 @@
 // Rocket generates a lot of warnings for handlers
 // TODO: figure out why
 #![allow(unreachable_patterns)]
-use std::{error::Error, io::Cursor, net::SocketAddr, str::FromStr, sync::Arc};
+use std::{error::Error, io::Cursor, net::SocketAddr, str::FromStr, sync::Arc, time::Duration};

 use async_graphql::{extensions, http::GraphiQLSource, Schema};
 use async_graphql_axum::{GraphQL, GraphQLSubscription};

@@ -21,7 +21,9 @@ use letterbox_server::tantivy::TantivyConnection;
 use letterbox_server::{
     config::Config,
     error::ServerError,
-    graphql::{Attachment, GraphqlSchema, MutationRoot, QueryRoot, SubscriptionRoot},
+    graphql::{
+        compute_catchup_ids, Attachment, GraphqlSchema, MutationRoot, QueryRoot, SubscriptionRoot,
+    },
     nm::{attachment_bytes, cid_attachment_bytes},
     ws::ConnectionTracker,
 };

@@ -294,19 +296,44 @@ async fn main() -> Result<(), Box<dyn Error>> {
         std::fs::create_dir_all(&config.slurp_cache_path)?;
     }
     let pool = PgPool::connect(&config.newsreader_database_url).await?;
+    let nm = Notmuch::default();
     sqlx::migrate!("./migrations").run(&pool).await?;
     #[cfg(feature = "tantivy")]
     let tantivy_conn = TantivyConnection::new(&config.newsreader_tantivy_db_path)?;

     let cacher = FilesystemCacher::new(&config.slurp_cache_path)?;
     let schema = Schema::build(QueryRoot, MutationRoot, SubscriptionRoot)
-        .data(Notmuch::default())
+        .data(nm.clone())
         .data(cacher)
         .data(pool.clone());

     let schema = schema.extension(extensions::Logger).finish();

     let conn_tracker = Arc::new(Mutex::new(ConnectionTracker::default()));
+    async fn watch_new(
+        nm: Notmuch,
+        pool: PgPool,
+        conn_tracker: Arc<Mutex<ConnectionTracker>>,
+        poll_time: Duration,
+    ) -> Result<(), async_graphql::Error> {
+        let mut old_ids = Vec::new();
+        loop {
+            let ids = compute_catchup_ids(&nm, &pool, "is:unread").await?;
+            if old_ids != ids {
+                info!("old_ids: {old_ids:?}\n ids: {ids:?}");
+                conn_tracker
+                    .lock()
+                    .await
+                    .send_message_all(WebsocketMessage::RefreshMessages)
+                    .await
+            }
+            old_ids = ids;
+            tokio::time::sleep(poll_time).await;
+        }
+    }
+    let ct = Arc::clone(&conn_tracker);
+    let poll_time = Duration::from_secs(10);
+    let _h = tokio::spawn(watch_new(nm, pool, ct, poll_time));

     let app = Router::new()
         .route("/test", get(test_handler))

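The new `watch_new` task recomputes the unread catch-up IDs every 10 seconds and, whenever the set changes, broadcasts `RefreshMessages` to every tracked WebSocket peer. The client side of that notification is not part of this commit; a minimal sketch of a consumer, assuming tokio-tungstenite and a `/ws` endpoint on localhost (both assumptions, not from the diff), might look like:

```rust
// Hypothetical client-side consumer: the endpoint URL and the use of
// tokio-tungstenite are assumptions; only the RefreshMessages payload
// comes from this commit.
use futures_util::StreamExt;
use tokio_tungstenite::connect_async;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let (mut ws, _response) = connect_async("ws://localhost:8000/ws").await?;
    while let Some(frame) = ws.next().await {
        let frame = frame?;
        // The server serializes WebsocketMessage with serde_json, so the
        // RefreshMessages variant arrives as the JSON string "RefreshMessages".
        if frame.is_text() && frame.to_text()?.contains("RefreshMessages") {
            // Re-run the thread-list query against the GraphQL endpoint here.
        }
    }
    Ok(())
}
```
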
@@ -319,37 +319,7 @@ impl QueryRoot {
     ) -> Result<Vec<String>, Error> {
         let nm = ctx.data_unchecked::<Notmuch>();
         let pool = ctx.data_unchecked::<PgPool>();
-        let query: Query = query.parse()?;
-        // TODO: implement optimized versions of fetching just IDs
-        let newsreader_fut = newsreader_search(pool, None, None, None, None, &query);
-        let notmuch_fut = notmuch_search(nm, None, None, None, None, &query);
-        let (newsreader_results, notmuch_results) = join!(newsreader_fut, notmuch_fut);
-
-        let newsreader_results = newsreader_results?;
-        let notmuch_results = notmuch_results?;
-        info!(
-            "newsreader_results ({}) notmuch_results ({})",
-            newsreader_results.len(),
-            notmuch_results.len(),
-        );
-
-        let mut results: Vec<_> = newsreader_results
-            .into_iter()
-            .chain(notmuch_results)
-            .collect();
-        // The leading '-' is to reverse sort
-        results.sort_by_key(|item| match item {
-            ThreadSummaryCursor::Newsreader(_, ts) => -ts.timestamp,
-            ThreadSummaryCursor::Notmuch(_, ts) => -ts.timestamp,
-        });
-        let ids = results
-            .into_iter()
-            .map(|r| match r {
-                ThreadSummaryCursor::Newsreader(_, ts) => ts.thread,
-                ThreadSummaryCursor::Notmuch(_, ts) => ts.thread,
-            })
-            .collect();
-        Ok(ids)
+        compute_catchup_ids(nm, pool, &query).await
     }

     // TODO: this function doesn't get parallelism, possibly because notmuch is sync and blocks,

@@ -649,7 +619,6 @@ impl MutationRoot {

         tantivy.drop_and_load_index()?;
         tantivy.reindex_all(pool).await?;
-        println("hit");

         Ok(true)
     }

@@ -679,3 +648,42 @@ impl SubscriptionRoot {
 }

 pub type GraphqlSchema = Schema<QueryRoot, MutationRoot, SubscriptionRoot>;
+
+#[instrument(skip_all, fields(query=query))]
+pub async fn compute_catchup_ids(
+    nm: &Notmuch,
+    pool: &PgPool,
+    query: &str,
+) -> Result<Vec<String>, Error> {
+    let query: Query = query.parse()?;
+    // TODO: implement optimized versions of fetching just IDs
+    let newsreader_fut = newsreader_search(pool, None, None, None, None, &query);
+    let notmuch_fut = notmuch_search(nm, None, None, None, None, &query);
+    let (newsreader_results, notmuch_results) = join!(newsreader_fut, notmuch_fut);
+
+    let newsreader_results = newsreader_results?;
+    let notmuch_results = notmuch_results?;
+    info!(
+        "newsreader_results ({}) notmuch_results ({})",
+        newsreader_results.len(),
+        notmuch_results.len(),
+    );
+
+    let mut results: Vec<_> = newsreader_results
+        .into_iter()
+        .chain(notmuch_results)
+        .collect();
+    // The leading '-' is to reverse sort
+    results.sort_by_key(|item| match item {
+        ThreadSummaryCursor::Newsreader(_, ts) => -ts.timestamp,
+        ThreadSummaryCursor::Notmuch(_, ts) => -ts.timestamp,
+    });
+    let ids = results
+        .into_iter()
+        .map(|r| match r {
+            ThreadSummaryCursor::Newsreader(_, ts) => ts.thread,
+            ThreadSummaryCursor::Notmuch(_, ts) => ts.thread,
+        })
+        .collect();
+    Ok(ids)
+}

@@ -445,19 +445,16 @@ pub fn sanitize_html(
     let mut element_content_handlers = vec![
         // Remove width and height attributes on elements
         element!("[width],[height]", |el| {
-            println!("width or height {el:?}");
             el.remove_attribute("width");
             el.remove_attribute("height");
             Ok(())
         }),
         // Remove width and height values from inline styles
         element!("[style]", |el| {
-            println!("style {el:?}");
             let style = el.get_attribute("style").unwrap();
             let style = style
                 .split(";")
                 .filter(|s| {
-                    println!("s {s}");
                     let Some((k, _)) = s.split_once(':') else {
                         return true;
                     };

@@ -469,7 +466,6 @@ pub fn sanitize_html(
                 })
                 .collect::<Vec<_>>()
                 .join(";");
-            println!("style: {style}");
             if let Err(e) = el.set_attribute("style", &style) {
                 error!("Failed to set style attribute: {e}");
             }

@@ -61,8 +61,6 @@ pub async fn read_mail_to_db(pool: &PgPool, path: &str) -> Result<(), MailError>
         .ok_or(MailError::MissingDate)?,
     )?;

-    println!("Feed: {feed_id} Subject: {}", subject);
-
     if let Some(_m) = first_html(&m) {
         info!("add email {slug} {subject} {message_id} {date} {uid} {url}");
     } else {

@@ -349,9 +349,7 @@ fn email_addresses(
     for ma in mal.into_inner() {
         match ma {
             mailparse::MailAddr::Group(gi) => {
-                if !gi.group_name.contains("ndisclosed") {
-                    println!("[{path}][{header_name}] Group: {gi}");
-                }
+                if !gi.group_name.contains("ndisclosed") {}
             }
             mailparse::MailAddr::Single(s) => addrs.push(Email {
                 name: s.display_name,

@@ -13,8 +13,10 @@ impl ConnectionTracker {
     pub fn add_peer(&mut self, socket: WebSocket, who: SocketAddr) {
         warn!("adding {who:?} to connection tracker");
         self.peers.insert(who, socket);
+        self.send_message_all(WebsocketMessage::RefreshMessages);
     }
     pub async fn send_message_all(&mut self, msg: WebsocketMessage) {
+        info!("send_message_all {msg}");
         let m = serde_json::to_string(&msg).expect("failed to json encode WebsocketMessage");
         let mut bad_peers = Vec::new();
         for (who, socket) in &mut self.peers.iter_mut() {

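`add_peer` now nudges all peers right after a new socket is registered, and `send_message_all` gains a log line. The full body of `send_message_all` is not visible in this hunk; a minimal sketch of the broadcast-and-prune pattern that `bad_peers` suggests, assuming axum's `WebSocket` type (everything beyond the `peers` and `bad_peers` names seen in the diff is illustrative):

```rust
// Illustrative sketch only: mirrors the field names from the diff, but the
// rest of the implementation is assumed, not taken from the repository.
use std::{collections::HashMap, net::SocketAddr};

use axum::extract::ws::{Message, WebSocket};

#[derive(Default)]
pub struct ConnectionTracker {
    peers: HashMap<SocketAddr, WebSocket>,
}

impl ConnectionTracker {
    pub async fn send_message_all(&mut self, text: String) {
        let mut bad_peers = Vec::new();
        for (who, socket) in self.peers.iter_mut() {
            // A failed send usually means the peer went away; remember it.
            if socket.send(Message::Text(text.clone().into())).await.is_err() {
                bad_peers.push(*who);
            }
        }
        // Drop the dead connections so future broadcasts stay cheap.
        for who in bad_peers {
            self.peers.remove(&who);
        }
    }
}
```
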
@@ -14,3 +14,4 @@ version.workspace = true
 build-info = "0.0.40"
 letterbox-notmuch = { version = "0.12.1", path = "../notmuch", registry = "xinu" }
 serde = { version = "1.0.147", features = ["derive"] }
+strum_macros = "0.27.1"

@@ -13,7 +13,7 @@ pub struct SearchResult {
     pub total: usize,
 }

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Serialize, Deserialize, Debug, strum_macros::Display)]
 pub enum WebsocketMessage {
     RefreshMessages,
 }
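
The added `strum_macros::Display` derive is presumably what lets the new `info!("send_message_all {msg}")` call format the enum directly, while serde continues to provide the wire encoding. A self-contained sketch of the two encodings in play, assuming `serde`, `serde_json`, and `strum_macros` as dependencies (the assertions are illustrative, not from the repository):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, strum_macros::Display)]
pub enum WebsocketMessage {
    RefreshMessages,
}

fn main() {
    let msg = WebsocketMessage::RefreshMessages;
    // strum supplies Display for log formatting...
    assert_eq!(msg.to_string(), "RefreshMessages");
    // ...and serde_json produces the payload sent by send_message_all.
    assert_eq!(serde_json::to_string(&msg).unwrap(), "\"RefreshMessages\"");
}
```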