server: make unread message counting much faster, remove rayon dep

This commit is contained in:
Bill Thiede 2023-12-02 15:41:22 -08:00
parent 7a32d5c630
commit b14000952c
3 changed files with 26 additions and 8 deletions

1
Cargo.lock generated
View File

@@ -3113,7 +3113,6 @@ dependencies = [
  "mailparse",
  "memmap",
  "notmuch",
- "rayon",
  "rocket 0.5.0",
  "rocket_contrib",
  "rocket_cors",

View File

@@ -20,7 +20,6 @@ urlencoding = "2.1.3"
 async-graphql = { version = "6.0.11", features = ["log"] }
 async-graphql-rocket = "6.0.11"
 rocket_cors = "0.6.0"
-rayon = "1.8.0"
 memmap = "0.7.0"
 mailparse = "0.14.0"
 ammonia = "3.3.0"

View File

@@ -1,4 +1,5 @@
 use std::{
+    collections::HashMap,
     fs::File,
     hash::{DefaultHasher, Hash, Hasher},
 };
@@ -12,7 +13,7 @@ use log::{error, info, warn};
 use mailparse::{parse_mail, MailHeaderMap, ParsedMail};
 use memmap::MmapOptions;
 use notmuch::Notmuch;
-use rayon::prelude::*;
+use rocket::time::Instant;

 pub struct QueryRoot;
@@ -214,15 +215,32 @@ impl QueryRoot {
     async fn tags<'ctx>(&self, ctx: &Context<'ctx>) -> FieldResult<Vec<Tag>> {
         let nm = ctx.data_unchecked::<Notmuch>();
-        Ok(nm
+        let now = Instant::now();
+        let needs_unread = ctx.look_ahead().field("unread").exists();
+        let unread_msg_cnt: HashMap<String, usize> = if needs_unread {
+            // 10000 is an arbitrary number, if there's more than 10k unread messages, we'll
+            // get an inaccurate count.
+            nm.search("is:unread", 0, 10000)?
+                .0
+                .iter()
+                .fold(HashMap::new(), |mut m, ts| {
+                    ts.tags.iter().for_each(|t| {
+                        m.entry(t.clone()).and_modify(|c| *c += 1).or_insert(1);
+                    });
+                    m
+                })
+        } else {
+            HashMap::new()
+        };
+        let tags = nm
             .tags()?
-            .into_par_iter()
+            .into_iter()
             .map(|tag| {
                 let mut hasher = DefaultHasher::new();
                 tag.hash(&mut hasher);
                 let hex = format!("#{:06x}", hasher.finish() % (1 << 24));
-                let unread = if ctx.look_ahead().field("unread").exists() {
-                    nm.count(&format!("tag:{tag} is:unread")).unwrap_or(0)
+                let unread = if needs_unread {
+                    *unread_msg_cnt.get(&tag).unwrap_or(&0)
                 } else {
                     0
                 };
@@ -233,7 +251,9 @@ impl QueryRoot {
                 unread,
             }
         })
-        .collect())
+        .collect();
+        info!("Fetching tags took {}", now.elapsed());
+        Ok(tags)
     }

     async fn thread<'ctx>(&self, ctx: &Context<'ctx>, thread_id: String) -> Result<Thread, Error> {
         // TODO(wathiede): normalize all email addresses through an address book with preferred