// letterbox/web/src/state.rs
use std::collections::HashSet;
use graphql_client::GraphQLQuery;
use log::{debug, error, info, warn};
use seed::{prelude::*, *};
use thiserror::Error;
use web_sys::HtmlElement;
use crate::{
api::urls,
consts::SEARCH_RESULTS_PER_PAGE,
graphql,
graphql::{front_page_query::*, send_graphql, show_thread_query::*},
};
/// Returns the default search query string.
///
/// While developing (host prefixed with "6758.") a tag-based query is
/// substituted so real unread state is not disturbed; otherwise the normal
/// "is:unread" query is used.
pub fn unread_query() -> &'static str {
    let location = seed::window().location();
    let host = location.host().expect("failed to get host");
    if host.starts_with("6758.") {
        "tag:letterbox"
    } else {
        "is:unread"
    }
}
// `init` describes what should happen when your app started.
/// Builds the initial `Model` and schedules startup work on `orders`.
///
/// If the URL carries no hash route, redirects to the default unread search;
/// otherwise re-requests the given URL. Also subscribes to URL changes and
/// window scroll events, and triggers a server refresh every 30 seconds.
pub fn init(url: Url, orders: &mut impl Orders<Msg>) -> Model {
    // `bi` is generated by the `build_info!` macro invocation further down;
    // items declared inside a block are visible throughout the whole block,
    // so this forward use is legal.
    let version = letterbox_shared::build_version(bi);
    info!("Build Info: {}", version);
    if url.hash().is_none() {
        // No route in the URL: land on the default unread search, page 0.
        orders.request_url(urls::search(unread_query(), 0));
    } else {
        orders.request_url(url);
    };
    // TODO(wathiede): only do this while viewing the index? Or maybe add a new message that force
    // 'notmuch new' on the server periodically?
    orders.stream(streams::interval(30_000, || Msg::RefreshStart));
    orders.subscribe(Msg::OnUrlChanged);
    orders.stream(streams::window_event(Ev::Scroll, |_| Msg::WindowScrolled));
    build_info::build_info!(fn bi);
    Model {
        context: Context::None,
        query: "".to_string(),
        refreshing_state: RefreshingState::None,
        tags: None,
        read_completion_ratio: 0.,
        content_el: ElRef::<HtmlElement>::default(),
        versions: Version {
            client: version,
            server: None,
        },
        catchup: None,
        last_url: Url::current(),
    }
}
/// Translates a changed URL into the `Msg` that loads the matching view.
///
/// Recognized hash routes:
/// - `#/t/<thread_id>`: show a single thread.
/// - `#/s/<query>`: show search results for `query` (first page).
/// - `#/s/<query>/p<N>`: show page `N` of the search results.
///
/// Anything else falls back to an empty front-page search. When the URL
/// actually differs from `old`, the window is also scrolled to the top.
fn on_url_changed(old: &Url, mut new: Url) -> Msg {
    let did_change = *old != new;
    let mut messages = Vec::new();
    if did_change {
        messages.push(Msg::ScrollToTop)
    }
    info!(
        "url changed\nold '{old}'\nnew '{new}', history {}",
        history().length().unwrap_or(0)
    );
    let hpp = new.remaining_hash_path_parts();
    let msg = match hpp.as_slice() {
        ["t", tid] => Msg::ShowThreadRequest {
            thread_id: tid.to_string(),
        },
        ["s", query] => {
            let query = Url::decode_uri_component(query).unwrap_or_default();
            Msg::FrontPageRequest {
                query,
                after: None,
                before: None,
                first: None,
                last: None,
            }
        }
        ["s", query, page] => {
            let query = Url::decode_uri_component(query).unwrap_or_default();
            // The page component looks like "pN"; skip the leading letter.
            // `get(1..)` avoids the panic `page[1..]` would raise on an empty
            // (or non-ASCII-boundary) path part.
            let page: i32 = page.get(1..).and_then(|p| p.parse().ok()).unwrap_or(0);
            Msg::FrontPageRequest {
                query,
                after: Some(page.to_string()),
                before: None,
                first: None,
                last: None,
            }
        }
        p => {
            if !p.is_empty() {
                info!("Unhandled path '{p:?}'");
            }
            Msg::FrontPageRequest {
                query: "".to_string(),
                after: None,
                before: None,
                first: None,
                last: None,
            }
        }
    };
    messages.push(msg);
    Msg::MultiMsg(messages)
}
// `update` describes how to handle each `Msg`.
pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders<Msg>) {
debug!("update({})", msg);
match msg {
Msg::Noop => {}
Msg::RefreshStart => {
model.refreshing_state = RefreshingState::Loading;
orders.perform_cmd(async move {
Msg::RefreshDone(
send_graphql::<_, graphql::refresh_mutation::ResponseData>(
graphql::RefreshMutation::build_query(
graphql::refresh_mutation::Variables {},
),
)
.await
.err(),
)
});
}
Msg::RefreshDone(err) => {
model.refreshing_state = if let Some(err) = err {
RefreshingState::Error(format!("{:?}", err))
} else {
RefreshingState::None
};
orders.perform_cmd(async move { Msg::Refresh });
}
Msg::Refresh => {
orders.request_url(Url::current());
}
Msg::Reload => {
window()
.location()
.reload()
.expect("failed to reload window");
}
Msg::OnUrlChanged(new_url) => {
orders.send_msg(on_url_changed(&model.last_url, new_url.0.clone()));
model.last_url = new_url.0;
}
Msg::NextPage => {
match &model.context {
Context::SearchResult { query, pager, .. } => {
let query = query.to_string();
let after = pager.end_cursor.clone();
orders.perform_cmd(async move {
Msg::FrontPageRequest {
query,
after,
before: None,
first: Some(SEARCH_RESULTS_PER_PAGE as i64),
last: None,
}
});
}
Context::ThreadResult { .. } => (), // do nothing (yet?)
Context::None => (), // do nothing (yet?)
};
}
Msg::PreviousPage => {
match &model.context {
Context::SearchResult { query, pager, .. } => {
let query = query.to_string();
let before = pager.start_cursor.clone();
orders.perform_cmd(async move {
Msg::FrontPageRequest {
query,
after: None,
before,
first: None,
last: Some(SEARCH_RESULTS_PER_PAGE as i64),
}
});
}
Context::ThreadResult { .. } => (), // do nothing (yet?)
Context::None => (), // do nothing (yet?)
};
}
Msg::GoToSearchResults => {
orders.send_msg(Msg::SearchQuery(model.query.clone()));
}
Msg::UpdateQuery(query) => model.query = query,
Msg::SearchQuery(query) => {
orders.request_url(urls::search(&query, 0));
}
Msg::SetUnread(query, unread) => {
orders.skip().perform_cmd(async move {
let res: Result<
graphql_client::Response<graphql::mark_read_mutation::ResponseData>,
gloo_net::Error,
> = send_graphql(graphql::MarkReadMutation::build_query(
graphql::mark_read_mutation::Variables {
query: query.clone(),
unread,
},
))
.await;
if let Err(e) = res {
error!("Failed to set read for {query} to {unread}: {e}");
}
Msg::Refresh
});
}
Msg::AddTag(query, tag) => {
orders.skip().perform_cmd(async move {
let res: Result<
graphql_client::Response<graphql::add_tag_mutation::ResponseData>,
gloo_net::Error,
> = send_graphql(graphql::AddTagMutation::build_query(
graphql::add_tag_mutation::Variables {
query: query.clone(),
tag: tag.clone(),
},
))
.await;
if let Err(e) = res {
error!("Failed to add tag {tag} to {query}: {e}");
}
Msg::GoToSearchResults
});
}
Msg::RemoveTag(query, tag) => {
orders.skip().perform_cmd(async move {
let res: Result<
graphql_client::Response<graphql::remove_tag_mutation::ResponseData>,
gloo_net::Error,
> = send_graphql(graphql::RemoveTagMutation::build_query(
graphql::remove_tag_mutation::Variables {
query: query.clone(),
tag: tag.clone(),
},
))
.await;
if let Err(e) = res {
error!("Failed to remove tag {tag} to {query}: {e}");
}
// TODO: reconsider this behavior
Msg::GoToSearchResults
});
}
Msg::FrontPageRequest {
query,
after,
before,
first,
last,
} => {
let (after, before, first, last) = match (after.as_ref(), before.as_ref(), first, last)
{
// If no pagination set, set reasonable defaults
(None, None, None, None) => {
(None, None, Some(SEARCH_RESULTS_PER_PAGE as i64), None)
}
_ => (after, before, first, last),
};
model.query = query.clone();
orders.skip().perform_cmd(async move {
Msg::FrontPageResult(
send_graphql(graphql::FrontPageQuery::build_query(
graphql::front_page_query::Variables {
query,
after,
before,
first,
last,
},
))
.await,
)
});
}
Msg::FrontPageResult(Err(e)) => error!("error FrontPageResult: {e:?}"),
Msg::FrontPageResult(Ok(graphql_client::Response {
data: None,
errors: None,
..
})) => {
error!("FrontPageResult no data or errors, should not happen");
}
Msg::FrontPageResult(Ok(graphql_client::Response {
data: None,
errors: Some(e),
..
})) => {
error!("FrontPageResult error: {e:?}");
}
Msg::FrontPageResult(Ok(graphql_client::Response {
data: Some(data), ..
})) => {
model.tags = Some(
data.tags
.into_iter()
.map(|t| Tag {
name: t.name,
bg_color: t.bg_color,
unread: t.unread,
})
.collect(),
);
let selected_threads = 'context: {
if let Context::SearchResult {
results,
selected_threads,
..
} = &model.context
{
let old: HashSet<_> = results.iter().map(|n| &n.thread).collect();
let new: HashSet<_> = data.search.nodes.iter().map(|n| &n.thread).collect();
if old == new {
break 'context selected_threads.clone();
}
}
HashSet::new()
};
model.context = Context::SearchResult {
query: model.query.clone(),
results: data.search.nodes,
count: data.count as usize,
pager: data.search.page_info,
selected_threads,
};
orders.send_msg(Msg::UpdateServerVersion(data.version));
// Generate signal so progress bar is reset
orders.send_msg(Msg::WindowScrolled);
}
Msg::ShowThreadRequest { thread_id } => {
orders.skip().perform_cmd(async move {
Msg::ShowThreadResult(
send_graphql(graphql::ShowThreadQuery::build_query(
graphql::show_thread_query::Variables { thread_id },
))
.await,
)
});
}
Msg::ShowThreadResult(Ok(graphql_client::Response {
data: Some(data), ..
})) => {
model.tags = Some(
data.tags
.into_iter()
.map(|t| Tag {
name: t.name,
bg_color: t.bg_color,
unread: t.unread,
})
.collect(),
);
match &data.thread {
graphql::show_thread_query::ShowThreadQueryThread::EmailThread(
ShowThreadQueryThreadOnEmailThread { messages, .. },
) => {
let mut open_messages: HashSet<_> = messages
.iter()
.filter(|msg| msg.tags.iter().any(|t| t == "unread"))
.map(|msg| msg.id.clone())
.collect();
if open_messages.is_empty() {
open_messages = messages.iter().map(|msg| msg.id.clone()).collect();
}
model.context = Context::ThreadResult {
thread: data.thread,
open_messages,
};
}
graphql::show_thread_query::ShowThreadQueryThread::NewsPost(..) => {
model.context = Context::ThreadResult {
thread: data.thread,
open_messages: HashSet::new(),
};
}
}
orders.send_msg(Msg::UpdateServerVersion(data.version));
// Generate signal so progress bar is reset
orders.send_msg(Msg::WindowScrolled);
}
Msg::ShowThreadResult(bad) => {
error!("show_thread_query error: {bad:#?}");
}
Msg::CatchupRequest { query } => {
orders.perform_cmd(async move {
Msg::CatchupResult(
send_graphql::<_, graphql::catchup_query::ResponseData>(
graphql::CatchupQuery::build_query(graphql::catchup_query::Variables {
query,
}),
)
.await,
)
});
}
Msg::CatchupResult(Ok(graphql_client::Response {
data: Some(data), ..
})) => {
let items = data.catchup;
if items.is_empty() {
orders.send_msg(Msg::GoToSearchResults);
model.catchup = None;
} else {
orders.request_url(urls::thread(&items[0]));
model.catchup = Some(Catchup {
items: items
.into_iter()
.map(|id| CatchupItem { id, seen: false })
.collect(),
});
}
}
Msg::CatchupResult(bad) => {
error!("catchup_query error: {bad:#?}");
}
Msg::SelectionSetNone => {
if let Context::SearchResult {
selected_threads, ..
} = &mut model.context
{
*selected_threads = HashSet::new();
}
}
Msg::SelectionSetAll => {
if let Context::SearchResult {
results,
selected_threads,
..
} = &mut model.context
{
*selected_threads = results.iter().map(|node| node.thread.clone()).collect();
}
}
Msg::SelectionAddTag(tag) => {
if let Context::SearchResult {
selected_threads, ..
} = &mut model.context
{
let threads = selected_threads
.iter()
.map(|tid| tid.to_string())
.collect::<Vec<_>>()
.join(" ");
orders
.skip()
.perform_cmd(async move { Msg::AddTag(threads, tag) });
}
}
Msg::SelectionRemoveTag(tag) => {
if let Context::SearchResult {
selected_threads, ..
} = &mut model.context
{
let threads = selected_threads
.iter()
.map(|tid| tid.to_string())
.collect::<Vec<_>>()
.join(" ");
orders
.skip()
.perform_cmd(async move { Msg::RemoveTag(threads, tag) });
}
}
Msg::SelectionMarkAsRead => {
if let Context::SearchResult {
selected_threads, ..
} = &mut model.context
{
let threads = selected_threads
.iter()
.map(|tid| tid.to_string())
.collect::<Vec<_>>()
.join(" ");
orders
.skip()
.perform_cmd(async move { Msg::SetUnread(threads, false) });
}
}
Msg::SelectionMarkAsUnread => {
if let Context::SearchResult {
selected_threads, ..
} = &mut model.context
{
let threads = selected_threads
.iter()
.map(|tid| tid.to_string())
.collect::<Vec<_>>()
.join(" ");
orders
.skip()
.perform_cmd(async move { Msg::SetUnread(threads, true) });
}
}
Msg::SelectionAddThread(tid) => {
if let Context::SearchResult {
selected_threads, ..
} = &mut model.context
{
selected_threads.insert(tid);
}
}
Msg::SelectionRemoveThread(tid) => {
if let Context::SearchResult {
selected_threads, ..
} = &mut model.context
{
selected_threads.remove(&tid);
}
}
Msg::MessageCollapse(id) => {
if let Context::ThreadResult { open_messages, .. } = &mut model.context {
open_messages.remove(&id);
}
}
Msg::MessageExpand(id) => {
if let Context::ThreadResult { open_messages, .. } = &mut model.context {
open_messages.insert(id);
}
}
Msg::MultiMsg(msgs) => msgs.into_iter().for_each(|msg| update(msg, model, orders)),
Msg::CopyToClipboard(text) => {
let clipboard = seed::window().navigator().clipboard();
orders.perform_cmd(async move {
wasm_bindgen_futures::JsFuture::from(clipboard.write_text(&text))
.await
.expect("failed to copy to clipboard");
});
}
Msg::ScrollToTop => {
info!("scrolling to the top");
web_sys::window().unwrap().scroll_to_with_x_and_y(0., 0.);
}
Msg::WindowScrolled => {
if let Some(el) = model.content_el.get() {
let ih = window()
.inner_height()
.expect("window height")
.unchecked_into::<js_sys::Number>()
.value_of();
let r = el.get_bounding_client_rect();
if r.height() < ih {
// The whole content fits in the window, no scrollbar
orders.send_msg(Msg::SetProgress(0.));
return;
}
let end: f64 = r.height() - ih;
if end < 0. {
orders.send_msg(Msg::SetProgress(0.));
return;
}
// Flip Y, normally it's 0-point when the top of the content hits the top of the
// screen and goes negative from there.
let y = -r.y();
let ratio: f64 = (y / end).max(0.);
debug!(
"WindowScrolled ih {ih} end {end} ratio {ratio:.02} {}x{} @ {},{}",
r.width(),
r.height(),
r.x(),
r.y()
);
orders.send_msg(Msg::SetProgress(ratio));
} else {
orders.send_msg(Msg::SetProgress(0.));
}
}
Msg::SetProgress(ratio) => {
model.read_completion_ratio = ratio;
}
Msg::UpdateServerVersion(version) => {
if version != model.versions.client {
warn!(
"Server ({}) and client ({}) version mismatch, reloading",
version, model.versions.client
);
orders.send_msg(Msg::Reload);
}
model.versions.server = Some(version);
}
Msg::CatchupStart => {
let query = if model.query.contains("is:unread") {
model.query.to_string()
} else {
format!("{} is:unread", model.query)
};
info!("starting catchup mode w/ {}", query);
orders.send_msg(Msg::ScrollToTop);
orders.send_msg(Msg::CatchupRequest { query });
}
Msg::CatchupKeepUnread => {
if let Some(thread_id) = current_thread_id(&model.context) {
orders.send_msg(Msg::SetUnread(thread_id, true));
};
orders.send_msg(Msg::CatchupNext);
}
Msg::CatchupMarkAsRead => {
if let Some(thread_id) = current_thread_id(&model.context) {
orders.send_msg(Msg::SetUnread(thread_id, false));
};
orders.send_msg(Msg::CatchupNext);
}
Msg::CatchupNext => {
orders.send_msg(Msg::ScrollToTop);
let Some(catchup) = &mut model.catchup else {
orders.send_msg(Msg::GoToSearchResults);
return;
};
let Some(idx) = catchup.items.iter().position(|i| !i.seen) else {
// All items have been seen
orders.send_msg(Msg::CatchupExit);
orders.send_msg(Msg::GoToSearchResults);
return;
};
catchup.items[idx].seen = true;
if idx < catchup.items.len() - 1 {
// Reached last item
orders.request_url(urls::thread(&catchup.items[idx + 1].id));
return;
} else {
orders.send_msg(Msg::CatchupExit);
orders.send_msg(Msg::GoToSearchResults);
return;
};
}
Msg::CatchupExit => {
orders.send_msg(Msg::ScrollToTop);
model.catchup = None;
}
}
}
/// Returns the thread id of the thread currently on screen, or `None` when
/// the context is not a thread view.
fn current_thread_id(context: &Context) -> Option<String> {
    let Context::ThreadResult { thread, .. } = context else {
        return None;
    };
    // Both thread flavors carry a `thread_id`; an or-pattern extracts it
    // uniformly.
    match thread {
        ShowThreadQueryThread::EmailThread(ShowThreadQueryThreadOnEmailThread {
            thread_id, ..
        })
        | ShowThreadQueryThread::NewsPost(ShowThreadQueryThreadOnNewsPost {
            thread_id, ..
        }) => Some(thread_id.clone()),
    }
}
// `Model` describes our app state.
pub struct Model {
    /// Current contents of the search input box.
    pub query: String,
    /// What the main view is currently displaying.
    pub context: Context,
    /// Whether a server-side refresh is in flight or has failed.
    pub refreshing_state: RefreshingState,
    /// Sidebar tags; `None` until the first query result arrives.
    pub tags: Option<Vec<Tag>>,
    /// Fraction of the content scrolled past; drives the progress bar.
    pub read_completion_ratio: f64,
    /// Reference to the main content element, used to measure scroll progress.
    pub content_el: ElRef<HtmlElement>,
    /// Client build version plus the last version reported by the server.
    pub versions: Version,
    /// Active catchup session, if any.
    pub catchup: Option<Catchup>,
    /// Previously handled URL, used to detect real URL changes.
    pub last_url: Url,
}
/// Tracks client and server build versions so mismatches can force a reload.
#[derive(Debug)]
pub struct Version {
    /// Version string baked into this client build.
    pub client: String,
    /// Most recent version reported by the server; `None` before the first response.
    pub server: Option<String>,
}
/// Errors intended to be surfaced in the UI.
#[derive(Error, Debug)]
#[allow(dead_code)] // Remove once the UI is showing errors
pub enum UIError {
    #[error("No error, this should never be presented to user")]
    NoError,
    /// Network-level failure fetching the named resource.
    #[error("failed to fetch {0}: {1:?}")]
    FetchError(&'static str, gloo_net::Error),
    /// The GraphQL response carried errors instead of data.
    #[error("{0} error decoding: {1:?}")]
    FetchDecodeError(&'static str, Vec<graphql_client::Error>),
    /// The GraphQL response contained neither data nor errors.
    #[error("no data or errors for {0}")]
    NoData(&'static str),
}
/// What the main view is currently displaying.
pub enum Context {
    /// Nothing loaded yet.
    None,
    /// A page of search results.
    SearchResult {
        /// The query that produced these results.
        query: String,
        /// The threads on this page.
        results: Vec<FrontPageQuerySearchNodes>,
        /// Total number of matching threads across all pages.
        count: usize,
        /// Cursor info used for next/previous pagination.
        pager: FrontPageQuerySearchPageInfo,
        /// Thread ids currently selected for bulk operations.
        selected_threads: HashSet<String>,
    },
    /// A single thread, with the set of messages currently expanded.
    ThreadResult {
        thread: ShowThreadQueryThread,
        /// Ids of messages rendered expanded (rest are collapsed).
        open_messages: HashSet<String>,
    },
}
/// State for catchup mode: an ordered list of threads to walk through.
pub struct Catchup {
    pub items: Vec<CatchupItem>,
}
/// One thread in a catchup session.
pub struct CatchupItem {
    /// Thread id.
    pub id: String,
    /// Whether this thread has already been visited in the session.
    pub seen: bool,
}
/// A tag as shown in the sidebar.
pub struct Tag {
    pub name: String,
    /// Background color used when rendering the tag chip.
    pub bg_color: String,
    /// Number of unread threads carrying this tag.
    pub unread: i64,
}
/// Progress of the periodic server-side refresh.
#[derive(Debug, PartialEq)]
pub enum RefreshingState {
    /// Idle; no refresh in flight.
    None,
    /// A refresh request is currently outstanding.
    Loading,
    /// The last refresh failed; carries a debug-formatted error.
    Error(String),
}
// `Msg` describes the different events you can modify state with.
#[derive(strum_macros::Display)]
pub enum Msg {
    Noop,
    // Tell the client to refresh its state
    Refresh,
    // Tell the client to reload whole page from server
    Reload,
    // TODO: add GoToUrl
    /// Browser URL changed; routes to the matching view.
    OnUrlChanged(subs::UrlChanged),
    // Tell the server to update state
    RefreshStart,
    /// Server refresh finished; carries the error, if any.
    RefreshDone(Option<gloo_net::Error>),
    NextPage,
    PreviousPage,
    /// Navigate back to the search results for the current query.
    GoToSearchResults,
    /// Search box contents changed.
    UpdateQuery(String),
    /// Execute a search for the given query.
    SearchQuery(String),
    /// Set the unread flag (second field) on everything matching the query (first field).
    SetUnread(String, bool),
    /// Add tag (second field) to everything matching the query (first field).
    AddTag(String, String),
    /// Remove tag (second field) from everything matching the query (first field).
    RemoveTag(String, String),
    /// Fetch a page of search results; cursor fields follow GraphQL
    /// connection-style pagination.
    FrontPageRequest {
        query: String,
        after: Option<String>,
        before: Option<String>,
        first: Option<i64>,
        last: Option<i64>,
    },
    FrontPageResult(
        Result<graphql_client::Response<graphql::front_page_query::ResponseData>, gloo_net::Error>,
    ),
    /// Fetch a single thread by id.
    ShowThreadRequest {
        thread_id: String,
    },
    ShowThreadResult(
        Result<graphql_client::Response<graphql::show_thread_query::ResponseData>, gloo_net::Error>,
    ),
    /// Fetch the list of thread ids to walk through in catchup mode.
    CatchupRequest {
        query: String,
    },
    CatchupResult(
        Result<graphql_client::Response<graphql::catchup_query::ResponseData>, gloo_net::Error>,
    ),
    // Bulk-selection operations on the current search results.
    SelectionSetNone,
    SelectionSetAll,
    SelectionAddTag(String),
    #[allow(dead_code)]
    SelectionRemoveTag(String),
    SelectionMarkAsRead,
    SelectionMarkAsUnread,
    SelectionAddThread(String),
    SelectionRemoveThread(String),
    /// Collapse/expand a single message in the thread view.
    MessageCollapse(String),
    MessageExpand(String),
    /// Process several messages in order.
    MultiMsg(Vec<Msg>),
    CopyToClipboard(String),
    ScrollToTop,
    /// Window scrolled; recompute the read-progress ratio.
    WindowScrolled,
    /// Set the read-progress ratio directly.
    SetProgress(f64),
    /// Server reported its version; reloads the page on mismatch.
    UpdateServerVersion(String),
    // Catchup-mode flow control.
    CatchupStart,
    CatchupKeepUnread,
    CatchupMarkAsRead,
    CatchupNext,
    CatchupExit,
}