308 lines
9.8 KiB
Rust
308 lines
9.8 KiB
Rust
// Handler code generates a lot of unreachable-pattern warnings.
// NOTE(review): this comment previously blamed Rocket, but this server is
// built on axum — confirm which macro emits the warnings.
// TODO: figure out why
#![allow(unreachable_patterns)]
|
|
use std::{error::Error, net::SocketAddr, sync::Arc, time::Duration};
|
|
|
|
use async_graphql::{extensions, http::GraphiQLSource, Schema};
|
|
use async_graphql_axum::{GraphQL, GraphQLSubscription};
|
|
//allows to extract the IP of connecting user
|
|
use axum::extract::connect_info::ConnectInfo;
|
|
use axum::{
|
|
extract::{self, ws::WebSocketUpgrade, Query, State},
|
|
http::{header, StatusCode},
|
|
response::{self, IntoResponse, Response},
|
|
routing::{any, get, post},
|
|
Router,
|
|
};
|
|
use cacher::FilesystemCacher;
|
|
use clap::Parser;
|
|
use letterbox_notmuch::Notmuch;
|
|
#[cfg(feature = "tantivy")]
|
|
use letterbox_server::tantivy::TantivyConnection;
|
|
use letterbox_server::{
|
|
graphql::{compute_catchup_ids, Attachment, MutationRoot, QueryRoot, SubscriptionRoot},
|
|
nm::{attachment_bytes, cid_attachment_bytes},
|
|
ws::ConnectionTracker,
|
|
};
|
|
use letterbox_shared::WebsocketMessage;
|
|
use serde::Deserialize;
|
|
use sqlx::postgres::PgPool;
|
|
use tokio::{net::TcpListener, sync::Mutex};
|
|
use tower_http::trace::{DefaultMakeSpan, TraceLayer};
|
|
use tracing::info;
|
|
|
|
// Make our own error that wraps `letterbox_server::ServerError` so handlers
// can return `Result<_, AppError>` and have failures rendered as HTTP 500s.
struct AppError(letterbox_server::ServerError);
|
|
|
|
// Tell axum how to convert `AppError` into a response.
|
|
impl IntoResponse for AppError {
|
|
fn into_response(self) -> Response {
|
|
(
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
format!("Something went wrong: {}", self.0),
|
|
)
|
|
.into_response()
|
|
}
|
|
}
|
|
// This enables using `?` on functions that return `Result<_, letterbox_server::Error>` to turn them into
|
|
// `Result<_, AppError>`. That way you don't need to do that manually.
|
|
impl<E> From<E> for AppError
|
|
where
|
|
E: Into<letterbox_server::ServerError>,
|
|
{
|
|
fn from(err: E) -> Self {
|
|
Self(err.into())
|
|
}
|
|
}
|
|
|
|
fn inline_attachment_response(attachment: Attachment) -> impl IntoResponse {
|
|
info!("attachment filename {:?}", attachment.filename);
|
|
let mut hdr_map = headers::HeaderMap::new();
|
|
if let Some(filename) = attachment.filename {
|
|
hdr_map.insert(
|
|
header::CONTENT_DISPOSITION,
|
|
format!(r#"inline; filename="{}""#, filename)
|
|
.parse()
|
|
.unwrap(),
|
|
);
|
|
}
|
|
if let Some(ct) = attachment.content_type {
|
|
hdr_map.insert(header::CONTENT_TYPE, ct.parse().unwrap());
|
|
}
|
|
info!("hdr_map {hdr_map:?}");
|
|
(hdr_map, attachment.bytes).into_response()
|
|
}
|
|
|
|
fn download_attachment_response(attachment: Attachment) -> impl IntoResponse {
|
|
info!("attachment filename {:?}", attachment.filename);
|
|
let mut hdr_map = headers::HeaderMap::new();
|
|
if let Some(filename) = attachment.filename {
|
|
hdr_map.insert(
|
|
header::CONTENT_DISPOSITION,
|
|
format!(r#"attachment; filename="{}""#, filename)
|
|
.parse()
|
|
.unwrap(),
|
|
);
|
|
}
|
|
if let Some(ct) = attachment.content_type {
|
|
hdr_map.insert(header::CONTENT_TYPE, ct.parse().unwrap());
|
|
}
|
|
info!("hdr_map {hdr_map:?}");
|
|
(hdr_map, attachment.bytes).into_response()
|
|
}
|
|
|
|
#[axum_macros::debug_handler]
|
|
async fn view_attachment(
|
|
State(AppState { nm, .. }): State<AppState>,
|
|
extract::Path((id, idx, _)): extract::Path<(String, String, String)>,
|
|
) -> Result<impl IntoResponse, AppError> {
|
|
let mid = if id.starts_with("id:") {
|
|
id.to_string()
|
|
} else {
|
|
format!("id:{}", id)
|
|
};
|
|
info!("view attachment {mid} {idx}");
|
|
let idx: Vec<_> = idx
|
|
.split('.')
|
|
.map(|s| s.parse().expect("not a usize"))
|
|
.collect();
|
|
let attachment = attachment_bytes(&nm, &mid, &idx)?;
|
|
Ok(inline_attachment_response(attachment))
|
|
}
|
|
|
|
async fn download_attachment(
|
|
State(AppState { nm, .. }): State<AppState>,
|
|
extract::Path((id, idx, _)): extract::Path<(String, String, String)>,
|
|
) -> Result<impl IntoResponse, AppError> {
|
|
let mid = if id.starts_with("id:") {
|
|
id.to_string()
|
|
} else {
|
|
format!("id:{}", id)
|
|
};
|
|
info!("download attachment {mid} {idx}");
|
|
let idx: Vec<_> = idx
|
|
.split('.')
|
|
.map(|s| s.parse().expect("not a usize"))
|
|
.collect();
|
|
let attachment = attachment_bytes(&nm, &mid, &idx)?;
|
|
Ok(download_attachment_response(attachment))
|
|
}
|
|
|
|
async fn view_cid(
|
|
State(AppState { nm, .. }): State<AppState>,
|
|
extract::Path((id, cid)): extract::Path<(String, String)>,
|
|
) -> Result<impl IntoResponse, AppError> {
|
|
let mid = if id.starts_with("id:") {
|
|
id.to_string()
|
|
} else {
|
|
format!("id:{}", id)
|
|
};
|
|
info!("view cid attachment {mid} {cid}");
|
|
let attachment = cid_attachment_bytes(&nm, &mid, &cid)?;
|
|
Ok(inline_attachment_response(attachment))
|
|
}
|
|
|
|
// TODO make this work with gitea message ids like `wathiede/letterbox/pulls/91@git.z.xinu.tv`
|
|
async fn view_original(
|
|
State(AppState { nm, .. }): State<AppState>,
|
|
extract::Path(id): extract::Path<String>,
|
|
) -> Result<impl IntoResponse, AppError> {
|
|
info!("view_original {id}");
|
|
let bytes = nm.show_original(&id)?;
|
|
let s = String::from_utf8_lossy(&bytes).to_string();
|
|
Ok(s.into_response())
|
|
}
|
|
|
|
async fn graphiql() -> impl IntoResponse {
|
|
response::Html(
|
|
GraphiQLSource::build()
|
|
.endpoint("/api/graphql/")
|
|
.subscription_endpoint("/api/graphql/ws")
|
|
.finish(),
|
|
)
|
|
}
|
|
|
|
async fn start_ws(
|
|
ws: WebSocketUpgrade,
|
|
ConnectInfo(addr): ConnectInfo<SocketAddr>,
|
|
State(AppState {
|
|
connection_tracker, ..
|
|
}): State<AppState>,
|
|
) -> impl IntoResponse {
|
|
info!("intiating websocket connection for {addr}");
|
|
ws.on_upgrade(async move |socket| connection_tracker.lock().await.add_peer(socket, addr).await)
|
|
}
|
|
|
|
// Query parameters accepted by the /notification endpoints.
#[derive(Debug, Deserialize)]
struct NotificationParams {
    // Optional delay, in milliseconds, applied before broadcasting the
    // refresh message to connected websocket peers.
    delay_ms: Option<u64>,
}
|
|
|
|
async fn send_refresh_websocket_handler(
|
|
State(AppState {
|
|
connection_tracker, ..
|
|
}): State<AppState>,
|
|
params: Query<NotificationParams>,
|
|
) -> impl IntoResponse {
|
|
info!("send_refresh_websocket_handler params {params:?}");
|
|
if let Some(delay_ms) = params.delay_ms {
|
|
let delay = Duration::from_millis(delay_ms);
|
|
info!("sleeping {delay:?}");
|
|
tokio::time::sleep(delay).await;
|
|
}
|
|
connection_tracker
|
|
.lock()
|
|
.await
|
|
.send_message_all(WebsocketMessage::RefreshMessages)
|
|
.await;
|
|
"refresh triggered"
|
|
}
|
|
|
|
async fn watch_new(
|
|
nm: Notmuch,
|
|
pool: PgPool,
|
|
conn_tracker: Arc<Mutex<ConnectionTracker>>,
|
|
poll_time: Duration,
|
|
) -> Result<(), async_graphql::Error> {
|
|
let mut old_ids = Vec::new();
|
|
loop {
|
|
let ids = compute_catchup_ids(&nm, &pool, "is:unread").await?;
|
|
if old_ids != ids {
|
|
info!("old_ids: {old_ids:?}\n ids: {ids:?}");
|
|
conn_tracker
|
|
.lock()
|
|
.await
|
|
.send_message_all(WebsocketMessage::RefreshMessages)
|
|
.await
|
|
}
|
|
old_ids = ids;
|
|
tokio::time::sleep(poll_time).await;
|
|
}
|
|
}
|
|
|
|
// Shared application state cloned into every axum handler.
#[derive(Clone)]
struct AppState {
    // Handle to the notmuch mail database.
    nm: Notmuch,
    // Live websocket peers, used to broadcast refresh messages.
    connection_tracker: Arc<Mutex<ConnectionTracker>>,
}
|
|
|
|
// Command-line arguments. Plain `//` comments are used deliberately:
// clap turns `///` doc comments into --help text, which would change the
// program's CLI output.
#[derive(Parser)]
#[command(version, about, long_about = None)]
struct Cli {
    // Socket address to listen on (-a / --addr).
    #[arg(short, long, default_value = "0.0.0.0:9345")]
    addr: SocketAddr,
    // Positional: Postgres connection URL for the newsreader database.
    newsreader_database_url: String,
    // Positional: path to the tantivy index (used with the "tantivy" feature).
    newsreader_tantivy_db_path: String,
    // Positional: directory for the filesystem slurp cache (created at
    // startup if missing).
    slurp_cache_path: String,
}
|
|
|
|
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let cli = Cli::parse();
    // Tracing/logging guard must stay alive for the program's lifetime.
    let _guard = xtracing::init(env!("CARGO_BIN_NAME"))?;
    build_info::build_info!(fn bi);
    info!("Build Info: {}", letterbox_shared::build_version(bi));
    // Ensure the slurp cache directory exists before anything tries to use it.
    if !std::fs::exists(&cli.slurp_cache_path)? {
        info!("Creating slurp cache @ '{}'", &cli.slurp_cache_path);
        std::fs::create_dir_all(&cli.slurp_cache_path)?;
    }
    let pool = PgPool::connect(&cli.newsreader_database_url).await?;
    let nm = Notmuch::default();
    // Apply pending database migrations before serving any requests.
    sqlx::migrate!("./migrations").run(&pool).await?;
    // NOTE(review): `tantivy_conn` is constructed but never used below —
    // presumably it should be added to the schema via `.data(...)`; confirm.
    #[cfg(feature = "tantivy")]
    let tantivy_conn = TantivyConnection::new(&cli.newsreader_tantivy_db_path)?;

    let cacher = FilesystemCacher::new(&cli.slurp_cache_path)?;
    // GraphQL schema with notmuch, cache, and DB pool available to resolvers.
    let schema = Schema::build(QueryRoot, MutationRoot, SubscriptionRoot)
        .data(nm.clone())
        .data(cacher)
        .data(pool.clone());

    let schema = schema.extension(extensions::Logger).finish();

    // Shared websocket peer registry plus a background task that polls for
    // new unread messages every 60s and broadcasts refreshes.
    let connection_tracker = Arc::new(Mutex::new(ConnectionTracker::default()));
    let ct = Arc::clone(&connection_tracker);
    let poll_time = Duration::from_secs(60);
    let _h = tokio::spawn(watch_new(nm.clone(), pool, ct, poll_time));

    // API routes: attachment serving, raw message view, websocket, GraphQL.
    let api_routes = Router::new()
        .route(
            "/download/attachment/{id}/{idx}/{*rest}",
            get(download_attachment),
        )
        .route("/view/attachment/{id}/{idx}/{*rest}", get(view_attachment))
        .route("/original/{id}", get(view_original))
        .route("/cid/{id}/{cid}", get(view_cid))
        .route("/ws", any(start_ws))
        .route_service("/graphql/ws", GraphQLSubscription::new(schema.clone()))
        .route(
            "/graphql/",
            get(graphiql).post_service(GraphQL::new(schema.clone())),
        );

    // External triggers (mail/news delivery hooks) that push a refresh to
    // all connected clients.
    let notification_routes = Router::new()
        .route("/mail", post(send_refresh_websocket_handler))
        .route("/news", post(send_refresh_websocket_handler));
    let app = Router::new()
        .nest("/api", api_routes)
        .nest("/notification", notification_routes)
        .with_state(AppState {
            nm,
            connection_tracker,
        })
        .layer(
            TraceLayer::new_for_http()
                .make_span_with(DefaultMakeSpan::default().include_headers(true)),
        );

    let listener = TcpListener::bind(cli.addr).await.unwrap();
    tracing::info!("listening on {}", listener.local_addr().unwrap());
    // ConnectInfo service is required so handlers can extract the peer addr.
    axum::serve(
        listener,
        app.into_make_service_with_connect_info::<SocketAddr>(),
    )
    .await
    .unwrap();
    Ok(())
}
|