519 lines
14 KiB
Rust
519 lines
14 KiB
Rust
pub mod error;
|
|
pub mod graphql;
|
|
pub mod newsreader;
|
|
pub mod nm;
|
|
|
|
use std::{convert::Infallible, str::FromStr};
|
|
|
|
use css_inline::{CSSInliner, InlineError, InlineOptions};
|
|
use linkify::{LinkFinder, LinkKind};
|
|
use log::{error, info};
|
|
use lol_html::{element, errors::RewritingError, rewrite_str, text, RewriteStrSettings};
|
|
use maplit::{hashmap, hashset};
|
|
use thiserror::Error;
|
|
use url::Url;
|
|
|
|
use crate::newsreader::{
|
|
extract_thread_id, is_newsreader_search, is_newsreader_thread, make_news_tag,
|
|
};
|
|
// Sentinel tag value guaranteed not to match any real site in the database.
// `'static` is implied on `const` string references, so it is omitted
// (clippy: redundant_static_lifetimes).
const NON_EXISTENT_SITE_NAME: &str = "NO-SUCH-SITE";
|
|
|
|
// TODO: figure out how to use Cow
/// A single HTML-processing pass. Implementations rewrite an input HTML
/// string into a new string (sanitizing, entity-decoding, tag-stripping,
/// CSS inlining, ...).
trait Transformer {
    /// Cheap pre-check: returns `true` when [`Transformer::transform`]
    /// should be applied to `_html`. Defaults to always running;
    /// implementations override it to skip work on input that cannot be
    /// affected (e.g. text with no markup).
    fn should_run(&self, _html: &str) -> bool {
        true
    }
    // TODO: should html be something like `html_escape` uses:
    // <S: ?Sized + AsRef<str>>(text: &S) -> Cow<str>
    /// Produces the transformed HTML, or a [`TransformError`] from the
    /// underlying rewriting/inlining machinery.
    fn transform(&self, html: &str) -> Result<String, TransformError>;
}
|
|
|
|
// TODO: how would we make this more generic to allow good implementations of Transformer outside
// of this module?
/// Errors produced by [`Transformer`] implementations.
#[derive(Error, Debug)]
pub enum TransformError {
    /// Error surfaced by the `lol_html` streaming rewriter.
    #[error("lol-html rewrite error")]
    RewritingError(#[from] RewritingError),
    /// Error surfaced by the `css-inline` CSS inliner.
    #[error("css inline error")]
    InlineError(#[from] InlineError),
}
|
|
|
|
/// [`Transformer`] wrapper around the free function [`sanitize_html`].
struct SanitizeHtml<'a> {
    // Prefix substituted for "cid:" in <img src> URLs so multipart/mixed
    // image references resolve to fetchable URLs.
    cid_prefix: &'a str,
    // When present, relative hrefs/srcs are resolved against this base URL.
    base_url: &'a Option<Url>,
}
|
|
|
|
impl<'a> Transformer for SanitizeHtml<'a> {
|
|
fn transform(&self, html: &str) -> Result<String, TransformError> {
|
|
Ok(sanitize_html(html, self.cid_prefix, self.base_url)?)
|
|
}
|
|
}
|
|
|
|
/// [`Transformer`] that decodes HTML entities ("&amp;amp;" -> "&").
/// NOTE(review): despite the name, its `transform` *decodes* entities
/// rather than escaping; consider renaming once all callers are visible.
struct EscapeHtml;
|
|
|
impl Transformer for EscapeHtml {
    /// Only run when the input contains '&' — the only character that can
    /// begin an HTML entity, so entity-free input is returned untouched.
    fn should_run(&self, html: &str) -> bool {
        html.contains("&")
    }
    /// Decodes HTML entities into their literal characters.
    /// NOTE(review): `decode_html_entities` *unescapes*; the type name
    /// `EscapeHtml` is misleading, but renaming would touch callers
    /// outside this view.
    fn transform(&self, html: &str) -> Result<String, TransformError> {
        Ok(html_escape::decode_html_entities(html).to_string())
    }
}
|
|
|
|
/// [`Transformer`] that removes all markup, keeping only text content.
struct StripHtml;
|
|
|
impl Transformer for StripHtml {
    /// Heuristic: only run when the input looks like it contains markup.
    fn should_run(&self, html: &str) -> bool {
        // Lame test
        html.contains("<")
    }
    /// Collects the text chunks of every element and returns them
    /// concatenated; the rewritten HTML itself is discarded.
    fn transform(&self, html: &str) -> Result<String, TransformError> {
        let mut text = String::new();
        // `text!("*", ...)` fires for every text chunk under any element;
        // appending each chunk accumulates the document's plain text.
        let element_content_handlers = vec![text!("*", |t| {
            text += t.as_str();
            Ok(())
        })];
        // The rewriter's output string is intentionally ignored (`let _`);
        // only the side effect of the text handler matters. `?` still
        // propagates rewriting errors.
        let _ = rewrite_str(
            html,
            RewriteStrSettings {
                element_content_handlers,
                ..RewriteStrSettings::default()
            },
        )?;

        Ok(text)
    }
}
|
|
|
|
/// [`Transformer`] that inlines the app's bundled stylesheets (mvp.css plus
/// local overrides) into elements' `style` attributes.
struct InlineStyle;
|
|
|
impl Transformer for InlineStyle {
|
|
fn transform(&self, html: &str) -> Result<String, TransformError> {
|
|
let css = concat!(
|
|
"/* mvp.css */\n",
|
|
include_str!("mvp.css"),
|
|
"/* Xinu Specific overrides */\n",
|
|
include_str!("custom.css"),
|
|
);
|
|
let inline_opts = InlineOptions {
|
|
inline_style_tags: false,
|
|
keep_style_tags: false,
|
|
keep_link_tags: false,
|
|
base_url: None,
|
|
load_remote_stylesheets: false,
|
|
extra_css: Some(css.into()),
|
|
preallocate_node_capacity: 32,
|
|
..InlineOptions::default()
|
|
};
|
|
|
|
Ok(match CSSInliner::new(inline_opts).inline(&html) {
|
|
Ok(inlined_html) => inlined_html,
|
|
Err(err) => {
|
|
error!("failed to inline CSS: {err}");
|
|
html.to_string()
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
/// [`Transformer`] that appends a "View on site" link pointing at the
/// item's original URL (when one is known).
struct AddOutlink(Option<url::Url>);
|
|
|
impl Transformer for AddOutlink {
    /// Only run when an outlink exists and its scheme starts with "http"
    /// (covers both http and https).
    fn should_run(&self, _: &str) -> bool {
        self.0
            .as_ref()
            .map(|l| l.scheme().starts_with("http"))
            .unwrap_or(false)
    }
    /// Appends a footer div linking back to the original URL; the input is
    /// returned unchanged when no URL is configured.
    fn transform(&self, html: &str) -> Result<String, TransformError> {
        if let Some(url) = &self.0 {
            Ok(format!(
                r#"
{html}
<div><a href="{}">View on site</a></div>
"#,
                url
            ))
        } else {
            Ok(html.to_string())
        }
    }
}
|
|
pub fn linkify_html(text: &str) -> String {
|
|
let mut finder = LinkFinder::new();
|
|
let finder = finder.url_must_have_scheme(false).kinds(&[LinkKind::Url]);
|
|
let mut parts = Vec::new();
|
|
for span in finder.spans(text) {
|
|
// TODO(wathiede): use Cow<str>?
|
|
match span.kind() {
|
|
// Text as-is
|
|
None => parts.push(span.as_str().to_string()),
|
|
// Wrap in anchor tag
|
|
Some(LinkKind::Url) => {
|
|
let text = span.as_str();
|
|
let schema = if text.starts_with("http") {
|
|
""
|
|
} else {
|
|
"http://"
|
|
};
|
|
let a = format!(r#"<a href="{schema}{0}">{0}</a>"#, text);
|
|
parts.push(a);
|
|
}
|
|
_ => todo!("unhandled kind: {:?}", span.kind().unwrap()),
|
|
}
|
|
}
|
|
parts.join("")
|
|
}
|
|
|
|
// html contains the content to be cleaned, and cid_prefix is used to resolve mixed part image
|
|
// referrences
|
|
pub fn sanitize_html(
|
|
html: &str,
|
|
cid_prefix: &str,
|
|
base_url: &Option<Url>,
|
|
) -> Result<String, TransformError> {
|
|
let mut element_content_handlers = vec![
|
|
// Open links in new tab
|
|
element!("a[href]", |el| {
|
|
el.set_attribute("target", "_blank").unwrap();
|
|
|
|
Ok(())
|
|
}),
|
|
// Replace mixed part CID images with URL
|
|
element!("img[src]", |el| {
|
|
let src = el
|
|
.get_attribute("src")
|
|
.expect("src was required")
|
|
.replace("cid:", cid_prefix);
|
|
|
|
el.set_attribute("src", &src)?;
|
|
|
|
Ok(())
|
|
}),
|
|
// Only secure image URLs
|
|
element!("img[src]", |el| {
|
|
let src = el
|
|
.get_attribute("src")
|
|
.expect("src was required")
|
|
.replace("http:", "https:");
|
|
|
|
el.set_attribute("src", &src)?;
|
|
|
|
Ok(())
|
|
}),
|
|
];
|
|
if let Some(base_url) = base_url {
|
|
element_content_handlers.extend(vec![
|
|
// Make links with relative URLs absolute
|
|
element!("a[href]", |el| {
|
|
if let Some(Ok(href)) = el.get_attribute("href").map(|href| base_url.join(&href)) {
|
|
el.set_attribute("href", &href.as_str()).unwrap();
|
|
}
|
|
|
|
Ok(())
|
|
}),
|
|
// Make images with relative srcs absolute
|
|
element!("img[src]", |el| {
|
|
if let Some(Ok(src)) = el.get_attribute("src").map(|src| base_url.join(&src)) {
|
|
el.set_attribute("src", &src.as_str()).unwrap();
|
|
}
|
|
|
|
Ok(())
|
|
}),
|
|
]);
|
|
}
|
|
|
|
let inline_opts = InlineOptions {
|
|
inline_style_tags: true,
|
|
keep_style_tags: false,
|
|
keep_link_tags: false,
|
|
base_url: None,
|
|
load_remote_stylesheets: false,
|
|
extra_css: None,
|
|
preallocate_node_capacity: 32,
|
|
..InlineOptions::default()
|
|
};
|
|
|
|
let inlined_html = match CSSInliner::new(inline_opts).inline(&html) {
|
|
Ok(inlined_html) => inlined_html,
|
|
Err(err) => {
|
|
error!("failed to inline CSS: {err}");
|
|
html.to_string()
|
|
}
|
|
};
|
|
// Default's don't allow style, but we want to preserve that.
|
|
// TODO: remove 'class' if rendering mails moves to a two phase process where abstract message
|
|
// types are collected, santized, and then grouped together as one big HTML doc
|
|
let attributes = hashset![
|
|
"align", "bgcolor", "class", "color", "height", "lang", "title", "width", "style",
|
|
];
|
|
|
|
let tags = hashset![
|
|
"a",
|
|
"abbr",
|
|
"acronym",
|
|
"area",
|
|
"article",
|
|
"aside",
|
|
"b",
|
|
"bdi",
|
|
"bdo",
|
|
"blockquote",
|
|
"br",
|
|
"caption",
|
|
"center",
|
|
"cite",
|
|
"code",
|
|
"col",
|
|
"colgroup",
|
|
"data",
|
|
"dd",
|
|
"del",
|
|
"details",
|
|
"dfn",
|
|
"div",
|
|
"dl",
|
|
"dt",
|
|
"em",
|
|
"figcaption",
|
|
"figure",
|
|
"footer",
|
|
"h1",
|
|
"h2",
|
|
"h3",
|
|
"h4",
|
|
"h5",
|
|
"h6",
|
|
"header",
|
|
"hgroup",
|
|
"hr",
|
|
"i",
|
|
"img",
|
|
"ins",
|
|
"kbd",
|
|
"kbd",
|
|
"li",
|
|
"map",
|
|
"mark",
|
|
"nav",
|
|
"ol",
|
|
"p",
|
|
"pre",
|
|
"q",
|
|
"rp",
|
|
"rt",
|
|
"rtc",
|
|
"ruby",
|
|
"s",
|
|
"samp",
|
|
"small",
|
|
"span",
|
|
"strike",
|
|
"strong",
|
|
"sub",
|
|
"summary",
|
|
"sup",
|
|
"table",
|
|
"tbody",
|
|
"td",
|
|
"th",
|
|
"thead",
|
|
"time",
|
|
"title", // wathiede
|
|
"tr",
|
|
"tt",
|
|
"u",
|
|
"ul",
|
|
"var",
|
|
"wbr",
|
|
];
|
|
let tag_attributes = hashmap![
|
|
"a" => hashset![
|
|
"href", "hreflang", "target",
|
|
],
|
|
"bdo" => hashset![
|
|
"dir"
|
|
],
|
|
"blockquote" => hashset![
|
|
"cite"
|
|
],
|
|
"col" => hashset![
|
|
"align", "char", "charoff", "span"
|
|
],
|
|
"colgroup" => hashset![
|
|
"align", "char", "charoff", "span"
|
|
],
|
|
"del" => hashset![
|
|
"cite", "datetime"
|
|
],
|
|
"hr" => hashset![
|
|
"align", "size", "width"
|
|
],
|
|
"img" => hashset![
|
|
"align", "alt", "height", "src", "width"
|
|
],
|
|
"ins" => hashset![
|
|
"cite", "datetime"
|
|
],
|
|
"ol" => hashset![
|
|
"start"
|
|
],
|
|
"q" => hashset![
|
|
"cite"
|
|
],
|
|
"table" => hashset![
|
|
"align", "border", "cellpadding", "cellspacing", "char", "charoff", "summary",
|
|
],
|
|
"tbody" => hashset![
|
|
"align", "char", "charoff"
|
|
],
|
|
"td" => hashset![
|
|
"align", "char", "charoff", "colspan", "headers", "rowspan"
|
|
],
|
|
"tfoot" => hashset![
|
|
"align", "char", "charoff"
|
|
],
|
|
"th" => hashset![
|
|
"align", "char", "charoff", "colspan", "headers", "rowspan", "scope"
|
|
],
|
|
"thead" => hashset![
|
|
"align", "char", "charoff"
|
|
],
|
|
"tr" => hashset![
|
|
"align", "char", "charoff"
|
|
],
|
|
];
|
|
|
|
let rewritten_html = rewrite_str(
|
|
&inlined_html,
|
|
RewriteStrSettings {
|
|
element_content_handlers,
|
|
..RewriteStrSettings::default()
|
|
},
|
|
)?;
|
|
let clean_html = ammonia::Builder::default()
|
|
.tags(tags)
|
|
.tag_attributes(tag_attributes)
|
|
.generic_attributes(attributes)
|
|
.clean(&rewritten_html)
|
|
.to_string();
|
|
|
|
Ok(clean_html)
|
|
}
|
|
|
|
// Translates Relay-style cursor paging arguments (after/before/first/last)
// into an (offset, limit) pair for a database query. Combinations that make
// no sense panic with a descriptive message.
fn compute_offset_limit(
    after: Option<i32>,
    before: Option<i32>,
    first: Option<i32>,
    last: Option<i32>,
) -> (i32, i32) {
    const DEFAULT_PAGE_SIZE: i32 = 100;
    match (after, before, first, last) {
        // Reject nonsensical combinations up front; every pattern below is
        // disjoint from these, so ordering does not change which arm fires.
        (Some(_), Some(_), _, _) => {
            panic!("specifying after and before doesn't make sense")
        }
        (None, Some(_), Some(_), _) => {
            panic!("specifying before and first doesn't make sense")
        }
        (None, None, Some(_), Some(_)) => {
            panic!("specifying first and last doesn't make sense")
        }
        (None, None, None, Some(_)) => {
            panic!("specifying last and no before doesn't make sense")
        }
        (Some(_), None, Some(_), Some(_)) => {
            panic!("specifying after, first and last doesn't make sense")
        }
        (Some(_), None, None, Some(_)) => {
            panic!("specifying after and last doesn't make sense")
        }
        // Forward pagination: start right after the cursor.
        (Some(cursor), None, page, None) => (cursor + 1, page.unwrap_or(DEFAULT_PAGE_SIZE)),
        // Backward pagination: end just before the cursor, clamped at 0.
        (None, Some(cursor), None, tail) => {
            let limit = tail.unwrap_or(DEFAULT_PAGE_SIZE);
            (0.max(cursor - limit), limit)
        }
        // No cursors: first page from the start.
        (None, None, page, None) => (0, page.unwrap_or(DEFAULT_PAGE_SIZE)),
    }
}
|
|
|
|
/// Parsed representation of a user search query, split into
/// letterbox-specific filters and the free-text remainder.
#[derive(Debug)]
pub struct Query {
    // True when the query contained "is:unread".
    pub unread_only: bool,
    // Value of a "tag:<value>" term, if any.
    pub tag: Option<String>,
    // Thread id extracted from a newsreader thread term, if any.
    pub uid: Option<String>,
    // Terms not recognized as letterbox filters, kept verbatim.
    pub remainder: Vec<String>,
    // Whether the notmuch (mail) corpus should be searched.
    pub is_notmuch: bool,
    // Whether the newsreader corpus should be searched.
    pub is_newsreader: bool,
}
|
|
|
|
impl Query {
|
|
// Converts the internal state of Query to something suitable for notmuch queries. Removes and
|
|
// letterbox specific '<key>:<value' tags
|
|
fn to_notmuch(&self) -> String {
|
|
let mut parts = Vec::new();
|
|
if !self.is_notmuch {
|
|
return String::new();
|
|
}
|
|
|
|
if self.unread_only {
|
|
parts.push("is:unread".to_string());
|
|
}
|
|
if let Some(site) = &self.tag {
|
|
parts.push(format!("tag:{site}"));
|
|
}
|
|
if let Some(uid) = &self.uid {
|
|
parts.push(uid.clone());
|
|
}
|
|
parts.extend(self.remainder.clone());
|
|
parts.join(" ")
|
|
}
|
|
}
|
|
|
|
impl FromStr for Query {
|
|
type Err = Infallible;
|
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
let mut unread_only = false;
|
|
let mut tag = None;
|
|
let mut uid = None;
|
|
let mut remainder = Vec::new();
|
|
let site_prefix = make_news_tag("");
|
|
let mut is_notmuch = false;
|
|
let mut is_newsreader = false;
|
|
for word in s.split_whitespace() {
|
|
if word == "is:unread" {
|
|
unread_only = true
|
|
} else if word.starts_with("tag:") {
|
|
tag = Some(word["tag:".len()..].to_string())
|
|
/*
|
|
} else if word.starts_with("tag:") {
|
|
// Any tag that doesn't match site_prefix should explicitly set the site to something not in the
|
|
// database
|
|
site = Some(NON_EXISTENT_SITE_NAME.to_string());
|
|
*/
|
|
} else if is_newsreader_thread(word) {
|
|
uid = Some(extract_thread_id(word).to_string())
|
|
} else if word == "is:mail" || word == "is:email" || word == "is:notmuch" {
|
|
is_notmuch = true;
|
|
} else if word == "is:news" || word == "is:newsreader" {
|
|
is_newsreader = true;
|
|
} else {
|
|
remainder.push(word.to_string());
|
|
}
|
|
}
|
|
// If we don't see any explicit filters for a corpus, flip them all on
|
|
if !(is_notmuch || is_newsreader) {
|
|
is_newsreader = true;
|
|
is_notmuch = true;
|
|
}
|
|
Ok(Query {
|
|
unread_only,
|
|
tag,
|
|
uid,
|
|
remainder,
|
|
is_notmuch,
|
|
is_newsreader,
|
|
})
|
|
}
|
|
}
|