Implement pagination for newsreader
parent 01cbe6c037
commit b5468bced2
@@ -15,4 +15,6 @@ WHERE
 )
 ORDER BY
   date DESC,
-  title
+  title OFFSET $3
+LIMIT
+  $4
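The two new placeholders are positional Postgres bind parameters: $3 is the number of rows to skip and $4 caps the page size; the search resolver below supplies them as offset as i64 and limit as i64. As a minimal sketch of the same OFFSET/LIMIT binding with sqlx, using generate_series so no real table is assumed (the commit itself keeps the query in sql/threads.sql and expands it with sqlx::query_file!):

use sqlx::PgPool;

// Sketch only: demonstrates positional OFFSET/LIMIT binding, not the real threads query.
async fn page_of_numbers(pool: &PgPool, offset: i64, limit: i64) -> Result<Vec<i64>, sqlx::Error> {
    sqlx::query_scalar("SELECT n FROM generate_series(1::bigint, 1000::bigint) AS n ORDER BY n OFFSET $1 LIMIT $2")
        .bind(offset) // plays the role of $3 in threads.sql
        .bind(limit)  // plays the role of $4 in threads.sql
        .fetch_all(pool)
        .await
}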
@@ -49,41 +49,86 @@ pub async fn search(
         before,
         first,
         last,
-        |after, before, first, last| async move {
+        |after: Option<usize>, before: Option<usize>, first, last| async move {
             info!("search page info {after:#?}, {before:#?}, {first:#?}, {last:#?}");
-            let rows = sqlx::query_file!("sql/threads.sql", site, query.unread_only,)
-                .fetch_all(pool)
-                .await?;
-
-            let slice = rows.into_iter().map(|r| {
-                let tags = if r.is_read.unwrap_or(false) {
-                    vec![site.clone()]
-                } else {
-                    vec!["unread".to_string(), site.clone()]
-                };
-                ThreadSummary {
-                    thread: format!("{THREAD_PREFIX}{}", r.uid),
-                    timestamp: r
-                        .date
-                        .expect("post missing date")
-                        .assume_utc()
-                        .unix_timestamp() as isize,
-                    date_relative: "TODO date_relative".to_string(),
-                    matched: 0,
-                    total: 1,
-                    authors: r.name.unwrap_or_else(|| site.clone()),
-                    subject: r.title.unwrap_or("NO TITLE".to_string()),
-                    tags,
-                }
-            });
-            let mut connection = Connection::new(false, false);
-            // TODO
-            let start = 0;
+            let default_page_size = 100;
+            let (offset, limit) = match (after, before, first, last) {
+                // Reasonable defaults
+                (None, None, None, None) => (0, default_page_size),
+                (None, None, Some(first), None) => (0, first),
+                (Some(after), None, None, None) => (after, default_page_size),
+                (Some(after), None, Some(first), None) => (after, first),
+                (None, Some(before), None, None) => {
+                    (before.saturating_sub(default_page_size), default_page_size)
+                }
+                (None, Some(before), None, Some(last)) => (before.saturating_sub(last), last),
+                (None, None, None, Some(_)) => {
+                    panic!("specifying last and no before doesn't make sense")
+                }
+                (None, None, Some(_), Some(_)) => {
+                    panic!("specifying first and last doesn't make sense")
+                }
+                (None, Some(_), Some(_), _) => {
+                    panic!("specifying before and first doesn't make sense")
+                }
+                (Some(_), Some(_), _, _) => {
+                    panic!("specifying after and before doesn't make sense")
+                }
+                (Some(_), None, None, Some(_)) => {
+                    panic!("specifying after and last doesn't make sense")
+                }
+                (Some(_), None, Some(_), Some(_)) => {
+                    panic!("specifying after, first and last doesn't make sense")
+                }
+            };
+            // The +1 is to see if there are more pages of data available.
+            let limit = limit + 1;
+
+            info!("search page offset {offset} limit {limit}");
+            let rows = sqlx::query_file!(
+                "sql/threads.sql",
+                site,
+                query.unread_only,
+                offset as i64,
+                limit as i64
+            )
+            .fetch_all(pool)
+            .await?;
+
+            let mut slice = rows
+                .into_iter()
+                .map(|r| {
+                    let tags = if r.is_read.unwrap_or(false) {
+                        vec![site.clone()]
+                    } else {
+                        vec!["unread".to_string(), site.clone()]
+                    };
+                    ThreadSummary {
+                        thread: format!("{THREAD_PREFIX}{}", r.uid),
+                        timestamp: r
+                            .date
+                            .expect("post missing date")
+                            .assume_utc()
+                            .unix_timestamp() as isize,
+                        date_relative: "TODO date_relative".to_string(),
+                        matched: 0,
+                        total: 1,
+                        authors: r.name.unwrap_or_else(|| site.clone()),
+                        subject: r.title.unwrap_or("NO TITLE".to_string()),
+                        tags,
+                    }
+                })
+                .collect::<Vec<_>>();
+            let has_more = slice.len() == limit;
+            let mut connection = Connection::new(offset > 0, has_more);
+            if has_more {
+                slice.pop();
+            };
             connection.edges.extend(
                 slice
                     .into_iter()
                     .enumerate()
-                    .map(|(idx, item)| Edge::new(start + idx, item)),
+                    .map(|(idx, item)| Edge::new(offset + idx, item)),
             );
             Ok::<_, async_graphql::Error>(connection)
         },
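The heart of this hunk is the mapping from the Relay-style after/before/first/last arguments to a plain (offset, limit) window, with the unsupported combinations rejected. The same arm-by-arm logic restated as a free function with a few checks (this helper does not exist in the commit, it is only a sketch of the mapping above):

/// Sketch of the mapping the resolver performs inline; not part of the commit.
fn page_window(
    after: Option<usize>,
    before: Option<usize>,
    first: Option<usize>,
    last: Option<usize>,
    default_page_size: usize,
) -> (usize, usize) {
    match (after, before, first, last) {
        (None, None, None, None) => (0, default_page_size),
        (None, None, Some(first), None) => (0, first),
        (Some(after), None, None, None) => (after, default_page_size),
        (Some(after), None, Some(first), None) => (after, first),
        (None, Some(before), None, None) => {
            (before.saturating_sub(default_page_size), default_page_size)
        }
        (None, Some(before), None, Some(last)) => (before.saturating_sub(last), last),
        // Remaining combinations are rejected, mirroring the panics above.
        _ => panic!("unsupported combination of after/before/first/last"),
    }
}

#[test]
fn window_examples() {
    // Forward pagination: skip past the cursor, take `first` rows.
    assert_eq!(page_window(Some(200), None, Some(50), None, 100), (200, 50));
    // Backward pagination: end just before the cursor, take `last` rows.
    assert_eq!(page_window(None, Some(200), None, Some(50), 100), (150, 50));
    // No arguments: first page with the default size.
    assert_eq!(page_window(None, None, None, None, 100), (0, 100));
}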
@@ -91,7 +136,8 @@ pub async fn search(
     .await
 }
 
-pub async fn tags(pool: &PgPool, needs_unread: bool) -> Result<Vec<Tag>, ServerError> {
+pub async fn tags(pool: &PgPool, _needs_unread: bool) -> Result<Vec<Tag>, ServerError> {
+    // TODO: optimize query by using needs_unread
     let tags = sqlx::query_file!("sql/tags.sql").fetch_all(pool).await?;
     let tags = tags
         .into_iter()
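A second detail worth noting from the search hunk: the query asks for limit + 1 rows, and the presence of that extra row is what drives has_more (async_graphql's has_next_page) before it is popped off the page. In isolation, with a plain Vec standing in for the database rows, the probe looks like this (illustration only, not code from the commit):

// Illustration of the "fetch one extra row" probe used above; not part of the commit.
fn take_page<T>(mut rows: Vec<T>, offset: usize, limit: usize) -> (Vec<T>, bool, bool) {
    // `rows` is assumed to be the result of OFFSET offset LIMIT (limit + 1).
    let has_more = rows.len() == limit + 1;
    if has_more {
        rows.pop(); // drop the probe row so the page holds at most `limit` items
    }
    let has_previous = offset > 0;
    (rows, has_previous, has_more)
}

#[test]
fn probe_detects_a_following_page() {
    // Asked for 3 rows, got 4 back: there must be another page.
    let (page, prev, more) = take_page(vec![1, 2, 3, 4], 0, 3);
    assert_eq!(page, vec![1, 2, 3]);
    assert!(!prev);
    assert!(more);
}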