From b5468bced2698d3efd84e977d94ce68322ac376f Mon Sep 17 00:00:00 2001
From: Bill Thiede
Date: Mon, 22 Jul 2024 09:28:12 -0700
Subject: [PATCH] Implement pagination for newsreader

---
 server/sql/threads.sql   |   4 +-
 server/src/newsreader.rs | 106 ++++++++++++++++++++++++++++-----------
 2 files changed, 79 insertions(+), 31 deletions(-)

diff --git a/server/sql/threads.sql b/server/sql/threads.sql
index 69f9cc7..a476710 100644
--- a/server/sql/threads.sql
+++ b/server/sql/threads.sql
@@ -15,4 +15,6 @@ WHERE
     )
 ORDER BY
     date DESC,
-    title
+    title OFFSET $3
+LIMIT
+    $4
diff --git a/server/src/newsreader.rs b/server/src/newsreader.rs
index 3cf9cb0..7e96ae1 100644
--- a/server/src/newsreader.rs
+++ b/server/src/newsreader.rs
@@ -49,41 +49,86 @@ pub async fn search(
         before,
         first,
         last,
-        |after, before, first, last| async move {
+        |after: Option<usize>, before: Option<usize>, first, last| async move {
             info!("search page info {after:#?}, {before:#?}, {first:#?}, {last:#?}");
-            let rows = sqlx::query_file!("sql/threads.sql", site, query.unread_only,)
-                .fetch_all(pool)
-                .await?;
-
-            let slice = rows.into_iter().map(|r| {
-                let tags = if r.is_read.unwrap_or(false) {
-                    vec![site.clone()]
-                } else {
-                    vec!["unread".to_string(), site.clone()]
-                };
-                ThreadSummary {
-                    thread: format!("{THREAD_PREFIX}{}", r.uid),
-                    timestamp: r
-                        .date
-                        .expect("post missing date")
-                        .assume_utc()
-                        .unix_timestamp() as isize,
-                    date_relative: "TODO date_relative".to_string(),
-                    matched: 0,
-                    total: 1,
-                    authors: r.name.unwrap_or_else(|| site.clone()),
-                    subject: r.title.unwrap_or("NO TITLE".to_string()),
-                    tags,
+            let default_page_size = 100;
+            let (offset, limit) = match (after, before, first, last) {
+                // Reasonable defaults
+                (None, None, None, None) => (0, default_page_size),
+                (None, None, Some(first), None) => (0, first),
+                (Some(after), None, None, None) => (after, default_page_size),
+                (Some(after), None, Some(first), None) => (after, first),
+                (None, Some(before), None, None) => {
+                    (before.saturating_sub(default_page_size), default_page_size)
                 }
-            });
-            let mut connection = Connection::new(false, false);
-            // TODO
-            let start = 0;
+                (None, Some(before), None, Some(last)) => (before.saturating_sub(last), last),
+                (None, None, None, Some(_)) => {
+                    panic!("specifying last and no before doesn't make sense")
+                }
+                (None, None, Some(_), Some(_)) => {
+                    panic!("specifying first and last doesn't make sense")
+                }
+                (None, Some(_), Some(_), _) => {
+                    panic!("specifying before and first doesn't make sense")
+                }
+                (Some(_), Some(_), _, _) => {
+                    panic!("specifying after and before doesn't make sense")
+                }
+                (Some(_), None, None, Some(_)) => {
+                    panic!("specifying after and last doesn't make sense")
+                }
+                (Some(_), None, Some(_), Some(_)) => {
+                    panic!("specifying after, first and last doesn't make sense")
+                }
+            };
+            // The +1 is to see if there are more pages of data available.
+            let limit = limit + 1;
+
+            info!("search page offset {offset} limit {limit}");
+            let rows = sqlx::query_file!(
+                "sql/threads.sql",
+                site,
+                query.unread_only,
+                offset as i64,
+                limit as i64
+            )
+            .fetch_all(pool)
+            .await?;
+
+            let mut slice = rows
+                .into_iter()
+                .map(|r| {
+                    let tags = if r.is_read.unwrap_or(false) {
+                        vec![site.clone()]
+                    } else {
+                        vec!["unread".to_string(), site.clone()]
+                    };
+                    ThreadSummary {
+                        thread: format!("{THREAD_PREFIX}{}", r.uid),
+                        timestamp: r
+                            .date
+                            .expect("post missing date")
+                            .assume_utc()
+                            .unix_timestamp() as isize,
+                        date_relative: "TODO date_relative".to_string(),
+                        matched: 0,
+                        total: 1,
+                        authors: r.name.unwrap_or_else(|| site.clone()),
+                        subject: r.title.unwrap_or("NO TITLE".to_string()),
+                        tags,
+                    }
+                })
+                .collect::<Vec<_>>();
+            let has_more = slice.len() == limit;
+            let mut connection = Connection::new(offset > 0, has_more);
+            if has_more {
+                slice.pop();
+            };
             connection.edges.extend(
                 slice
                     .into_iter()
                     .enumerate()
-                    .map(|(idx, item)| Edge::new(start + idx, item)),
+                    .map(|(idx, item)| Edge::new(offset + idx, item)),
             );
             Ok::<_, async_graphql::Error>(connection)
         },
@@ -91,7 +136,8 @@ pub async fn search(
     .await
 }
 
-pub async fn tags(pool: &PgPool, needs_unread: bool) -> Result<Vec<String>, ServerError> {
+pub async fn tags(pool: &PgPool, _needs_unread: bool) -> Result<Vec<String>, ServerError> {
+    // TODO: optimize query by using needs_unread
     let tags = sqlx::query_file!("sql/tags.sql").fetch_all(pool).await?;
     let tags = tags
         .into_iter()
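
Two details of the approach above are worth spelling out. First, pagination here is offset-based: the Relay-style cursor arguments after/before/first/last are collapsed into a single (OFFSET, LIMIT) window, with `after` acting as a row position and `saturating_sub` keeping the `before`-only cases from underflowing at row 0. A runnable sketch of that mapping, pulled out of the patch as a pure function (the name `page_window` and the `Result` error return in place of the patch's `panic!`s are illustrative, not from the patch):

    // Hypothetical standalone version of the patch's cursor-to-window match.
    // Returns Err where the patch panics on nonsensical argument combinations.
    fn page_window(
        after: Option<usize>,
        before: Option<usize>,
        first: Option<usize>,
        last: Option<usize>,
    ) -> Result<(usize, usize), String> {
        let default_page_size = 100;
        match (after, before, first, last) {
            // Reasonable defaults: start at the top, one page.
            (None, None, None, None) => Ok((0, default_page_size)),
            (None, None, Some(first), None) => Ok((0, first)),
            (Some(after), None, None, None) => Ok((after, default_page_size)),
            (Some(after), None, Some(first), None) => Ok((after, first)),
            // `before` without `after`: walk backwards, clamped at row 0.
            (None, Some(before), None, None) => {
                Ok((before.saturating_sub(default_page_size), default_page_size))
            }
            (None, Some(before), None, Some(last)) => Ok((before.saturating_sub(last), last)),
            _ => Err("invalid combination of after/before/first/last".to_string()),
        }
    }

    fn main() {
        // No arguments: first page of the default size.
        assert_eq!(page_window(None, None, None, None), Ok((0, 100)));
        // first: 20 after cursor 40 reads rows 40..60.
        assert_eq!(page_window(Some(40), None, Some(20), None), Ok((40, 20)));
        // last: 20 before cursor 10 clamps to row 0 instead of underflowing.
        assert_eq!(page_window(None, Some(10), None, Some(20)), Ok((0, 20)));
    }

Because the cursor is just a row offset (note `Edge::new(offset + idx, item)`), cursors are positions rather than stable row identities; rows arriving or disappearing between requests can shift a page, which is the usual trade-off of OFFSET/LIMIT pagination.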
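
Second, the `limit + 1` overfetch: the patch asks the database for one row more than the page size, and the presence of that extra row is what `has_more` detects, so no separate COUNT query is needed; the sentinel row is popped before the edges are built. `has_previous` is simply `offset > 0`, and the two flags become the `Connection::new(has_previous_page, has_next_page)` arguments. A self-contained sketch of the same logic with a `Vec` standing in for the database (`fetch_page` is a hypothetical helper, not part of the patch):

    // Overfetch-by-one page fetch: returns the page plus the two PageInfo flags.
    fn fetch_page<T: Clone>(rows: &[T], offset: usize, limit: usize) -> (Vec<T>, bool, bool) {
        // Request one row beyond the page size, mirroring `let limit = limit + 1;`.
        let mut page: Vec<T> = rows.iter().skip(offset).take(limit + 1).cloned().collect();
        // If the extra row came back, another page exists; drop it before returning.
        let has_next = page.len() == limit + 1;
        if has_next {
            page.pop();
        }
        // Anything before the offset means a previous page exists.
        let has_previous = offset > 0;
        (page, has_previous, has_next)
    }

    fn main() {
        let rows: Vec<u32> = (0..25).collect();
        let (page, prev, next) = fetch_page(&rows, 0, 10);
        assert_eq!((page.len(), prev, next), (10, false, true));
        let (page, prev, next) = fetch_page(&rows, 20, 10);
        assert_eq!((page.len(), prev, next), (5, true, false));
    }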