refactor: split gpodder repository and the sqlite data store implementation into separate crates

The complete separation of concerns via the gpodder repository allows us
to cleanly separate the server from the gpodder specification. This
paves the way for a later Postgres implementation of the data store.
This commit is contained in:
Jef Roosens 2025-03-19 08:54:49 +01:00
parent 86687a7b96
commit 0cfcd90eba
Signed by: Jef Roosens
GPG key ID: 21FD3D77D56BAF49
45 changed files with 2416 additions and 882 deletions

View file

@ -0,0 +1,96 @@
use chrono::DateTime;
use diesel::prelude::*;
use gpodder::AuthErr;
use super::SqliteRepository;
use crate::{
models::{session::Session, user::User},
schema::*,
DbError,
};
impl From<User> for gpodder::User {
    /// Map a database `User` row onto the gpodder domain `User`.
    fn from(db_user: User) -> Self {
        gpodder::User {
            username: db_user.username,
            password_hash: db_user.password_hash,
            id: db_user.id,
        }
    }
}
/// SQLite-backed implementation of the gpodder authentication store.
impl gpodder::AuthStore for SqliteRepository {
    /// Look up a user by username, returning `Ok(None)` when no such user exists.
    fn get_user(&self, username: &str) -> Result<Option<gpodder::models::User>, AuthErr> {
        Ok(users::table
            .select(User::as_select())
            .filter(users::username.eq(username))
            .first(&mut self.pool.get().map_err(DbError::from)?)
            // A missing row becomes `Ok(None)` instead of an error
            .optional()
            .map_err(DbError::from)?
            .map(gpodder::User::from))
    }

    /// Fetch a session by ID together with its owning user.
    //
    // NOTE(review): unlike `get_user`, this does not use `.optional()`, so a
    // missing session surfaces as an `Err` (via `DbError`) rather than
    // `Ok(None)` — confirm this asymmetry is intentional.
    fn get_session(&self, session_id: i64) -> Result<Option<gpodder::models::Session>, AuthErr> {
        match sessions::table
            .inner_join(users::table)
            .filter(sessions::id.eq(session_id))
            .select((Session::as_select(), User::as_select()))
            .get_result(&mut self.pool.get().map_err(DbError::from)?)
        {
            Ok((session, user)) => Ok(Some(gpodder::Session {
                id: session.id,
                // `last_seen` is always written via `timestamp()` (see
                // `insert_session`/`refresh_session`), so it is a valid Unix
                // timestamp and this unwrap cannot fail
                last_seen: DateTime::from_timestamp(session.last_seen, 0).unwrap(),
                user: user.into(),
            })),
            Err(err) => Err(DbError::from(err).into()),
        }
    }

    /// Delete the session with the given ID.
    ///
    /// Deleting a session that does not exist is not an error; the affected
    /// row count is discarded.
    fn remove_session(&self, session_id: i64) -> Result<(), AuthErr> {
        Ok(
            diesel::delete(sessions::table.filter(sessions::id.eq(session_id)))
                .execute(&mut self.pool.get().map_err(DbError::from)?)
                .map(|_| ())
                .map_err(DbError::from)?,
        )
    }

    /// Persist a new session row, storing `last_seen` as a Unix timestamp.
    fn insert_session(&self, session: &gpodder::Session) -> Result<(), AuthErr> {
        Ok(Session {
            id: session.id,
            user_id: session.user.id,
            last_seen: session.last_seen.timestamp(),
        }
        .insert_into(sessions::table)
        .execute(&mut self.pool.get().map_err(DbError::from)?)
        .map(|_| ())
        .map_err(DbError::from)?)
    }

    /// Update a session's `last_seen` timestamp.
    ///
    /// Returns `AuthErr::UnknownSession` when no row was updated, i.e. the
    /// session no longer exists.
    fn refresh_session(
        &self,
        session: &gpodder::Session,
        timestamp: DateTime<chrono::Utc>,
    ) -> Result<(), AuthErr> {
        if diesel::update(sessions::table.filter(sessions::id.eq(session.id)))
            .set(sessions::last_seen.eq(timestamp.timestamp()))
            .execute(&mut self.pool.get().map_err(DbError::from)?)
            .map_err(DbError::from)?
            == 0
        {
            Err(AuthErr::UnknownSession)
        } else {
            Ok(())
        }
    }

    /// Delete all sessions whose `last_seen` is strictly before
    /// `min_last_seen`, returning how many were removed.
    fn remove_old_sessions(&self, min_last_seen: DateTime<chrono::Utc>) -> Result<usize, AuthErr> {
        let min_last_seen = min_last_seen.timestamp();

        Ok(
            diesel::delete(sessions::table.filter(sessions::last_seen.lt(min_last_seen)))
                .execute(&mut self.pool.get().map_err(DbError::from)?)
                .map_err(DbError::from)?,
        )
    }
}

View file

@ -0,0 +1,298 @@
use std::collections::HashSet;
use chrono::{DateTime, Utc};
use diesel::{alias, dsl::not, prelude::*};
use gpodder::AuthErr;
use super::SqliteRepository;
use crate::{
models::{
device::{Device, DeviceType, NewDevice},
sync_group::SyncGroup,
},
schema::*,
DbError,
};
impl From<DeviceType> for gpodder::DeviceType {
    /// Translate the database device-type enum into its gpodder counterpart.
    fn from(db_type: DeviceType) -> Self {
        match db_type {
            DeviceType::Mobile => Self::Mobile,
            DeviceType::Laptop => Self::Laptop,
            DeviceType::Desktop => Self::Desktop,
            DeviceType::Server => Self::Server,
            DeviceType::Other => Self::Other,
        }
    }
}
impl From<gpodder::DeviceType> for DeviceType {
    /// Translate a gpodder device type into the database enum representation.
    fn from(api_type: gpodder::DeviceType) -> Self {
        match api_type {
            gpodder::DeviceType::Mobile => Self::Mobile,
            gpodder::DeviceType::Laptop => Self::Laptop,
            gpodder::DeviceType::Desktop => Self::Desktop,
            gpodder::DeviceType::Server => Self::Server,
            gpodder::DeviceType::Other => Self::Other,
        }
    }
}
/// SQLite-backed implementation of the gpodder device repository.
///
/// Each method wraps its body in an immediately-invoked closure so `?` can
/// propagate `DbError` internally; the final `.map_err(AuthErr::from)`
/// converts to the trait's error type.
impl gpodder::DeviceRepository for SqliteRepository {
    /// List all devices registered for the given user.
    fn devices_for_user(
        &self,
        user: &gpodder::User,
    ) -> Result<Vec<gpodder::Device>, gpodder::AuthErr> {
        (|| {
            Ok::<_, DbError>(
                devices::table
                    .select(Device::as_select())
                    .filter(devices::user_id.eq(user.id))
                    .get_results(&mut self.pool.get()?)?
                    .into_iter()
                    .map(|d| gpodder::Device {
                        id: d.device_id,
                        caption: d.caption,
                        r#type: d.type_.into(),
                        // TODO implement subscription count; hard-coded to 0 for now
                        subscriptions: 0,
                    })
                    .collect(),
            )
        })()
        .map_err(AuthErr::from)
    }

    /// Apply a partial update (caption and/or type) to the user's device,
    /// creating the device when it does not exist yet (upsert semantics).
    fn update_device_info(
        &self,
        user: &gpodder::User,
        device_id: &str,
        patch: gpodder::DevicePatch,
    ) -> Result<(), gpodder::AuthErr> {
        (|| {
            if let Some(mut device) = devices::table
                .select(Device::as_select())
                .filter(
                    devices::user_id
                        .eq(user.id)
                        .and(devices::device_id.eq(device_id)),
                )
                .get_result(&mut self.pool.get()?)
                .optional()?
            {
                // Existing device: only overwrite the fields present in the patch
                if let Some(caption) = patch.caption {
                    device.caption = caption;
                }

                if let Some(type_) = patch.r#type {
                    device.type_ = type_.into();
                }

                diesel::update(devices::table.filter(devices::id.eq(device.id)))
                    .set((
                        devices::caption.eq(&device.caption),
                        devices::type_.eq(&device.type_),
                    ))
                    .execute(&mut self.pool.get()?)?;
            } else {
                // Unknown device: create it, defaulting missing patch fields to
                // an empty caption and the "Other" type
                let device = NewDevice {
                    device_id: device_id.to_string(),
                    user_id: user.id,
                    caption: patch.caption.unwrap_or(String::new()),
                    type_: patch.r#type.unwrap_or(gpodder::DeviceType::Other).into(),
                };

                diesel::insert_into(devices::table)
                    .values(device)
                    .execute(&mut self.pool.get()?)?;
            }

            Ok::<_, DbError>(())
        })()
        .map_err(AuthErr::from)
    }

    /// Merge the given devices (and any sync groups they belong to) into a
    /// single sync group, returning the surviving group's ID.
    ///
    /// Runs in a single transaction so a partial merge can never be observed.
    fn merge_sync_groups(
        &self,
        user: &gpodder::User,
        device_ids: Vec<&str>,
    ) -> Result<i64, gpodder::AuthErr> {
        (|| {
            let conn = &mut self.pool.get()?;

            conn.transaction(|conn| {
                let devices: Vec<(i64, Option<i64>)> = devices::table
                    .select((devices::id, devices::sync_group_id))
                    .filter(
                        devices::user_id
                            .eq(user.id)
                            .and(devices::device_id.eq_any(device_ids)),
                    )
                    .get_results(conn)?;

                let mut sync_group_ids: Vec<i64> = devices
                    .iter()
                    .filter_map(|(_, group_id)| *group_id)
                    .collect();

                // Remove any duplicates, giving us each sync group ID once
                sync_group_ids.sort();
                sync_group_ids.dedup();

                // If any of the devices are already in a sync group, reuse one of
                // them (`pop` after sorting takes the highest-numbered group).
                // Otherwise, generate a new group.
                let sync_group_id = if let Some(id) = sync_group_ids.pop() {
                    id
                } else {
                    SyncGroup::new(conn)?.id
                };

                // Move all devices in the other sync groups into the new sync group
                diesel::update(
                    devices::table.filter(devices::sync_group_id.eq_any(sync_group_ids.iter())),
                )
                .set(devices::sync_group_id.eq(sync_group_id))
                .execute(conn)?;

                // Add the non-synchronized devices into the new sync group
                let unsynced_device_ids =
                    devices.iter().filter_map(
                        |(id, group_id)| if group_id.is_none() { Some(id) } else { None },
                    );

                diesel::update(devices::table.filter(devices::id.eq_any(unsynced_device_ids)))
                    .set(devices::sync_group_id.eq(sync_group_id))
                    .execute(conn)?;

                // Remove the other now unused sync groups
                diesel::delete(sync_groups::table.filter(sync_groups::id.eq_any(sync_group_ids)))
                    .execute(conn)?;

                Ok::<_, DbError>(sync_group_id)
            })
        })()
        .map_err(AuthErr::from)
    }

    /// Detach the given devices from whatever sync group they are in.
    fn remove_from_sync_group(
        &self,
        user: &gpodder::User,
        device_ids: Vec<&str>,
    ) -> Result<(), gpodder::AuthErr> {
        (|| {
            let conn = &mut self.pool.get()?;

            diesel::update(
                devices::table.filter(
                    devices::user_id
                        .eq(user.id)
                        .and(devices::device_id.eq_any(device_ids)),
                ),
            )
            .set(devices::sync_group_id.eq(None::<i64>))
            .execute(conn)?;

            // This is in a different transaction on purpose, as the success of this removal shouldn't
            // fail the entire query
            SyncGroup::remove_unused(conn)?;

            Ok::<_, DbError>(())
        })()
        .map_err(AuthErr::from)
    }

    /// Propagate subscriptions within a sync group: every device in the group
    /// receives the group's union of active subscriptions it does not already
    /// have, stamped with `time_changed`.
    fn synchronize_sync_group(
        &self,
        group_id: i64,
        time_changed: DateTime<Utc>,
    ) -> Result<(), gpodder::AuthErr> {
        (|| {
            let time_changed = time_changed.timestamp();
            let conn = &mut self.pool.get()?;

            conn.transaction(|conn| {
                let device_ids: Vec<i64> = devices::table
                    .filter(devices::sync_group_id.eq(group_id))
                    .select(devices::id)
                    .get_results(conn)?;

                // For each device in the group, we get the list of subscriptions not yet in its own
                // non-deleted list, and add it to the database
                for device_id in device_ids.iter().copied() {
                    let d1 = alias!(device_subscriptions as d1);

                    // The device's own currently-active subscription URLs
                    let own_subscriptions = d1
                        .filter(
                            d1.field(device_subscriptions::device_id)
                                .eq(device_id)
                                .and(d1.field(device_subscriptions::deleted).eq(false)),
                        )
                        .select(d1.field(device_subscriptions::podcast_url));

                    // Active URLs on any device in the group that this device lacks.
                    // NOTE(review): `own_subscriptions` excludes only non-deleted
                    // rows, so a URL this device has as a *deleted* row would be
                    // inserted again here — confirm a uniqueness constraint (or
                    // lack thereof) makes that safe.
                    let urls_to_add = device_subscriptions::table
                        .select(device_subscriptions::podcast_url)
                        .filter(
                            device_subscriptions::device_id
                                .eq_any(device_ids.iter())
                                .and(device_subscriptions::deleted.eq(false))
                                .and(not(
                                    device_subscriptions::podcast_url.eq_any(own_subscriptions)
                                )),
                        )
                        .distinct()
                        .load_iter(conn)?
                        .collect::<Result<HashSet<String>, _>>()?;

                    super::subscription::insert_subscriptions_for_single_device(
                        conn,
                        device_id,
                        urls_to_add.iter(),
                        time_changed,
                    )?;
                }

                Ok::<_, DbError>(())
            })
        })()
        .map_err(AuthErr::from)
    }

    /// Group the user's device IDs by sync group.
    ///
    /// Returns `(not_synchronized, synchronized)` where the second element
    /// holds one `Vec` per sync group. Relies on `ORDER BY sync_group_id` so
    /// devices of the same group arrive consecutively (NULLs first).
    fn devices_by_sync_group(
        &self,
        user: &gpodder::User,
    ) -> Result<(Vec<String>, Vec<Vec<String>>), gpodder::AuthErr> {
        (|| {
            let mut not_synchronized = Vec::new();
            let mut synchronized = Vec::new();

            let conn = &mut self.pool.get()?;

            let mut devices = devices::table
                .select((devices::device_id, devices::sync_group_id))
                .filter(devices::user_id.eq(user.id))
                .order(devices::sync_group_id)
                .load_iter::<(String, Option<i64>), _>(conn)?;

            // Single pass over the ordered rows: whenever the group ID changes,
            // switch the bucket new device IDs are pushed into
            let mut cur_group = &mut not_synchronized;
            let mut cur_group_id: Option<i64> = None;

            while let Some((device_id, group_id)) = devices.next().transpose()? {
                if group_id != cur_group_id {
                    if group_id.is_none() {
                        cur_group = &mut not_synchronized;
                    } else {
                        synchronized.push(Vec::new());
                        let index = synchronized.len() - 1;
                        cur_group = &mut synchronized[index];
                    }

                    cur_group_id = group_id;
                }

                cur_group.push(device_id);
            }

            Ok::<_, DbError>((not_synchronized, synchronized))
        })()
        .map_err(AuthErr::from)
    }
}

View file

@ -0,0 +1,176 @@
use chrono::{DateTime, Utc};
use diesel::prelude::*;
use gpodder::AuthErr;
use super::SqliteRepository;
use crate::{
models::{
device::Device,
episode_action::{ActionType, EpisodeAction, NewEpisodeAction},
},
schema::*,
DbError,
};
impl From<gpodder::EpisodeAction> for NewEpisodeAction {
    /// Convert a gpodder episode action into an insertable database row.
    ///
    /// `user_id`, `device_id` and `time_changed` are filled with placeholder
    /// values (`0`, `None`, `0`); the caller overwrites them before inserting.
    fn from(action: gpodder::EpisodeAction) -> Self {
        // Flatten the action enum into the flat column representation; only
        // "play" actions carry the started/position/total payload
        let (db_action, started, position, total) = match action.action {
            gpodder::EpisodeActionType::Play {
                started,
                position,
                total,
            } => (ActionType::Play, started, Some(position), total),
            gpodder::EpisodeActionType::New => (ActionType::New, None, None, None),
            gpodder::EpisodeActionType::Delete => (ActionType::Delete, None, None, None),
            gpodder::EpisodeActionType::Download => (ActionType::Download, None, None, None),
        };

        NewEpisodeAction {
            user_id: 0,
            device_id: None,
            podcast_url: action.podcast,
            episode_url: action.episode,
            time_changed: 0,
            timestamp: action.timestamp.map(|t| t.timestamp()),
            action: db_action,
            started,
            position,
            total,
        }
    }
}
/// Convert a database episode action row, paired with the optional device ID
/// it was recorded from, back into the gpodder domain representation.
fn to_gpodder_action(
    (device_id, db_action): (Option<String>, EpisodeAction),
) -> gpodder::EpisodeAction {
    let action = match db_action.action {
        ActionType::New => gpodder::EpisodeActionType::New,
        ActionType::Delete => gpodder::EpisodeActionType::Delete,
        ActionType::Download => gpodder::EpisodeActionType::Download,
        ActionType::Play => gpodder::EpisodeActionType::Play {
            started: db_action.started,
            // SAFETY: a CHECK constraint in the database explicitly enforces
            // that `position` is non-null whenever the action type is "play"
            position: db_action.position.unwrap(),
            total: db_action.total,
        },
    };

    // SAFETY: both stored timestamps were produced by a previous `timestamp()`
    // call, so converting them back with `from_timestamp` always succeeds
    let timestamp = db_action
        .timestamp
        .map(|ts| DateTime::from_timestamp(ts, 0).unwrap());
    let time_changed = DateTime::from_timestamp(db_action.time_changed, 0).unwrap();

    gpodder::EpisodeAction {
        podcast: db_action.podcast_url,
        episode: db_action.episode_url,
        timestamp,
        time_changed,
        device: device_id,
        action,
    }
}
/// SQLite-backed implementation of the gpodder episode action repository.
impl gpodder::EpisodeActionRepository for SqliteRepository {
    /// Insert a batch of episode actions for the user, all stamped with the
    /// same `time_changed`, inside a single transaction.
    fn add_episode_actions(
        &self,
        user: &gpodder::User,
        actions: Vec<gpodder::EpisodeAction>,
        time_changed: DateTime<Utc>,
    ) -> Result<(), gpodder::AuthErr> {
        (|| {
            let time_changed = time_changed.timestamp();

            // TODO optimize this query
            // 1. The lookup for a device could be replaced with a subquery, although Diesel seems to
            //    have a problem using an Option<String> to match equality with a String
            // 2. Ideally the for loop would be replaced with a single query inserting multiple
            //    values, although each value would need its own subquery
            //
            // NOTE this function usually gets called from the same device, so optimizing the
            // amount of device lookups required would be useful.
            self.pool.get()?.transaction(|conn| {
                for action in actions {
                    // Resolve the client-supplied device string to its row ID
                    let device_id = if let Some(device) = &action.device {
                        Some(Device::device_id_to_id(conn, user.id, device)?)
                    } else {
                        None
                    };

                    // The From impl leaves user_id/device_id/time_changed as
                    // placeholders; fill them in here
                    let mut new_action: NewEpisodeAction = action.into();
                    new_action.user_id = user.id;
                    new_action.device_id = device_id;
                    new_action.time_changed = time_changed;

                    diesel::insert_into(episode_actions::table)
                        .values(&new_action)
                        .execute(conn)?;
                }

                Ok::<_, DbError>(())
            })
        })()
        .map_err(AuthErr::from)
    }

    /// Return the user's episode actions, optionally filtered by change time,
    /// podcast URL and device, and optionally aggregated to the latest action
    /// per episode URL.
    fn episode_actions_for_user(
        &self,
        user: &gpodder::User,
        since: Option<DateTime<Utc>>,
        podcast: Option<String>,
        device: Option<String>,
        aggregated: bool,
    ) -> Result<Vec<gpodder::EpisodeAction>, gpodder::AuthErr> {
        (|| {
            // No `since` means "everything", i.e. since the epoch
            let since = since.map(|ts| ts.timestamp()).unwrap_or(0);

            let conn = &mut self.pool.get()?;

            // Boxed so the optional filters below can be added conditionally
            let mut query = episode_actions::table
                .left_join(devices::table)
                .filter(
                    episode_actions::user_id
                        .eq(user.id)
                        .and(episode_actions::time_changed.ge(since)),
                )
                .select((devices::device_id.nullable(), EpisodeAction::as_select()))
                .into_boxed();

            if let Some(device_id) = device {
                query = query.filter(devices::device_id.eq(device_id));
            }

            if let Some(podcast_url) = podcast {
                query = query.filter(episode_actions::podcast_url.eq(podcast_url));
            }

            let db_actions: Vec<(Option<String>, EpisodeAction)> = if aggregated {
                // https://stackoverflow.com/a/7745635
                // For each episode URL, we want to return the row with the highest `time_changed`
                // value. We achieve this by left joining with self on the URL, as well as whether
                // the left row's time_changed value is less than the right one. Rows with the
                // largest time_changed value for a given URL will join with a NULL value (because
                // of the left join), so we filter those out to retrieve the correct rows.
                let a2 = diesel::alias!(episode_actions as a2);

                query
                    .left_join(
                        a2.on(episode_actions::episode_url
                            .eq(a2.field(episode_actions::episode_url))
                            .and(
                                episode_actions::time_changed
                                    .lt(a2.field(episode_actions::time_changed)),
                            )),
                    )
                    .filter(a2.field(episode_actions::episode_url).is_null())
                    .get_results(conn)?
            } else {
                query.get_results(conn)?
            };

            let actions = db_actions.into_iter().map(to_gpodder_action).collect();

            Ok::<_, DbError>(actions)
        })()
        .map_err(AuthErr::from)
    }
}

View file

@ -0,0 +1,27 @@
mod auth;
mod device;
mod episode_action;
mod subscription;
use std::path::Path;
use super::DbPool;
/// gpodder repository implementation backed by an SQLite database.
///
/// Cloning is cheap: only the connection pool handle is duplicated.
#[derive(Clone)]
pub struct SqliteRepository {
    // Connection pool; each repository method checks out its own connection
    pool: DbPool,
}
impl From<DbPool> for SqliteRepository {
fn from(value: DbPool) -> Self {
Self { pool: value }
}
}
impl SqliteRepository {
    /// Create a repository backed by the SQLite database at `path`,
    /// initializing the backing database first.
    pub fn from_path(path: impl AsRef<Path>) -> Result<Self, gpodder::AuthErr> {
        // NOTE(review): the `true` argument to `initialize_db` is a bare bool;
        // its meaning must be confirmed at the definition site
        let pool = super::initialize_db(path, true)?;

        Ok(pool.into())
    }
}

View file

@ -0,0 +1,374 @@
use std::collections::HashSet;
use chrono::DateTime;
use diesel::prelude::*;
use gpodder::AuthErr;
use super::SqliteRepository;
use crate::{
models::device_subscription::{DeviceSubscription, NewDeviceSubscription},
schema::*,
DbError,
};
/// Replace a single device's subscription list with exactly `urls`.
///
/// Rows for URLs no longer in the list are soft-deleted, previously deleted
/// rows that reappear are revived, and genuinely new URLs are inserted — all
/// stamped with `time_changed`. The three statements must run in this order
/// to avoid insert conflicts; the caller is expected to wrap this in a
/// transaction.
fn set_subscriptions_for_single_device(
    conn: &mut SqliteConnection,
    device_id: i64,
    urls: &HashSet<String>,
    time_changed: i64,
) -> QueryResult<()> {
    // https://github.com/diesel-rs/diesel/discussions/2826
    // SQLite doesn't support default on conflict set values, so we can't handle this using
    // on conflict. Therefore, we instead calculate which URLs should be inserted and which
    // updated, so we avoid conflicts.
    let urls_in_db: HashSet<String> = device_subscriptions::table
        .select(device_subscriptions::podcast_url)
        .filter(device_subscriptions::device_id.eq(device_id))
        .get_results(conn)?
        .into_iter()
        .collect();

    // URLs originally in the database that are no longer in the list
    let urls_to_delete = urls_in_db.difference(&urls);

    // URLs not in the database that are in the new list
    let urls_to_insert = urls.difference(&urls_in_db);

    // URLs that are in both the database and the new list. For these, those marked as
    // "deleted" in the database are updated so they're no longer deleted, with their
    // timestamp updated.
    let urls_to_update = urls.intersection(&urls_in_db);

    // Mark the URLs to delete as properly deleted
    diesel::update(
        device_subscriptions::table.filter(
            device_subscriptions::device_id
                .eq(device_id)
                .and(device_subscriptions::podcast_url.eq_any(urls_to_delete)),
        ),
    )
    .set((
        device_subscriptions::deleted.eq(true),
        device_subscriptions::time_changed.eq(time_changed),
    ))
    .execute(conn)?;

    // Update the existing deleted URLs that are reinserted as no longer deleted
    diesel::update(
        device_subscriptions::table.filter(
            device_subscriptions::device_id
                .eq(device_id)
                .and(device_subscriptions::podcast_url.eq_any(urls_to_update))
                .and(device_subscriptions::deleted.eq(true)),
        ),
    )
    .set((
        device_subscriptions::deleted.eq(false),
        device_subscriptions::time_changed.eq(time_changed),
    ))
    .execute(conn)?;

    // Insert the new values into the database
    diesel::insert_into(device_subscriptions::table)
        .values(
            urls_to_insert
                .into_iter()
                .map(|url| NewDeviceSubscription {
                    device_id,
                    podcast_url: url.to_string(),
                    deleted: false,
                    time_changed,
                })
                .collect::<Vec<_>>(),
        )
        .execute(conn)?;

    Ok(())
}
/// Add the given URLs to the device's list of subscriptions, meaning the URLs
/// are truly inserted into the database as fresh, non-deleted rows stamped
/// with `time_changed`.
///
/// This function assumes the list of URLs is already free of URLs that already
/// have a corresponding row in the database, so no conflict checks are
/// performed; a conflicting URL surfaces as a database error.
pub fn insert_subscriptions_for_single_device<'a>(
    conn: &mut SqliteConnection,
    device_id: i64,
    urls: impl Iterator<Item = &'a String>,
    time_changed: i64,
) -> QueryResult<()> {
    // `urls` is already an iterator, so the redundant `.into_iter()` call has
    // been dropped; map directly into insertable rows
    diesel::insert_into(device_subscriptions::table)
        .values(
            urls.map(|url| NewDeviceSubscription {
                device_id,
                podcast_url: url.to_string(),
                deleted: false,
                time_changed,
            })
            .collect::<Vec<_>>(),
        )
        .execute(conn)?;

    Ok(())
}
/// Apply an incremental subscription update (`add` / `remove` URL sets) to a
/// single device, stamping every touched row with `time_changed`.
///
/// Removals are soft deletes; re-added previously-deleted rows are revived;
/// genuinely new URLs are inserted. The caller is expected to run this inside
/// a transaction.
pub fn update_subscriptions_for_single_device(
    conn: &mut SqliteConnection,
    device_id: i64,
    add: &HashSet<String>,
    remove: &HashSet<String>,
    time_changed: i64,
) -> QueryResult<()> {
    // All URLs currently stored for this device (deleted or not)
    let urls_in_db: HashSet<String> = device_subscriptions::table
        .select(device_subscriptions::podcast_url)
        .filter(device_subscriptions::device_id.eq(device_id))
        .get_results(conn)?
        .into_iter()
        .collect();

    // Subscriptions to remove are those that were already in the database and are now part
    // of the removed list. Subscriptions that were never added in the first place don't
    // need to be marked as deleted. We also only update those that aren't already marked
    // as deleted.
    let urls_to_delete = remove.intersection(&urls_in_db);

    diesel::update(
        device_subscriptions::table.filter(
            device_subscriptions::device_id
                .eq(device_id)
                .and(device_subscriptions::podcast_url.eq_any(urls_to_delete))
                .and(device_subscriptions::deleted.eq(false)),
        ),
    )
    .set((
        device_subscriptions::deleted.eq(true),
        device_subscriptions::time_changed.eq(time_changed),
    ))
    .execute(conn)?;

    // Subscriptions to update are those that are already in the database, but are also in
    // the added list. Only those who were originally marked as deleted get updated.
    let urls_to_update = add.intersection(&urls_in_db);

    diesel::update(
        device_subscriptions::table.filter(
            device_subscriptions::device_id
                .eq(device_id)
                .and(device_subscriptions::podcast_url.eq_any(urls_to_update))
                .and(device_subscriptions::deleted.eq(true)),
        ),
    )
    .set((
        device_subscriptions::deleted.eq(false),
        device_subscriptions::time_changed.eq(time_changed),
    ))
    .execute(conn)?;

    // Subscriptions to insert are those that aren't in the database and are part of the
    // added list
    let urls_to_insert = add.difference(&urls_in_db);

    insert_subscriptions_for_single_device(conn, device_id, urls_to_insert, time_changed)?;

    Ok(())
}
/// SQLite-backed implementation of the gpodder subscription repository.
impl gpodder::SubscriptionRepository for SqliteRepository {
    /// Return the distinct subscription URLs across all of the user's devices.
    //
    // NOTE(review): there is no filter on `deleted` here, so soft-deleted
    // subscriptions are included; also `distinct()` applies to the
    // (url, time_changed) pair, so the same URL can appear more than once with
    // different timestamps — confirm both are intentional.
    fn subscriptions_for_user(
        &self,
        user: &gpodder::User,
    ) -> Result<Vec<gpodder::Subscription>, gpodder::AuthErr> {
        (|| {
            Ok::<_, DbError>(
                device_subscriptions::table
                    .inner_join(devices::table)
                    .filter(devices::user_id.eq(user.id))
                    .select((
                        device_subscriptions::podcast_url,
                        device_subscriptions::time_changed,
                    ))
                    .distinct()
                    .get_results::<(String, i64)>(&mut self.pool.get()?)?
                    .into_iter()
                    .map(|(url, ts)| gpodder::Subscription {
                        url,
                        // Stored via `timestamp()`, so the conversion back can't fail
                        time_changed: DateTime::from_timestamp(ts, 0).unwrap(),
                    })
                    .collect(),
            )
        })()
        .map_err(AuthErr::from)
    }

    /// Return the subscriptions recorded for one specific device of the user.
    //
    // NOTE(review): as above, soft-deleted rows are not filtered out here —
    // confirm that is intentional.
    fn subscriptions_for_device(
        &self,
        user: &gpodder::User,
        device_id: &str,
    ) -> Result<Vec<gpodder::Subscription>, gpodder::AuthErr> {
        (|| {
            Ok::<_, DbError>(
                device_subscriptions::table
                    .inner_join(devices::table)
                    .filter(
                        devices::user_id
                            .eq(user.id)
                            .and(devices::device_id.eq(device_id)),
                    )
                    .select((
                        device_subscriptions::podcast_url,
                        device_subscriptions::time_changed,
                    ))
                    .get_results::<(String, i64)>(&mut self.pool.get()?)?
                    .into_iter()
                    .map(|(url, ts)| gpodder::Subscription {
                        url,
                        time_changed: DateTime::from_timestamp(ts, 0).unwrap(),
                    })
                    .collect(),
            )
        })()
        .map_err(AuthErr::from)
    }

    /// Replace the device's subscription list with `urls`.
    ///
    /// When the device belongs to a sync group, the replacement is applied to
    /// every device in the group. Runs inside a single transaction.
    fn set_subscriptions_for_device(
        &self,
        user: &gpodder::User,
        device_id: &str,
        urls: Vec<String>,
        time_changed: chrono::DateTime<chrono::Utc>,
    ) -> Result<(), gpodder::AuthErr> {
        (|| {
            let time_changed = time_changed.timestamp();
            // Deduplicate the incoming URL list
            let urls: HashSet<String> = urls.into_iter().collect();

            self.pool.get()?.transaction(|conn| {
                // Shadowing: `device_id` is now the numeric row ID, not the
                // client-supplied device string
                let (device_id, group_id) = devices::table
                    .select((devices::id, devices::sync_group_id))
                    .filter(
                        devices::user_id
                            .eq(user.id)
                            .and(devices::device_id.eq(device_id)),
                    )
                    .get_result::<(i64, Option<i64>)>(conn)?;

                // If the device is part of a sync group, we need to perform the update on every
                // device in the group
                if let Some(group_id) = group_id {
                    let device_ids: Vec<i64> = devices::table
                        .filter(devices::sync_group_id.eq(group_id))
                        .select(devices::id)
                        .get_results(conn)?;

                    for device_id in device_ids {
                        set_subscriptions_for_single_device(conn, device_id, &urls, time_changed)?;
                    }
                } else {
                    set_subscriptions_for_single_device(conn, device_id, &urls, time_changed)?;
                }

                Ok::<_, DbError>(())
            })
        })()
        .map_err(AuthErr::from)
    }

    /// Apply an incremental add/remove subscription update to the device — or
    /// to every device in its sync group — inside a single transaction.
    fn update_subscriptions_for_device(
        &self,
        user: &gpodder::User,
        device_id: &str,
        add: Vec<String>,
        remove: Vec<String>,
        time_changed: chrono::DateTime<chrono::Utc>,
    ) -> Result<(), gpodder::AuthErr> {
        (|| {
            let time_changed = time_changed.timestamp();

            // TODO URLs that are in both the added and removed lists will currently get
            // "re-added", meaning their change timestamp will be updated even though they
            // haven't really changed.
            let add: HashSet<_> = add.into_iter().collect();
            let remove: HashSet<_> = remove.into_iter().collect();

            self.pool.get()?.transaction(|conn| {
                // Shadowing: `device_id` becomes the numeric row ID
                let (device_id, group_id) = devices::table
                    .select((devices::id, devices::sync_group_id))
                    .filter(
                        devices::user_id
                            .eq(user.id)
                            .and(devices::device_id.eq(device_id)),
                    )
                    .get_result::<(i64, Option<i64>)>(conn)?;

                // If the device is part of a sync group, we need to perform the update on every
                // device in the group
                if let Some(group_id) = group_id {
                    let device_ids: Vec<i64> = devices::table
                        .filter(devices::sync_group_id.eq(group_id))
                        .select(devices::id)
                        .get_results(conn)?;

                    for device_id in device_ids {
                        update_subscriptions_for_single_device(
                            conn,
                            device_id,
                            &add,
                            &remove,
                            time_changed,
                        )?;
                    }
                } else {
                    update_subscriptions_for_single_device(
                        conn,
                        device_id,
                        &add,
                        &remove,
                        time_changed,
                    )?;
                }

                Ok::<_, DbError>(())
            })
        })()
        .map_err(AuthErr::from)
    }

    /// Return the device's subscription changes since `since`, split into
    /// `(added, removed)` based on each row's `deleted` flag.
    fn subscription_updates_for_device(
        &self,
        user: &gpodder::User,
        device_id: &str,
        since: chrono::DateTime<chrono::Utc>,
    ) -> Result<(Vec<gpodder::Subscription>, Vec<gpodder::Subscription>), gpodder::AuthErr> {
        (|| {
            let since = since.timestamp();
            let (mut added, mut removed) = (Vec::new(), Vec::new());

            let query = device_subscriptions::table
                .inner_join(devices::table)
                .filter(
                    devices::user_id
                        .eq(user.id)
                        .and(devices::device_id.eq(device_id))
                        .and(device_subscriptions::time_changed.ge(since)),
                )
                .select(DeviceSubscription::as_select());

            for sub in query.load_iter(&mut self.pool.get()?)? {
                let sub = sub?;

                if sub.deleted {
                    removed.push(gpodder::Subscription {
                        url: sub.podcast_url,
                        time_changed: DateTime::from_timestamp(sub.time_changed, 0).unwrap(),
                    });
                } else {
                    added.push(gpodder::Subscription {
                        url: sub.podcast_url,
                        time_changed: DateTime::from_timestamp(sub.time_changed, 0).unwrap(),
                    });
                }
            }

            Ok::<_, DbError>((added, removed))
        })()
        .map_err(AuthErr::from)
    }
}