diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store/common.rs
similarity index 63%
rename from lightning-persister/src/fs_store.rs
rename to lightning-persister/src/fs_store/common.rs
index 3129748afda..72670cbe646 100644
--- a/lightning-persister/src/fs_store.rs
+++ b/lightning-persister/src/fs_store/common.rs
@@ -1,8 +1,10 @@
-//! Objects related to [`FilesystemStore`] live here.
+//! Common utilities shared between [`FilesystemStore`] and [`FilesystemStoreV2`].
+//!
+//! [`FilesystemStore`]: crate::fs_store::v1::FilesystemStore
+//! [`FilesystemStoreV2`]: crate::fs_store::v2::FilesystemStoreV2
+
 use crate::utils::{check_namespace_key_validity, is_valid_kvstore_str};
 
 use lightning::types::string::PrintableString;
-use lightning::util::persist::{KVStoreSync, MigratableKVStore};
 
 use std::collections::HashMap;
 use std::fs;
@@ -11,14 +13,14 @@ use std::path::{Path, PathBuf};
 use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex, RwLock};
 
+#[cfg(target_os = "windows")]
+use std::ffi::OsStr;
 #[cfg(feature = "tokio")]
-use core::future::Future;
-#[cfg(feature = "tokio")]
-use lightning::util::persist::KVStore;
-
+use std::future::Future;
 #[cfg(target_os = "windows")]
-use {std::ffi::OsStr, std::os::windows::ffi::OsStrExt};
+use std::os::windows::ffi::OsStrExt;
 
+/// Calls a Windows API function and returns Ok(()) on success or the last OS error on failure.
 #[cfg(target_os = "windows")]
 macro_rules! call {
 	($e: expr) => {
@@ -30,6 +32,10 @@ macro_rules! call {
 	};
 }
 
+#[cfg(target_os = "windows")]
+use call;
+
+/// Converts a path to a null-terminated wide string for Windows API calls.
 #[cfg(target_os = "windows")]
 fn path_to_windows_str<T: AsRef<OsStr>>(path: &T) -> Vec<u16> {
 	path.as_ref().encode_wide().chain(Some(0)).collect()
@@ -39,6 +45,15 @@ fn path_to_windows_str<T: AsRef<OsStr>>(path: &T) -> Vec<u16> {
 // a consistent view and error out.
 const LIST_DIR_CONSISTENCY_RETRIES: usize = 10;
 
+// The directory name used for empty namespaces in v2.
+// Uses brackets which are not in KVSTORE_NAMESPACE_KEY_ALPHABET, preventing collisions
+// with valid namespace names.
+pub(crate) const EMPTY_NAMESPACE_DIR: &str = "[empty]";
+
+/// Inner state shared between sync and async operations for filesystem stores.
+///
+/// This struct manages the data directory, temporary file counter, and per-path locks
+/// that ensure we don't have concurrent writes to the same file.
 struct FilesystemStoreInner {
 	data_dir: PathBuf,
 	tmp_file_counter: AtomicUsize,
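For illustration, the two-level layout the v2 scheme derives from this constant — a sketch with made-up paths; `v2_dir` is not part of the patch:

```rust
use std::path::PathBuf;

// Maps a namespace pair to its v2 directory, substituting "[empty]" for empty names.
fn v2_dir(data_dir: &str, primary: &str, secondary: &str) -> PathBuf {
    let ns = |s: &str| if s.is_empty() { "[empty]" } else { s };
    PathBuf::from(data_dir).join(ns(primary)).join(ns(secondary))
}

fn main() {
    assert_eq!(v2_dir("data", "", ""), PathBuf::from("data/[empty]/[empty]"));
    assert_eq!(v2_dir("data", "alpha", ""), PathBuf::from("data/alpha/[empty]"));
    assert_eq!(v2_dir("data", "alpha", "beta"), PathBuf::from("data/alpha/beta"));
}
```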
@@ -48,10 +63,7 @@ struct FilesystemStoreInner {
 	locks: Mutex<HashMap<PathBuf, Arc<RwLock<u64>>>>,
 }
 
-/// A [`KVStore`] and [`KVStoreSync`] implementation that writes to and reads from the file system.
-///
-/// [`KVStore`]: lightning::util::persist::KVStore
-pub struct FilesystemStore {
+pub(crate) struct FilesystemStoreState {
 	inner: Arc<FilesystemStoreInner>,
 
 	// Version counter to ensure that writes are applied in the correct order. It is assumed that read and list
@@ -59,13 +71,15 @@ pub struct FilesystemStore {
 	next_version: AtomicU64,
 }
 
-impl FilesystemStore {
-	/// Constructs a new [`FilesystemStore`].
-	pub fn new(data_dir: PathBuf) -> Self {
-		let locks = Mutex::new(HashMap::new());
-		let tmp_file_counter = AtomicUsize::new(0);
+impl FilesystemStoreState {
+	/// Creates a new [`FilesystemStoreState`] with the given data directory.
+	pub(crate) fn new(data_dir: PathBuf) -> Self {
 		Self {
-			inner: Arc::new(FilesystemStoreInner { data_dir, tmp_file_counter, locks }),
+			inner: Arc::new(FilesystemStoreInner {
+				data_dir,
+				tmp_file_counter: AtomicUsize::new(0),
+				locks: Mutex::new(HashMap::new()),
+			}),
 			next_version: AtomicU64::new(1),
 		}
 	}
@@ -94,57 +108,18 @@ impl FilesystemStoreState {
 		let outer_lock = self.inner.locks.lock().unwrap();
 		outer_lock.len()
 	}
-}
-
-impl KVStoreSync for FilesystemStore {
-	fn read(
-		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
-	) -> Result<Vec<u8>, lightning::io::Error> {
-		let path = self.inner.get_checked_dest_file_path(
-			primary_namespace,
-			secondary_namespace,
-			Some(key),
-			"read",
-		)?;
-		self.inner.read(path)
-	}
-
-	fn write(
-		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
-	) -> Result<(), lightning::io::Error> {
-		let path = self.inner.get_checked_dest_file_path(
-			primary_namespace,
-			secondary_namespace,
-			Some(key),
-			"write",
-		)?;
-		let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone());
-		self.inner.write_version(inner_lock_ref, path, buf, version)
-	}
-
-	fn remove(
-		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
-	) -> Result<(), lightning::io::Error> {
-		let path = self.inner.get_checked_dest_file_path(
-			primary_namespace,
-			secondary_namespace,
-			Some(key),
-			"remove",
-		)?;
-		let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone());
-		self.inner.remove_version(inner_lock_ref, path, lazy, version)
-	}
 
-	fn list(
-		&self, primary_namespace: &str, secondary_namespace: &str,
-	) -> Result<Vec<String>, lightning::io::Error> {
-		let path = self.inner.get_checked_dest_file_path(
+	pub(crate) fn get_checked_dest_file_path(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: Option<&str>,
+		operation: &str, use_empty_ns_dir: bool,
+	) -> lightning::io::Result<PathBuf> {
+		self.inner.get_checked_dest_file_path(
 			primary_namespace,
 			secondary_namespace,
-			None,
-			"list",
-		)?;
-		self.inner.list(path)
+			key,
+			operation,
+			use_empty_ns_dir,
+		)
 	}
 }
 
@@ -155,7 +130,7 @@ impl FilesystemStoreInner {
 	}
 
 	fn get_dest_dir_path(
-		&self, primary_namespace: &str, secondary_namespace: &str,
+		&self, primary_namespace: &str, secondary_namespace: &str, use_empty_ns_dir: bool,
	) -> std::io::Result<PathBuf> {
 		let mut dest_dir_path = {
 			#[cfg(target_os = "windows")]
@@ -170,9 +145,22 @@ impl FilesystemStoreInner {
 			}
 		};
 
-		dest_dir_path.push(primary_namespace);
-		if !secondary_namespace.is_empty() {
-			dest_dir_path.push(secondary_namespace);
+		if use_empty_ns_dir {
+			dest_dir_path.push(if primary_namespace.is_empty() {
+				EMPTY_NAMESPACE_DIR
+			} else {
+				primary_namespace
+			});
+			dest_dir_path.push(if secondary_namespace.is_empty() {
+				EMPTY_NAMESPACE_DIR
+			} else {
+				secondary_namespace
+			});
+		} else {
+			dest_dir_path.push(primary_namespace);
+			if !secondary_namespace.is_empty() {
+				dest_dir_path.push(secondary_namespace);
+			}
 		}
 
 		Ok(dest_dir_path)
@@ -180,11 +168,12 @@ impl FilesystemStoreInner {
 
 	fn get_checked_dest_file_path(
 		&self, primary_namespace: &str, secondary_namespace: &str, key: Option<&str>,
-		operation: &str,
+		operation: &str, use_empty_ns_dir: bool,
 	) -> lightning::io::Result<PathBuf> {
 		check_namespace_key_validity(primary_namespace, secondary_namespace, key, operation)?;
 
-		let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
+		let mut dest_file_path =
+			self.get_dest_dir_path(primary_namespace, secondary_namespace, use_empty_ns_dir)?;
 		if let Some(key) = key {
 			dest_file_path.push(key);
 		}
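Why `[empty]` can never collide with a real namespace: caller-supplied names are validated against LDK's `KVSTORE_NAMESPACE_KEY_ALPHABET` (alphanumerics plus `-` and `_`), which excludes brackets. A minimal sketch of that property — the checker below is a stand-in for `is_valid_kvstore_str`, not the actual implementation:

```rust
// Stand-in for the alphabet check performed via check_namespace_key_validity.
fn in_kvstore_alphabet(s: &str) -> bool {
    s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_')
}

fn main() {
    assert!(in_kvstore_alphabet("valid_namespace-1"));
    // "[empty]" is rejected as input, so a user namespace can never shadow the sentinel dir.
    assert!(!in_kvstore_alphabet("[empty]"));
}
```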
@@ -260,8 +249,13 @@ impl FilesystemStoreInner {
 	/// returns early without writing.
 	fn write_version(
 		&self, inner_lock_ref: Arc<RwLock<u64>>, dest_file_path: PathBuf, buf: Vec<u8>,
-		version: u64,
+		version: u64, preserve_mtime: bool,
 	) -> lightning::io::Result<()> {
+		let mtime = if preserve_mtime {
+			fs::metadata(&dest_file_path).ok().and_then(|m| m.modified().ok())
+		} else {
+			None
+		};
 		let parent_directory = dest_file_path.parent().ok_or_else(|| {
 			let msg =
 				format!("Could not retrieve parent directory of {}.", dest_file_path.display());
@@ -281,6 +275,13 @@ impl FilesystemStoreInner {
 		{
 			let mut tmp_file = fs::File::create(&tmp_file_path)?;
 			tmp_file.write_all(&buf)?;
+
+			// If we need to preserve the original mtime (for updates), set it before fsync.
+			if let Some(mtime) = mtime {
+				let times = fs::FileTimes::new().set_modified(mtime);
+				tmp_file.set_times(times)?;
+			}
+
 			tmp_file.sync_all()?;
 		}
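The mtime juggling exists because v2 pagination treats a file's modified time as its creation time. A condensed sketch of the same write path — an illustrative helper with error handling trimmed, requiring Rust 1.75+ for `fs::FileTimes`:

```rust
use std::io::Write;
use std::{fs, path::Path};

// Rewrite `path` atomically while keeping the timestamp that pagination sorts on.
fn update_preserving_mtime(path: &Path, buf: &[u8]) -> std::io::Result<()> {
    let mtime = fs::metadata(path).ok().and_then(|m| m.modified().ok());
    let tmp = path.with_extension("tmp");
    let mut file = fs::File::create(&tmp)?;
    file.write_all(buf)?;
    if let Some(t) = mtime {
        // Set the old mtime on the temp file *before* fsync + rename, mirroring the patch.
        file.set_times(fs::FileTimes::new().set_modified(t))?;
    }
    file.sync_all()?;
    fs::rename(&tmp, path) // atomic replace keeps the old timestamp visible
}
```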
@@ -413,13 +414,15 @@ impl FilesystemStoreInner {
 		})
 	}
 
-	fn list(&self, prefixed_dest: PathBuf) -> lightning::io::Result<Vec<String>> {
+	fn list(
+		&self, prefixed_dest: PathBuf, retry_on_race: bool,
+	) -> lightning::io::Result<Vec<String>> {
 		if !Path::new(&prefixed_dest).exists() {
 			return Ok(Vec::new());
 		}
 
 		let mut keys;
-		let mut retries = LIST_DIR_CONSISTENCY_RETRIES;
+		let mut retries = if retry_on_race { LIST_DIR_CONSISTENCY_RETRIES } else { 0 };
 
 		'retry_list: loop {
 			keys = Vec::new();
@@ -458,10 +461,68 @@ impl FilesystemStoreInner {
 	}
 }
 
-#[cfg(feature = "tokio")]
-impl KVStore for FilesystemStore {
-	fn read(
+impl FilesystemStoreState {
+	pub(crate) fn read_impl(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+		use_empty_ns_dir: bool,
+	) -> Result<Vec<u8>, lightning::io::Error> {
+		let path = self.inner.get_checked_dest_file_path(
+			primary_namespace,
+			secondary_namespace,
+			Some(key),
+			"read",
+			use_empty_ns_dir,
+		)?;
+		self.inner.read(path)
+	}
+
+	pub(crate) fn write_impl(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+		use_empty_ns_dir: bool,
+	) -> Result<(), lightning::io::Error> {
+		let path = self.inner.get_checked_dest_file_path(
+			primary_namespace,
+			secondary_namespace,
+			Some(key),
+			"write",
+			use_empty_ns_dir,
+		)?;
+		let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone());
+		self.inner.write_version(inner_lock_ref, path, buf, version, use_empty_ns_dir)
+	}
+
+	pub(crate) fn remove_impl(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+		use_empty_ns_dir: bool,
+	) -> Result<(), lightning::io::Error> {
+		let path = self.inner.get_checked_dest_file_path(
+			primary_namespace,
+			secondary_namespace,
+			Some(key),
+			"remove",
+			use_empty_ns_dir,
+		)?;
+		let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone());
+		self.inner.remove_version(inner_lock_ref, path, lazy, version)
+	}
+
+	pub(crate) fn list_impl(
+		&self, primary_namespace: &str, secondary_namespace: &str, use_empty_ns_dir: bool,
+	) -> Result<Vec<String>, lightning::io::Error> {
+		let path = self.inner.get_checked_dest_file_path(
+			primary_namespace,
+			secondary_namespace,
+			None,
+			"list",
+			use_empty_ns_dir,
+		)?;
+		self.inner.list(path, !use_empty_ns_dir)
+	}
+
+	#[cfg(feature = "tokio")]
+	pub(crate) fn read_async(
 		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+		use_empty_ns_dir: bool,
 	) -> impl Future<Output = Result<Vec<u8>, lightning::io::Error>> + 'static + Send {
 		let this = Arc::clone(&self.inner);
 		let path = this.get_checked_dest_file_path(
@@ -469,6 +530,7 @@ impl KVStore for FilesystemStore {
 			secondary_namespace,
 			Some(key),
 			"read",
+			use_empty_ns_dir,
 		);
 
 		async move {
@@ -482,12 +544,20 @@ impl KVStore for FilesystemStore {
 		}
 	}
 
-	fn write(
+	#[cfg(feature = "tokio")]
+	pub(crate) fn write_async(
 		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+		use_empty_ns_dir: bool,
 	) -> impl Future<Output = Result<(), lightning::io::Error>> + 'static + Send {
 		let this = Arc::clone(&self.inner);
 		let path = this
-			.get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "write")
+			.get_checked_dest_file_path(
+				primary_namespace,
+				secondary_namespace,
+				Some(key),
+				"write",
+				use_empty_ns_dir,
+			)
 			.map(|path| (self.get_new_version_and_lock_ref(path.clone()), path));
 
 		async move {
@@ -496,19 +566,27 @@ impl KVStore for FilesystemStore {
 				Err(e) => return Err(e),
 			};
 			tokio::task::spawn_blocking(move || {
-				this.write_version(inner_lock_ref, path, buf, version)
+				this.write_version(inner_lock_ref, path, buf, version, use_empty_ns_dir)
 			})
 			.await
 			.unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)))
 		}
 	}
 
-	fn remove(
+	#[cfg(feature = "tokio")]
+	pub(crate) fn remove_async(
 		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+		use_empty_ns_dir: bool,
 	) -> impl Future<Output = Result<(), lightning::io::Error>> + 'static + Send {
 		let this = Arc::clone(&self.inner);
 		let path = this
-			.get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "remove")
+			.get_checked_dest_file_path(
+				primary_namespace,
+				secondary_namespace,
+				Some(key),
+				"remove",
+				use_empty_ns_dir,
+			)
 			.map(|path| (self.get_new_version_and_lock_ref(path.clone()), path));
 
 		async move {
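What the `(inner_lock_ref, version)` pair buys: versions are handed out synchronously in call order, and the blocking half applies them last-writer-wins, so futures may complete out of order without an older write clobbering a newer one. Roughly — a sketch of the gate, with names assumed from the surrounding code:

```rust
use std::sync::RwLock;

// Skip a write whose version is older than what already hit the disk.
fn should_apply(last_written: &RwLock<u64>, version: u64) -> bool {
    let mut guard = last_written.write().unwrap();
    if version <= *guard {
        return false; // a newer (or equal) write already landed
    }
    *guard = version;
    true
}
```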
@@ -524,23 +602,127 @@ impl KVStore for FilesystemStore {
 		}
 	}
 
-	fn list(
-		&self, primary_namespace: &str, secondary_namespace: &str,
+	#[cfg(feature = "tokio")]
+	pub(crate) fn list_async(
+		&self, primary_namespace: &str, secondary_namespace: &str, use_empty_ns_dir: bool,
 	) -> impl Future<Output = Result<Vec<String>, lightning::io::Error>> + 'static + Send {
 		let this = Arc::clone(&self.inner);
-		let path =
-			this.get_checked_dest_file_path(primary_namespace, secondary_namespace, None, "list");
+		let path = this.get_checked_dest_file_path(
+			primary_namespace,
+			secondary_namespace,
+			None,
+			"list",
+			use_empty_ns_dir,
+		);
 
 		async move {
 			let path = match path {
 				Ok(path) => path,
 				Err(e) => return Err(e),
 			};
-			tokio::task::spawn_blocking(move || this.list(path)).await.unwrap_or_else(|e| {
-				Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))
-			})
+			tokio::task::spawn_blocking(move || this.list(path, !use_empty_ns_dir))
+				.await
+				.unwrap_or_else(|e| {
+					Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))
+				})
+		}
+	}
+
+	pub(crate) fn list_all_keys_impl(
+		&self, use_empty_ns_dir: bool,
+	) -> Result<Vec<(String, String, String)>, lightning::io::Error> {
+		let prefixed_dest = &self.inner.data_dir;
+		if !prefixed_dest.exists() {
+			return Ok(Vec::new());
+		}
+
+		// When use_empty_ns_dir is true (v2), namespace directories may be named
+		// [empty] to represent empty namespaces, so we resolve via namespace_from_dir_path.
+		// When false (v1), directory names are always valid kvstore strings.
+		let resolve_ns = |path: &Path, base: &Path| -> Result<String, lightning::io::Error> {
+			if use_empty_ns_dir {
+				namespace_from_dir_path(path)
+			} else {
+				get_key_from_dir_entry_path(path, base)
+			}
+		};
+
+		let mut keys = Vec::new();
+
+		'primary_loop: for primary_entry in fs::read_dir(prefixed_dest)? {
+			let primary_entry = primary_entry?;
+			let primary_path = primary_entry.path();
+
+			if dir_entry_is_key(&primary_entry)? {
+				let primary_namespace = String::new();
+				let secondary_namespace = String::new();
+				let key = get_key_from_dir_entry_path(&primary_path, prefixed_dest)?;
+				keys.push((primary_namespace, secondary_namespace, key));
+				continue 'primary_loop;
+			}
+
+			// The primary_entry is actually also a directory.
+			'secondary_loop: for secondary_entry in fs::read_dir(&primary_path)? {
+				let secondary_entry = secondary_entry?;
+				let secondary_path = secondary_entry.path();
+
+				if dir_entry_is_key(&secondary_entry)? {
+					let primary_namespace = resolve_ns(&primary_path, prefixed_dest)?;
+					let secondary_namespace = String::new();
+					let key = get_key_from_dir_entry_path(&secondary_path, &primary_path)?;
+					keys.push((primary_namespace, secondary_namespace, key));
+					continue 'secondary_loop;
+				}
+
+				// The secondary_entry is actually also a directory.
+				for tertiary_entry in fs::read_dir(&secondary_path)? {
+					let tertiary_entry = tertiary_entry?;
+					let tertiary_path = tertiary_entry.path();
+
+					if dir_entry_is_key(&tertiary_entry)? {
+						let primary_namespace = resolve_ns(&primary_path, prefixed_dest)?;
+						let secondary_namespace = resolve_ns(&secondary_path, &primary_path)?;
+						let key = get_key_from_dir_entry_path(&tertiary_path, &secondary_path)?;
+						keys.push((primary_namespace, secondary_namespace, key));
+					} else {
+						debug_assert!(
+							false,
+							"Failed to list keys of path {}: only two levels of namespaces are supported",
+							PrintableString(tertiary_path.to_str().unwrap_or_default())
+						);
+						let msg = format!(
+							"Failed to list keys of path {}: only two levels of namespaces are supported",
+							PrintableString(tertiary_path.to_str().unwrap_or_default())
+						);
+						return Err(lightning::io::Error::new(
+							lightning::io::ErrorKind::Other,
+							msg,
+						));
+					}
+				}
+			}
 		}
+		Ok(keys)
+	}
+}
+
+/// Extracts a namespace string from a directory path, converting [`EMPTY_NAMESPACE_DIR`] to an
+/// empty string.
+fn namespace_from_dir_path(path: &Path) -> Result<String, lightning::io::Error> {
+	let name = path.file_name().and_then(|n| n.to_str()).ok_or_else(|| {
+		lightning::io::Error::new(
+			lightning::io::ErrorKind::Other,
+			format!(
+				"Failed to extract namespace from path {}",
+				PrintableString(path.to_str().unwrap_or_default())
+			),
+		)
+	})?;
+	if name == EMPTY_NAMESPACE_DIR {
+		Ok(String::new())
+	} else {
+		Ok(name.to_string())
	}
 }
 
@@ -584,7 +766,9 @@ fn dir_entry_is_key(dir_entry: &fs::DirEntry) -> Result<bool, lightning::io::Error> {
 	})
 }
 
-fn get_key_from_dir_entry_path(p: &Path, base_path: &Path) -> Result<String, lightning::io::Error> {
+pub(crate) fn get_key_from_dir_entry_path(
+	p: &Path, base_path: &Path,
+) -> Result<String, lightning::io::Error> {
 	match p.strip_prefix(&base_path) {
 		Ok(stripped_path) => {
 			if let Some(relative_path) = stripped_path.to_str() {
@@ -631,325 +815,3 @@ fn get_key_from_dir_entry_path(p: &Path, base_path: &Path) -> Result<String, lightning::io::Error> {
-
-impl MigratableKVStore for FilesystemStore {
-	fn list_all_keys(&self) -> Result<Vec<(String, String, String)>, lightning::io::Error> {
-		let prefixed_dest = &self.inner.data_dir;
-		if !prefixed_dest.exists() {
-			return Ok(Vec::new());
-		}
-
-		let mut keys = Vec::new();
-
-		'primary_loop: for primary_entry in fs::read_dir(prefixed_dest)? {
-			let primary_entry = primary_entry?;
-			let primary_path = primary_entry.path();
-
-			if dir_entry_is_key(&primary_entry)? {
-				let primary_namespace = String::new();
-				let secondary_namespace = String::new();
-				let key = get_key_from_dir_entry_path(&primary_path, prefixed_dest)?;
-				keys.push((primary_namespace, secondary_namespace, key));
-				continue 'primary_loop;
-			}
-
-			// The primary_entry is actually also a directory.
-			'secondary_loop: for secondary_entry in fs::read_dir(&primary_path)? {
-				let secondary_entry = secondary_entry?;
-				let secondary_path = secondary_entry.path();
-
-				if dir_entry_is_key(&secondary_entry)? {
-					let primary_namespace =
-						get_key_from_dir_entry_path(&primary_path, prefixed_dest)?;
-					let secondary_namespace = String::new();
-					let key = get_key_from_dir_entry_path(&secondary_path, &primary_path)?;
-					keys.push((primary_namespace, secondary_namespace, key));
-					continue 'secondary_loop;
-				}
-
-				// The secondary_entry is actually also a directory.
-				for tertiary_entry in fs::read_dir(&secondary_path)? {
-					let tertiary_entry = tertiary_entry?;
-					let tertiary_path = tertiary_entry.path();
-
-					if dir_entry_is_key(&tertiary_entry)? {
-						let primary_namespace =
-							get_key_from_dir_entry_path(&primary_path, prefixed_dest)?;
-						let secondary_namespace =
-							get_key_from_dir_entry_path(&secondary_path, &primary_path)?;
-						let key = get_key_from_dir_entry_path(&tertiary_path, &secondary_path)?;
-						keys.push((primary_namespace, secondary_namespace, key));
-					} else {
-						debug_assert!(
-							false,
-							"Failed to list keys of path {}: only two levels of namespaces are supported",
-							PrintableString(tertiary_path.to_str().unwrap_or_default())
-						);
-						let msg = format!(
-							"Failed to list keys of path {}: only two levels of namespaces are supported",
-							PrintableString(tertiary_path.to_str().unwrap_or_default())
-						);
-						return Err(lightning::io::Error::new(
-							lightning::io::ErrorKind::Other,
-							msg,
-						));
-					}
-				}
-			}
-		}
-		Ok(keys)
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use crate::test_utils::{
-		do_read_write_remove_list_persist, do_test_data_migration, do_test_store,
-	};
-
-	use lightning::chain::chainmonitor::Persist;
-	use lightning::chain::ChannelMonitorUpdateStatus;
-	use lightning::events::ClosureReason;
-	use lightning::ln::functional_test_utils::*;
-	use lightning::ln::msgs::BaseMessageHandler;
-	use lightning::util::persist::read_channel_monitors;
-	use lightning::util::test_utils;
-
-	impl Drop for FilesystemStore {
-		fn drop(&mut self) {
-			// We test for invalid directory names, so it's OK if directory removal
-			// fails.
-			match fs::remove_dir_all(&self.inner.data_dir) {
-				Err(e) => println!("Failed to remove test persister directory: {}", e),
-				_ => {},
-			}
-		}
-	}
-
-	#[test]
-	fn read_write_remove_list_persist() {
-		let mut temp_path = std::env::temp_dir();
-		temp_path.push("test_read_write_remove_list_persist");
-		let fs_store = FilesystemStore::new(temp_path);
-		do_read_write_remove_list_persist(&fs_store);
-	}
-
-	#[cfg(feature = "tokio")]
-	#[tokio::test]
-	async fn read_write_remove_list_persist_async() {
-		use crate::fs_store::FilesystemStore;
-		use lightning::util::persist::KVStore;
-		use std::sync::Arc;
-
-		let mut temp_path = std::env::temp_dir();
-		temp_path.push("test_read_write_remove_list_persist_async");
-		let fs_store = Arc::new(FilesystemStore::new(temp_path));
-		assert_eq!(fs_store.state_size(), 0);
-
-		let async_fs_store = Arc::clone(&fs_store);
-
-		let data1 = vec![42u8; 32];
-		let data2 = vec![43u8; 32];
-
-		let primary = "testspace";
-		let secondary = "testsubspace";
-		let key = "testkey";
-
-		// Test writing the same key twice with different data. Execute the asynchronous part out of order to ensure
-		// that eventual consistency works.
-		let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1);
-		assert_eq!(fs_store.state_size(), 1);
-
-		let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false);
-		assert_eq!(fs_store.state_size(), 1);
-
-		let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone());
-		assert_eq!(fs_store.state_size(), 1);
-
-		fut3.await.unwrap();
-		assert_eq!(fs_store.state_size(), 1);
-
-		fut2.await.unwrap();
-		assert_eq!(fs_store.state_size(), 1);
-
-		fut1.await.unwrap();
-		assert_eq!(fs_store.state_size(), 0);
-
-		// Test list.
-		let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap();
-		assert_eq!(listed_keys.len(), 1);
-		assert_eq!(listed_keys[0], key);
-
-		// Test read. We expect to read data2, as the write call was initiated later.
-		let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap();
-		assert_eq!(data2, &*read_data);
-
-		// Test remove.
-		KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap();
-
-		let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap();
-		assert_eq!(listed_keys.len(), 0);
-	}
-
-	#[test]
-	fn test_data_migration() {
-		let mut source_temp_path = std::env::temp_dir();
-		source_temp_path.push("test_data_migration_source");
-		let mut source_store = FilesystemStore::new(source_temp_path);
-
-		let mut target_temp_path = std::env::temp_dir();
-		target_temp_path.push("test_data_migration_target");
-		let mut target_store = FilesystemStore::new(target_temp_path);
-
-		do_test_data_migration(&mut source_store, &mut target_store);
-	}
-
-	#[test]
-	fn test_if_monitors_is_not_dir() {
-		let store = FilesystemStore::new("test_monitors_is_not_dir".into());
-
-		fs::create_dir_all(&store.get_data_dir()).unwrap();
-		let mut path = std::path::PathBuf::from(&store.get_data_dir());
-		path.push("monitors");
-		fs::File::create(path).unwrap();
-
-		let chanmon_cfgs = create_chanmon_cfgs(1);
-		let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
-		let chain_mon_0 = test_utils::TestChainMonitor::new(
-			Some(&chanmon_cfgs[0].chain_source),
-			&chanmon_cfgs[0].tx_broadcaster,
-			&chanmon_cfgs[0].logger,
-			&chanmon_cfgs[0].fee_estimator,
-			&store,
-			node_cfgs[0].keys_manager,
-		);
-		node_cfgs[0].chain_monitor = chain_mon_0;
-		let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
-		let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
-
-		// Check that read_channel_monitors() returns error if monitors/ is not a
-		// directory.
-		assert!(
-			read_channel_monitors(&store, nodes[0].keys_manager, nodes[0].keys_manager).is_err()
-		);
-	}
-
-	#[test]
-	fn test_filesystem_store() {
-		// Create the nodes, giving them FilesystemStores for data stores.
-		let store_0 = FilesystemStore::new("test_filesystem_store_0".into());
-		let store_1 = FilesystemStore::new("test_filesystem_store_1".into());
-		do_test_store(&store_0, &store_1)
-	}
-
-	// Test that if the store's path to channel data is read-only, writing a
-	// monitor to it results in the store returning an UnrecoverableError.
-	// Windows ignores the read-only flag for folders, so this test is Unix-only.
-	#[cfg(not(target_os = "windows"))]
-	#[test]
-	fn test_readonly_dir_perm_failure() {
-		let store = FilesystemStore::new("test_readonly_dir_perm_failure".into());
-		fs::create_dir_all(&store.get_data_dir()).unwrap();
-
-		// Set up a dummy channel and force close. This will produce a monitor
-		// that we can then use to test persistence.
-		let chanmon_cfgs = create_chanmon_cfgs(2);
-		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-		let node_a_id = nodes[0].node.get_our_node_id();
-
-		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-
-		let message = "Channel force-closed".to_owned();
-		nodes[1]
-			.node
-			.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone())
-			.unwrap();
-		let reason =
-			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
-		check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
-		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-
-		// Set the store's directory to read-only, which should result in
-		// returning an unrecoverable failure when we then attempt to persist a
-		// channel update.
-		let path = &store.get_data_dir();
-		let mut perms = fs::metadata(path).unwrap().permissions();
-		perms.set_readonly(true);
-		fs::set_permissions(path, perms).unwrap();
-
-		let monitor_name = added_monitors[0].1.persistence_key();
-		match store.persist_new_channel(monitor_name, &added_monitors[0].1) {
-			ChannelMonitorUpdateStatus::UnrecoverableError => {},
-			_ => panic!("unexpected result from persisting new channel"),
-		}
-
-		nodes[1].node.get_and_clear_pending_msg_events();
-		added_monitors.clear();
-	}
-
-	// Test that if a store's directory name is invalid, monitor persistence
-	// will fail.
-	#[cfg(target_os = "windows")]
-	#[test]
-	fn test_fail_on_open() {
-		// Set up a dummy channel and force close. This will produce a monitor
-		// that we can then use to test persistence.
-		let chanmon_cfgs = create_chanmon_cfgs(2);
-		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-		let node_a_id = nodes[0].node.get_our_node_id();
-
-		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-
-		let message = "Channel force-closed".to_owned();
-		nodes[1]
-			.node
-			.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone())
-			.unwrap();
-		let reason =
-			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
-		check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
-		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-		let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
-
-		// Create the store with an invalid directory name and test that the
-		// channel fails to open because the directories fail to be created. There
-		// don't seem to be invalid filename characters on Unix that Rust doesn't
-		// handle, hence why the test is Windows-only.
-		let store = FilesystemStore::new(":<>/".into());
-
-		let monitor_name = added_monitors[0].1.persistence_key();
-		match store.persist_new_channel(monitor_name, &added_monitors[0].1) {
-			ChannelMonitorUpdateStatus::UnrecoverableError => {},
-			_ => panic!("unexpected result from persisting new channel"),
-		}
-
-		nodes[1].node.get_and_clear_pending_msg_events();
-		added_monitors.clear();
-	}
-}
-
-#[cfg(ldk_bench)]
-/// Benches
-pub mod bench {
-	use criterion::Criterion;
-
-	/// Bench!
-	pub fn bench_sends(bench: &mut Criterion) {
-		let store_a = super::FilesystemStore::new("bench_filesystem_store_a".into());
-		let store_b = super::FilesystemStore::new("bench_filesystem_store_b".into());
-		lightning::ln::channelmanager::bench::bench_two_sends(
-			bench,
-			"bench_filesystem_persisted_sends",
-			store_a,
-			store_b,
-		);
-	}
-}
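Worth noting from the traversal above: only in v2 can a directory literally named `[empty]` appear, so `resolve_ns` maps it back to the empty namespace, while in v1 a directory name is already the namespace. A sketch mirroring `namespace_from_dir_path`:

```rust
// Directory name -> namespace string, as the v2 resolver sees it.
fn dir_to_namespace(dir_name: &str) -> String {
    if dir_name == "[empty]" {
        String::new()
    } else {
        dir_name.to_string()
    }
}

fn main() {
    assert_eq!(dir_to_namespace("[empty]"), "");
    assert_eq!(dir_to_namespace("monitors"), "monitors");
}
```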
diff --git a/lightning-persister/src/fs_store/mod.rs b/lightning-persister/src/fs_store/mod.rs
new file mode 100644
index 00000000000..5fe7f6542ce
--- /dev/null
+++ b/lightning-persister/src/fs_store/mod.rs
@@ -0,0 +1,6 @@
+//! Implementations of filesystem-backed key-value stores.
+
+pub mod v1;
+pub mod v2;
+
+pub(crate) mod common;
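With this layout the two stores live side by side and downstream code picks one explicitly. A hypothetical import sketch, assuming no re-exports beyond what the module declares:

```rust
// v1: legacy layout, plus MigratableKVStore for moving data out.
use lightning_persister::fs_store::v1::FilesystemStore;
// v2: "[empty]"-sentinel layout plus paginated listing.
use lightning_persister::fs_store::v2::FilesystemStoreV2;
```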
diff --git a/lightning-persister/src/fs_store/v1.rs b/lightning-persister/src/fs_store/v1.rs
new file mode 100644
index 00000000000..776aba630c4
--- /dev/null
+++ b/lightning-persister/src/fs_store/v1.rs
@@ -0,0 +1,349 @@
+//! Objects related to [`FilesystemStore`] live here.
+use crate::fs_store::common::FilesystemStoreState;
+
+use lightning::util::persist::{KVStoreSync, MigratableKVStore};
+
+use std::path::PathBuf;
+
+#[cfg(feature = "tokio")]
+use core::future::Future;
+#[cfg(feature = "tokio")]
+use lightning::util::persist::KVStore;
+
+/// A [`KVStore`] and [`KVStoreSync`] implementation that writes to and reads from the file system.
+///
+/// [`KVStore`]: lightning::util::persist::KVStore
+pub struct FilesystemStore {
+	state: FilesystemStoreState,
+}
+
+impl FilesystemStore {
+	/// Constructs a new [`FilesystemStore`].
+	pub fn new(data_dir: PathBuf) -> Self {
+		Self { state: FilesystemStoreState::new(data_dir) }
+	}
+
+	/// Returns the data directory.
+	pub fn get_data_dir(&self) -> PathBuf {
+		self.state.get_data_dir()
+	}
+
+	#[cfg(any(all(feature = "tokio", test), fuzzing))]
+	/// Returns the size of the async state.
+	pub fn state_size(&self) -> usize {
+		self.state.state_size()
+	}
+}
+
+impl KVStoreSync for FilesystemStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> Result<Vec<u8>, lightning::io::Error> {
+		self.state.read_impl(primary_namespace, secondary_namespace, key, false)
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> Result<(), lightning::io::Error> {
+		self.state.write_impl(primary_namespace, secondary_namespace, key, buf, false)
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> Result<(), lightning::io::Error> {
+		self.state.remove_impl(primary_namespace, secondary_namespace, key, lazy, false)
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Result<Vec<String>, lightning::io::Error> {
+		self.state.list_impl(primary_namespace, secondary_namespace, false)
+	}
+}
+
+#[cfg(feature = "tokio")]
+impl KVStore for FilesystemStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> impl Future<Output = Result<Vec<u8>, lightning::io::Error>> + 'static + Send {
+		self.state.read_async(primary_namespace, secondary_namespace, key, false)
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> impl Future<Output = Result<(), lightning::io::Error>> + 'static + Send {
+		self.state.write_async(primary_namespace, secondary_namespace, key, buf, false)
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> impl Future<Output = Result<(), lightning::io::Error>> + 'static + Send {
+		self.state.remove_async(primary_namespace, secondary_namespace, key, lazy, false)
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> impl Future<Output = Result<Vec<String>, lightning::io::Error>> + 'static + Send {
+		self.state.list_async(primary_namespace, secondary_namespace, false)
+	}
+}
+
+impl MigratableKVStore for FilesystemStore {
+	fn list_all_keys(&self) -> Result<Vec<(String, String, String)>, lightning::io::Error> {
+		self.state.list_all_keys_impl(false)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::test_utils::{
+		do_read_write_remove_list_persist, do_test_data_migration, do_test_store,
+	};
+
+	use lightning::chain::chainmonitor::Persist;
+	use lightning::chain::ChannelMonitorUpdateStatus;
+	use lightning::events::ClosureReason;
+	use lightning::ln::functional_test_utils::*;
+	use lightning::ln::msgs::BaseMessageHandler;
+	use lightning::util::persist::read_channel_monitors;
+	use lightning::util::test_utils;
+
+	use std::fs;
+
+	impl Drop for FilesystemStore {
+		fn drop(&mut self) {
+			// We test for invalid directory names, so it's OK if directory removal
+			// fails.
+			match fs::remove_dir_all(&self.get_data_dir()) {
+				Err(e) => println!("Failed to remove test persister directory: {}", e),
+				_ => {},
+			}
+		}
+	}
+
+	#[test]
+	fn read_write_remove_list_persist() {
+		let mut temp_path = std::env::temp_dir();
+		temp_path.push("test_read_write_remove_list_persist");
+		let fs_store = FilesystemStore::new(temp_path);
+		do_read_write_remove_list_persist(&fs_store);
+	}
+
+	#[cfg(feature = "tokio")]
+	#[tokio::test]
+	async fn read_write_remove_list_persist_async() {
+		use lightning::util::persist::KVStore;
+		use std::sync::Arc;
+
+		let mut temp_path = std::env::temp_dir();
+		temp_path.push("test_read_write_remove_list_persist_async");
+		let fs_store = Arc::new(FilesystemStore::new(temp_path));
+		assert_eq!(fs_store.state_size(), 0);
+
+		let async_fs_store = Arc::clone(&fs_store);
+
+		let data1 = vec![42u8; 32];
+		let data2 = vec![43u8; 32];
+
+		let primary = "testspace";
+		let secondary = "testsubspace";
+		let key = "testkey";
+
+		// Test writing the same key twice with different data. Execute the asynchronous part out of order to ensure
+		// that eventual consistency works.
+		let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1);
+		assert_eq!(fs_store.state_size(), 1);
+
+		let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false);
+		assert_eq!(fs_store.state_size(), 1);
+
+		let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone());
+		assert_eq!(fs_store.state_size(), 1);
+
+		fut3.await.unwrap();
+		assert_eq!(fs_store.state_size(), 1);
+
+		fut2.await.unwrap();
+		assert_eq!(fs_store.state_size(), 1);
+
+		fut1.await.unwrap();
+		assert_eq!(fs_store.state_size(), 0);
+
+		// Test list.
+		let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap();
+		assert_eq!(listed_keys.len(), 1);
+		assert_eq!(listed_keys[0], key);
+
+		// Test read. We expect to read data2, as the write call was initiated later.
+		let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap();
+		assert_eq!(data2, &*read_data);
+
+		// Test remove.
+		KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap();
+
+		let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap();
+		assert_eq!(listed_keys.len(), 0);
+	}
+
+	#[test]
+	fn test_data_migration() {
+		let mut source_temp_path = std::env::temp_dir();
+		source_temp_path.push("test_data_migration_source");
+		let mut source_store = FilesystemStore::new(source_temp_path);
+
+		let mut target_temp_path = std::env::temp_dir();
+		target_temp_path.push("test_data_migration_target");
+		let mut target_store = FilesystemStore::new(target_temp_path);
+
+		do_test_data_migration(&mut source_store, &mut target_store);
+	}
+
+	#[test]
+	fn test_if_monitors_is_not_dir() {
+		let store = FilesystemStore::new("test_monitors_is_not_dir".into());
+
+		fs::create_dir_all(&store.get_data_dir()).unwrap();
+		let mut path = std::path::PathBuf::from(&store.get_data_dir());
+		path.push("monitors");
+		fs::File::create(path).unwrap();
+
+		let chanmon_cfgs = create_chanmon_cfgs(1);
+		let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
+		let chain_mon_0 = test_utils::TestChainMonitor::new(
+			Some(&chanmon_cfgs[0].chain_source),
+			&chanmon_cfgs[0].tx_broadcaster,
+			&chanmon_cfgs[0].logger,
+			&chanmon_cfgs[0].fee_estimator,
+			&store,
+			node_cfgs[0].keys_manager,
+		);
+		node_cfgs[0].chain_monitor = chain_mon_0;
+		let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
+		let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
+
+		// Check that read_channel_monitors() returns error if monitors/ is not a
+		// directory.
+		assert!(
+			read_channel_monitors(&store, nodes[0].keys_manager, nodes[0].keys_manager).is_err()
+		);
+	}
+
+	#[test]
+	fn test_filesystem_store() {
+		// Create the nodes, giving them FilesystemStores for data stores.
+		let store_0 = FilesystemStore::new("test_filesystem_store_0".into());
+		let store_1 = FilesystemStore::new("test_filesystem_store_1".into());
+		do_test_store(&store_0, &store_1)
+	}
+
+	// Test that if the store's path to channel data is read-only, writing a
+	// monitor to it results in the store returning an UnrecoverableError.
+	// Windows ignores the read-only flag for folders, so this test is Unix-only.
+	#[cfg(not(target_os = "windows"))]
+	#[test]
+	fn test_readonly_dir_perm_failure() {
+		let store = FilesystemStore::new("test_readonly_dir_perm_failure".into());
+		fs::create_dir_all(&store.get_data_dir()).unwrap();
+
+		// Set up a dummy channel and force close. This will produce a monitor
+		// that we can then use to test persistence.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		let node_a_id = nodes[0].node.get_our_node_id();
+
+		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		let message = "Channel force-closed".to_owned();
+		nodes[1]
+			.node
+			.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone())
+			.unwrap();
+		let reason =
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
+		check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
+		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+
+		// Set the store's directory to read-only, which should result in
+		// returning an unrecoverable failure when we then attempt to persist a
+		// channel update.
+		let path = &store.get_data_dir();
+		let mut perms = fs::metadata(path).unwrap().permissions();
+		perms.set_readonly(true);
+		fs::set_permissions(path, perms).unwrap();
+
+		let monitor_name = added_monitors[0].1.persistence_key();
+		match store.persist_new_channel(monitor_name, &added_monitors[0].1) {
+			ChannelMonitorUpdateStatus::UnrecoverableError => {},
+			_ => panic!("unexpected result from persisting new channel"),
+		}
+
+		nodes[1].node.get_and_clear_pending_msg_events();
+		added_monitors.clear();
+	}
+
+	// Test that if a store's directory name is invalid, monitor persistence
+	// will fail.
+	#[cfg(target_os = "windows")]
+	#[test]
+	fn test_fail_on_open() {
+		// Set up a dummy channel and force close. This will produce a monitor
+		// that we can then use to test persistence.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		let node_a_id = nodes[0].node.get_our_node_id();
+
+		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		let message = "Channel force-closed".to_owned();
+		nodes[1]
+			.node
+			.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone())
+			.unwrap();
+		let reason =
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
+		check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
+		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+		let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
+
+		// Create the store with an invalid directory name and test that the
+		// channel fails to open because the directories fail to be created. There
+		// don't seem to be invalid filename characters on Unix that Rust doesn't
+		// handle, hence why the test is Windows-only.
+		let store = FilesystemStore::new(":<>/".into());
+
+		let monitor_name = added_monitors[0].1.persistence_key();
+		match store.persist_new_channel(monitor_name, &added_monitors[0].1) {
+			ChannelMonitorUpdateStatus::UnrecoverableError => {},
+			_ => panic!("unexpected result from persisting new channel"),
+		}
+
+		nodes[1].node.get_and_clear_pending_msg_events();
+		added_monitors.clear();
+	}
+}
+
+#[cfg(ldk_bench)]
+/// Benches
+pub mod bench {
+	use criterion::Criterion;
+
+	/// Bench!
+	pub fn bench_sends(bench: &mut Criterion) {
+		let store_a = super::FilesystemStore::new("bench_filesystem_store_a".into());
+		let store_b = super::FilesystemStore::new("bench_filesystem_store_b".into());
+		lightning::ln::channelmanager::bench::bench_two_sends(
+			bench,
+			"bench_filesystem_persisted_sends",
+			store_a,
+			store_b,
+		);
+	}
+}
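Before the v2 source, a minimal end-to-end sketch of the new store — the temp path and key names are illustrative, and `?` assumes the default `std` build where `lightning::io::Error` is `std::io::Error`:

```rust
use lightning::util::persist::{KVStoreSync, PaginatedKVStoreSync};
use lightning_persister::fs_store::v2::FilesystemStoreV2;

fn demo() -> std::io::Result<()> {
    // Fails if the directory holds top-level files left behind by a v1 store.
    let store = FilesystemStoreV2::new("/tmp/ldk_v2_demo".into())?;
    KVStoreSync::write(&store, "primary", "secondary", "key1", vec![1, 2, 3])?;
    // Newest-first listing; `None` starts from the most recent entry.
    let page = PaginatedKVStoreSync::list_paginated(&store, "primary", "secondary", None)?;
    assert_eq!(page.keys, vec!["key1".to_string()]);
    Ok(())
}
```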
diff --git a/lightning-persister/src/fs_store/v2.rs b/lightning-persister/src/fs_store/v2.rs
new file mode 100644
index 00000000000..b167541830e
--- /dev/null
+++ b/lightning-persister/src/fs_store/v2.rs
@@ -0,0 +1,655 @@
+//! Objects related to [`FilesystemStoreV2`] live here.
+use crate::fs_store::common::{get_key_from_dir_entry_path, FilesystemStoreState};
+
+use lightning::util::persist::{
+	KVStoreSync, MigratableKVStore, PageToken, PaginatedKVStoreSync, PaginatedListResponse,
+};
+
+use std::fs;
+use std::path::PathBuf;
+use std::time::UNIX_EPOCH;
+
+#[cfg(feature = "tokio")]
+use core::future::Future;
+#[cfg(feature = "tokio")]
+use lightning::util::persist::{KVStore, PaginatedKVStore};
+use std::sync::Arc;
+
+/// A [`KVStore`] and [`KVStoreSync`] implementation that writes to and reads from the file system.
+///
+/// This is version 2 of the filesystem store which provides:
+/// - Consistent directory structure using `[empty]` for empty namespaces
+/// - File modification times for creation-order pagination
+/// - Support for [`PaginatedKVStoreSync`] with newest-first ordering
+///
+/// ## Directory Structure
+///
+/// Files are stored with a consistent two-level namespace hierarchy:
+/// ```text
+/// data_dir/
+///   [empty]/            # empty primary namespace
+///     [empty]/          # empty secondary namespace
+///       {key}
+///   primary_ns/
+///     [empty]/          # empty secondary namespace
+///       {key}
+///     secondary_ns/
+///       {key}
+/// ```
+///
+/// ## File Ordering
+///
+/// Files are ordered by their modification time (mtime). When a file is created, it gets
+/// the current time. When updated, the original creation time is preserved by setting
+/// the mtime of the new file to match the original before the atomic rename.
+///
+/// [`KVStore`]: lightning::util::persist::KVStore
+pub struct FilesystemStoreV2 {
+	inner: Arc<FilesystemStoreState>,
+}
+
+impl FilesystemStoreV2 {
+	/// Constructs a new [`FilesystemStoreV2`].
+	///
+	/// Returns an error if the data directory already exists and contains files at the top level,
+	/// which would indicate it was previously used by a [`FilesystemStore`] (v1). The v2 store
+	/// expects only directories (namespaces) at the top level.
+	///
+	/// [`FilesystemStore`]: crate::fs_store::v1::FilesystemStore
+	pub fn new(data_dir: PathBuf) -> std::io::Result<Self> {
+		if data_dir.exists() {
+			for entry in fs::read_dir(&data_dir)? {
+				let entry = entry?;
+				if entry.file_type()?.is_file() {
+					return Err(std::io::Error::new(
+						std::io::ErrorKind::InvalidData,
+						format!(
+							"Found file `{}` in the top-level data directory. \
+							This indicates the directory was previously used by FilesystemStore (v1). \
+							Please migrate your data or use a different directory.",
+							entry.path().display()
+						),
+					));
+				}
+			}
+		}
+
+		Ok(Self { inner: Arc::new(FilesystemStoreState::new(data_dir)) })
+	}
+
+	/// Returns the data directory.
+	pub fn get_data_dir(&self) -> PathBuf {
+		self.inner.get_data_dir()
+	}
+
+	#[cfg(any(all(feature = "tokio", test), fuzzing))]
+	/// Returns the size of the async state.
+	pub fn state_size(&self) -> usize {
+		self.inner.state_size()
+	}
+}
+
+/// The fixed page size for paginated listing operations.
+pub(crate) const PAGE_SIZE: usize = 50;
+
+/// The length of the timestamp in a page token (milliseconds since epoch as 16-digit decimal).
+const PAGE_TOKEN_TIMESTAMP_LEN: usize = 16;
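The token grammar these constants imply: a 16-digit zero-padded millisecond timestamp, a colon, then the key verbatim. A worked example with made-up values:

```rust
fn main() {
    // Same shape as format_page_token: 16-digit zero-padded millis, ':', key.
    let token = format!("{:016}:{}", 1706500000000u64, "chan_1");
    assert_eq!(token, "0001706500000000:chan_1");
    // Splitting at the first ':' is unambiguous because the kvstore key alphabet
    // contains no ':' (and parsing additionally pins the prefix length to 16).
    assert_eq!(token.split_once(':').unwrap().0.len(), 16);
}
```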
+impl FilesystemStoreState {
+	fn list_paginated_impl(
+		&self, prefixed_dest: PathBuf, page_token: Option<PageToken>,
+	) -> Result<PaginatedListResponse, lightning::io::Error> {
+		if !prefixed_dest.exists() {
+			return Ok(PaginatedListResponse { keys: Vec::new(), next_page_token: None });
+		}
+
+		// Collect all entries with their modification times
+		let mut entries: Vec<(u64, String)> = Vec::new();
+		for dir_entry in fs::read_dir(&prefixed_dest)? {
+			let dir_entry = dir_entry?;
+
+			let key = get_key_from_dir_entry_path(&dir_entry.path(), prefixed_dest.as_path())?;
+			// Get modification time as millis since epoch
+			let mtime_millis = dir_entry
+				.metadata()
+				.ok()
+				.and_then(|m| m.modified().ok())
+				.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
+				.map(|d| d.as_millis() as u64)
+				.unwrap_or(0);
+
+			entries.push((mtime_millis, key));
+		}
+
+		// Sort by mtime descending (newest first), then by key descending for same mtime
+		entries.sort_by(|a, b| b.0.cmp(&a.0).then_with(|| b.1.cmp(&a.1)));
+
+		// Find starting position based on page token
+		let start_idx = if let Some(token) = page_token {
+			let (token_mtime, token_key) = parse_page_token(token.as_str())?;
+
+			// Find entries that come after the token (older entries = lower mtime)
+			// or same mtime but lexicographically smaller key (since we sort descending)
+			entries
+				.iter()
+				.position(|(mtime, key)| {
+					*mtime < token_mtime
+						|| (*mtime == token_mtime && key.as_str() < token_key.as_str())
+				})
+				.unwrap_or(entries.len())
+		} else {
+			0
+		};
+
+		// Take PAGE_SIZE entries starting from start_idx
+		let page_entries: Vec<_> =
+			entries.iter().skip(start_idx).take(PAGE_SIZE).cloned().collect();
+
+		let keys: Vec<String> = page_entries.iter().map(|(_, key)| key.clone()).collect();
+
+		// Determine next page token
+		let next_page_token = if start_idx + PAGE_SIZE < entries.len() {
+			page_entries.last().map(|(mtime, key)| PageToken::new(format_page_token(*mtime, key)))
+		} else {
+			None
+		};
+
+		Ok(PaginatedListResponse { keys, next_page_token })
+	}
+}
+
+impl KVStoreSync for FilesystemStoreV2 {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> Result<Vec<u8>, lightning::io::Error> {
+		self.inner.read_impl(primary_namespace, secondary_namespace, key, true)
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> Result<(), lightning::io::Error> {
+		self.inner.write_impl(primary_namespace, secondary_namespace, key, buf, true)
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> Result<(), lightning::io::Error> {
+		self.inner.remove_impl(primary_namespace, secondary_namespace, key, lazy, true)
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Result<Vec<String>, lightning::io::Error> {
+		self.inner.list_impl(primary_namespace, secondary_namespace, true)
+	}
+}
+
+impl PaginatedKVStoreSync for FilesystemStoreV2 {
+	fn list_paginated(
+		&self, primary_namespace: &str, secondary_namespace: &str, page_token: Option<PageToken>,
+	) -> Result<PaginatedListResponse, lightning::io::Error> {
+		let prefixed_dest = self.inner.get_checked_dest_file_path(
+			primary_namespace,
+			secondary_namespace,
+			None,
+			"list_paginated",
+			true,
+		)?;
+		self.inner.list_paginated_impl(prefixed_dest, page_token)
+	}
+}
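How a caller is expected to drive the sync pagination API — a sketch where `store` is any `PaginatedKVStoreSync` and the namespaces are illustrative:

```rust
use lightning::util::persist::{PaginatedKVStoreSync, PageToken};

// Drain every page, newest keys first.
fn collect_all<S: PaginatedKVStoreSync>(store: &S) -> Result<Vec<String>, lightning::io::Error> {
    let mut keys = Vec::new();
    let mut token: Option<PageToken> = None;
    loop {
        let page = store.list_paginated("primary", "secondary", token)?;
        keys.extend(page.keys);
        match page.next_page_token {
            Some(next) => token = Some(next),
            None => break, // no more pages
        }
    }
    Ok(keys)
}
```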
secondary_namespace, key, lazy, true) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, lightning::io::Error>> + 'static + Send { + self.inner.list_async(primary_namespace, secondary_namespace, true) + } +} + +#[cfg(feature = "tokio")] +impl PaginatedKVStore for FilesystemStoreV2 { + fn list_paginated( + &self, primary_namespace: &str, secondary_namespace: &str, page_token: Option, + ) -> impl Future> + 'static + Send + { + let this = Arc::clone(&self.inner); + + let path = this.get_checked_dest_file_path( + primary_namespace, + secondary_namespace, + None, + "list_paginated", + true, + ); + + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; + tokio::task::spawn_blocking(move || this.list_paginated_impl(path, page_token)) + .await + .unwrap_or_else(|e| { + Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) + }) + } + } +} + +impl MigratableKVStore for FilesystemStoreV2 { + fn list_all_keys(&self) -> Result, lightning::io::Error> { + self.inner.list_all_keys_impl(true) + } +} + +/// Formats a page token from mtime (millis since epoch) and key. +pub(crate) fn format_page_token(mtime_millis: u64, key: &str) -> String { + format!("{mtime_millis:016}:{key}") +} + +/// Parses a page token into mtime (millis since epoch) and key. +pub(crate) fn parse_page_token(token: &str) -> lightning::io::Result<(u64, String)> { + let colon_pos = token.find(':').ok_or_else(|| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidInput, + "Invalid page token format", + ) + })?; + + if colon_pos != PAGE_TOKEN_TIMESTAMP_LEN { + return Err(lightning::io::Error::new( + lightning::io::ErrorKind::InvalidInput, + "Invalid page token format", + )); + } + + let mtime = token[..colon_pos].parse::().map_err(|_| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidInput, + "Invalid page token timestamp", + ) + })?; + + let key = token[colon_pos + 1..].to_string(); + + Ok((mtime, key)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::fs_store::common::EMPTY_NAMESPACE_DIR; + use crate::test_utils::{ + do_read_write_remove_list_persist, do_test_data_migration, do_test_store, + }; + use std::fs::FileTimes; + use std::time::UNIX_EPOCH; + + impl Drop for FilesystemStoreV2 { + fn drop(&mut self) { + // We test for invalid directory names, so it's OK if directory removal + // fails. + match fs::remove_dir_all(&self.inner.get_data_dir()) { + Err(e) => println!("Failed to remove test persister directory: {}", e), + _ => {}, + } + } + } + + #[test] + fn read_write_remove_list_persist() { + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_read_write_remove_list_persist_v2"); + let fs_store = FilesystemStoreV2::new(temp_path).unwrap(); + do_read_write_remove_list_persist(&fs_store); + } + + #[cfg(feature = "tokio")] + #[tokio::test] + async fn read_write_remove_list_persist_async() { + use lightning::util::persist::KVStore; + use std::sync::Arc; + + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_read_write_remove_list_persist_async_v2"); + let fs_store = Arc::new(FilesystemStoreV2::new(temp_path).unwrap()); + assert_eq!(fs_store.state_size(), 0); + + let async_fs_store = Arc::clone(&fs_store); + + let data1 = vec![42u8; 32]; + let data2 = vec![43u8; 32]; + + let primary = "testspace"; + let secondary = "testsubspace"; + let key = "testkey"; + + // Test writing the same key twice with different data. 
Execute the asynchronous part out of order to ensure + // that eventual consistency works. + let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1); + assert_eq!(fs_store.state_size(), 1); + + let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false); + assert_eq!(fs_store.state_size(), 1); + + let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone()); + assert_eq!(fs_store.state_size(), 1); + + fut3.await.unwrap(); + assert_eq!(fs_store.state_size(), 1); + + fut2.await.unwrap(); + assert_eq!(fs_store.state_size(), 1); + + fut1.await.unwrap(); + assert_eq!(fs_store.state_size(), 0); + + // Test list. + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); + assert_eq!(listed_keys.len(), 1); + assert_eq!(listed_keys[0], key); + + // Test read. We expect to read data2, as the write call was initiated later. + let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap(); + assert_eq!(data2, &*read_data); + + // Test remove. + KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap(); + + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); + assert_eq!(listed_keys.len(), 0); + } + + #[test] + fn test_data_migration() { + let mut source_temp_path = std::env::temp_dir(); + source_temp_path.push("test_data_migration_source_v2"); + let mut source_store = FilesystemStoreV2::new(source_temp_path).unwrap(); + + let mut target_temp_path = std::env::temp_dir(); + target_temp_path.push("test_data_migration_target_v2"); + let mut target_store = FilesystemStoreV2::new(target_temp_path).unwrap(); + + do_test_data_migration(&mut source_store, &mut target_store); + } + + #[test] + fn test_filesystem_store_v2() { + // Create the nodes, giving them FilesystemStoreV2s for data stores. 
+ let store_0 = FilesystemStoreV2::new("test_filesystem_store_v2_0".into()).unwrap(); + let store_1 = FilesystemStoreV2::new("test_filesystem_store_v2_1".into()).unwrap(); + do_test_store(&store_0, &store_1) + } + + #[test] + fn test_page_token_format() { + let mtime: u64 = 1706500000000; + let key = "test_key"; + let token = format_page_token(mtime, key); + assert_eq!(token, "0001706500000000:test_key"); + + let parsed = parse_page_token(&token).unwrap(); + assert_eq!(parsed, (mtime, key.to_string())); + + // Test invalid tokens + assert!(parse_page_token("invalid").is_err()); + assert!(parse_page_token("0001706500000000_key").is_err()); // wrong separator + } + + #[test] + fn test_directory_structure() { + use lightning::util::persist::KVStoreSync; + + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_directory_structure_v2"); + let fs_store = FilesystemStoreV2::new(temp_path.clone()).unwrap(); + + let data = vec![42u8; 32]; + + // Write with empty namespaces + KVStoreSync::write(&fs_store, "", "", "key1", data.clone()).unwrap(); + assert!(temp_path.join(EMPTY_NAMESPACE_DIR).join(EMPTY_NAMESPACE_DIR).exists()); + + // Write with non-empty primary, empty secondary + KVStoreSync::write(&fs_store, "primary", "", "key2", data.clone()).unwrap(); + assert!(temp_path.join("primary").join(EMPTY_NAMESPACE_DIR).exists()); + + // Write with both non-empty + KVStoreSync::write(&fs_store, "primary", "secondary", "key3", data.clone()).unwrap(); + assert!(temp_path.join("primary").join("secondary").exists()); + + // Verify we can read them back + assert_eq!(KVStoreSync::read(&fs_store, "", "", "key1").unwrap(), data); + assert_eq!(KVStoreSync::read(&fs_store, "primary", "", "key2").unwrap(), data); + assert_eq!(KVStoreSync::read(&fs_store, "primary", "secondary", "key3").unwrap(), data); + + // Verify files are named just by key (no timestamp prefix) + assert!(temp_path + .join(EMPTY_NAMESPACE_DIR) + .join(EMPTY_NAMESPACE_DIR) + .join("key1") + .exists()); + assert!(temp_path.join("primary").join(EMPTY_NAMESPACE_DIR).join("key2").exists()); + assert!(temp_path.join("primary").join("secondary").join("key3").exists()); + } + + #[test] + fn test_update_preserves_mtime() { + use lightning::util::persist::KVStoreSync; + + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_update_preserves_mtime_v2"); + let fs_store = FilesystemStoreV2::new(temp_path.clone()).unwrap(); + + let data1 = vec![42u8; 32]; + let data2 = vec![43u8; 32]; + + // Write initial data + KVStoreSync::write(&fs_store, "ns", "sub", "key", data1).unwrap(); + + // Get the original mtime + let file_path = temp_path.join("ns").join("sub").join("key"); + let original_mtime = fs::metadata(&file_path).unwrap().modified().unwrap(); + + // Sleep briefly to ensure different timestamp if not preserved + std::thread::sleep(std::time::Duration::from_millis(50)); + + // Update with new data + KVStoreSync::write(&fs_store, "ns", "sub", "key", data2.clone()).unwrap(); + + // Verify mtime is preserved + let updated_mtime = fs::metadata(&file_path).unwrap().modified().unwrap(); + assert_eq!(original_mtime, updated_mtime); + + // Verify data was updated + assert_eq!(KVStoreSync::read(&fs_store, "ns", "sub", "key").unwrap(), data2); + } + + #[test] + fn test_paginated_listing() { + use lightning::util::persist::{KVStoreSync, PaginatedKVStoreSync}; + + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_paginated_listing_v2"); + let fs_store = FilesystemStoreV2::new(temp_path).unwrap(); + + let data = vec![42u8; 32]; + 
+ // Write several keys with small delays to ensure different mtimes + let keys: Vec = (0..5).map(|i| format!("key{}", i)).collect(); + for key in &keys { + KVStoreSync::write(&fs_store, "ns", "sub", key, data.clone()).unwrap(); + std::thread::sleep(std::time::Duration::from_millis(10)); + } + + // List paginated - should return newest first + let response = PaginatedKVStoreSync::list_paginated(&fs_store, "ns", "sub", None).unwrap(); + assert_eq!(response.keys.len(), 5); + // Newest key (key4) should be first + assert_eq!(response.keys[0], "key4"); + assert_eq!(response.keys[4], "key0"); + assert!(response.next_page_token.is_none()); // Less than PAGE_SIZE items + } + + #[test] + fn test_paginated_listing_with_pagination() { + use lightning::util::persist::{KVStoreSync, PaginatedKVStoreSync}; + + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_paginated_listing_with_pagination_v2"); + let fs_store = FilesystemStoreV2::new(temp_path).unwrap(); + + let data = vec![42u8; 32]; + + // Write more than PAGE_SIZE keys + let num_keys = PAGE_SIZE + 50; + for i in 0..num_keys { + let key = format!("key{:04}", i); + KVStoreSync::write(&fs_store, "ns", "sub", &key, data.clone()).unwrap(); + // Small delay to ensure ordering + if i % 10 == 0 { + std::thread::sleep(std::time::Duration::from_millis(1)); + } + } + + // First page + let response1 = PaginatedKVStoreSync::list_paginated(&fs_store, "ns", "sub", None).unwrap(); + assert_eq!(response1.keys.len(), PAGE_SIZE); + assert!(response1.next_page_token.is_some()); + + // Second page + let response2 = + PaginatedKVStoreSync::list_paginated(&fs_store, "ns", "sub", response1.next_page_token) + .unwrap(); + assert_eq!(response2.keys.len(), 50); + assert!(response2.next_page_token.is_none()); + + // Verify no duplicates between pages + let all_keys: std::collections::HashSet<_> = + response1.keys.iter().chain(response2.keys.iter()).collect(); + assert_eq!(all_keys.len(), num_keys); + } + + #[test] + fn test_page_token_after_deletion() { + use lightning::util::persist::{KVStoreSync, PaginatedKVStoreSync}; + + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_page_token_after_deletion_v2"); + let fs_store = FilesystemStoreV2::new(temp_path).unwrap(); + + let data = vec![42u8; 32]; + + // Write keys + for i in 0..10 { + let key = format!("key{}", i); + KVStoreSync::write(&fs_store, "ns", "sub", &key, data.clone()).unwrap(); + std::thread::sleep(std::time::Duration::from_millis(10)); + } + + // Verify initial listing + let response1 = PaginatedKVStoreSync::list_paginated(&fs_store, "ns", "sub", None).unwrap(); + assert_eq!(response1.keys.len(), 10); + + // Delete some keys + KVStoreSync::remove(&fs_store, "ns", "sub", "key5", false).unwrap(); + KVStoreSync::remove(&fs_store, "ns", "sub", "key3", false).unwrap(); + + // List again - should work fine with deleted keys + let response2 = PaginatedKVStoreSync::list_paginated(&fs_store, "ns", "sub", None).unwrap(); + assert_eq!(response2.keys.len(), 8); // 10 - 2 deleted + } + + #[test] + fn test_same_mtime_sorted_by_key() { + use lightning::util::persist::PaginatedKVStoreSync; + use std::time::Duration; + + // Create files directly on disk first with the same mtime + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_same_mtime_sorted_by_key_v2"); + let _ = fs::remove_dir_all(&temp_path); + + let data = vec![42u8; 32]; + let dir = temp_path.join("ns").join("sub"); + fs::create_dir_all(&dir).unwrap(); + + // Write files with the same mtime but different keys + let keys = 
vec!["zebra", "apple", "mango", "banana"]; + let fixed_time = UNIX_EPOCH + Duration::from_secs(1706500000); + + for key in &keys { + let file_path = dir.join(key); + let file = fs::File::create(&file_path).unwrap(); + std::io::Write::write_all(&mut &file, &data).unwrap(); + file.set_times(FileTimes::new().set_modified(fixed_time)).unwrap(); + } + + // Open the store + let fs_store = FilesystemStoreV2::new(temp_path.clone()).unwrap(); + + // List paginated - should return keys sorted by key in reverse order + // (for same mtime, keys are sorted reverse alphabetically) + let response = PaginatedKVStoreSync::list_paginated(&fs_store, "ns", "sub", None).unwrap(); + assert_eq!(response.keys.len(), 4); + + // Same mtime means sorted by key in reverse order (z > m > b > a) + assert_eq!(response.keys[0], "zebra"); + assert_eq!(response.keys[1], "mango"); + assert_eq!(response.keys[2], "banana"); + assert_eq!(response.keys[3], "apple"); + } + + #[test] + fn test_rejects_v1_data_directory() { + let mut temp_path = std::env::temp_dir(); + temp_path.push("test_rejects_v1_data_directory"); + let _ = fs::remove_dir_all(&temp_path); + fs::create_dir_all(&temp_path).unwrap(); + + // Create a file at the top level, as v1 would for an empty primary namespace + fs::write(temp_path.join("some_key"), b"data").unwrap(); + + // V2 construction should fail + match FilesystemStoreV2::new(temp_path.clone()) { + Err(err) => { + assert_eq!(err.kind(), std::io::ErrorKind::InvalidData); + assert!(err.to_string().contains("FilesystemStore (v1)")); + }, + Ok(_) => panic!("Expected error for directory with top-level files"), + } + + // Clean up + let _ = fs::remove_dir_all(&temp_path); + + // An empty directory should succeed + fs::create_dir_all(&temp_path).unwrap(); + let result = FilesystemStoreV2::new(temp_path.clone()); + assert!(result.is_ok()); + + // A directory with only subdirectories should succeed + fs::create_dir_all(temp_path.join("some_namespace")).unwrap(); + let result = FilesystemStoreV2::new(temp_path); + assert!(result.is_ok()); + } +} diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index cb4bdeb6a51..0aee98377be 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -17,6 +17,7 @@ use bitcoin::hashes::hex::FromHex; use bitcoin::{BlockHash, Txid}; use core::convert::Infallible; +use core::fmt; use core::future::Future; use core::mem; use core::ops::Deref; @@ -367,6 +368,191 @@ where } } +/// An opaque token used for paginated listing operations. +/// +/// This token should be treated as an opaque value by callers. Pass the token returned from +/// one `list_paginated` call to the next call to continue pagination. The internal format +/// is implementation-defined and may change between versions. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PageToken(String); + +impl PageToken { + /// Creates a new `PageToken` from the given string. + pub fn new(token: String) -> Self { + PageToken(token) + } + + /// Returns the inner string representation of the `PageToken`. + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl fmt::Display for PageToken { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +/// Represents the response from a paginated `list` operation. +/// +/// Contains the list of keys and a token for retrieving the next page of results. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PaginatedListResponse { + /// A vector of keys, ordered from most recently created to least recently created. 
+/// Represents the response from a paginated `list` operation.
+///
+/// Contains the list of keys and a token for retrieving the next page of results.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct PaginatedListResponse {
+	/// A vector of keys, ordered from most recently created to least recently created.
+	pub keys: Vec<String>,
+
+	/// A token that can be passed to the next call to continue pagination.
+	///
+	/// Is `None` if there are no more pages to retrieve.
+	pub next_page_token: Option<PageToken>,
+}
+
+/// Extends [`KVStoreSync`] with paginated key listing in reverse creation order.
+///
+/// While [`KVStoreSync::list`] returns all keys at once in arbitrary order, this trait adds a
+/// [`list_paginated`] method that returns keys in pages ordered from newest to oldest. This is
+/// useful when a namespace may contain a large number of keys that would be expensive to retrieve
+/// in a single call.
+///
+/// Namespace and key requirements are inherited from [`KVStoreSync`].
+///
+/// For an asynchronous version of this trait, see [`PaginatedKVStore`].
+///
+/// [`list_paginated`]: Self::list_paginated
+pub trait PaginatedKVStoreSync: KVStoreSync {
+	/// Returns a paginated list of keys that are stored under the given `secondary_namespace` in
+	/// `primary_namespace`, ordered from most recently created to least recently created.
+	///
+	/// Implementations must return keys in reverse creation order (newest first). How creation
+	/// order is tracked is implementation-defined (e.g., storing creation timestamps, using an
+	/// incrementing ID, or another mechanism). Creation order (not last-updated order) is used
+	/// to prevent race conditions during pagination: if keys were ordered by update time, a key
+	/// updated mid-pagination could shift position, causing it to be skipped or returned twice
+	/// across pages.
+	///
+	/// If `page_token` is provided, listing continues from where the previous page left off.
+	/// If `None`, listing starts from the most recently created entry. The `next_page_token`
+	/// in the returned [`PaginatedListResponse`] can be passed to subsequent calls to fetch
+	/// the next page.
+	///
+	/// Implementations must generate a [`PageToken`] that encodes enough information to resume
+	/// listing from the correct position. Tokens must remain valid across multiple calls within
+	/// a reasonable timeframe. If the entry referenced by a token has been deleted,
+	/// implementations should resume from the next valid position rather than failing.
+	/// Tokens are scoped to a specific `(primary_namespace, secondary_namespace)` pair and should
+	/// not be used across different namespace pairs.
+	///
+	/// Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown or if
+	/// there are no more keys to return.
+	fn list_paginated(
+		&self, primary_namespace: &str, secondary_namespace: &str, page_token: Option<PageToken>,
+	) -> Result<PaginatedListResponse, io::Error>;
+}
+
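+// A minimal caller-side pagination loop over any `PaginatedKVStoreSync` (illustrative sketch;
+// `store`, the namespace strings, and the error handling are placeholders, not part of this diff):
+//
+//   let mut page_token = None;
+//   let mut all_keys = Vec::new();
+//   loop {
+//       let page = store.list_paginated("primary", "secondary", page_token)?;
+//       all_keys.extend(page.keys);
+//       match page.next_page_token {
+//           Some(token) => page_token = Some(token),
+//           None => break,
+//       }
+//   }
+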
+/// A wrapper around a [`PaginatedKVStoreSync`] that implements the [`PaginatedKVStore`] trait.
+/// It is not necessary to use this type directly.
+#[derive(Clone)]
+pub struct PaginatedKVStoreSyncWrapper<K: Deref>(pub K)
+where
+	K::Target: PaginatedKVStoreSync;
+
+/// This is not exported to bindings users as async is only supported in Rust.
+impl<K: Deref> KVStore for PaginatedKVStoreSyncWrapper<K>
+where
+	K::Target: PaginatedKVStoreSync,
+{
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> impl Future<Output = Result<Vec<u8>, io::Error>> + 'static + MaybeSend {
+		let res = self.0.read(primary_namespace, secondary_namespace, key);
+
+		async move { res }
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> impl Future<Output = Result<(), io::Error>> + 'static + MaybeSend {
+		let res = self.0.write(primary_namespace, secondary_namespace, key, buf);
+
+		async move { res }
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> impl Future<Output = Result<(), io::Error>> + 'static + MaybeSend {
+		let res = self.0.remove(primary_namespace, secondary_namespace, key, lazy);
+
+		async move { res }
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> impl Future<Output = Result<Vec<String>, io::Error>> + 'static + MaybeSend {
+		let res = self.0.list(primary_namespace, secondary_namespace);
+
+		async move { res }
+	}
+}
+
+/// This is not exported to bindings users as async is only supported in Rust.
+impl<K: Deref> PaginatedKVStore for PaginatedKVStoreSyncWrapper<K>
+where
+	K::Target: PaginatedKVStoreSync,
+{
+	fn list_paginated(
+		&self, primary_namespace: &str, secondary_namespace: &str, page_token: Option<PageToken>,
+	) -> impl Future<Output = Result<PaginatedListResponse, io::Error>> + 'static + MaybeSend {
+		let res = self.0.list_paginated(primary_namespace, secondary_namespace, page_token);
+
+		async move { res }
+	}
+}
+
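+// Illustrative use of the wrapper (sketch only; `my_sync_store` stands in for any
+// `PaginatedKVStoreSync` implementation behind a `Deref`, e.g. an `Arc`):
+//
+//   let async_store = PaginatedKVStoreSyncWrapper(my_sync_store);
+//   let page = async_store.list_paginated("primary", "secondary", None).await?;
+//
+// Each returned future resolves immediately to the synchronous result, so the wrapper adds async
+// compatibility without changing ordering or locking behavior.
+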
+/// Extends [`KVStore`] with paginated key listing in reverse creation order.
+///
+/// While [`KVStore::list`] returns all keys at once in arbitrary order, this trait adds a
+/// [`list_paginated`] method that returns keys in pages ordered from newest to oldest. This is
+/// useful when a namespace may contain a large number of keys that would be expensive to retrieve
+/// in a single call.
+///
+/// Namespace and key requirements are inherited from [`KVStore`].
+///
+/// For a synchronous version of this trait, see [`PaginatedKVStoreSync`].
+///
+/// [`list_paginated`]: Self::list_paginated
+///
+/// This is not exported to bindings users as async is only supported in Rust.
+pub trait PaginatedKVStore: KVStore {
+	/// Returns a paginated list of keys that are stored under the given `secondary_namespace` in
+	/// `primary_namespace`, ordered from most recently created to least recently created.
+	///
+	/// Implementations must return keys in reverse creation order (newest first). How creation
+	/// order is tracked is implementation-defined (e.g., storing creation timestamps, using an
+	/// incrementing ID, or another mechanism). Creation order (not last-updated order) is used
+	/// to prevent race conditions during pagination: if keys were ordered by update time, a key
+	/// updated mid-pagination could shift position, causing it to be skipped or returned twice
+	/// across pages.
+	///
+	/// If `page_token` is provided, listing continues from where the previous page left off.
+	/// If `None`, listing starts from the most recently created entry. The `next_page_token`
+	/// in the returned [`PaginatedListResponse`] can be passed to subsequent calls to fetch
+	/// the next page.
+	///
+	/// Implementations must generate a [`PageToken`] that encodes enough information to resume
+	/// listing from the correct position. Tokens must remain valid across multiple calls within
+	/// a reasonable timeframe. If the entry referenced by a token has been deleted,
+	/// implementations should resume from the next valid position rather than failing.
+	/// Tokens are scoped to a specific `(primary_namespace, secondary_namespace)` pair and should
+	/// not be used across different namespace pairs.
+	///
+	/// Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown or if
+	/// there are no more keys to return.
+	fn list_paginated(
+		&self, primary_namespace: &str, secondary_namespace: &str, page_token: Option<PageToken>,
+	) -> impl Future<Output = Result<PaginatedListResponse, io::Error>> + 'static + MaybeSend;
+}
+
 /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`]
 /// data migration.
 pub trait MigratableKVStore: KVStoreSync {
@@ -1539,7 +1725,7 @@ mod tests {
 	use crate::ln::msgs::BaseMessageHandler;
 	use crate::sync::Arc;
 	use crate::util::test_channel_signer::TestChannelSigner;
-	use crate::util::test_utils::{self, TestStore};
+	use crate::util::test_utils::{self, TestPaginatedStore, TestStore};
 
 	use bitcoin::hashes::hex::FromHex;
 	use core::cmp;
@@ -1951,4 +2137,78 @@
 		let store: Arc = Arc::new(TestStore::new(false));
 		assert!(persist_fn::<_, TestChannelSigner>(Arc::clone(&store)));
 	}
+
+	#[test]
+	fn paginated_store_basic_operations() {
+		let store = TestPaginatedStore::new(10);
+
+		// Write some data
+		store.write("ns1", "ns2", "key1", vec![1, 2, 3]).unwrap();
+		store.write("ns1", "ns2", "key2", vec![4, 5, 6]).unwrap();
+
+		// Read it back
+		assert_eq!(KVStoreSync::read(&store, "ns1", "ns2", "key1").unwrap(), vec![1, 2, 3]);
+		assert_eq!(KVStoreSync::read(&store, "ns1", "ns2", "key2").unwrap(), vec![4, 5, 6]);
+
+		// List should return keys in descending order
+		let response = store.list_paginated("ns1", "ns2", None).unwrap();
+		assert_eq!(response.keys, vec!["key2", "key1"]);
+		assert!(response.next_page_token.is_none());
+
+		// Remove a key
+		KVStoreSync::remove(&store, "ns1", "ns2", "key1", false).unwrap();
+		assert!(KVStoreSync::read(&store, "ns1", "ns2", "key1").is_err());
+	}
+
+	#[test]
+	fn paginated_store_pagination() {
+		let store = TestPaginatedStore::new(2);
+
+		// Write 5 items with different order values
+		for i in 0..5i64 {
+			store.write("ns", "", &format!("key{i}"), vec![i as u8]).unwrap();
+		}
+
+		// First page should have 2 items (most recently created first: key4, key3)
+		let page1 = store.list_paginated("ns", "", None).unwrap();
+		assert_eq!(page1.keys.len(), 2);
+		assert_eq!(page1.keys, vec!["key4", "key3"]);
+		assert!(page1.next_page_token.is_some());
+
+		// Second page
+		let page2 = store.list_paginated("ns", "", page1.next_page_token).unwrap();
+		assert_eq!(page2.keys.len(), 2);
+		assert_eq!(page2.keys, vec!["key2", "key1"]);
+		assert!(page2.next_page_token.is_some());
+
+		// Third page (last item)
+		let page3 = store.list_paginated("ns", "", page2.next_page_token).unwrap();
+		assert_eq!(page3.keys.len(), 1);
+		assert_eq!(page3.keys, vec!["key0"]);
+		assert!(page3.next_page_token.is_none());
+	}
+
+	#[test]
+	fn paginated_store_update_preserves_order() {
+		let store = TestPaginatedStore::new(10);
+
+		// Write items with specific order values
+		store.write("ns", "", "key1", vec![1]).unwrap();
+		store.write("ns", "", "key2", vec![2]).unwrap();
+		store.write("ns", "", "key3", vec![3]).unwrap();
+
+		// Verify initial order (newest first)
+		let response = store.list_paginated("ns", "", None).unwrap();
+		assert_eq!(response.keys, vec!["key3", "key2", "key1"]);
+
+		// Update key1 with a new order value that would put it first if used
+		store.write("ns", "", "key1", vec![1, 1]).unwrap();
+
+		// Verify data was updated
+		assert_eq!(KVStoreSync::read(&store, "ns", "", "key1").unwrap(), vec![1, 1]);
+
+		// Verify order is unchanged - creation order should have been preserved
+		let response = store.list_paginated("ns", "", None).unwrap();
+		assert_eq!(response.keys, vec!["key3", "key2", "key1"]);
+	}
 }
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index bcf39fde482..a6df800b5c7 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -51,6 +51,7 @@ use crate::sign::{ChannelSigner, PeerStorageKey};
 use crate::sync::RwLock;
 use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use crate::util::async_poll::MaybeSend;
+use crate::util::atomic_counter::AtomicCounter;
 use crate::util::config::UserConfig;
 use crate::util::dyn_signer::{
 	DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner,
@@ -58,7 +59,7 @@ use crate::util::dyn_signer::{
 use crate::util::logger::{Logger, Record};
 #[cfg(feature = "std")]
 use crate::util::mut_global::MutGlobal;
-use crate::util::persist::{KVStore, KVStoreSync, MonitorName};
+use crate::util::persist::{KVStore, KVStoreSync, MonitorName, PageToken, PaginatedListResponse};
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use crate::util::test_channel_signer::{EnforcementState, TestChannelSigner};
 use crate::util::wakers::Notifier;
@@ -1129,6 +1130,122 @@ impl KVStoreSync for TestStore {
 unsafe impl Sync for TestStore {}
 unsafe impl Send for TestStore {}
 
+/// A simple in-memory implementation of [`PaginatedKVStoreSync`] for testing.
+///
+/// [`PaginatedKVStoreSync`]: crate::util::persist::PaginatedKVStoreSync
+pub struct TestPaginatedStore {
+	data: Mutex<HashMap<(String, String, String), (i64, Vec<u8>)>>,
+	page_size: usize,
+	time_counter: AtomicCounter,
+}
+
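+// Values are stored as `(creation_order, data)`: `creation_order` comes from `time_counter` on
+// the first write of a key and is deliberately preserved on updates, which gives
+// `list_paginated` the stable reverse-creation ordering that `PaginatedKVStoreSync` requires.
+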
+impl TestPaginatedStore {
+	/// Creates a new `TestPaginatedStore` with the given page size.
+	pub fn new(page_size: usize) -> Self {
+		Self { data: Mutex::new(new_hash_map()), page_size, time_counter: AtomicCounter::new() }
+	}
+}
+
+impl KVStoreSync for TestPaginatedStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> Result<Vec<u8>, io::Error> {
+		let data = self.data.lock().unwrap();
+		data.get(&(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()))
+			.map(|(_, v)| v.clone())
+			.ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> Result<(), io::Error> {
+		let mut data = self.data.lock().unwrap();
+		let order = self.time_counter.next() as i64;
+		let key_tuple =
+			(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string());
+		// Only use the fresh order value for new entries; preserve the existing order on updates
+		let order_to_use =
+			data.get(&key_tuple).map(|(existing_order, _)| *existing_order).unwrap_or(order);
+		data.insert(key_tuple, (order_to_use, buf));
+		Ok(())
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+	) -> Result<(), io::Error> {
+		let mut data = self.data.lock().unwrap();
+		data.remove(&(
+			primary_namespace.to_string(),
+			secondary_namespace.to_string(),
+			key.to_string(),
+		));
+		Ok(())
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Result<Vec<String>, io::Error> {
+		let mut all_keys = Vec::new();
+		let mut page_token = None;
+		loop {
+			let response = crate::util::persist::PaginatedKVStoreSync::list_paginated(
+				self,
+				primary_namespace,
+				secondary_namespace,
+				page_token,
+			)?;
+			all_keys.extend(response.keys);
+			match response.next_page_token {
+				Some(token) => page_token = Some(token),
+				None => break,
+			}
+		}
+		Ok(all_keys)
+	}
+}
+
+impl crate::util::persist::PaginatedKVStoreSync for TestPaginatedStore {
+	fn list_paginated(
+		&self, primary_namespace: &str, secondary_namespace: &str, page_token: Option<PageToken>,
+	) -> Result<PaginatedListResponse, io::Error> {
+		let data = self.data.lock().unwrap();
+		let mut entries: Vec<_> = data
+			.iter()
+			.filter(|((pn, sn, _), _)| pn == primary_namespace && sn == secondary_namespace)
+			.map(|((_, _, k), (t, _))| (k.clone(), *t))
+			.collect();
+
+		// Sort by creation order descending, breaking ties by key ascending
+		entries.sort_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.cmp(&b.0)));
+
+		// Apply pagination: find the first entry AFTER the given key in sort order.
+		// This implementation uses the last key of the previous page as the page token.
+		let start_idx = if let Some(ref last_key) = page_token {
+			// Find the position of this key and start after it; if the key was deleted,
+			// fall back to the start of the listing.
+			entries.iter().position(|(k, _)| k == last_key.as_str()).map(|pos| pos + 1).unwrap_or(0)
+		} else {
+			0
+		};
+
+		let page_entries: Vec<_> =
+			entries.into_iter().skip(start_idx).take(self.page_size).collect();
+
+		let next_page_token = if page_entries.len() == self.page_size {
+			page_entries.last().map(|(k, _)| PageToken::new(k.clone()))
+		} else {
+			None
+		};
+
+		Ok(PaginatedListResponse {
+			keys: page_entries.into_iter().map(|(k, _)| k).collect(),
+			next_page_token,
+		})
+	}
+}
+
+unsafe impl Sync for TestPaginatedStore {}
+unsafe impl Send for TestPaginatedStore {}
+
 pub struct TestBroadcaster {
 	pub txn_broadcasted: Mutex<Vec<Transaction>>,
 	pub txn_types: Mutex>,