A flexible, async-first cache library for Rust with pluggable backends, multiple eviction policies, and advanced search capabilities.
Add this to your `Cargo.toml`:

```toml
[dependencies]
threatflux-cache = "0.1.0"
```
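Optional functionality is gated behind Cargo features (listed below). As an illustrative example only, a couple of them can be enabled like any other Cargo feature:

```toml
[dependencies]
threatflux-cache = { version = "0.1.0", features = ["compression", "metrics"] }
```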
Available Cargo features:

- `default`: Enables the filesystem backend and JSON serialization
- `filesystem-backend`: Filesystem storage support
- `json-serialization`: JSON format support
- `bincode-serialization`: Bincode format support
- `compression`: Compression support for stored values
- `openapi`: OpenAPI schema generation
- `metrics`: Prometheus metrics integration
- `tracing`: Tracing support
- `full`: All features enabled

A minimal example of creating a cache, then storing and retrieving a value:

```rust
use threatflux_cache::prelude::*;
use serde::{Serialize, Deserialize};

#[derive(Serialize, Deserialize, Clone)]
struct User {
    id: u64,
    name: String,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a cache with default configuration
    let cache: Cache<String, User> = Cache::with_config(CacheConfig::default()).await?;

    // Store a value
    let user = User { id: 1, name: "Alice".to_string() };
    cache.put("user:1".to_string(), user).await?;

    // Retrieve a value
    if let Some(user) = cache.get(&"user:1".to_string()).await? {
        println!("Found user: {}", user.name);
    }

    Ok(())
}
```
To configure persistence and eviction with the filesystem backend:

```rust
use threatflux_cache::prelude::*;

let config = CacheConfig::default()
    .with_persistence(PersistenceConfig::with_path("/tmp/my-cache"))
    .with_eviction_policy(EvictionPolicy::Lru);

let backend = FilesystemBackend::new("/tmp/my-cache").await?;
let cache: Cache<String, String> = Cache::new(config, backend).await?;
```
Entries can carry metadata such as execution time, size, category, and tags:

```rust
use threatflux_cache::{CacheEntry, BasicMetadata};

let metadata = BasicMetadata {
    execution_time_ms: Some(100),
    size_bytes: Some(1024),
    category: Some("api-response".to_string()),
    tags: vec!["user".to_string(), "profile".to_string()],
};

let entry = CacheEntry::with_metadata(
    "key".to_string(),
    "value".to_string(),
    metadata,
);

cache.add_entry(entry).await?;
```
Stored entries can be searched by key pattern, metadata category, and access count:

```rust
use threatflux_cache::SearchQuery;

// Search by pattern and category, limited to entries accessed at least 5 times
let query = SearchQuery::new()
    .with_pattern("user")
    .with_category("api-response")
    .with_access_count_range(Some(5), None);

let results = cache.search(&query).await;
for entry in results {
    println!("Found: {:?}", entry.value);
}
```
If you're migrating from file-scanner's built-in cache, see `examples/file_scanner_migration.rs` for a complete migration guide. The library provides an adapter pattern to maintain API compatibility while gaining the benefits of the new cache system.
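As a rough, hypothetical sketch of that adapter idea (the real one lives in the example file above; the `store_result` and `get_result` method names here are placeholders, not the actual file-scanner API), a thin wrapper over the new cache might look like this:

```rust
use threatflux_cache::prelude::*;

// Hypothetical adapter: method names are illustrative placeholders.
pub struct FileScannerCacheAdapter {
    inner: Cache<String, String>,
}

impl FileScannerCacheAdapter {
    pub async fn new() -> Result<Self, Box<dyn std::error::Error>> {
        Ok(Self {
            inner: Cache::with_config(CacheConfig::default()).await?,
        })
    }

    // Keeps the old call shape while forwarding to the new async API.
    pub async fn store_result(&self, key: String, value: String) -> Result<(), Box<dyn std::error::Error>> {
        self.inner.put(key, value).await?;
        Ok(())
    }

    pub async fn get_result(&self, key: &str) -> Result<Option<String>, Box<dyn std::error::Error>> {
        Ok(self.inner.get(&key.to_string()).await?)
    }
}
```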
Cache behaviour is controlled through `CacheConfig`:

```rust
use std::time::Duration;
use threatflux_cache::prelude::*;

let config = CacheConfig::default()
    // Capacity settings
    .with_max_entries_per_key(100)
    .with_max_total_entries(10_000)
    // Eviction policy
    .with_eviction_policy(EvictionPolicy::Lru)
    // Persistence
    .with_persistence(PersistenceConfig {
        enabled: true,
        path: Some("/var/cache/myapp".into()),
        sync_interval: 100,
        save_on_drop: true,
        load_on_startup: true,
    })
    // TTL for all entries
    .with_default_ttl(Duration::from_secs(3600))
    // Enable compression
    .with_compression(CompressionConfig {
        algorithm: CompressionAlgorithm::Gzip,
        level: 6,
        min_size: 1024,
    });
```
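This configuration can then be paired with a backend through `Cache::new`, as in the filesystem example earlier (the path here simply mirrors the persistence path above):

```rust
let backend = FilesystemBackend::new("/var/cache/myapp").await?;
let cache: Cache<String, String> = Cache::new(config, backend).await?;
```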
Implement the `StorageBackend` trait to create custom storage solutions:
```rust
use std::collections::HashMap;

use async_trait::async_trait;
use threatflux_cache::{StorageBackend, CacheEntry, Result};

pub struct MyCustomBackend;

#[async_trait]
impl StorageBackend for MyCustomBackend {
    type Key = String;
    type Value = String;
    type Metadata = ();

    async fn save(&self, entries: &HashMap<Self::Key, Vec<CacheEntry<Self::Key, Self::Value, Self::Metadata>>>) -> Result<()> {
        // Implementation
        Ok(())
    }

    async fn load(&self) -> Result<HashMap<Self::Key, Vec<CacheEntry<Self::Key, Self::Value, Self::Metadata>>>> {
        // Implementation
        Ok(HashMap::new())
    }

    // ... other required methods
}
```
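Once the remaining trait methods are implemented, the custom backend plugs into `Cache::new` just like the built-in filesystem backend:

```rust
let cache: Cache<String, String> = Cache::new(CacheConfig::default(), MyCustomBackend).await?;
```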
The cache stores entries in an `Arc<RwLock<HashMap>>` for thread-safe concurrent access.
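For example, assuming the cache handle is `Send + Sync` (which the internal locking above implies), it can be shared across Tokio tasks behind an `Arc`; this snippet is illustrative rather than taken from the crate's examples:

```rust
use std::sync::Arc;
use threatflux_cache::prelude::*;

let cache: Arc<Cache<String, String>> = Arc::new(Cache::with_config(CacheConfig::default()).await?);

// Write from a spawned task...
let writer = {
    let cache = Arc::clone(&cache);
    tokio::spawn(async move { cache.put("key".to_string(), "value".to_string()).await })
};
writer.await??;

// ...and read back from the original handle.
if let Some(value) = cache.get(&"key".to_string()).await? {
    println!("read back: {value}");
}
```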
Licensed under either of:
at your option.
Contributions are welcome! Please feel free to submit a Pull Request.