update sick
@@ -12,6 +12,7 @@ bincode = "1.3"
serde_json = "1.0"
tower = "0.4"
tower-http = { version = "0.5", features = ["cors", "fs", "compression-full"] }
redis = { version = "0.24", features = ["tokio-comp", "connection-manager"] }
tracing = "0.1"
tracing-subscriber = "0.3"
async-trait = "0.1"

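On the redis crate, the `tokio-comp` feature pulls in the async (Tokio-based) connection types and `connection-manager` adds `redis::aio::ConnectionManager`, a cloneable connection that reconnects on failure. The repository introduced later in this commit opens a fresh async connection per call; a minimal sketch of using the connection manager instead, assuming redis 0.24's API, could look like this:

use redis::aio::ConnectionManager;
use redis::Client;

// Hypothetical sketch, not part of this commit: one shared, auto-reconnecting
// connection that can be cloned into handlers instead of opening a new
// connection on every request.
async fn connect(uri: &str) -> Result<ConnectionManager, redis::RedisError> {
    let client = Client::open(uri)?;
    let mut manager = ConnectionManager::new(client).await?;
    // Quick health check, mirroring the PING done in RedisRepository::new.
    let _: () = redis::cmd("PING").query_async(&mut manager).await?;
    Ok(manager)
}
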
@@ -5,9 +5,11 @@ use axum::{
};
use std::sync::Arc;
use crate::services::tile_service::TileService;
use crate::repositories::redis_repository::RedisRepository;

pub struct AppState {
    pub tile_service: Arc<TileService>,
    pub redis_repo: Arc<RedisRepository>,
}

pub async fn get_tile(
@@ -110,7 +112,24 @@ pub async fn get_tile_all(
    Path((z, x, y)): Path<(i32, i32, i32)>,
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    // Parallel fetching for performance
    // Cache key for this tile
    let cache_key = format!("tile:{}:{}:{}", z, x, y);

    // Try to get from cache first
    if let Ok(Some(cached_bytes)) = state.redis_repo.get(&cache_key).await {
        tracing::debug!("Cache HIT for tile {}/{}/{}", z, x, y);
        return (
            [
                (header::CONTENT_TYPE, "application/octet-stream"),
                (header::CACHE_CONTROL, "public, max-age=86400"), // 24 hours
            ],
            cached_bytes
        ).into_response();
    }

    tracing::debug!("Cache MISS for tile {}/{}/{}", z, x, y);

    // Cache miss - fetch from database
    let (nodes, ways, buildings, landuse, water, railways) = tokio::join!(
        state.tile_service.get_nodes(z, x, y),
        state.tile_service.get_ways(z, x, y),
@@ -120,23 +139,11 @@ pub async fn get_tile_all(
        state.tile_service.get_railways(z, x, y),
    );

    // Initial capacity estimation (removed unused var)
    if let (Ok(n), Ok(w), Ok(b), Ok(l), Ok(wt), Ok(r)) = (nodes.as_ref(), ways.as_ref(), buildings.as_ref(), landuse.as_ref(), water.as_ref(), railways.as_ref()) {
        tracing::debug!("Tile {}/{}/{}: nodes={}, ways={}, buildings={}, landuse={}, water={}, railways={}",
            z, x, y, n.len(), w.len(), b.len(), l.len(), wt.len(), r.len());
    }

    // The exact wire format the frontend expects from `get_tile_all` is not pinned down yet:
    // the route was registered in the original main.rs but its implementation was not visible,
    // and the planned `models/tile_response.rs` DTO has not been created.
    // For now, combine all layers into a single bincode-serialized struct and return a generic
    // error if any fetch fails; the individual per-layer endpoints remain the primary API.
    // `frontend/src/lib.rs` should be checked to confirm the format the frontend expects.
    if let (Ok(n), Ok(w), Ok(b), Ok(l), Ok(wt), Ok(r)) = (nodes, ways, buildings, landuse, water, railways) {
        #[derive(serde::Serialize)]
        struct TileData {
@@ -156,12 +163,31 @@ pub async fn get_tile_all(
            water: wt,
            railways: r,
        };

        let bytes = bincode::serialize(&data).unwrap();
        ([(header::CONTENT_TYPE, "application/octet-stream")], bytes).into_response()

        // Store in cache asynchronously (fire and forget)
        let redis_clone = state.redis_repo.clone();
        let cache_key_clone = cache_key.clone();
        let bytes_clone = bytes.clone();
        tokio::spawn(async move {
            if let Err(e) = redis_clone.set(&cache_key_clone, &bytes_clone, 86400).await {
                tracing::warn!("Failed to cache tile {}: {}", cache_key_clone, e);
            }
        });

        (
            [
                (header::CONTENT_TYPE, "application/octet-stream"),
                (header::CACHE_CONTROL, "public, max-age=86400"), // 24 hours
            ],
            bytes
        ).into_response()
    } else {
        (
        (
            axum::http::StatusCode::INTERNAL_SERVER_ERROR,
            "Failed to fetch tile data".to_string(),
        ).into_response()
    }
}

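The handler above follows a plain cache-aside flow: look the serialized tile up in Redis, fall back to the database on a miss, and write the fresh bytes back with a 24-hour TTL in a detached task. A condensed, hypothetical sketch of that flow (the `fetch_tile_from_db` future is a stand-in for the `tokio::join!` over the tile service plus the bincode step, not part of the commit):

use std::sync::Arc;

use crate::repositories::redis_repository::RedisRepository;

// Hypothetical condensation of the handler's caching flow.
async fn cached_tile_bytes(
    redis: Arc<RedisRepository>,
    z: i32,
    x: i32,
    y: i32,
    fetch_tile_from_db: impl std::future::Future<Output = Option<Vec<u8>>>,
) -> Option<Vec<u8>> {
    let cache_key = format!("tile:{}:{}:{}", z, x, y);

    // 1. Cache hit: serve the stored bytes directly.
    if let Ok(Some(bytes)) = redis.get(&cache_key).await {
        return Some(bytes);
    }

    // 2. Cache miss: build the serialized tile from the database.
    let bytes = fetch_tile_from_db.await?;

    // 3. Write back with a 24-hour TTL off the request path (fire and forget).
    let bytes_for_cache = bytes.clone();
    tokio::spawn(async move {
        if let Err(e) = redis.set(&cache_key, &bytes_for_cache, 86_400).await {
            tracing::warn!("Failed to cache tile {}: {}", cache_key, e);
        }
    });

    Some(bytes)
}
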
@@ -37,6 +37,7 @@ pub async fn initialize_schema(session: &Session) -> Result<(), Box<dyn std::err
            id bigint,
            tags map<text, text>,
            points blob,
            vertex_buffer blob,
            PRIMARY KEY ((zoom, tile_x, tile_y), id)
        )",
        &[],
@@ -52,6 +53,7 @@ pub async fn initialize_schema(session: &Session) -> Result<(), Box<dyn std::err
            id bigint,
            tags map<text, text>,
            points blob,
            vertex_buffer blob,
            PRIMARY KEY ((zoom, tile_x, tile_y), id)
        )",
        &[],
@@ -67,6 +69,7 @@ pub async fn initialize_schema(session: &Session) -> Result<(), Box<dyn std::err
            id bigint,
            tags map<text, text>,
            points blob,
            vertex_buffer blob,
            PRIMARY KEY ((zoom, tile_x, tile_y), id)
        )",
        &[],
@@ -82,6 +85,7 @@ pub async fn initialize_schema(session: &Session) -> Result<(), Box<dyn std::err
            id bigint,
            tags map<text, text>,
            points blob,
            vertex_buffer blob,
            PRIMARY KEY ((zoom, tile_x, tile_y), id)
        )",
        &[],

@@ -16,6 +16,7 @@ use tower_http::compression::CompressionLayer;

use crate::repositories::way_repository::WayRepository;
use crate::repositories::node_repository::NodeRepository;
use crate::repositories::redis_repository::RedisRepository;
use crate::services::tile_service::TileService;
use crate::api::handlers::tiles::{
    get_tile, get_tile_ways, get_tile_buildings,
@@ -37,20 +38,28 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
        .build()
        .await?;

    // Initialize schema and seed data (Keep existing db module for now)
    // Initialize schema and seed data
    db::initialize_schema(&session).await?;
    db::seed_data(&session).await?;

    let session_arc = Arc::new(session);
    println!("Connected to ScyllaDB!");

    // Connect to Redis
    println!("Connecting to Redis...");
    let redis_uri = std::env::var("REDIS_URI")
        .unwrap_or_else(|_| "redis://redis:6379".to_string());
    let redis_repo = Arc::new(RedisRepository::new(&redis_uri).await?);
    println!("Connected to Redis!");

    // Dependency Injection
    let node_repo = Arc::new(NodeRepository::new(session_arc.clone()));
    let way_repo = Arc::new(WayRepository::new(session_arc.clone()));
    let tile_service = Arc::new(TileService::new(node_repo, way_repo));

    let state = Arc::new(AppState {
        tile_service: tile_service,
        tile_service,
        redis_repo,
    });

    let app = Router::new()

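The `Router::new()` call is truncated in this diff, so the actual route list is not visible. Assuming axum 0.7-style path parameters, the shared state wiring might look roughly like the sketch below (the path and helper name are illustrative only):

use std::sync::Arc;

use axum::{routing::get, Router};

use crate::api::handlers::tiles::{get_tile_all, AppState};

// Hypothetical wiring sketch; the real route list is cut off in the diff.
fn build_router(state: Arc<AppState>) -> Router {
    Router::new()
        .route("/tile/:z/:x/:y/all", get(get_tile_all))
        .with_state(state)
}
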
@@ -1,2 +1,3 @@
pub mod way_repository;
pub mod node_repository;
pub mod redis_repository;

backend/src/repositories/redis_repository.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
use redis::{AsyncCommands, Client};
use std::sync::Arc;

pub struct RedisRepository {
    client: Arc<Client>,
}

impl RedisRepository {
    pub async fn new(uri: &str) -> Result<Self, redis::RedisError> {
        let client = Client::open(uri)?;
        // Test connection
        let mut conn = client.get_async_connection().await?;
        let _: () = redis::cmd("PING").query_async(&mut conn).await?;

        Ok(Self {
            client: Arc::new(client),
        })
    }

    /// Get cached data by key
    pub async fn get(&self, key: &str) -> Result<Option<Vec<u8>>, redis::RedisError> {
        let mut conn = self.client.get_async_connection().await?;
        conn.get(key).await
    }

    /// Set data with TTL (time-to-live) in seconds
    pub async fn set(&self, key: &str, value: &[u8], ttl_seconds: u64) -> Result<(), redis::RedisError> {
        let mut conn = self.client.get_async_connection().await?;
        conn.set_ex(key, value, ttl_seconds).await
    }

    /// Check if key exists
    pub async fn exists(&self, key: &str) -> Result<bool, redis::RedisError> {
        let mut conn = self.client.get_async_connection().await?;
        conn.exists(key).await
    }

    /// Delete a key
    pub async fn delete(&self, key: &str) -> Result<(), redis::RedisError> {
        let mut conn = self.client.get_async_connection().await?;
        conn.del(key).await
    }
}
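
A short, hypothetical usage sketch of the new repository (URI, key, and payload are made up), showing the round trip the tile handler relies on:

use crate::repositories::redis_repository::RedisRepository;

// Hypothetical smoke test; all values are illustrative only.
async fn demo() -> Result<(), redis::RedisError> {
    let repo = RedisRepository::new("redis://127.0.0.1:6379").await?;

    // Store a payload for 60 seconds, then read it back.
    repo.set("tile:14:8573:5627", b"serialized tile bytes", 60).await?;
    if let Some(bytes) = repo.get("tile:14:8573:5627").await? {
        assert_eq!(bytes, b"serialized tile bytes".to_vec());
    }

    // Keys can also be checked and removed explicitly.
    assert!(repo.exists("tile:14:8573:5627").await?);
    repo.delete("tile:14:8573:5627").await?;

    Ok(())
}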