diff --git a/README.md b/README.md
index bb9a193..2501d4f 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-# Miniaturs
+# miniaturs
 
 [![Continuous integration](https://github.com/lloydmeta/miniaturs/actions/workflows/ci.yaml/badge.svg)](https://github.com/lloydmeta/miniaturs/actions/workflows/ci.yaml)
 
-HTTP image resizer
+Tiny HTTP image resizer.
 
 ## Goals
@@ -21,10 +21,10 @@ To fulfil the above:
 
 * Runs in a Lambda
 * Rust ⚡️
-* Caching in layers: CDN with S3 for images
+* Caching in layers: CDN to protect the app, with S3 for storing images
 * Serverless, but built on HTTP framework ([cargo-lambda](https://www.cargo-lambda.info) on top of [axum](https://github.com/tokio-rs/axum))
 
-An example Terraform config in `terraform/prod` is provided to show how to deploy at a subdomain using Cloudflare as our (free!) CDN + WAF.
+An example Terraform config in `terraform/prod` is provided to show how to deploy at a subdomain using Cloudflare as our ([free!](https://www.cloudflare.com/en-gb/plans/free/)) CDN + WAF with AWS Lambda and S3 ([also free!](https://aws.amazon.com/free/))
 
 ## Usage:
 
diff --git a/server/src/api/routing/handlers.rs b/server/src/api/routing/handlers.rs
index 80d248b..80dcd33 100644
--- a/server/src/api/routing/handlers.rs
+++ b/server/src/api/routing/handlers.rs
@@ -72,7 +72,7 @@ async fn resize(
     let maybe_cached_resized_image = app_components
         .processed_images_cacher
         .get(&processed_image_request)
-        .await;
+        .await?;
 
     if let Some(cached_resized_image) = maybe_cached_resized_image {
         let mut response_headers = HeaderMap::new();
@@ -91,7 +91,7 @@ async fn resize(
     let maybe_cached_fetched_image = app_components
         .unprocessed_images_cacher
         .get(&unprocessed_cache_retrieve_req)
-        .await;
+        .await?;
 
     let (response_status_code, bytes, maybe_content_type_string) =
         if let Some(cached_fetched) = maybe_cached_fetched_image {
@@ -117,7 +117,7 @@ async fn resize(
             app_components
                 .unprocessed_images_cacher
                 .set(&bytes, &cache_fetched_req)
-                .await;
+                .await?;
 
             let response_status_code = StatusCode::from_u16(status_code.as_u16())?;
             (response_status_code, bytes, maybe_content_type_string)
@@ -161,7 +161,7 @@ async fn resize(
     app_components
         .processed_images_cacher
         .set(&written_bytes, &cache_image_req)
-        .await;
+        .await?;
 
     let mut response_headers = HeaderMap::new();
     let maybe_content_type_header = maybe_content_type_string
@@ -305,7 +305,9 @@ fn handle_panic(err: Box<dyn Any + Send + 'static>) -> Response<Full<Bytes>> {
 
     let error = Standard { message: details };
 
-    let body = serde_json::to_string(&error).expect("Could not marshal error message");
+    let body = serde_json::to_string(&error).unwrap_or_else(|e| {
+        format!("{{\"message\": \"Could not serialise error message [{e}]\"}}")
+    });
 
     Response::builder()
         .status(StatusCode::INTERNAL_SERVER_ERROR)
diff --git a/server/src/infra/config.rs b/server/src/infra/config.rs
index 8e7def5..e809098 100644
--- a/server/src/infra/config.rs
+++ b/server/src/infra/config.rs
@@ -33,7 +33,7 @@ pub struct AwsSettings {
 }
 
 impl Config {
-    pub async fn load_env() -> Result<Config> {
+    pub async fn load_env() -> anyhow::Result<Config> {
         let shared_secret = env::var(SHARED_SECRET_ENV_KEY)
             .context("Expected {SHARED_SECRET_ENV_KEY} to be defined")?;
 
diff --git a/server/src/infra/image_caching.rs b/server/src/infra/image_caching.rs
index b92a591..fff1bb5 100644
--- a/server/src/infra/image_caching.rs
+++ b/server/src/infra/image_caching.rs
@@ -1,5 +1,6 @@
 use std::collections::HashMap;
 
+use anyhow::Context;
 use aws_sdk_s3::{
     error::DisplayErrorContext,
     primitives::{ByteStream, SdkBody},
@@ -67,17 +68,17 @@ where
     GetRequest: CacheGettable,
     SetRequest: CacheSettable,
 {
-    async fn get(&self, req: &GetRequest) -> Option<Retrieved<GetRequest::Cached>>;
-    async fn set(&self, bytes: &[u8], req: &SetRequest);
+    async fn get(&self, req: &GetRequest) -> anyhow::Result<Option<Retrieved<GetRequest::Cached>>>;
+    async fn set(&self, bytes: &[u8], req: &SetRequest) -> anyhow::Result<()>;
 }
 
 pub trait CacheGettable {
     type Cached;
-    fn cache_key(&self) -> CacheKey;
+    fn cache_key(&self) -> anyhow::Result<CacheKey>;
 }
 pub trait CacheSettable: CacheGettable {
     type Retrieve;
-    fn metadata(&self) -> Metadata;
+    fn metadata(&self) -> anyhow::Result<Metadata>;
 }
 
 #[derive(Clone)]
@@ -100,8 +101,8 @@ where
     GetReq: CacheGettable,
     SetReq: CacheSettable + DeserializeOwned,
 {
-    async fn get(&self, req: &GetReq) -> Option<Retrieved<GetReq::Cached>> {
-        let cache_key = req.cache_key();
+    async fn get(&self, req: &GetReq) -> anyhow::Result<Option<Retrieved<GetReq::Cached>>> {
+        let cache_key = req.cache_key()?;
 
         let cache_retrieve_attempt = self
             .client
@@ -113,19 +114,24 @@ where
         match cache_retrieve_attempt {
             Ok(retreived) => {
                 // If we can't retrieve metadata, it's dead to us !
-                let s3_metadata = retreived.metadata()?.get(METADATA_JSON_KEY)?;
-                let as_original_requested = serde_json::from_str(s3_metadata).ok()?;
-
-                let bytes: Vec<u8> = retreived
-                    .body
-                    .collect()
-                    .await
-                    .expect("Failure to retrieve from S3")
-                    .to_vec();
-
-                Some(Retrieved {
-                    bytes,
-                    requested: as_original_requested,
+                let maybe_s3_metadata = retreived.metadata().and_then(|m| m.get(METADATA_JSON_KEY));
+                let maybe_as_original_requested =
+                    maybe_s3_metadata.and_then(|m| serde_json::from_str(m).ok());
+
+                Ok(match maybe_as_original_requested {
+                    Some(as_original_requested) => {
+                        let bytes: Vec<u8> = retreived
+                            .body
+                            .collect()
+                            .await
+                            .map_err(|e| anyhow::anyhow!("Failure to retrieve from S3 [{e}]"))?
+                            .to_vec();
+                        Some(Retrieved {
+                            bytes,
+                            requested: as_original_requested,
+                        })
+                    }
+                    None => None,
                 })
             }
             Err(sdk_err)
@@ -134,18 +140,18 @@ where
                     .as_service_error()
                     .filter(|e| e.is_no_such_key())
                     .is_some() =>
             {
-                None
+                Ok(None)
             }
             // Anything else is fucked.
-            Err(other) => panic!("AWS S3 SDK error: [{}]", DisplayErrorContext(other)),
+            Err(other) => anyhow::bail!("AWS S3 SDK error: [{}]", DisplayErrorContext(other)),
         }
     }
 
-    async fn set(&self, bytes: &[u8], req: &SetReq) {
+    async fn set(&self, bytes: &[u8], req: &SetReq) -> anyhow::Result<()> {
         let body_stream = ByteStream::new(SdkBody::from(bytes));
-        let metadata = req.metadata();
-        let cache_key = req.cache_key();
+        let metadata = req.metadata()?;
+        let cache_key = req.cache_key()?;
 
         self.client
             .put_object()
@@ -155,7 +161,8 @@ where
             .body(body_stream)
             .send()
             .await
-            .expect("Writing to S3 failed.");
+            .map_err(|e| anyhow::anyhow!("Writing to S3 failed [{e}]"))?;
+        Ok(())
     }
 }
 
@@ -166,49 +173,51 @@ pub struct CacheKey(String);
 static METADATA_JSON_KEY: &str = "_metadata_json";
 impl CacheGettable for ImageResizeRequest {
     type Cached = ImageResizedCacheRequest;
-    fn cache_key(&self) -> CacheKey {
-        let as_json = serde_json::to_string(self).expect("Could not JSON-ify, oddly");
+    fn cache_key(&self) -> anyhow::Result<CacheKey> {
+        let as_json = serde_json::to_string(self).context("Could not JSON-ify to cache key.")?;
         let sha256ed = sha256::digest(as_json);
-        CacheKey(sha256ed)
+        Ok(CacheKey(sha256ed))
     }
 }
 
 impl CacheGettable for ImageResizedCacheRequest {
     type Cached = Self;
-    fn cache_key(&self) -> CacheKey {
+    fn cache_key(&self) -> anyhow::Result<CacheKey> {
         self.request.cache_key()
     }
 }
 impl CacheSettable for ImageResizedCacheRequest {
     type Retrieve = ImageResizeRequest;
-    fn metadata(&self) -> Metadata {
-        let as_json_string = serde_json::to_string(self).expect("Could not JSON-ify, oddly");
+    fn metadata(&self) -> anyhow::Result<Metadata> {
+        let as_json_string =
+            serde_json::to_string(self).context("Could not JSON-ify to metadata.")?;
         let mut map = HashMap::new();
         map.insert(METADATA_JSON_KEY.to_string(), as_json_string);
-        Metadata(map)
+        Ok(Metadata(map))
     }
 }
 impl CacheGettable for ImageFetchRequest {
     type Cached = ImageFetchedCacheRequest;
-    fn cache_key(&self) -> CacheKey {
-        let as_json = serde_json::to_string(self).expect("Could not JSON-ify, oddly");
+    fn cache_key(&self) -> anyhow::Result<CacheKey> {
+        let as_json = serde_json::to_string(self).context("Could not JSON-ify to cache key.")?;
         let sha256ed = sha256::digest(as_json);
-        CacheKey(sha256ed)
+        Ok(CacheKey(sha256ed))
     }
 }
 impl CacheGettable for ImageFetchedCacheRequest {
     type Cached = Self;
-    fn cache_key(&self) -> CacheKey {
+    fn cache_key(&self) -> anyhow::Result<CacheKey> {
         self.request.cache_key()
     }
 }
 impl CacheSettable for ImageFetchedCacheRequest {
     type Retrieve = ImageFetchRequest;
-    fn metadata(&self) -> Metadata {
-        let as_json_string = serde_json::to_string(self).expect("Could not JSON-ify, oddly");
+    fn metadata(&self) -> anyhow::Result<Metadata> {
+        let as_json_string =
+            serde_json::to_string(self).context("Could not JSON-ify to metadata.")?;
         let mut map = HashMap::new();
         map.insert(METADATA_JSON_KEY.to_string(), as_json_string);
-        Metadata(map)
+        Ok(Metadata(map))
     }
 }
 
@@ -281,7 +290,7 @@ mod tests {
                 target_height: 100,
             })),
         };
-        let key = req.cache_key();
+        let key = req.cache_key()?;
         assert!(key.0.len() < 1024);
         Ok(())
     }
@@ -298,7 +307,7 @@ mod tests {
             },
             content_type: "image/png".to_string(),
         };
-        let metadata = req.metadata();
+        let metadata = req.metadata()?;
 
         let mut expected = HashMap::new();
         let metadata_as_json = serde_json::to_string(&req).unwrap();
@@ -325,7 +334,7 @@ mod tests {
             })),
         };
         let retrieved = s3_image_cacher.get(&req).await;
-        assert!(retrieved.is_none());
+        assert!(retrieved?.is_none());
         Ok(())
     }
 
@@ -350,10 +359,10 @@ mod tests {
             request: req.clone(),
             content_type: "image/png".to_string(),
         };
-        s3_image_cacher.set(content, &image_set_req).await;
+        s3_image_cacher.set(content, &image_set_req).await?;
         let retrieved = s3_image_cacher
             .get(&req)
-            .await
+            .await?
             .expect("Cached image should be retrievable");
         assert_eq!(content, retrieved.bytes.as_slice());
         assert_eq!(image_set_req, retrieved.requested);
@@ -380,10 +389,10 @@ mod tests {
             request: req.clone(),
             content_type: "image/png".to_string(),
         };
-        s3_image_cacher.set(content, &image_set_req).await;
+        s3_image_cacher.set(content, &image_set_req).await?;
 
-        let cache_key = req.cache_key();
-        let metadata_from_req = image_set_req.metadata();
+        let cache_key = req.cache_key()?;
+        let metadata_from_req = image_set_req.metadata()?;
 
         let retrieved = s3_client()
             .await
diff --git a/server/src/main.rs b/server/src/main.rs
index cb243ca..9576881 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -315,6 +315,7 @@ mod tests {
             .unprocessed_images_cacher
             .get(&unprocessed_cache_retrieve_req)
             .await
+            .unwrap()
     }
 
     async fn retrieve_processed_cached(
@@ -331,6 +332,7 @@ mod tests {
             .processed_images_cacher
             .get(&processed_cache_retrieve_req)
             .await
+            .unwrap()
     }
 
     fn signed_resize_path(
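
The sketch below is illustrative only and is not part of the patch above. It restates the error-handling shape this diff moves to — `get` returning `anyhow::Result<Option<Retrieved<…>>>` so a cache miss (`Ok(None)`) stays distinct from a broken cache (`Err`), `set` returning `anyhow::Result<()>`, and callers propagating failures with `?` instead of panicking — using a hypothetical synchronous in-memory cacher in place of the real async, S3-backed one. All names here (`InMemoryCacher`, `serve`, `healthy`) are invented for illustration; only the `anyhow` crate is assumed.

```rust
// Illustrative sketch only -- not part of the diff above. Names are hypothetical;
// the only assumed dependency is the `anyhow` crate.
use std::collections::HashMap;

/// What a cache hit hands back: the stored bytes plus the request they were stored for.
struct Retrieved {
    bytes: Vec<u8>,
    requested: String,
}

/// In-memory stand-in with the same fallible shape as the S3-backed cacher in the diff:
/// a miss is `Ok(None)`, a broken backend is `Err`, and writes can fail too.
struct InMemoryCacher {
    store: HashMap<String, Vec<u8>>,
    /// Lets the example exercise the error path without a real backend.
    healthy: bool,
}

impl InMemoryCacher {
    fn get(&self, key: &str) -> anyhow::Result<Option<Retrieved>> {
        if !self.healthy {
            // Counterpart of the patch's `anyhow::bail!` on unexpected SDK errors.
            anyhow::bail!("cache backend unavailable");
        }
        Ok(self.store.get(key).map(|bytes| Retrieved {
            bytes: bytes.clone(),
            requested: key.to_owned(),
        }))
    }

    fn set(&mut self, key: &str, bytes: &[u8]) -> anyhow::Result<()> {
        if !self.healthy {
            anyhow::bail!("cache backend unavailable");
        }
        self.store.insert(key.to_owned(), bytes.to_vec());
        Ok(())
    }
}

/// Callers propagate cache failures with `?` instead of panicking, the pattern the
/// handlers adopt via `.await?` in the diff.
fn serve(cacher: &mut InMemoryCacher, key: &str) -> anyhow::Result<Vec<u8>> {
    if let Some(hit) = cacher.get(key)? {
        return Ok(hit.bytes);
    }
    let fresh = b"freshly produced bytes".to_vec();
    cacher.set(key, &fresh)?;
    Ok(fresh)
}

fn main() -> anyhow::Result<()> {
    let mut cacher = InMemoryCacher {
        store: HashMap::new(),
        healthy: true,
    };
    // First call misses and fills the cache; the follow-up `get` is a hit.
    assert_eq!(serve(&mut cacher, "abc")?, b"freshly produced bytes".to_vec());
    let hit = cacher.get("abc")?.expect("just-written entry should be retrievable");
    assert_eq!(hit.requested, "abc");
    Ok(())
}
```

The point of the `Result<Option<_>>` split is that a handler can keep treating a miss as ordinary control flow while S3 or serialisation failures surface as errors (and ultimately 500 responses) rather than process-killing panics.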