diff --git a/Cargo.toml b/Cargo.toml
index b88325b3..d2fd4775 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,7 +19,6 @@ rocket = "0.4.11"
 rocket_contrib = { version = "0.4.11", features = ["json"] }
 rocket_i18n = "0.4.1"
 scheduled-thread-pool = "0.2.6"
-#aws-creds = { version = "0.34", default-features = false, features = ["native-tls"] }
 serde = "1.0.137"
 serde_json = "1.0.81"
 shrinkwraprs = "0.3.0"
@@ -69,7 +68,7 @@ ructe = "0.15.0"
 rsass = "0.26"
 
 [features]
-default = ["postgres", "s3"]
+default = ["postgres"]
 postgres = ["plume-models/postgres", "diesel/postgres"]
 sqlite = ["plume-models/sqlite", "diesel/sqlite"]
 debug-mailer = []
diff --git a/plume-models/Cargo.toml b/plume-models/Cargo.toml
index 5e8dfd83..6eac4b62 100644
--- a/plume-models/Cargo.toml
+++ b/plume-models/Cargo.toml
@@ -18,7 +18,6 @@ rocket_i18n = "0.4.1"
 reqwest = "0.11.11"
 scheduled-thread-pool = "0.2.6"
 serde = "1.0.137"
-#rust-s3 = { version = "0.29.0", default-features = false, features = ["blocking"] }
 rust-s3 = { version = "0.33.0", optional = true, features = ["blocking"] }
 serde_derive = "1.0"
 serde_json = "1.0.81"
diff --git a/plume-models/src/config.rs b/plume-models/src/config.rs
index af2605ae..b6676f27 100644
--- a/plume-models/src/config.rs
+++ b/plume-models/src/config.rs
@@ -6,9 +6,8 @@
 use rocket::Config as RocketConfig;
 use std::collections::HashSet;
 use std::env::{self, var};
-use s3::{Bucket, Region};
-use s3::creds::Credentials;
-
+#[cfg(feature = "s3")]
+use s3::{Bucket, Region, creds::Credentials};
 
 #[cfg(not(test))]
 const DB_NAME: &str = "plume";
@@ -382,6 +381,7 @@ pub struct S3Config {
 }
 
 impl S3Config {
+    #[cfg(feature = "s3")]
     pub fn get_bucket(&self) -> Bucket {
         let region = Region::Custom {
             region: self.region.clone(),
@@ -411,41 +411,49 @@ fn get_s3_config() -> Option<S3Config> {
     if bucket.is_none() && access_key_id.is_none() && access_key_secret.is_none() {
         return None;
     }
-    if bucket.is_none() || access_key_id.is_none() || access_key_secret.is_none() {
-        panic!("Invalid S3 configuration: some required values are set, but not others");
+
+    #[cfg(not(feature = "s3"))]
+    panic!("S3 support is not enabled in this build");
+
+    #[cfg(feature = "s3")]
+    {
+        if bucket.is_none() || access_key_id.is_none() || access_key_secret.is_none() {
+            panic!("Invalid S3 configuration: some required values are set, but not others");
+        }
+        let bucket = bucket.unwrap();
+        let access_key_id = access_key_id.unwrap();
+        let access_key_secret = access_key_secret.unwrap();
+
+        let region = var("S3_REGION").unwrap_or_else(|_| "us-east-1".to_owned());
+        let hostname = var("S3_HOSTNAME").unwrap_or_else(|_| format!("{}.amazonaws.com", region));
+
+        let protocol = var("S3_PROTOCOL").unwrap_or_else(|_| "https".to_owned());
+        if protocol != "http" && protocol != "https" {
+            panic!("Invalid S3 configuration: invalid protocol {}", protocol);
+        }
+
+        let path_style = var("S3_PATH_STYLE").unwrap_or_else(|_| "false".to_owned());
+        let path_style = string_to_bool(&path_style, "S3_PATH_STYLE");
+        let direct_upload = var("S3_DIRECT_UPLOAD").unwrap_or_else(|_| "false".to_owned());
+        let direct_upload = string_to_bool(&direct_upload, "S3_DIRECT_UPLOAD");
+        let direct_download = var("S3_DIRECT_DOWNLOAD").unwrap_or_else(|_| "false".to_owned());
+        let direct_download = string_to_bool(&direct_download, "S3_DIRECT_DOWNLOAD");
+
+        let alias = var("S3_ALIAS_HOST").ok();
+
+        Some(S3Config {
+            bucket,
+            access_key_id,
+            access_key_secret,
+            region,
+            hostname,
+            protocol,
+            path_style,
+            direct_upload,
+            direct_download,
+            alias,
+        })
     }
-    let bucket = bucket.unwrap();
-    let access_key_id = access_key_id.unwrap();
-    let access_key_secret = access_key_secret.unwrap();
-
-    let region = var("S3_REGION").unwrap_or_else(|_| "us-east-1".to_owned());
-    let hostname = var("S3_HOSTNAME").unwrap_or_else(|_| format!("{}.amazonaws.com", region));
-
-    let protocol = var("S3_PROTOCOL").unwrap_or_else(|_| "https".to_owned());
-    if protocol != "http" && protocol != "https" {
-        panic!("Invalid S3 configuration: invalid protocol {}", protocol);
-    }
-
-    let path_style = var("S3_PATH_STYLE").unwrap_or_else(|_| "false".to_owned());
-    let path_style = string_to_bool(&path_style, "S3_PATH_STYLE");
-    let direct_upload = var("S3_DIRECT_UPLOAD").unwrap_or_else(|_| "false".to_owned());
-    let direct_upload = string_to_bool(&direct_upload, "S3_DIRECT_UPLOAD");
-    let direct_download = var("S3_DIRECT_DOWNLOAD").unwrap_or_else(|_| "false".to_owned());
-    let direct_download = string_to_bool(&direct_download, "S3_DIRECT_DOWNLOAD");
-
-    let alias = var("S3_ALIAS_HOST").ok();
-    Some(S3Config {
-        bucket,
-        access_key_id,
-        access_key_secret,
-        region,
-        hostname,
-        protocol,
-        path_style,
-        direct_upload,
-        direct_download,
-        alias,
-    })
 }
 
 lazy_static! {
diff --git a/plume-models/src/medias.rs b/plume-models/src/medias.rs
index 192e60da..3dc22605 100644
--- a/plume-models/src/medias.rs
+++ b/plume-models/src/medias.rs
@@ -170,9 +170,12 @@ impl Media {
 
     pub fn delete(&self, conn: &Connection) -> Result<()> {
         if !self.is_remote {
-            if let Some(config) = &CONFIG.s3 {
-                config.get_bucket()
+            if CONFIG.s3.is_some() {
+                #[cfg(feature = "s3")]
+                CONFIG.s3.as_ref().unwrap().get_bucket()
                     .delete_object_blocking(&self.file_path)?;
+                #[cfg(not(feature="s3"))]
+                unreachable!();
             } else {
                 fs::remove_file(self.file_path.as_str())?;
             }
diff --git a/src/routes/mod.rs b/src/routes/mod.rs
index 7f49b399..08fd8fa1 100644
--- a/src/routes/mod.rs
+++ b/src/routes/mod.rs
@@ -9,7 +9,7 @@ use rocket::{
     http::{
         hyper::header::{CacheControl, CacheDirective, ETag, EntityTag},
         uri::{FromUriParam, Query},
-        ContentType, RawStr, Status,
+        RawStr, Status,
     },
     request::{self, FromFormValue, FromRequest, Request},
     response::{self, Flash, NamedFile, Redirect, Responder, Response},
@@ -21,6 +21,9 @@ use std::{
     path::{Path, PathBuf},
 };
 
+#[cfg(feature = "s3")]
+use rocket::http::ContentType;
+
 /// Special return type used for routes that "cannot fail", and instead
 /// `Redirect`, or `Flash`, when we cannot deliver a `Ructe` Response
 #[allow(clippy::large_enum_variant)]
@@ -207,6 +210,7 @@
 #[derive(Responder)]
 enum FileKind {
     Local(NamedFile),
+    #[cfg(feature = "s3")]
     S3(Vec<u8>, ContentType),
 }
 
@@ -259,18 +263,23 @@ pub fn plume_static_files(file: PathBuf, build_id: &RawStr) -> Option<CachedFile
 
 #[get("/static/media/<file..>")]
 pub fn plume_media_files(file: PathBuf) -> Option<CachedFile> {
-    if let Some(config) = &CONFIG.s3 {
-        let ct = file.extension()
-            .and_then(|ext| ContentType::from_extension(&ext.to_string_lossy()))
-            .unwrap_or(ContentType::Binary);
+    if CONFIG.s3.is_some() {
+        #[cfg(feature="s3")]
+        {
+            let ct = file.extension()
+                .and_then(|ext| ContentType::from_extension(&ext.to_string_lossy()))
+                .unwrap_or(ContentType::Binary);
 
-        let data = config.get_bucket()
-            .get_object_blocking(format!("plume-media/{}", file.to_string_lossy())).ok()?;
+            let data = CONFIG.s3.as_ref().unwrap().get_bucket()
+                .get_object_blocking(format!("plume-media/{}", file.to_string_lossy())).ok()?;
 
-        Some(CachedFile {
-            inner: FileKind::S3 ( data.to_vec(), ct),
-            cache_control: CacheControl(vec![CacheDirective::MaxAge(60 * 60 * 24 * 30)]),
-        })
+            Some(CachedFile {
+                inner: FileKind::S3 ( data.to_vec(), ct),
+                cache_control: CacheControl(vec![CacheDirective::MaxAge(60 * 60 * 24 * 30)]),
+            })
+        }
+        #[cfg(not(feature="s3"))]
+        unreachable!();
    } else {
         NamedFile::open(Path::new(&CONFIG.media_directory).join(file))
             .ok()