added benchmarks (#142)

This commit is contained in:
Henk-Jan Lebbink
2025-03-29 23:26:11 +01:00
committed by GitHub
parent 0cccaf1663
commit f23572dce8
60 changed files with 1986 additions and 712 deletions

21
common/Cargo.toml Normal file
View File

@@ -0,0 +1,21 @@
# Manifest for the shared test/benchmark support crate `minio_common`.
[package]
name = "minio_common"
version = "0.1.0"
edition = "2024"
# The parent `minio` crate is pulled in by path; the remaining dependencies
# back the helpers in src/ (async runtimes, random data, HTTP plumbing).
[dependencies]
minio = {path = ".." }
tokio = { version = "1.44.1", features = ["full"] }
tokio-stream = "0.1.17"
async-std = "1.13.1"
rand = { version = "0.8.5", features = ["small_rng"] }
bytes = "1.10.1"
log = "0.4.27"
chrono = "0.4.40"
reqwest = "0.12.15"
http = "1.3.1"
# Explicit lib target so the crate name stays `minio_common` regardless of
# the directory name.
[lib]
name = "minio_common"
path = "src/lib.rs"

View File

@@ -0,0 +1,69 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_std::future::timeout;
use minio::s3::Client;
use std::thread;
/// Cleanup guard that removes the bucket when it is dropped
pub struct CleanupGuard {
    // Clone of the client that created the bucket; used to issue the removal.
    client: Client,
    // Name of the bucket to remove when the guard is dropped.
    bucket_name: String,
}
impl CleanupGuard {
#[allow(dead_code)]
pub fn new(client: &Client, bucket_name: &str) -> Self {
Self {
client: client.clone(),
bucket_name: bucket_name.to_string(),
}
}
}
impl Drop for CleanupGuard {
    /// Synchronously removes the bucket when the guard goes out of scope.
    ///
    /// Drop may run on a thread that is already inside an async runtime,
    /// where blocking on async work (or nesting a runtime) is not allowed.
    /// Running the cleanup on a fresh OS thread with its own Tokio runtime
    /// sidesteps both restrictions; the `join()` makes drop synchronous.
    fn drop(&mut self) {
        // Clone what the detached thread needs, since it outlives `&mut self`.
        let client = self.client.clone();
        let bucket_name = self.bucket_name.clone();
        //println!("Going to remove bucket {}", bucket_name);
        // Spawn the cleanup task in a way that detaches it from the current runtime
        thread::spawn(move || {
            // Create a new runtime for this thread
            let rt = tokio::runtime::Runtime::new().unwrap();
            // Execute the async cleanup in this new runtime
            rt.block_on(async {
                // do the actual removal of the bucket; cap the wait at 60s so a
                // wedged server cannot hang test teardown forever
                match timeout(
                    std::time::Duration::from_secs(60),
                    client.remove_and_purge_bucket(&bucket_name),
                )
                .await
                {
                    Ok(result) => match result {
                        Ok(_) => {
                            //println!("Bucket {} removed successfully", bucket_name),
                        }
                        Err(e) => println!("Error removing bucket {}: {:?}", bucket_name, e),
                    },
                    // Failures are only logged: panicking in drop would abort
                    // the process during unwinding.
                    Err(_) => println!("Timeout after 60s while removing bucket {}", bucket_name),
                }
            });
        })
        .join()
        .unwrap(); // This blocks the current thread until cleanup is done
    }
}

196
common/src/example.rs Normal file
View File

@@ -0,0 +1,196 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use minio::s3::args::PostPolicy;
use minio::s3::types::{
AndOperator, Destination, Filter, LifecycleConfig, LifecycleRule, NotificationConfig,
ObjectLockConfig, PrefixFilterRule, QueueConfig, ReplicationConfig, ReplicationRule,
RetentionMode, SuffixFilterRule,
};
use minio::s3::utils::utc_now;
use std::collections::HashMap;
/// Returns a sample lifecycle configuration: a single enabled rule ("rule1")
/// that expires objects under the "logs/" prefix after 365 days.
pub fn create_bucket_lifecycle_config_examples() -> LifecycleConfig {
    let rule = LifecycleRule {
        abort_incomplete_multipart_upload_days_after_initiation: None,
        expiration_date: None,
        expiration_days: Some(365),
        expiration_expired_object_delete_marker: None,
        filter: Filter {
            and_operator: None,
            prefix: Some("logs/".to_string()),
            tag: None,
        },
        id: "rule1".to_string(),
        noncurrent_version_expiration_noncurrent_days: None,
        noncurrent_version_transition_noncurrent_days: None,
        noncurrent_version_transition_storage_class: None,
        status: true,
        transition_date: None,
        transition_days: None,
        transition_storage_class: None,
    };
    LifecycleConfig { rules: vec![rule] }
}
/// Returns a sample notification configuration with a single webhook queue
/// listening for object-created Put/Copy events on keys matching
/// prefix "images" / suffix "pg".
pub fn create_bucket_notification_config_example() -> NotificationConfig {
    let queue_config = QueueConfig {
        events: vec![
            "s3:ObjectCreated:Put".to_string(),
            "s3:ObjectCreated:Copy".to_string(),
        ],
        id: Some(String::new()), //TODO or should this be NONE??
        prefix_filter_rule: Some(PrefixFilterRule {
            value: "images".to_string(),
        }),
        suffix_filter_rule: Some(SuffixFilterRule {
            value: "pg".to_string(),
        }),
        queue: "arn:minio:sqs::miniojavatest:webhook".to_string(),
    };
    NotificationConfig {
        cloud_func_config_list: None,
        queue_config_list: Some(vec![queue_config]),
        topic_config_list: None,
    }
}
/// Builds a bucket policy (JSON) granting anonymous read access
/// (`s3:GetObject`) to objects matching `myobject*` in `bucket_name`.
///
/// # Returns
/// The policy document as a JSON string with `<BUCKET>` substituted by
/// `bucket_name`.
pub fn create_bucket_policy_config_example(bucket_name: &str) -> String {
    let config = r#"
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Principal": {
"AWS": [
"*"
]
},
"Resource": [
"arn:aws:s3:::<BUCKET>/myobject*"
],
"Sid": ""
}
]
}
"#;
    // `replace` already yields an owned String; the former trailing
    // `.to_string()` was a redundant extra allocation.
    config.replace("<BUCKET>", bucket_name)
}
/// Returns the IAM-style policy (JSON) that grants the permissions needed to
/// replicate into a bucket: one statement enabling replication reads on the
/// source side, one enabling object writes/deletes on the destination side.
pub fn create_bucket_policy_config_example_for_replication() -> String {
    r#"
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:GetBucketLocation",
"s3:GetBucketVersioning",
"s3:GetBucketObjectLockConfiguration",
"s3:GetEncryptionConfiguration"
],
"Resource": [
"arn:aws:s3:::*"
],
"Sid": "EnableReplicationOnBucket"
},
{
"Effect": "Allow",
"Action": [
"s3:GetReplicationConfiguration",
"s3:ReplicateTags",
"s3:AbortMultipartUpload",
"s3:GetObject",
"s3:GetObjectVersion",
"s3:GetObjectVersionTagging",
"s3:PutObject",
"s3:PutObjectRetention",
"s3:PutBucketObjectLockConfiguration",
"s3:PutObjectLegalHold",
"s3:DeleteObject",
"s3:ReplicateObject",
"s3:ReplicateDelete"
],
"Resource": [
"arn:aws:s3:::*"
],
"Sid": "EnableReplicatingDataIntoBucket"
}
]
}"#
    .to_owned()
}
/// Builds a sample replication configuration with one enabled rule ("rule1",
/// priority 1) replicating objects under the "TaxDocs" prefix carrying tags
/// key1=value1 and key2=value2 into `dst_bucket`.
pub fn create_bucket_replication_config_example(dst_bucket: &str) -> ReplicationConfig {
    let mut tags: HashMap<String, String> = HashMap::new();
    tags.insert(String::from("key1"), String::from("value1"));
    tags.insert(String::from("key2"), String::from("value2"));
    ReplicationConfig {
        role: Some("example1".to_string()),
        rules: vec![ReplicationRule {
            destination: Destination {
                // `format!` already returns an owned String; the former
                // `String::from(&format!(...))` re-allocated it needlessly.
                bucket_arn: format!("arn:aws:s3:::{}", dst_bucket),
                access_control_translation: None,
                account: None,
                encryption_config: None,
                metrics: None,
                replication_time: None,
                storage_class: None,
            },
            delete_marker_replication_status: None,
            existing_object_replication_status: None,
            filter: Some(Filter {
                and_operator: Some(AndOperator {
                    prefix: Some(String::from("TaxDocs")),
                    tags: Some(tags),
                }),
                prefix: None,
                tag: None,
            }),
            id: Some(String::from("rule1")),
            prefix: None,
            priority: Some(1),
            source_selection_criteria: None,
            delete_replication_status: Some(false),
            status: true,
        }],
    }
}
/// Returns a two-entry sample tag set (Project / User) for tagging tests.
pub fn create_tags_example() -> HashMap<String, String> {
    let mut tags = HashMap::new();
    tags.insert("Project".to_string(), "Project One".to_string());
    tags.insert("User".to_string(), "jsmith".to_string());
    tags
}
/// Returns a sample object-lock configuration: GOVERNANCE retention mode with
/// a 7-day duration.
pub fn create_object_lock_config_example() -> ObjectLockConfig {
    let duration_days: i32 = 7;
    ObjectLockConfig::new(RetentionMode::GOVERNANCE, Some(duration_days), None).unwrap()
}
/// Builds a sample `PostPolicy` for `bucket_name`/`object_name`: expires in 5
/// days, requires the exact object key, and bounds the upload size to
/// 1 MiB..=4 MiB.
///
/// # Panics
/// Panics if `PostPolicy::new` or a condition setter rejects its input.
pub fn create_post_policy_example(bucket_name: &str, object_name: &str) -> PostPolicy {
    let expiration = utc_now() + chrono::Duration::days(5);
    // `bucket_name`/`object_name` are already `&str`; the former `&` borrows
    // produced needless `&&str` arguments (clippy::needless_borrow).
    let mut policy = PostPolicy::new(bucket_name, expiration).unwrap();
    policy.add_equals_condition("key", object_name).unwrap();
    policy
        .add_content_length_range_condition(1024 * 1024, 4 * 1024 * 1024)
        .unwrap();
    policy
}

6
common/src/lib.rs Normal file
View File

@@ -0,0 +1,6 @@
// Shared helpers used by the minio test and benchmark suites.
pub mod cleanup_guard; // drop-based guard that removes a bucket on scope exit
pub mod example; // factory functions producing sample S3 configuration objects
pub mod rand_reader; // blocking `std::io::Read` source of random bytes
pub mod rand_src; // async `Stream`/`AsyncRead` source of random bytes
pub mod test_context; // environment-driven client/test setup
pub mod utils; // random names and response conversion helpers

45
common/src/rand_reader.rs Normal file
View File

@@ -0,0 +1,45 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io;
/// A finite `std::io::Read` source that yields random bytes.
pub struct RandReader {
    // Number of random bytes still to be produced; reaches 0 at EOF.
    size: u64,
}
impl RandReader {
#[allow(dead_code)]
pub fn new(size: u64) -> RandReader {
RandReader { size }
}
}
impl io::Read for RandReader {
    /// Fills `buf` with random bytes, bounded by the bytes remaining.
    ///
    /// Returns the number of bytes written; `Ok(0)` signals EOF once the
    /// configured size is exhausted.
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
        // min() on u64 replaces the former bool-match; it also avoids the
        // `self.size as usize` cast that could truncate on 32-bit targets
        // (the result here is always <= buf.len(), so the cast is safe).
        let bytes_read = self.size.min(buf.len() as u64) as usize;
        if bytes_read > 0 {
            let random: &mut dyn rand::RngCore = &mut rand::thread_rng();
            random.fill_bytes(&mut buf[0..bytes_read]);
        }
        self.size -= bytes_read as u64;
        Ok(bytes_read)
    }
}

89
common/src/rand_src.rs Normal file
View File

@@ -0,0 +1,89 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_std::task;
use bytes::Bytes;
use rand::SeedableRng;
use rand::prelude::SmallRng;
use std::io;
use tokio::io::AsyncRead;
use tokio_stream::Stream;
/// A finite source of random bytes, usable both as a `Stream` of `Bytes`
/// chunks and as a tokio `AsyncRead`.
pub struct RandSrc {
    // Number of random bytes still to be produced; reaches 0 at end of stream.
    size: u64,
    // Entropy-seeded small RNG, advanced per chunk.
    rng: SmallRng,
}
impl RandSrc {
    /// Creates a source that will produce exactly `size` random bytes.
    #[allow(dead_code)]
    pub fn new(size: u64) -> RandSrc {
        RandSrc {
            size,
            rng: SmallRng::from_entropy(),
        }
    }
}
impl Stream for RandSrc {
    type Item = Result<Bytes, io::Error>;

    /// Yields chunks of random bytes, at most 64 KiB per poll; the stream
    /// ends (`None`) once the configured size has been produced.
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        _cx: &mut task::Context<'_>,
    ) -> task::Poll<Option<Self::Item>> {
        if self.size == 0 {
            return task::Poll::Ready(None);
        }
        // min() on u64 replaces the former bool-match and keeps the cast to
        // usize safe (result is always <= 64 KiB).
        let bytes_read = self.size.min(64 * 1024) as usize;
        let this = self.get_mut();
        let mut buf = vec![0; bytes_read];
        let random: &mut dyn rand::RngCore = &mut this.rng;
        random.fill_bytes(&mut buf);
        this.size -= bytes_read as u64;
        task::Poll::Ready(Some(Ok(Bytes::from(buf))))
    }
}
impl AsyncRead for RandSrc {
    /// Fills the unfilled portion of `read_buf` with random bytes, bounded by
    /// the bytes remaining. Advancing by 0 bytes signals EOF.
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        _cx: &mut task::Context<'_>,
        read_buf: &mut tokio::io::ReadBuf<'_>,
    ) -> task::Poll<io::Result<()>> {
        let buf = read_buf.initialize_unfilled();
        // min() on u64 replaces the former bool-match; it also avoids the
        // `self.size as usize` cast that could truncate on 32-bit targets
        // (the result here is always <= buf.len(), so the cast is safe).
        let bytes_read = self.size.min(buf.len() as u64) as usize;
        let this = self.get_mut();
        if bytes_read > 0 {
            let random: &mut dyn rand::RngCore = &mut this.rng;
            random.fill_bytes(&mut buf[0..bytes_read]);
        }
        this.size -= bytes_read as u64;
        read_buf.advance(bytes_read);
        task::Poll::Ready(Ok(()))
    }
}

158
common/src/test_context.rs Normal file
View File

@@ -0,0 +1,158 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::cleanup_guard::CleanupGuard;
use crate::utils::rand_bucket_name;
use minio::s3::Client;
use minio::s3::creds::StaticProvider;
use minio::s3::http::BaseUrl;
use minio::s3::types::S3Api;
use std::path::{Path, PathBuf};
/// Connection settings plus a ready-made `Client`, shared by tests.
#[derive(Clone)]
pub struct TestContext {
    // Parsed server endpoint (scheme/host/region).
    pub base_url: BaseUrl,
    // Credentials used to build the static provider.
    pub access_key: String,
    pub secret_key: String,
    // Whether TLS certificate verification is disabled.
    pub ignore_cert_check: Option<bool>,
    // Optional path to a custom CA/server certificate.
    pub ssl_cert_file: Option<PathBuf>,
    // Client constructed from the fields above.
    pub client: Client,
}
impl TestContext {
    /// Builds a `TestContext` from environment variables.
    ///
    /// With `CI=true`, connection settings are read strictly from the
    /// environment; otherwise each unset variable falls back to a default
    /// pointing at the public play.min.io server.
    ///
    /// # Panics
    /// In CI mode, panics if `SERVER_ENDPOINT`, `ACCESS_KEY` or `SECRET_KEY`
    /// is missing. In both modes, panics if the endpoint fails to parse or
    /// the client cannot be constructed.
    pub fn new_from_env() -> Self {
        let run_on_ci: bool = std::env::var("CI")
            .unwrap_or("false".into())
            .parse()
            .unwrap_or(false);
        if run_on_ci {
            let host = std::env::var("SERVER_ENDPOINT").unwrap();
            let access_key = std::env::var("ACCESS_KEY").unwrap();
            let secret_key = std::env::var("SECRET_KEY").unwrap();
            // NOTE(review): in CI, mere presence of the variable enables
            // HTTPS / cert-ignore, unlike the boolean parsing below.
            let secure = std::env::var("ENABLE_HTTPS").is_ok();
            // Treat a missing SSL_CERT_FILE the same as an empty one instead
            // of panicking (the empty case is already handled just below).
            let value = std::env::var("SSL_CERT_FILE").unwrap_or_default();
            let mut ssl_cert_file = None;
            if !value.is_empty() {
                ssl_cert_file = Some(Path::new(&value));
            }
            let ignore_cert_check = std::env::var("IGNORE_CERT_CHECK").is_ok();
            let region = std::env::var("SERVER_REGION").ok();
            let mut base_url: BaseUrl = host.parse().unwrap();
            base_url.https = secure;
            if let Some(v) = region {
                base_url.region = v;
            }
            let static_provider = StaticProvider::new(&access_key, &secret_key, None);
            let client = Client::new(
                base_url.clone(),
                Some(Box::new(static_provider)),
                ssl_cert_file,
                Some(ignore_cert_check),
            )
            .unwrap();
            Self {
                base_url,
                access_key,
                secret_key,
                ignore_cert_check: Some(ignore_cert_check),
                ssl_cert_file: ssl_cert_file.map(PathBuf::from),
                client,
            }
        } else {
            const DEFAULT_SERVER_ENDPOINT: &str = "https://play.min.io/";
            const DEFAULT_ACCESS_KEY: &str = "minioadmin";
            const DEFAULT_SECRET_KEY: &str = "minioadmin";
            const DEFAULT_ENABLE_HTTPS: &str = "true";
            const DEFAULT_SSL_CERT_FILE: &str = "./tests/public.crt";
            const DEFAULT_IGNORE_CERT_CHECK: &str = "false";
            const DEFAULT_SERVER_REGION: &str = "";
            let host: String =
                std::env::var("SERVER_ENDPOINT").unwrap_or(DEFAULT_SERVER_ENDPOINT.to_string());
            log::debug!("SERVER_ENDPOINT={}", host);
            let access_key: String =
                std::env::var("ACCESS_KEY").unwrap_or(DEFAULT_ACCESS_KEY.to_string());
            log::debug!("ACCESS_KEY={}", access_key);
            let secret_key: String =
                std::env::var("SECRET_KEY").unwrap_or(DEFAULT_SECRET_KEY.to_string());
            log::debug!("SECRET_KEY=*****");
            let secure: bool = std::env::var("ENABLE_HTTPS")
                .unwrap_or(DEFAULT_ENABLE_HTTPS.to_string())
                .parse()
                .unwrap_or(false);
            log::debug!("ENABLE_HTTPS={}", secure);
            let ssl_cert: String =
                std::env::var("SSL_CERT_FILE").unwrap_or(DEFAULT_SSL_CERT_FILE.to_string());
            log::debug!("SSL_CERT_FILE={}", ssl_cert);
            let ssl_cert_file: PathBuf = ssl_cert.into();
            let ignore_cert_check: bool = std::env::var("IGNORE_CERT_CHECK")
                .unwrap_or(DEFAULT_IGNORE_CERT_CHECK.to_string())
                .parse()
                // Fall back to the documented default (false) on unparsable
                // input; the previous fallback of `true` contradicted
                // DEFAULT_IGNORE_CERT_CHECK and silently disabled TLS checks.
                .unwrap_or(false);
            log::debug!("IGNORE_CERT_CHECK={}", ignore_cert_check);
            let region: String =
                std::env::var("SERVER_REGION").unwrap_or(DEFAULT_SERVER_REGION.to_string());
            log::debug!("SERVER_REGION={:?}", region);
            let mut base_url: BaseUrl = host.parse().unwrap();
            base_url.https = secure;
            base_url.region = region;
            let static_provider = StaticProvider::new(&access_key, &secret_key, None);
            let client = Client::new(
                base_url.clone(),
                Some(Box::new(static_provider)),
                Some(&*ssl_cert_file),
                Some(ignore_cert_check),
            )
            .unwrap();
            Self {
                base_url,
                access_key,
                secret_key,
                ignore_cert_check: Some(ignore_cert_check),
                ssl_cert_file: Some(ssl_cert_file),
                client,
            }
        }
    }

    /// Creates a temporary bucket with an automatic cleanup guard.
    ///
    /// This function creates a new bucket and returns both its name and a `CleanupGuard`
    /// that ensures the bucket is deleted when it goes out of scope.
    ///
    /// # Returns
    /// A tuple containing:
    /// - `String` - The name of the created bucket.
    /// - `CleanupGuard` - A guard that automatically deletes the bucket when dropped.
    ///
    /// # Panics
    /// Panics if the bucket cannot be created.
    ///
    /// # Example
    /// ```rust
    /// let (bucket_name, guard) = ctx.create_bucket_helper().await;
    /// println!("Created temporary bucket: {}", bucket_name);
    /// // The bucket will be removed when `guard` is dropped.
    /// ```
    pub async fn create_bucket_helper(&self) -> (String, CleanupGuard) {
        let bucket_name = rand_bucket_name();
        let _resp = self.client.make_bucket(&bucket_name).send().await.unwrap();
        let guard = CleanupGuard::new(&self.client, &bucket_name);
        (bucket_name, guard)
    }
}

48
common/src/utils.rs Normal file
View File

@@ -0,0 +1,48 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use http::{Response as HttpResponse, StatusCode};
use minio::s3::error::Error;
use rand::distributions::{Alphanumeric, DistString};
/// Generates a random 8-character lowercase alphanumeric bucket name.
pub fn rand_bucket_name() -> String {
    let name = Alphanumeric.sample_string(&mut rand::thread_rng(), 8);
    name.to_lowercase()
}
/// Generates a random 8-character alphanumeric object name (mixed case).
pub fn rand_object_name() -> String {
    let mut rng = rand::thread_rng();
    Alphanumeric.sample_string(&mut rng, 8)
}
/// Extracts the body bytes from a response result, panicking (with debug
/// output) on either a request error or a body-read error.
pub async fn get_bytes_from_response(v: Result<reqwest::Response, Error>) -> bytes::Bytes {
    let resp = match v {
        Ok(r) => r,
        Err(e) => panic!("{:?}", e),
    };
    match resp.bytes().await {
        Ok(b) => b,
        Err(e) => panic!("{:?}", e),
    }
}
/// Wraps raw bytes in a minimal `200 OK` reqwest response with
/// `Content-Type: application/octet-stream`.
pub fn get_response_from_bytes(bytes: bytes::Bytes) -> reqwest::Response {
    let builder = HttpResponse::builder()
        .status(StatusCode::OK) // You can customize the status if needed
        .header("Content-Type", "application/octet-stream");
    let http_response = builder.body(bytes).expect("Failed to build HTTP response");
    reqwest::Response::try_from(http_response).expect("Failed to convert to reqwest::Response")
}