page lock all dbs in parallel
eaypek-tfh committed Jan 24, 2025
1 parent f92d639 commit c2e7d7a
Showing 4 changed files with 26 additions and 30 deletions.
2 changes: 1 addition & 1 deletion deploy/stage/smpcv2-0-stage/values-iris-mpc.yaml
@@ -78,7 +78,7 @@ env:
value: "even_odd_binary_output_16k"

- name: SMPC__LOAD_CHUNKS_PARALLELISM
value: "128"
value: "256"

- name: SMPC__LOAD_CHUNKS_DEFAULT_CLIENT
value: "true"
2 changes: 1 addition & 1 deletion deploy/stage/smpcv2-1-stage/values-iris-mpc.yaml
@@ -78,7 +78,7 @@ env:
value: "even_odd_binary_output_16k"

- name: SMPC__LOAD_CHUNKS_PARALLELISM
value: "128"
value: "256"

- name: SMPC__LOAD_CHUNKS_DEFAULT_CLIENT
value: "true"
2 changes: 1 addition & 1 deletion deploy/stage/smpcv2-2-stage/values-iris-mpc.yaml
@@ -78,7 +78,7 @@ env:
value: "even_odd_binary_output_16k"

- name: SMPC__LOAD_CHUNKS_PARALLELISM
value: "128"
value: "256"

- name: SMPC__LOAD_CHUNKS_DEFAULT_CLIENT
value: "true"
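For context on what the bumped setting controls: a common way a parallelism knob like SMPC__LOAD_CHUNKS_PARALLELISM is consumed is to cap how many S3 chunk downloads are in flight at once, e.g. with `buffer_unordered`. The sketch below is only an illustration of that pattern under that assumption; `load_chunk`, `load_all_chunks`, and the way the configured value reaches the function are hypothetical, not this repository's actual API.

```rust
use futures::{stream, StreamExt};

// Hypothetical chunk loader, standing in for the real S3 fetch.
async fn load_chunk(key: String) -> eyre::Result<Vec<u8>> {
    let _ = key;
    Ok(Vec::new())
}

// Bound the number of concurrent downloads by `parallelism`,
// e.g. the value of SMPC__LOAD_CHUNKS_PARALLELISM (256 after this change).
async fn load_all_chunks(keys: Vec<String>, parallelism: usize) -> eyre::Result<Vec<Vec<u8>>> {
    stream::iter(keys)
        .map(load_chunk)
        .buffer_unordered(parallelism)
        .collect::<Vec<_>>()
        .await
        .into_iter()
        .collect()
}
```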
50 changes: 23 additions & 27 deletions iris-mpc/src/bin/server.rs
@@ -12,7 +12,7 @@ use aws_smithy_runtime_api::client::dns::{DnsFuture, ResolveDns};
 use axum::{response::IntoResponse, routing::get, Router};
 use clap::Parser;
 use eyre::{eyre, Context};
-use futures::{stream::BoxStream, StreamExt};
+use futures::{future, stream::BoxStream, StreamExt};
 use hickory_resolver::{
     config::{ResolverConfig, ResolverOpts},
     TokioAsyncResolver,
@@ -1179,36 +1179,32 @@ async fn server_main(config: Config) -> eyre::Result<()> {
         S3Store::new(db_chunks_s3_client.clone(), db_chunks_bucket_name.clone());

     tracing::info!("Page-lock host memory");
-    let left_codes = actor.left_code_db_slices.code_gr.clone();
-    let right_codes = actor.right_code_db_slices.code_gr.clone();
-    let left_masks = actor.left_mask_db_slices.code_gr.clone();
-    let right_masks = actor.right_mask_db_slices.code_gr.clone();
-    let device_manager_clone = actor.device_manager.clone();
-    let page_lock_handle = spawn_blocking(move || {
-        for db in [&left_codes, &right_codes] {
+    let dbs = [
+        (actor.left_code_db_slices.code_gr.clone(), IRIS_CODE_LENGTH),
+        (actor.right_code_db_slices.code_gr.clone(), IRIS_CODE_LENGTH),
+        (actor.left_mask_db_slices.code_gr.clone(), MASK_CODE_LENGTH),
+        (actor.right_mask_db_slices.code_gr.clone(), MASK_CODE_LENGTH),
+    ];
+    let mut page_lock_handles = Vec::new();
+    for (db, code_length) in dbs {
+        let max_db_size = config.max_db_size;
+        // spawn_blocking moves its closure to a worker thread
+        let device_manager_clone = actor.device_manager.clone();
+        let handle = spawn_blocking(move || {
             register_host_memory(
-                device_manager_clone.clone(),
-                db,
-                config.max_db_size,
-                IRIS_CODE_LENGTH,
+                device_manager_clone,
+                &db,
+                max_db_size,
+                code_length,
             );
-        }
-
-        for db in [&left_masks, &right_masks] {
-            register_host_memory(
-                device_manager_clone.clone(),
-                db,
-                config.max_db_size,
-                MASK_CODE_LENGTH,
-            );
-        }
-    });
-    let mut page_lock_handle = Some(page_lock_handle);
-
+        });
+        page_lock_handles.push(handle);
+    }
+    let mut page_lock_handles = Some(page_lock_handles);
     tokio::runtime::Handle::current().block_on(async {
         let mut now = Instant::now();
         if config.page_lock_at_beginning {
-            page_lock_handle.take().unwrap().await?;
+            future::join_all(page_lock_handles.take().unwrap()).await;
             tracing::info!("Page-locking took {:?}", now.elapsed());
         }
         now = Instant::now();
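The hunk above replaces a single blocking task that registered the four database slices one after another with one `spawn_blocking` task per slice, and the resulting `JoinHandle`s are awaited together via `future::join_all`. A self-contained sketch of that pattern follows; the `page_lock` function and plain byte buffers are placeholders standing in for `register_host_memory` and the real device slices, not the actual types used here.

```rust
use futures::future;
use tokio::task::spawn_blocking;

// Placeholder for the real page-locking call (register_host_memory).
fn page_lock(buf: &[u8]) {
    // cudaHostRegister / mlock of `buf` would happen here.
    let _ = buf.len();
}

#[tokio::main]
async fn main() {
    // Four stand-ins for the left/right code and mask slices.
    let dbs: Vec<Vec<u8>> = vec![vec![0u8; 1 << 20]; 4];

    // One blocking task per slice so the registrations run in parallel
    // on the blocking thread pool instead of back to back.
    let mut handles = Vec::new();
    for db in dbs {
        handles.push(spawn_blocking(move || page_lock(&db)));
    }

    // Await all registrations; each handle yields Result<(), JoinError>.
    for result in future::join_all(handles).await {
        result.expect("page-lock task panicked");
    }
}
```

Spawning one blocking task per slice lets the four registrations proceed concurrently on Tokio's blocking pool rather than sequentially inside a single worker thread.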
@@ -1373,7 +1369,7 @@ async fn server_main(config: Config) -> eyre::Result<()> {

     if !config.page_lock_at_beginning {
         tracing::info!("Waiting for page-lock to finish");
-        page_lock_handle.take().unwrap().await?;
+        future::join_all(page_lock_handles.take().unwrap()).await;
     }

     tracing::info!(
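The second hunk keeps the existing `Option::take` trick so the handles are awaited exactly once, either up front (when `config.page_lock_at_beginning` is set) or just before the memory is first needed. A minimal sketch of that deferral, with a hypothetical `run` driver and no-op closures standing in for the real registration work:

```rust
use futures::future;
use tokio::task::{spawn_blocking, JoinHandle};

// Hypothetical driver; `page_lock_at_beginning` mirrors the config flag.
async fn run(page_lock_at_beginning: bool) {
    // One blocking task per DB slice (no-op bodies here).
    let handles: Vec<JoinHandle<()>> = (0..4)
        .map(|_| spawn_blocking(|| { /* register_host_memory(...) would run here */ }))
        .collect();

    // Wrap in Option so the handles can be consumed exactly once,
    // from whichever branch runs.
    let mut page_lock_handles = Some(handles);

    if page_lock_at_beginning {
        // Results (including any JoinError) are discarded, as in the hunk above.
        future::join_all(page_lock_handles.take().unwrap()).await;
    }

    // ... database loading and other startup work would happen here ...

    if !page_lock_at_beginning {
        future::join_all(page_lock_handles.take().unwrap()).await;
    }
}
```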
