diff --git a/deploy/prod/common-values-iris-mpc.yaml b/deploy/prod/common-values-iris-mpc.yaml
index ff058f146..3f577013c 100644
--- a/deploy/prod/common-values-iris-mpc.yaml
+++ b/deploy/prod/common-values-iris-mpc.yaml
@@ -1,4 +1,4 @@
-image: "ghcr.io/worldcoin/iris-mpc:v0.8.35"
+image: "ghcr.io/worldcoin/iris-mpc:v0.8.36"
 environment: prod
 replicaCount: 1
 
diff --git a/deploy/prod/smpcv2-0-prod/values-iris-mpc.yaml b/deploy/prod/smpcv2-0-prod/values-iris-mpc.yaml
index 3ab0addfc..991f430ea 100644
--- a/deploy/prod/smpcv2-0-prod/values-iris-mpc.yaml
+++ b/deploy/prod/smpcv2-0-prod/values-iris-mpc.yaml
@@ -84,7 +84,7 @@ env:
     value: "10000000"
 
   - name: SMPC__FAKE_DB_SIZE
-    value: "6000000"
+    value: "1000000"
 
   - name: SMPC__MAX_BATCH_SIZE
     value: "64"
diff --git a/deploy/prod/smpcv2-1-prod/values-iris-mpc.yaml b/deploy/prod/smpcv2-1-prod/values-iris-mpc.yaml
index 852d2c0e2..01ef43ce7 100644
--- a/deploy/prod/smpcv2-1-prod/values-iris-mpc.yaml
+++ b/deploy/prod/smpcv2-1-prod/values-iris-mpc.yaml
@@ -84,7 +84,7 @@ env:
     value: "10000000"
 
   - name: SMPC__FAKE_DB_SIZE
-    value: "6000000"
+    value: "1000000"
 
   - name: SMPC__MAX_BATCH_SIZE
     value: "64"
diff --git a/deploy/prod/smpcv2-2-prod/values-iris-mpc.yaml b/deploy/prod/smpcv2-2-prod/values-iris-mpc.yaml
index 31a7d06e4..bd952b80b 100644
--- a/deploy/prod/smpcv2-2-prod/values-iris-mpc.yaml
+++ b/deploy/prod/smpcv2-2-prod/values-iris-mpc.yaml
@@ -84,7 +84,7 @@ env:
     value: "10000000"
 
   - name: SMPC__FAKE_DB_SIZE
-    value: "6000000"
+    value: "1000000"
 
   - name: SMPC__MAX_BATCH_SIZE
     value: "64"
diff --git a/iris-mpc-gpu/src/server/actor.rs b/iris-mpc-gpu/src/server/actor.rs
index fd422bb4e..99a0f16b2 100644
--- a/iris-mpc-gpu/src/server/actor.rs
+++ b/iris-mpc-gpu/src/server/actor.rs
@@ -31,7 +31,7 @@ use iris_mpc_common::{
 use itertools::Itertools;
 use rand::{rngs::StdRng, SeedableRng};
 use ring::hkdf::{Algorithm, Okm, Salt, HKDF_SHA256};
-use std::{collections::HashMap, mem, sync::Arc, time::Instant};
+use std::{collections::HashMap, mem, slice::SliceIndex, sync::Arc, time::Instant};
 use tokio::sync::{mpsc, oneshot};
 
 macro_rules! record_stream_time {
@@ -503,6 +503,60 @@ impl ServerActor {
             "Query batch sizes mismatch"
         );
 
+        ///////////////////////////////////////////////////////////////////
+        /// DEBUG: performance testing
+        ///////////////////////////////////////////////////////////////////
+        let mut slices = vec![];
+        let mut slices1 = vec![];
+        let mut slices2 = vec![];
+        let mut slices3 = vec![];
+        const DUMMY_DATA_LEN: usize = 5 * (1 << 30);
+        for dev in self.device_manager.devices() {
+            let slice: CudaSlice<u8> = dev.alloc_zeros(DUMMY_DATA_LEN).unwrap();
+            let slice1: CudaSlice<u8> = dev.alloc_zeros(DUMMY_DATA_LEN).unwrap();
+            let slice2: CudaSlice<u8> = dev.alloc_zeros(DUMMY_DATA_LEN).unwrap();
+            let slice3: CudaSlice<u8> = dev.alloc_zeros(DUMMY_DATA_LEN).unwrap();
+            slices.push(Some(slice));
+            slices1.push(slice1);
+            slices2.push(slice2);
+            slices3.push(slice3);
+        }
+
+        let now = Instant::now();
+
+        for i in 0..self.device_manager.device_count() {
+            self.device_manager.device(i).bind_to_thread().unwrap();
+
+            self.comms[i]
+                .broadcast(&slices[i], &mut slices1[i], 0)
+                .unwrap();
+            self.comms[i]
+                .broadcast(&slices[i], &mut slices2[i], 1)
+                .unwrap();
+            self.comms[i]
+                .broadcast(&slices[i], &mut slices3[i], 2)
+                .unwrap();
+        }
+
+        for dev in self.device_manager.devices() {
+            dev.synchronize().unwrap();
+        }
+
+        let elapsed = now.elapsed();
+
+        let throughput = (DUMMY_DATA_LEN as f64 * self.device_manager.device_count() as f64 * 4f64)
+            / (elapsed.as_millis() as f64)
+            / 1_000_000_000f64
+            * 1_000f64;
+        tracing::info!(
+            "received in {:?} [{:.2} GB/s] [{:.2} Gbps]",
+            elapsed,
+            throughput,
+            throughput * 8f64
+        );
+
+        let now = Instant::now();
+
         ///////////////////////////////////////////////////////////////////
         // PERFORM DELETIONS (IF ANY)
         ///////////////////////////////////////////////////////////////////