From 4ff99a6a96577b41da19efa27b66bfb7f9e58960 Mon Sep 17 00:00:00 2001
From: Bernhard Schuster
Date: Sun, 31 Oct 2021 12:00:43 +0100
Subject: [PATCH] chore/layer/nll: fmt, cleanup, asserts

---
 coaster/tests/shared_memory_specs.rs        |  4 +--
 .../layers/loss/negative_log_likelihood.rs  |  2 +-
 juice/tests/layer_specs.rs                  | 31 +++++++++----------
 3 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/coaster/tests/shared_memory_specs.rs b/coaster/tests/shared_memory_specs.rs
index c75827187..48885378e 100644
--- a/coaster/tests/shared_memory_specs.rs
+++ b/coaster/tests/shared_memory_specs.rs
@@ -2,10 +2,10 @@ use coaster as co;
 
 #[cfg(test)]
 mod shared_memory_spec {
-    use super::co::prelude::*;
-    use super::co::tensor::Error;
     #[cfg(features = "cuda")]
     use super::co::frameworks::native::flatbox::FlatBox;
+    use super::co::prelude::*;
+    use super::co::tensor::Error;
 
     #[cfg(features = "cuda")]
     fn write_to_memory<T: Copy>(mem: &mut FlatBox, data: &[T]) {
diff --git a/juice/src/layers/loss/negative_log_likelihood.rs b/juice/src/layers/loss/negative_log_likelihood.rs
index de7450d1c..ce08bd3b8 100644
--- a/juice/src/layers/loss/negative_log_likelihood.rs
+++ b/juice/src/layers/loss/negative_log_likelihood.rs
@@ -166,4 +166,4 @@ impl Into<LayerType> for NegativeLogLikelihoodConfig {
     fn into(self) -> LayerType {
         LayerType::NegativeLogLikelihood(self)
     }
-}
\ No newline at end of file
+}
diff --git a/juice/tests/layer_specs.rs b/juice/tests/layer_specs.rs
index 1f22eb066..a7f7147d7 100644
--- a/juice/tests/layer_specs.rs
+++ b/juice/tests/layer_specs.rs
@@ -386,8 +386,8 @@ mod layer_spec {
             .is_err());
     }
 
-    use juice::layers::SequentialConfig;
     use juice::layers::NegativeLogLikelihoodConfig;
+    use juice::layers::SequentialConfig;
 
     #[test]
     fn nll_basic() {
@@ -401,19 +401,15 @@
         let nll_layer_cfg = NegativeLogLikelihoodConfig { num_classes: 10 };
         let nll_cfg = LayerConfig::new("nll", nll_layer_cfg);
         classifier_cfg.add_layer(nll_cfg);
-        let mut network = Layer::from_config(
-            native_backend.clone(),
-            &LayerConfig::new("foo", classifier_cfg),
-        );
-        let labels_data = (0..(BATCH_SIZE * KLASS_COUNT))
-            .into_iter()
-            .map(|x| x as f32)
-            .collect::<Vec<f32>>();
+        let mut network = Layer::from_config(native_backend.clone(), &LayerConfig::new("foo", classifier_cfg));
         let desc = [BATCH_SIZE, KLASS_COUNT];
         let desc: &[usize] = &desc[..];
         let mut input = SharedTensor::<f32>::new(&desc);
         let mem = input.write_only(native_backend.device()).unwrap();
-        let input_data = (0..(KLASS_COUNT * BATCH_SIZE)).into_iter().map(|x| x as f32 * 3.77).collect::<Vec<f32>>();
+        let input_data = (0..(KLASS_COUNT * BATCH_SIZE))
+            .into_iter()
+            .map(|x| x as f32 * 3.77)
+            .collect::<Vec<f32>>();
         let input_data = &input_data[..];
         juice::util::write_to_memory(mem, input_data);
 
@@ -435,11 +431,14 @@
             std::sync::Arc::new(std::sync::RwLock::new(labels)),
         ];
 
-        let output = network.forward(input.as_slice());
-
-        let x = output[0].read().unwrap();
-        dbg!(&x);
-        let out = x.read(native_backend.device()).unwrap();
-        dbg!(out.as_slice::<f32>());
+        let out = network.forward(input.as_slice());
+        assert_eq!(out.len(), 1);
+        let out = &out[0];
+        let out = out.read().unwrap();
+        assert_eq!(out.desc().dims(), &vec![BATCH_SIZE, 1]);
+        let out = out.read(native_backend.device()).unwrap();
+        let out_mem = out.as_slice::<f32>();
+        assert_eq!(out_mem.len(), BATCH_SIZE);
+        assert!(out_mem[0] < 0_f32);
     }
 }
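
Reviewer note: the new `assert!(out_mem[0] < 0_f32)` relies on the NLL layer's
forward pass writing the negated input value at each label index, so positive
inputs yield negative loss entries. Below is a minimal standalone sketch of
that reduction under that assumption; the helper name `nll_forward_sketch` and
the BATCH_SIZE/KLASS_COUNT values are illustrative, not taken from the test.

    // Sketch of the per-batch reduction the test's sign assertion relies on.
    // Assumption: forward emits -input[batch][label] for each batch element.
    fn nll_forward_sketch(input: &[f32], labels: &[f32], num_classes: usize) -> Vec<f32> {
        labels
            .iter()
            .enumerate()
            .map(|(batch_n, &label)| -input[batch_n * num_classes + label as usize])
            .collect()
    }

    fn main() {
        const BATCH_SIZE: usize = 4;
        const KLASS_COUNT: usize = 10;
        // Same synthetic input as the test: x * 3.77, strictly positive for x > 0.
        let input: Vec<f32> = (0..BATCH_SIZE * KLASS_COUNT).map(|x| x as f32 * 3.77).collect();
        // Pick nonzero labels so every selected input is strictly positive.
        let labels: Vec<f32> = (0..BATCH_SIZE).map(|x| (x + 1) as f32).collect();
        let out = nll_forward_sketch(&input, &labels, KLASS_COUNT);
        assert_eq!(out.len(), BATCH_SIZE);
        assert!(out.iter().all(|v| *v < 0.0)); // negated positive inputs
    }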