Skip to content

Commit

Permalink
Merge pull request #1 from kianmeng/fix-typos
Browse files Browse the repository at this point in the history
Fix typos
  • Loading branch information
ahirner authored Nov 28, 2022
2 parents 47da014 + 4e24acf commit b3b5957
Show file tree
Hide file tree
Showing 7 changed files with 11 additions and 11 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ By default, the app's settings are persisted after closing.
The purpose of this crate is to study tradeoffs regarding model inference, native GUIs and
video decoding approaches, in Rust :crab:.

There are a couple of Todos will make `InFur` more intersting beyond exploring
There are a couple of Todos that will make `InFur` more interesting beyond exploring
production-readiness as of now:

- [ ] GATify `type Output` in `trait Processor`
Expand Down
2 changes: 1 addition & 1 deletion ff-video/src/decoder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ enum StreamInfoTerm {
Final(String),
}

/// Deliver infos about an ffmpeg video process trhough its stderr file
/// Deliver infos about an ffmpeg video process through its stderr file
///
/// The receiver can be read until satisfying info was obtained and dropped anytime.
/// By default, frame updates and other infos are logged as tracing event.
Expand Down
2 changes: 1 addition & 1 deletion ff-video/src/parse.rs
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ impl InfoParser {

/// Blanket implementation for lines of ffmpeg's default stderr bytes.
pub(crate) trait FFMpegLineIter: Iterator {
/// Emit lines on \n, \r (CR) or both but never emtpy lines.
/// Emit lines on \n, \r (CR) or both but never empty lines.
fn ffmpeg_lines(self) -> FFMpegLines<Self>
where
Self: Sized,
Expand Down
4 changes: 2 additions & 2 deletions infur-test-gen/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ fn run_ffmpeg_synth(
.expect("synthesizing video couldn't start, do you have ffmpeg in PATH?")
.wait()
.expect("synthesizing video didn't finish");
assert!(status.success(), "synthesizing videos didn't finish succesfully");
assert!(status.success(), "synthesizing videos didn't finish successfully");
}

fn download(source_url: &str, target_file: impl AsRef<Path>) {
Expand Down Expand Up @@ -85,7 +85,7 @@ pub fn main() {
}

// models
// segementation model, see: https://github.com/onnx/models/tree/main/vision/object_detection_segmentation/fcn
// segmentation model, see: https://github.com/onnx/models/tree/main/vision/object_detection_segmentation/fcn
let fcn_resnet50_12_int8 = gen_root.join("models").join("fcn-resnet50-12-int8.onnx");
download("https://github.com/onnx/models/raw/main/vision/object_detection_segmentation/fcn/model/fcn-resnet50-12-int8.onnx",
&fcn_resnet50_12_int8);
Expand Down
2 changes: 1 addition & 1 deletion infur/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ eframe = { version = "0.19", features = ["wgpu", "default_fonts"], default-featu
serde = { version = "1", features = ["derive"] }
fast_image_resize = { version = "1" }
# need onnxruntime .14 for 0-dim input tolerance (not in .13),
# then furhtermore need master to resolve ndarray with tract-core...
# then furthermore need master to resolve ndarray with tract-core...
onnxruntime = { git = "https://github.com/nbigaouette/onnxruntime-rs" }
once_cell = "1"
image-ext = { path = "../image-ext" }
Expand Down
6 changes: 3 additions & 3 deletions infur/src/decode_predict.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ use onnxruntime::ndarray::Array3;
///
/// adapted from: <http://www.color-hex.com/color-palette/23381>
/// and: <http://www.color-hex.com/color-palette/52402>
const COLORS_PALATTE: [(u8, u8, u8); 20] = [
const COLORS_PALETTE: [(u8, u8, u8); 20] = [
(75, 180, 60),
(75, 25, 230),
(25, 225, 255),
Expand All @@ -31,7 +31,7 @@ const COLORS_PALATTE: [(u8, u8, u8); 20] = [

fn color_code(klass: usize, alpha: f32) -> Color32 {
// todo: pre-transform COLORS into linear space
let (r, g, b) = COLORS_PALATTE[klass % COLORS_PALATTE.len()];
let (r, g, b) = COLORS_PALETTE[klass % COLORS_PALETTE.len()];
Color32::from_rgba_unmultiplied(r, g, b, (alpha * 255.0f32) as u8)
}

Expand Down Expand Up @@ -92,7 +92,7 @@ mod test {

#[test]
fn color_2() {
let c = COLORS_PALATTE[2];
let c = COLORS_PALETTE[2];
assert_eq!(color_code(2, 0.5), Color32::from_rgba_unmultiplied(c.0, c.1, c.2, 127));
}

Expand Down
4 changes: 2 additions & 2 deletions infur/src/predict_onnx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ struct ImageSession<'s> {

/// ONNX session with pre-processing u8 images.
impl<'s> ImageSession<'s> {
/// Constract an `ImageSession` by inferring some required image input meta data.
/// Construct an `ImageSession` by inferring some required image input meta data.
///
/// The basic assumption is that images are passed as batches at position 0.
///
Expand Down Expand Up @@ -375,7 +375,7 @@ mod test {
let mut tensors = vec![];
m.advance(&img, &mut tensors).unwrap();

assert_eq!(tensors.len(), 2, "this sementation model should return two tensors");
assert_eq!(tensors.len(), 2, "this segmentation model should return two tensors");
assert_eq!(tensors[0].shape(), [21, 240, 320], "out should be 21 classes upscaled");
assert_eq!(tensors[1].shape(), [21, 240, 320], "aux should be 21 classes upscaled");
}
Expand Down

0 comments on commit b3b5957

Please sign in to comment.