diff --git a/src/linalg/traits/svd.rs b/src/linalg/traits/svd.rs index 8c807714..cee33a0e 100644 --- a/src/linalg/traits/svd.rs +++ b/src/linalg/traits/svd.rs @@ -48,9 +48,7 @@ pub struct SVD> { pub V: M, /// Singular values of the original matrix pub s: Vec, - /// m: usize, - /// n: usize, /// Tolerance tol: T, diff --git a/src/naive_bayes/bernoulli.rs b/src/naive_bayes/bernoulli.rs index 33f00bd4..4be62d56 100644 --- a/src/naive_bayes/bernoulli.rs +++ b/src/naive_bayes/bernoulli.rs @@ -258,7 +258,7 @@ impl BernoulliNBDistribution { /// * `x` - training data. /// * `y` - vector with target values (classes) of length N. /// * `priors` - Optional vector with prior probabilities of the classes. If not defined, - /// priors are adjusted according to the data. + /// priors are adjusted according to the data. /// * `alpha` - Additive (Laplace/Lidstone) smoothing parameter. /// * `binarize` - Threshold for binarizing. fn fit, Y: Array1>( @@ -402,10 +402,10 @@ impl, Y: Arr { /// Fits BernoulliNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like class priors, alpha for smoothing and - /// binarizing threshold. + /// binarizing threshold. pub fn fit(x: &X, y: &Y, parameters: BernoulliNBParameters) -> Result { let distribution = if let Some(threshold) = parameters.binarize { BernoulliNBDistribution::fit( @@ -427,6 +427,7 @@ impl, Y: Arr /// Estimates the class labels for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. 
pub fn predict(&self, x: &X) -> Result { if let Some(threshold) = self.binarize { diff --git a/src/naive_bayes/categorical.rs b/src/naive_bayes/categorical.rs index 4afb9c7e..b60ee0d3 100644 --- a/src/naive_bayes/categorical.rs +++ b/src/naive_bayes/categorical.rs @@ -363,7 +363,7 @@ impl, Y: Array1> Predictor for Categ impl, Y: Array1> CategoricalNB { /// Fits CategoricalNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like alpha for smoothing pub fn fit(x: &X, y: &Y, parameters: CategoricalNBParameters) -> Result { @@ -375,6 +375,7 @@ impl, Y: Array1> CategoricalNB { /// Estimates the class labels for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. pub fn predict(&self, x: &X) -> Result { self.inner.as_ref().unwrap().predict(x) diff --git a/src/naive_bayes/gaussian.rs b/src/naive_bayes/gaussian.rs index aff996be..e774fdc9 100644 --- a/src/naive_bayes/gaussian.rs +++ b/src/naive_bayes/gaussian.rs @@ -175,7 +175,7 @@ impl GaussianNBDistribution { /// * `x` - training data. /// * `y` - vector with target values (classes) of length N. /// * `priors` - Optional vector with prior probabilities of the classes. If not defined, - /// priors are adjusted according to the data. + /// priors are adjusted according to the data. pub fn fit, Y: Array1>( x: &X, y: &Y, @@ -317,7 +317,7 @@ impl, Y: Arr { /// Fits GaussianNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like class priors. 
pub fn fit(x: &X, y: &Y, parameters: GaussianNBParameters) -> Result { @@ -328,6 +328,7 @@ impl, Y: Arr /// Estimates the class labels for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. pub fn predict(&self, x: &X) -> Result { self.inner.as_ref().unwrap().predict(x) diff --git a/src/naive_bayes/mod.rs b/src/naive_bayes/mod.rs index 0a58a063..31cdd46d 100644 --- a/src/naive_bayes/mod.rs +++ b/src/naive_bayes/mod.rs @@ -89,6 +89,7 @@ impl, Y: Array1, D: NBDistribution Result { let y_classes = self.distribution.classes(); diff --git a/src/naive_bayes/multinomial.rs b/src/naive_bayes/multinomial.rs index 2d6c437c..e00965ed 100644 --- a/src/naive_bayes/multinomial.rs +++ b/src/naive_bayes/multinomial.rs @@ -208,7 +208,7 @@ impl MultinomialNBDistribution { /// * `x` - training data. /// * `y` - vector with target values (classes) of length N. /// * `priors` - Optional vector with prior probabilities of the classes. If not defined, - /// priors are adjusted according to the data. + /// priors are adjusted according to the data. /// * `alpha` - Additive (Laplace/Lidstone) smoothing parameter. pub fn fit, Y: Array1>( x: &X, @@ -345,10 +345,10 @@ impl, Y: Array { /// Fits MultinomialNB with given data /// * `x` - training data of size NxM where N is the number of samples and M is the number of - /// features. + /// features. /// * `y` - vector with target values (classes) of length N. /// * `parameters` - additional parameters like class priors, alpha for smoothing and - /// binarizing threshold. + /// binarizing threshold. pub fn fit(x: &X, y: &Y, parameters: MultinomialNBParameters) -> Result { let distribution = MultinomialNBDistribution::fit(x, y, parameters.alpha, parameters.priors)?; @@ -358,6 +358,7 @@ impl, Y: Array /// Estimates the class labels for the provided data. 
/// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with class estimates. pub fn predict(&self, x: &X) -> Result { self.inner.as_ref().unwrap().predict(x) diff --git a/src/neighbors/knn_classifier.rs b/src/neighbors/knn_classifier.rs index d18620c9..137143e0 100644 --- a/src/neighbors/knn_classifier.rs +++ b/src/neighbors/knn_classifier.rs @@ -261,6 +261,7 @@ impl, Y: Array1, D: Distance Result { let mut result = Y::zeros(x.shape().0); diff --git a/src/neighbors/knn_regressor.rs b/src/neighbors/knn_regressor.rs index 3d17bf24..b49743f8 100644 --- a/src/neighbors/knn_regressor.rs +++ b/src/neighbors/knn_regressor.rs @@ -246,6 +246,7 @@ impl, Y: Array1, D: Distance>> /// Predict the target for the provided data. /// * `x` - data of shape NxM where N is number of data points to estimate and M is number of features. + /// /// Returns a vector of size N with estimates. pub fn predict(&self, x: &X) -> Result { let mut result = Y::zeros(x.shape().0); diff --git a/src/optimization/first_order/gradient_descent.rs b/src/optimization/first_order/gradient_descent.rs index e9d625ce..0be7222f 100644 --- a/src/optimization/first_order/gradient_descent.rs +++ b/src/optimization/first_order/gradient_descent.rs @@ -6,13 +6,13 @@ use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult}; use crate::optimization::line_search::LineSearchMethod; use crate::optimization::{DF, F}; -/// +/// Gradient Descent optimization algorithm pub struct GradientDescent { /// Maximum number of iterations pub max_iter: usize, - /// + /// Relative tolerance for the gradient norm pub g_rtol: f64, - /// + /// Absolute tolerance for the gradient norm pub g_atol: f64, } diff --git a/src/optimization/first_order/lbfgs.rs b/src/optimization/first_order/lbfgs.rs index 6466007a..c9e27c2f 100644 --- a/src/optimization/first_order/lbfgs.rs +++ b/src/optimization/first_order/lbfgs.rs @@ -11,25 +11,25 @@ use 
crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult}; use crate::optimization::line_search::LineSearchMethod; use crate::optimization::{DF, F}; -/// +/// Limited-memory BFGS optimization algorithm pub struct LBFGS { /// Maximum number of iterations pub max_iter: usize, -/// +/// Relative tolerance for the gradient norm pub g_rtol: f64, -/// +/// Absolute tolerance for the gradient norm pub g_atol: f64, -/// +/// Absolute tolerance for the change in x between iterations pub x_atol: f64, -/// +/// Relative tolerance for the change in x between iterations pub x_rtol: f64, -/// +/// Absolute tolerance for the change in the objective function value pub f_abstol: f64, -/// +/// Relative tolerance for the change in the objective function value pub f_reltol: f64, -/// +/// Number of successive iterations allowed to satisfy the objective-function tolerance before convergence is declared pub successive_f_tol: usize, -/// +/// Number of past iterations retained to approximate the inverse Hessian (L-BFGS history size) pub m: usize, } diff --git a/src/optimization/first_order/mod.rs b/src/optimization/first_order/mod.rs index bbaeea62..cf7e4f91 100644 --- a/src/optimization/first_order/mod.rs +++ b/src/optimization/first_order/mod.rs @@ -1,6 +1,6 @@ -/// +/// Gradient descent optimization algorithm pub mod gradient_descent; -/// +/// Limited-memory BFGS optimization algorithm pub mod lbfgs; use std::clone::Clone; diff --git a/src/optimization/mod.rs b/src/optimization/mod.rs index 6a9fa332..83ca2493 100644 --- a/src/optimization/mod.rs +++ b/src/optimization/mod.rs @@ -1,11 +1,11 @@ -/// +/// First-order optimization algorithms pub mod first_order; -/// +/// Line search algorithms pub mod line_search; -/// +/// Function f(x) = y pub type F<'a, T, X> = dyn for<'b> Fn(&'b X) -> T + 'a; -/// +/// Function df(x), the derivative of f pub type DF<'a, X> = dyn for<'b> Fn(&'b mut X, &'b X) + 'a; /// Function order