% ml-nn.tex -- LaTeX macro definitions for ML / deep-learning notation
% (neural networks, regularization, optimization, autoencoders, adversarials)
% ml - NNs: notation for a single hidden layer with one output neuron
\newcommand{\neurons}{z_1,\dots,z_M} % list of the M hidden neurons
\newcommand{\hidz}{\mathbf{z}} % vector of hidden activations
\newcommand{\biasb}{\mathbf{b}} % bias vector (hidden layer)
\newcommand{\biasc}{c} % scalar bias in the output neuron
\newcommand{\wtw}{\mathbf{w}} % weight vector (general)
\newcommand{\Wmat}{\mathbf{W}} % weight matrix (general)
\newcommand{\wtu}{\mathbf{u}} % weight vector of output neuron
% deeplearning - regularization
% NOTE(review): the multi-letter subscripts "reg" and "emp" are labels, not
% products of variables, so they are set upright with \mathrm (previously
% bare _{reg} rendered as italic r*e*g).
\newcommand{\Oreg}{\mathnormal{R}_{\mathrm{reg}}(\theta|X,y)} % regularized objective function
\newcommand{\Ounreg}{\mathnormal{R}_{\mathrm{emp}}(\theta|X,y)} % unregularized (empirical-risk) objective function
\newcommand{\Pen}{\Omega(\theta)} % penalty term added by the regularizer
\newcommand{\Oregweight}{\mathnormal{R}_{\mathrm{reg}}(w|X,y)} % regularized objective function in terms of weights w
\newcommand{\Oweight}{\mathnormal{R}_{\mathrm{emp}}(w|X,y)} % unregularized objective function in terms of weights w
\newcommand{\Oweighti}{\mathnormal{R}_{\mathrm{emp}}(w_i|X,y)} % unregularized objective function for a single weight w_i
\newcommand{\Oweightopt}{\mathnormal{J}(w^*|X,y)} % objective function at the optimal weight w^*
\newcommand{\Oopt}{\hat{\mathnormal{J}}(\theta|X,y)} % approximated (hat) objective function
\newcommand{\Odropout}{\mathnormal{J}(\theta, \mu|X,y)} % dropout objective function (\mu: dropout mask)
% deeplearning - optimization
% (\xv, \yi, \thetav are defined elsewhere in the macro collection)
\newcommand{\Loss}{L(y, f(\xv, \thetav))} % pointwise loss of prediction f(x, theta)
% NOTE(review): the next two comments were originally swapped -- the "nest"
% macro evaluates at the lookahead point \thetav + \varphi\nub, which is the
% Nesterov variant; confirm against the slides that use these macros.
\newcommand{\Lmomentumnest}{L(\yi, f(x^{(i)}, \thetav + \varphi \nub))} % Nesterov momentum risk (loss at lookahead point)
\newcommand{\Lmomentumtilde}{L(\yi, f(x^{(i)}, \tilde{\thetav}))} % momentum risk at shifted parameters \tilde{\thetav}
\newcommand{\Lmomentum}{L(\yi, f(x^{(i)}, \thetav))} % plain momentum risk (loss at current \thetav)
\newcommand{\Hess}{\mathbf{H}} % Hessian matrix
\newcommand{\nub}{\bm{\nu}} % velocity vector \nu used by momentum updates (requires bm)
% deeplearning - autoencoders (f: encoder, g: decoder)
\newcommand{\uauto}{L(x,g(f(x)))} % undercomplete autoencoder objective: reconstruction loss on clean input x
\newcommand{\dauto}{L(x,g(f(\tilde{x})))} % denoising autoencoder objective: reconstruct clean x from corrupted \tilde{x}
% deeplearning - adversarials
\newcommand{\deltab}{\bm{\delta}} % adversarial perturbation vector (bold \delta, requires bm)
\newcommand{\Lossdeltai}{L(\yi, f(\xi + \deltab|\thetav))} % per-example loss under perturbed input \xi + \deltab
\newcommand{\Lossdelta}{L(y, f(\xv + \deltab| \thetav))} % loss under perturbed input \xv + \deltab