diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml
new file mode 100644
index 0000000..dd821e6
--- /dev/null
+++ b/.github/workflows/CompatHelper.yml
@@ -0,0 +1,19 @@
+name: CompatHelper
+
+on:
+  schedule:
+    - cron: '00 00 * * *'
+
+jobs:
+  CompatHelper:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: julia-actions/setup-julia@latest
+        with:
+          version: 1.3
+      - name: Pkg.add("CompatHelper")
+        run: julia -e 'using Pkg; Pkg.add("CompatHelper")'
+      - name: CompatHelper.main()
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: julia -e 'using CompatHelper; CompatHelper.main()'
diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml
index b838c67..f389611 100644
--- a/.github/workflows/TagBot.yml
+++ b/.github/workflows/TagBot.yml
@@ -1,14 +1,17 @@
 name: TagBot
 on:
-  issue_comment: # THIS BIT IS NEW
+  issue_comment:
    types:
      - created
  workflow_dispatch:
+    inputs:
+      lookback:
+        default: 3
+permissions:
+  contents: write
 jobs:
   TagBot:
-    # THIS 'if' LINE IS NEW
    if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot'
-    # NOTHING BELOW HAS CHANGED
    runs-on: ubuntu-latest
    steps:
      - uses: JuliaRegistries/TagBot@v1
diff --git a/LICENSE b/LICENSE
index a6d9bff..15c0b3d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,5 +1,5 @@
 MIT License

-Copyright (c) 2019, Marco Congedo, CNRS, Grenoble, France:
+Copyright (c) 2019-2022, Marco Congedo, CNRS, Grenoble, France:
 https://sites.google.com/site/marcocongedo/home
 Permission is hereby granted, free of charge, to any person obtaining a copy
diff --git a/Manifest - Back up.toml b/Manifest - Back up.toml
new file mode 100644
index 0000000..dcef4a6
--- /dev/null
+++ b/Manifest - Back up.toml
@@ -0,0 +1,23 @@
+# This file is machine-generated - editing it directly is not advised
+
+[[Libdl]]
+uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
+
+[[LinearAlgebra]]
+deps = ["Libdl"]
+uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+
+[[Random]]
+deps = ["Serialization"]
+uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+
+[[Serialization]]
+uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
+
+[[SparseArrays]]
+deps = ["LinearAlgebra", "Random"]
+uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+
+[[Statistics]]
+deps = ["LinearAlgebra", "SparseArrays"]
+uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
diff --git a/Manifest.toml b/Manifest.toml
index dcef4a6..3988a00 100644
--- a/Manifest.toml
+++ b/Manifest.toml
@@ -1,23 +1,44 @@
 # This file is machine-generated - editing it directly is not advised

-[[Libdl]]
+julia_version = "1.7.2"
+manifest_format = "2.0"
+
+[[deps.Artifacts]]
+uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
+
+[[deps.CompilerSupportLibraries_jll]]
+deps = ["Artifacts", "Libdl"]
+uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
+
+[[deps.Libdl]]
 uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"

-[[LinearAlgebra]]
-deps = ["Libdl"]
+[[deps.LinearAlgebra]]
+deps = ["Libdl", "libblastrampoline_jll"]
 uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"

-[[Random]]
-deps = ["Serialization"]
+[[deps.OpenBLAS_jll]]
+deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
+uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
+
+[[deps.Random]]
+deps = ["SHA", "Serialization"]
 uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"

-[[Serialization]]
+[[deps.SHA]]
+uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
+
+[[deps.Serialization]]
 uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"

-[[SparseArrays]]
+[[deps.SparseArrays]]
 deps = ["LinearAlgebra", "Random"]
 uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

-[[Statistics]]
+[[deps.Statistics]]
 deps = ["LinearAlgebra", "SparseArrays"]
 uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+
+[[deps.libblastrampoline_jll]]
+deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
+uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
diff --git a/Project.toml b/Project.toml
index 8fe2695..736165a 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "PosDefManifold"
 uuid = "f45a3650-5c51-11e9-1e9a-133aa5e309cf"
 authors = ["Marco Congedo "]
-version = "0.4.9"
+version = "0.5.0"

 [deps]
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
diff --git a/build/riemannianGeometry.jl b/build/riemannianGeometry.jl
index 5ccbee8..08dce7e 100644
--- a/build/riemannianGeometry.jl
+++ b/build/riemannianGeometry.jl
@@ -1424,7 +1424,7 @@ function geometricMean(𝐏::ℍVector;
     maxiter, iter, conv, oldconv = 500, 1, 0., maxpos
     ⏩ && k>=thr*4 && thr > 1 ? threaded=true : threaded=false
     isempty(w) ? v=[] : v = _getWeights(w, ✓w, k)
-    init == nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init)
+    init === nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init)
     tol==0 ? tolerance = √eps(real(type))*1e2 : tolerance = tol
     💡 = similar(M, type)
     if threaded 𝐐 = similar(𝐏) end
@@ -1564,7 +1564,7 @@ function logdet0Mean(𝐏::Union{ℍVector, 𝔻Vector};
     maxiter, iter, conv, oldconv, l = 500, 1, 0., maxpos, k/2
     ⏩ && k>=thr*4 && thr > 1 ? threaded=true : threaded=false
     isempty(w) ? v=[] : v = _getWeights(w, ✓w, k)
-    init == nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = 𝕋(init)
+    init === nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = 𝕋(init)
     tol==0 ? tolerance = √eps(real(type))*1e2 : tolerance = tol
     💡 = similar(M, type)
     if threaded 𝐐 = similar(𝐏) end
@@ -1701,7 +1701,7 @@ function wasMean(𝐏::ℍVector;
     maxiter, iter, conv, oldconv = 500, 1, 0., maxpos
     ⏩ && k>=thr*4 && thr > 1 ? threaded=true : threaded=false
     isempty(w) ? v=[] : v = _getWeights(w, ✓w, k)
-    init == nothing ? M = generalizedMean(𝐏, 0.5; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init)
+    init === nothing ? M = generalizedMean(𝐏, 0.5; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init)
     tol==0 ? tolerance = √eps(real(type))*1e2 : tolerance = tol
     💡 = similar(M, type)
     if threaded 𝐐 = similar(𝐏) end
@@ -1876,7 +1876,7 @@ function powerMean(𝐏::ℍVector, p::Real;
     sqrtn, maxiter, iter, conv, oldconv, r = √n, 500, 1, 0., maxpos, -0.375/absp
     ⏩ && k>=thr*4 && thr > 1 ? threaded=true : threaded=false
     isempty(w) ? v=[] : v = _getWeights(w, ✓w, k)
-    init == nothing ? M = generalizedMean(𝐏, p; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init)
+    init === nothing ? M = generalizedMean(𝐏, p; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init)
     p<0 ? X=ℍ(M^(0.5)) : X=ℍ(M^(-0.5))
     💡, H, 𝒫 = similar(X, type), similar(X, type), similar(𝐏)
     p<0 ? 𝒫=[inv(P) for P in 𝐏] : 𝒫=𝐏

diff --git a/docs/build/MainModule/index.html b/docs/build/MainModule/index.html
index a4aba28..aaf5dae 100644
--- a/docs/build/MainModule/index.html
+++ b/docs/build/MainModule/index.html

MainModule (PosDefManifold.jl)

This is the main unit containing the PosDefManifold module.

It uses the following standard Julia packages:

using
LinearAlgebra
Statistics

Examples in some units of PosDefManifold also use the Plots package. Take a look at this tutorial for an introduction to data plotting with Julia.

The main module does not contain functions; it declares all constants, types and aliases of Julia functions used in all units.

Contents
constants
aliases
types
tips & tricks

constants

constant    value       numeric value
sqrt2       √2          1.4142135623730951
sqrt2inv    1/√2        0.7071067811865475
golden      (√5+1)/2    1.618033988749...
goldeninv   (√5-1)/2    0.618033988749...
maxpos      1e15        1000000000000000

aliases

alias   Julia function    in Package       tab-completion
𝚺       sum               Base             \bfSigma
𝛍       mean              Statistics       \bfmu
𝕄       Matrix            Base             \bbM
𝔻       Diagonal          LinearAlgebra    \bbD
ℍ       Hermitian         LinearAlgebra    \bbH
𝕃       LowerTriangular   LinearAlgebra    \bbL

All packages above are built-in Julia packages.

types

Metric::Enumerated type

@enum Metric begin
   Euclidean    =1
   invEuclidean =2
   ChoEuclidean =3
   logEuclidean =4
   logCholesky  =5
   Fisher       =6
   logdet0      =7
   Jeffrey      =8
   VonNeumann   =9
   Wasserstein  =10
end

Riemannian manipulations are defined for a given metric (see metrics). An instance for this type is requested as an argument in many functions contained in the riemannianGeometry.jl unit in order to specify the metric, for example:

 # generate a 15x15 symmetric positive definite matrix
  P=randP(15)
  # distance from P to the identity matrix according to the logdet0 metric
  d=distance(logdet0, P)

If you want to work consistently with a specific metric, you may want to declare in your script a global variable such as

global metric=logdet0  or  global metric=Metric(Int(logdet0)),

and then pass metric as argument in all your computations, e.g., referring to the above example,

d=distance(metric, P).

To know what the current metric is, you can get it as a string using:

s=string(metric)

To see the list of metrics in type Metric use:

instances(Metric)
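
For example, a minimal sketch putting these pieces together (randP and distance are PosDefManifold functions):

using PosDefManifold
metric = Fisher             # an instance of the Metric enumerated type
P = randP(15)               # random 15×15 symmetric positive definite matrix
d = distance(metric, P)     # distance of P from the identity
s = string(metric)          # "Fisher"
instances(Metric)           # the ten instances of the Metric type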

Array of Matrices types

𝕄Vector type

𝕄Vector=Vector{𝕄}

This is a vector of general Matrix matrices, alias of MatrixVector. Julia sees it as: Array{Array{T,2} where T,1}. See aliases for the 𝕄 symbol and typecasting matrices for the use of matrices in PosDefManifold.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.

See dim, typeofMatrix

𝕄Vector₂ type

𝕄Vector₂=Vector{𝕄Vector}

This is a vector of 𝕄Vector type objects, i.e., a vector of vectors of matrices. It is the alias of MatrixVector₂. Julia sees it as: Array{Array{Array{T,2} where T,1},1}.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝕄Vector objects it holds do not need to have the same length.

See dim, typeofMatrix

𝔻Vector type

𝔻Vector=Vector{𝔻}

This is a vector of Diagonal matrices, alias of DiagonalVector. Julia sees it as: Array{Diagonal,1}. See aliases for the 𝔻 symbol and typecasting matrices for the use of Diagonal matrices in PosDefManifold.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.

See dim, typeofMatrix

𝔻Vector₂ type

𝔻Vector₂=Vector{𝔻Vector}

This is a vector of 𝔻Vector type objects, i.e., a vector of vectors of Diagonal matrices. It is the alias of DiagonalVector₂. Julia sees it as: Array{Array{Diagonal,1},1}.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝔻Vector objects it holds do not need to have the same length.

See dim, typeofMatrix

𝕃Vector type

𝕃Vector=Vector{𝕃}

This is a vector of LowerTriangular matrices, alias of LowerTriangularVector. Julia sees it as: Array{LowerTriangular,1}. See aliases for the 𝕃 symbol and typecasting matrices for the use of LowerTriangular matrices in PosDefManifold.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.

See dim, typeofMatrix

𝕃Vector₂ type

𝕃Vector₂=Vector{𝕃Vector}

This is a vector of 𝕃Vector type objects, i.e., a vector of vectors of LowerTriangular matrices. It is the alias of LowerTriangularVector₂. Julia sees it as: Array{Array{LowerTriangular,1},1}.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝕃Vector objects it holds do not need to have the same length.

See dim, typeofMatrix

ℍVector type

ℍVector=Vector{ℍ}

This is a vector of Hermitian matrices, alias of HermitianVector. Julia sees it as: Array{Hermitian,1}. See aliases for the ℍ symbol and typecasting matrices for the use of Hermitian matrices in PosDefManifold.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.

See dim, typeofMatrix

ℍVector₂ type

ℍVector₂=Vector{ℍVector}

This is a vector of ℍVector type objects, i.e., a vector of vectors of Hermitian matrices. It is the alias of HermitianVector₂. Julia sees it as: Array{Array{Hermitian,1},1}.

Nota bene

This object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several ℍVector objects it holds do not need to have the same length.

See dim, typeofMatrix
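
A minimal sketch of constructing these vector types (randP generates random SPD matrices):

using PosDefManifold
A = ℍVector([randP(3) for i = 1:5])
B = ℍVector([randP(3) for i = 1:7])   # a different length is allowed
C = ℍVector₂([A, B])                  # a vector of ℍVector objects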

RealOrComplex type

RealOrComplex=Union{Real, Complex}

This is the Union of Real and Complex types.

AnyMatrix type

AnyMatrix=Union{𝔻{T}, 𝕃{T}, ℍ{T}, 𝕄{T}} where T<:RealOrComplex

This is the Union of real or complex Diagonal, LowerTriangular, Hermitian and Matrix types. It is often used in the definition of functions.

See aliases

AnyMatrixVector type

AnyMatrixVector=Union{𝕄Vector, 𝔻Vector, 𝕃Vector, ℍVector}

This is the Union of 𝕄Vector, 𝔻Vector, 𝕃Vector and ℍVector. It is often used in the definition of functions. See Array of Matrices types.

AnyMatrixVector₂ type

AnyMatrixVector₂=Union{𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂, ℍVector₂}

This is the Union of 𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂, ℍVector₂. It is often used in the definition of functions. See Array of Matrices types.

tips & tricks

typecasting matrices

Several functions in PosDefManifold implement multiple dispatch and can handle several kinds of matrices as input; however, the core functions for manipulating objects on the Riemannian manifold of positive definite matrices act by definition on positive definite matrices only. Those matrices must therefore be either symmetric positive definite (SPD, real) or Hermitian positive definite (HPD, complex). Such matrices are uniformly identified in PosDefManifold as being of the Hermitian type, defined in the standard LinearAlgebra package. The alias ℍ is used consistently in the code (see aliases). If the input is not flagged as Hermitian, the functions restricting the input to positive definite matrices will not be accessible.

Example

julia> using LinearAlgebra
 
 julia> f(S::Hermitian)=S*S'
 f (generic function with 1 method)
 julia> f(H)
 ERROR: MethodError: no method matching f(::Array{Float64,2})
 Closest candidates are:
   f(::Hermitian) at none:1

If you construct a positive definite matrix and it is not flagged, you can flag it simply by typecasting it, that is, passing as argument to the functions Hermitian(P) instead of just P. The alias ℍ can be used for short, i.e., ℍ(P). Continuing the example above:

julia> f(ℍ(H))  # this way it works, equivalent to f(Hermitian(H))
 3×3 Array{Float64,2}:
  2.47388  3.74948  4.54381
  3.74948  6.4728   6.21635
 4.54381  6.21635  8.91504

Be careful: Hermitian(P) will construct an Hermitian matrix from the argument. If the matrix argument is not symmetric (if real) or Hermitian (if complex), it will be made so by copying the transpose (if real) or the conjugate transpose (if complex) of a triangular part into the other. See Hermitian.

If you want to construct an ℍVector type from, say, two Hermitian matrices P and Q, don't write A=[P, Q], but rather A=ℍVector([P, Q]). In fact, the first is seen by Julia as

2-element Array{Hermitian{Float64,Array{Float64,2}},1},

while the latter as

2-element Array{Hermitian,1},

which is the type expected in all functions taking an ℍVector type as argument.
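
For example, a minimal sketch (P and Q generated by randP are already flagged as Hermitian):

julia> P, Q = randP(3), randP(3);
julia> A = ℍVector([P, Q]);   # 2-element Array{Hermitian,1}
julia> G = mean(Fisher, A);   # accepted by functions expecting an ℍVector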

Other functions act on generic matrices (of type Matrix). This is seen by Julia as Array{T,2} where T. Keep in mind that the functions writing on the argument matrix, such as normalizeCol!, will give an error if you pass an Hermitian matrix, since Julia does not allow writing on non-diagonal elements of those matrices. In this case typecast it into another object using the Matrix type; supposing H is Hermitian, you would use for example:

julia> X=Matrix(H)
 julia> normalizeCol!(X, 1)
 julia> norm(X[:, 1])
1.0

Some more examples:

  • Typecasting Adjoint matrices:
Matrix(X')
  • here is how to get an Hermitian matrix out of the diagonal part of an Hermitian matrix H:
Hermitian(Matrix(Diagonal(H)))
  • here is how to get a LowerTriangular matrix out of an Hermitian matrix H:
LowerTriangular(Matrix(H))

For example, you can use this to pass a full inter-distance matrix to the laplacian function to obtain the Laplacian matrix.

A useful function is typeofMatrix. For example, the following line typecasts matrix M to the type of matrix P and puts the result in A:

A=typeofMatrix(P)(M)
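
A minimal sketch of this idiom:

julia> P = randP(3);             # Hermitian
julia> M = Matrix(P);            # generic Matrix
julia> A = typeofMatrix(P)(M);   # typecast back: A is Hermitian again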

Threads

Some functions in PosDefManifold explicitly call BLAS routines for optimal performance. This is reported in the help section of the concerned functions. Most functions call BLAS routines implicitly via Julia. You can set the number of threads the BLAS library should use by:

using LinearAlgebra
BLAS.set_num_threads(n)

where n is the number of threads. By default, PosDefManifold reserves to BLAS all CPU threads available on your computer (given by the output of Sys.CPU_THREADS). The number of threads used by Julia for multi-threaded computations is given by the output of the function Threads.nthreads(). In Windows this latter number of threads is set to half the available threads. In Linux and OSX it defaults to one and is controlled by an environment variable, i.e.,

export JULIA_NUM_THREADS=4.

In Linux, working with the Atom IDE, you also have to set to global the 'Number of Threads' field found in Atom under Settings (or Preferences)/julia-client/Settings/Julia Options.

In Windows, set the desired number of threads in the settings of the julia-client Juno package.

See for example this post, this post and the Julia documentation on threads.

Notice that PosDefManifold features many multi-threaded functions and these may allow a gain in computation time only if Julia is instructed to use at least two threads.
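
A minimal sketch for inspecting and setting both thread counts from the REPL:

using LinearAlgebra
BLAS.set_num_threads(Sys.CPU_THREADS)   # threads reserved to BLAS
Threads.nthreads()                      # threads available to Julia code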

diff --git a/docs/build/assets/documenter.js b/docs/build/assets/documenter.js
index 22f0f9a..a1ada46 100644
--- a/docs/build/assets/documenter.js
+++ b/docs/build/assets/documenter.js
@@ -1,15 +1,15 @@
 // Generated by Documenter.jl
 requirejs.config({
   paths: {
-    'highlight-julia': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.15.10/languages/julia.min',
-    'headroom': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.10.3/headroom.min',
+    'highlight-julia': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.0.1/languages/julia.min',
+    'headroom': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.12.0/headroom.min',
     'jqueryui': 'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.1/jquery-ui.min',
-    'katex-auto-render': 'https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.11.1/contrib/auto-render.min',
-    'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.4.1/jquery.min',
-    'headroom-jquery': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.10.3/jQuery.headroom.min',
-    'katex': 'https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.11.1/katex.min',
-    'highlight': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.15.10/highlight.min',
-    'highlight-julia-repl': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.15.10/languages/julia-repl.min',
+    'katex-auto-render': 'https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.13.11/contrib/auto-render.min',
+    'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.6.0/jquery.min',
+    'headroom-jquery': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.12.0/jQuery.headroom.min',
+    'katex': 'https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.13.11/katex.min',
+    'highlight': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.0.1/highlight.min',
+    'highlight-julia-repl': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.0.1/languages/julia-repl.min',
   },
   shim: {
     "highlight-julia": {
@@ -65,11 +65,72 @@ $(document).ready(function() {
 })
 ////////////////////////////////////////////////////////////////////////////////
-require(['jquery', 'highlight', 'highlight-julia', 'highlight-julia-repl'], function($, hljs) {
+require(['jquery', 'highlight', 'highlight-julia', 'highlight-julia-repl'], function($) {
   $(document).ready(function() {
-    hljs.initHighlighting();
+    hljs.highlightAll();
   })
+})
+////////////////////////////////////////////////////////////////////////////////
+require([], function() {
+function addCopyButtonCallbacks() {
+  for (const el of document.getElementsByTagName("pre")) {
+    const button = document.createElement("button");
+    button.classList.add("copy-button", "fas", "fa-copy");
+    el.appendChild(button);
+
+    const success = function () {
+      button.classList.add("success", "fa-check");
+      button.classList.remove("fa-copy");
+    };
+
+    const failure = function () {
+      button.classList.add("error", "fa-times");
+      button.classList.remove("fa-copy");
+    };
+
+    button.addEventListener("click", function () {
+      copyToClipboard(el.innerText).then(success, failure);
+
+      setTimeout(function () {
+        button.classList.add("fa-copy");
+        button.classList.remove("success", "fa-check", "fa-times");
+      }, 5000);
+    });
+  }
+}
+
+function copyToClipboard(text) {
+  // clipboard API is only available in secure contexts
+  if (window.navigator && window.navigator.clipboard) {
+    return window.navigator.clipboard.writeText(text);
+  } else {
+    return new Promise(function (resolve, reject) {
+      try {
+        const el = document.createElement("textarea");
+        el.textContent = text;
+        el.style.position = "fixed";
+        el.style.opacity = 0;
+        document.body.appendChild(el);
+        el.select();
+        document.execCommand("copy");
+
+        resolve();
+      } catch (err) {
+        reject(err);
+      } finally {
+        document.body.removeChild(el);
+      }
+    });
+  }
+}
+
+if (document.readyState === "loading") {
+  document.addEventListener("DOMContentLoaded", addCopyButtonCallbacks);
+} else {
+  addCopyButtonCallbacks();
+}
+
 })
 ////////////////////////////////////////////////////////////////////////////////
 require(['jquery', 'headroom', 'headroom-jquery'], function($, Headroom) {
@@ -208,6 +269,10 @@ $(document).ready(function() {
     $('#documenter-themepicker option').each(function(i,e) {
       e.selected = (e.value === theme);
     })
+  } else {
+    $('#documenter-themepicker option').each(function(i,e) {
+      e.selected = $("html").hasClass(`theme--${e.value}`);
+    })
   }
 }
 })
diff --git a/docs/build/assets/search.js b/docs/build/assets/search.js
index 42a273f..1a51454 100644
--- a/docs/build/assets/search.js
+++ b/docs/build/assets/search.js
@@ -1,9 +1,9 @@
 // Generated by Documenter.jl
 requirejs.config({
   paths: {
-    'lunr': 'https://cdnjs.cloudflare.com/ajax/libs/lunr.js/2.3.6/lunr.min',
-    'lodash': 'https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.15/lodash.min',
-    'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.4.1/jquery.min',
+    'lunr': 'https://cdnjs.cloudflare.com/ajax/libs/lunr.js/2.3.9/lunr.min',
+    'lodash': 'https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.21/lodash.min',
+    'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.6.0/jquery.min',
   }
 });
 ////////////////////////////////////////////////////////////////////////////////
@@ -46,7 +46,7 @@ $(document).ready(function() {
 })
   // list below is the lunr 2.1.3 list minus the intersect with names(Base)
-  // (all, any, get, in, is, which) and (do, else, for, let, where, while, with)
+  // (all, any, get, in, is, only, which) and (do, else, for, let, where, while, with)
   // ideally we'd just filter the original list but it's not available as a variable
   lunr.stopWordFilter = lunr.generateStopWordFilter([
     'a',
@@ -112,7 +112,6 @@ $(document).ready(function() {
     'off',
     'often',
     'on',
-    'only',
     'or',
     'other',
     'our',
@@ -182,7 +181,7 @@ $(document).ready(function() {
   var store = {}
   documenterSearchIndex['docs'].forEach(function(e) {
-    store[e.location] = {title: e.title, category: e.category}
+    store[e.location] = {title: e.title, category: e.category, page: e.page}
   })
 $(function(){
@@ -196,14 +195,14 @@ $(document).ready(function() {
       q.term(t.toString(), {
         fields: ["title"],
         boost: 100,
-        usePipeline: false,
+        usePipeline: true,
         editDistance: 0,
         wildcard: lunr.Query.wildcard.NONE
       })
       q.term(t.toString(), {
         fields: ["title"],
         boost: 10,
-        usePipeline: false,
+        usePipeline: true,
         editDistance: 2,
         wildcard: lunr.Query.wildcard.NONE
       })
@@ -222,7 +221,11 @@ $(document).ready(function() {
       data = store[result.ref]
       link = $('<a>'+data.title+'</a>')
       link.attr('href', documenterBaseURL+'/'+result.ref)
-      cat = $('('+data.category+')')
+      if (data.category != "page"){
+        cat = $('('+data.category+', '+data.page+')')
+      } else {
+        cat = $('('+data.category+')')
+      }
       li = $('<li>').append(link).append(" ").append(cat)
       searchresults.append(li)
     })
diff --git a/docs/build/index.html b/docs/build/index.html
index 5b62ec2..8fd6892 100644
--- a/docs/build/index.html
+++ b/docs/build/index.html


    PosDefManifold Documentation

    Requirements

    Julia version ≥ 1.3

    Installation

    Execute the following command in Julia's REPL:

    ]add PosDefManifold

    To obtain the latest development version execute instead

    ]add PosDefManifold#master

    About the Author

    Marco Congedo is a Research Director of CNRS (Centre National de la Recherche Scientifique), working in Grenoble, France.

    Overview

    Figure 1

    Riemannian geometry studies smooth manifolds, multi-dimensional curved spaces with peculiar geometries endowed with non-Euclidean metrics. In these spaces Riemannian geometry allows the definition of angles, geodesics (shortest path between two points), distances between points, centers of mass of several points, etc.

    In this package we are concerned with the manifold P of positive definite matrices, either symmetric positive definite or Hermitian positive definite.

    In several fields of research such as computer vision and brain-computer interface, treating data in the P manifold has allowed the introduction of machine learning approaches with remarkable characteristics, such as simplicity of use and excellent classification accuracy, as demonstrated by the winning score obtained in six international data classification competitions, and the ability to operate transfer learning (Congedo et al., 2017)🎓.

    For a formal introduction to the P manifold the reader is referred to the monograph written by Bhatia (2007)🎓.

    For an introduction to Riemannian geometry and an overview of mathematical tools implemented in this package, see Intro to Riemannian Geometry in this documentation.

    To start using this package, browse the code units listed below and execute the many code examples you will find therein. The core functions are contained in unit riemannianGeometry.jl.

    Code units

    PosDefManifold includes six code units (.jl files):

    Unit                              Description
    MainModule (PosDefManifold.jl)    Main module, constants, types, aliases, tips & tricks
    riemannianGeometry.jl             The fundamental unit collecting all functions acting on the P manifold
    linearAlgebra.jl                  Collection of linear algebra routines
    statistics.jl                     Collection of statistics routines
    signalProcessing.jl               Collection of signal processing routines
    test.jl                           Unit performing all tests

    Contents

    Index

diff --git a/docs/build/introToRiemannianGeometry/index.html b/docs/build/introToRiemannianGeometry/index.html
index 8f6c6f1..8b0f552 100644
--- a/docs/build/introToRiemannianGeometry/index.html
+++ b/docs/build/introToRiemannianGeometry/index.html

    Intro to Riemannian Geometry

    The study of appropriate distance measures for positive definite matrices has recently grown very fast, driven by practical problems in radar data processing, image processing, computer vision, shape analysis, medical imaging (especially diffusion MRI and Brain-Computer Interface), sensor networks, elasticity, mechanics, numerical analysis and machine learning (e.g., see references in Congedo et al., 2017a)🎓.

    In many applications the observed data can be conveniently summarized by positive definite matrices, which are either symmetric positive definite (SPD: real) or Hermitian Positive Definite (HPD: complex). For example, those may be some form of the data covariance matrix in the time, frequency or time-frequency domain, or autocorrelation matrices, kernels, slices of tensors, density matrices, elements of a search space, etc. Positive definite matrices are naturally treated as points on a smooth Riemannian manifold allowing useful operations such as interpolation, smoothing, filtering, approximation, averaging, signal detection and classification. Such operations are the object of the present PosDefManifold library.

    More formally, this Julia library treats operations on the metric space $(P, δ^2)$ of $n⋅n$ positive definite matrices endowed with a distance or symmetric divergence $δ: P×P→[0, ∞]$. Several matrix distances or matrix divergences $δ$ are considered. Using some of them, the most important one being the Fisher metric, we define a Riemannian manifold. In mathematics, this is the subject of Riemannian geometry and information geometry.

    Note that throughout this library the word 'metric' is used loosely to refer to the actual Riemannian metric on the tangent space, to the resulting distance, or to a general symmetric divergence acting on P, regardless of whether we are dealing with a metric in the strict sense and whether it induces a Riemannian geometry in P. This is done for convenience of exposition, since in practice those 'metrics' in PosDefManifold may be used interchangeably.

    Riemannian manifolds

    Here are some important definitions:

    A smooth manifold in differential geometry is a topological space that is locally similar to the Euclidean space and has a globally defined differential structure.

    The tangent space at point $G$ is the vector space containing the tangent vectors to all curves on the manifold passing through $G$ (Fig. 1).

    A smooth Riemannian manifold is equipped with an inner product on the tangent space (a Riemannian metric) defined at each point and varying smoothly from point to point. For manifold P the tangent space is the space of symmetric or Hermitian matrices.

    Thus, a Riemannian metric turns the metric space $($P$, δ^2)$ into a Riemannian manifold. This is the case, for example, of the Fisher metric, which has a fundamental role in the manifolds of positive definite matrices and of the Wasserstein metric, fundamental in optimal transport theory.

    Figure 1. Schematic illustration of the Riemannian manifold of positive definite matrices. Left: geodesic joining points $P$ and $Q$ passing through its mid-point (mean) $G$ (green curve), tangent space at point $G$ with tangent vectors to the geodesic from $G$ to $P$ and from $G$ to $Q$ (blue arrowed lines) and distance $δ(G, Q)$. Right: the center of mass (also named mean) $G$ of points $P_1,…,P_4$, defined as the point minimizing the sum of the four squared distances $δ²(G, P_i)$, for $i={1,…,4}$.

    geodesic

    The key object in the P manifold is the geodesic, loosely defined as the shortest path joining two points $P$ and $Q$ on the manifold, analogous to straight lines in the Euclidean space (Fig. 1). The geodesic equation with arclength $0≤a≤1$ is the equation of the points along the path, denoted $\gamma(P, Q, a)$, where with $a=0$ we stay at $P$ and with $a=1$ we move all the way to $Q$. The points along the geodesic in between $P$ and $Q$ $(0<a<1)$ can be understood as weighted means of $P$ and $Q$. For example, the geodesic equation according to the Euclidean metric is $(1-a)P + aQ$, which is the traditional way to define weighted means. With the metrics we consider here, geodesics are unique and always exist. Furthermore, as we will see, using the Fisher metric those geodesics extend indefinitely, i.e., they are defined and always remain positive definite for $-∞<a<∞$.
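
    For concreteness, a minimal sketch using the geodesic function of PosDefManifold (randP generates a random SPD matrix):

    using PosDefManifold
    P, Q = randP(4), randP(4)
    G = geodesic(Fisher, P, Q, 0.5)   # mid-point: the Fisher (geometric) mean of P and Q
    R = geodesic(Fisher, P, Q, 1.5)   # extrapolation beyond Q, still positive definite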

    distance

    The length of the geodesic (at constant velocity) between two points gives the distance $δ(P, Q)$. The distance is always real, non-negative and equal to zero if and only if $P=Q$.

    distance from the origin

    In contrast to an Euclidean space, the origin of the P manifold endowed with the Fisher metric is not $0_n$, but $I_n$, the identity matrix of dimension $n⋅n$. The distance between a point $P$ and the origin, i.e., $δ(P, I)$, is analogous therein to the length of vectors in Euclidean space. This Riemannian manifold is symmetric around $I_n$, i.e., $δ(P, I)=δ(P^{-1}, I)$ and $δ(P, Q)=δ(P^{-1}, Q^{-1})$. This will be made more precise when we talk about invariances.

    mean

    The mid-point on the geodesic joining $P$ and $Q$ is named the mean. Using the Euclidean metric this is the arithmetic mean of $P$ and $Q$, and using the inverse Euclidean metric it is their harmonic mean. As we will see, those are straightforward extensions of their scalar counterparts. Using the Fisher metric, the mid-point of the geodesic joining $P$ and $Q$ allows the proper generalization to matrices of the scalars' geometric mean. The other metrics allow other definitions of means (see below).

    Fréchet mean

    Using Fréchet's variational approach we can extend to positive-definite matrices the concept of weighted mean of a set of scalars; as the midpoint $G$ on the geodesic joining $P$ and $Q$ is the minimizer of $δ^2(P, G)+δ^2(Q, G)$, so the mean $G$ of points $P_1, P_2,...,P_k$ is the matrix $G$ verifying

    $\textrm{argmin}_{G}\sum_{i=1}^{k}δ^2(P_i,G).$

    Thus, every metric induces a distance (or divergence) function, which, in turn, induces a mean.
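
    As a sketch of how this plays out in PosDefManifold, the metric passed to the mean function selects the induced Fréchet mean:

    using PosDefManifold
    𝐏 = ℍVector([randP(4) for i = 1:10])
    G₁ = mean(Euclidean, 𝐏)      # arithmetic mean
    G₂ = mean(invEuclidean, 𝐏)   # harmonic mean
    G₃ = mean(Fisher, 𝐏)         # geometric mean (iterative)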

    invariances

    An important characteristic of metrics is that they may induce invariance properties on the distance, which are in turn inherited by the mean.

    Let us denote shortly by $\{P_i\}$ the set $\{P_1,...,P_k\}$, where $i=\{1,...,k\}$, and by $G\{P_i\}$ the Fréchet mean of the set (in this section we drop the weights to keep the notation short). The most important invariance properties are:

    invariance    effect on distance $δ(P,Q)$      effect on mean $G\{P_i\}$
    rotation      $δ(P,Q)=δ(U^HPU,U^HQU)$          $G\{U^HP_iU\}=U^HG\{P_i\}U$
    affinity      $δ(P,Q)=δ(B^HPB,B^HQB)$          $G\{B^HP_iB\}=B^HG\{P_i\}B$
    inversion     $δ(P,Q)=δ(P^{-1},Q^{-1})$        $G\{P_i^{-1}\}=G^{-1}\{P_i\}$

    for any unitary $U$ and non-singular $B$.

    The affine invariance implies the rotation invariance and is also named congruence invariance.
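
    A quick numerical check of the affine invariance under the Fisher metric (a sketch; ℍ re-flags the congruence as Hermitian):

    using LinearAlgebra, PosDefManifold
    P, Q = randP(4), randP(4)
    B = randn(4, 4)                               # non-singular with probability 1
    d₁ = distance(Fisher, P, Q)
    d₂ = distance(Fisher, ℍ(B'*P*B), ℍ(B'*Q*B))   # d₂ ≈ d₁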

    metrics

    We are interested in distance or divergence functions, the difference between the two being that a divergence does not need to be symmetric nor to satisfy the triangle inequality. Note that in PosDefManifold we consider only distances and symmetric divergences. In fact those are of greater interest in practice. One can find several distances and divergences in the literature and they often turn out to be related to each other, see for example (Chebby and Moakher, 2012; Cichocki et al., 2015; Sra, 2016)🎓. Ten of them are implemented in PosDefManifold and two of them are Riemannian metrics (the Fisher and Wasserstein metric as we have said). In this section we give a complete list of the expressions for their induced

    • distance of a point $P$ from the origin,
    • distance between two points $P$ and $Q$,
    • geodesic joining $P$ to $Q$ (hence the weighted means of $P$ and $Q$)
    • weighted Fréchet mean $G(P,w)$ of a set of $k>2$ points $\{P_1,...,P_k\}$ with associated real non-negative weights $\{w_1,...,w_k\}$ summing up to 1.
    Nota Bene

    In the following, the weights $\{w_1,...,w_k\}$ are always supposed summing up to 1, superscript $H$ indicate conjugate transpose (or just transpose if the matrix is real) and if $a$ is the arclength of a geodesic, we define for convenience $b=1-a$.

    Euclidean

    This is the classical Euclidean distance leading to the usual arithmetic mean. In general this metric is not well adapted to the P manifold. It verifies only the rotation invariance, however the mean also verifies the congruence invariance.

    distance² to $I$: $∥P-I∥^2$
    distance²: $∥P-Q∥^2$
    geodesic: $bP + aQ$
    Fréchet mean: $\sum_{i=1}^{k}w_i P_i$

    inverse Euclidean

    This is the classical harmonic distance leading to the harmonic mean. It verifies only the rotation invariance, however the mean also verifies the congruence invariance.

    distance² to $I$: $∥P^{-1}-I∥^2$
    distance²: $∥P^{-1}-Q^{-1}∥^2$
    geodesic: $\big(bP^{-1} + aQ^{-1}\big)^{-1}$
    Fréchet mean: $\big(\sum_{i=1}^{k}w_i P_i^{-1}\big)^{-1}$

    Cholesky Euclidean

    This is a very simple metric that has been proposed as an attempt to improve on the Euclidean one. It is rarely used (see for example Dai et al., 2016)🎓. It does not verify any invariance. Let $L_P$ be the lower triangular Cholesky factor of $P$, then

    distance² to $I$: $∥L_P-I∥^2$
    distance²: $∥L_P-L_Q∥^2$
    geodesic: $(bL_P+aL_Q)(bL_{P}+aL_{Q})^H$
    Fréchet mean: $\big(\sum_{i=1}^{k}w_i L_{P_i}\big)\big(\sum_{i=1}^{k}w_i L_{P_i}\big)^H$

    log Euclidean

    If matrices $\{P_1,...,P_k\}$ all pair-wise commute, then this metric coincides with the Fisher metric. See (Arsigny et al., 2007 ; Bhatia et al., 2019a)🎓. It enjoys the rotation and inversion invariance. The log-Euclidean distance to $I$ is the same as per the Fisher metric. This mean has the same determinant as the Fisher mean, and trace equal or superior to the trace of the Fisher mean. A minimum trace log Euclidean mean approximating well the Fisher mean has been proposed in Congedo et al. (2015)🎓.

    distance² to $I$: $∥\textrm{log}(P)∥^2$
    distance²: $∥\textrm{log}(P)-\textrm{log}(Q)∥^2$
    geodesic: $\textrm{exp}\big(b\textrm{log}P + a\textrm{log}Q\big)$
    Fréchet mean: $\textrm{exp}\big(\sum_{i=1}^{k}w_i\hspace{1pt}\textrm{log}P_i\big)$

    log Cholesky

    It is a recently proposed distance in P. Like the Cholesky Euclidean metric here above, it exploits the diffeomorphism between matrices in P and their Cholesky factor, such that $L_PL_P^H=P$, thanks to the fact that the Cholesky factor is unique and that the map is smooth (Lin, 2019)🎓. The mean has the same determinant as the Fisher and log-Euclidean mean.

    Let $L_X$, $S_X$ and $D_X$ be the lower triangle, the strictly lower triangle and the diagonal part of $X$, respectively (hence, $S_X+D_X=L_X$), then

    Distance² to $I$: $∥S_P-I∥^2+∥\textrm{log}D_P∥^2$
    Distance²: $∥S_P-S_Q∥^2+∥\textrm{log}D_P-\textrm{log}D_Q∥^2$

    geodesic: $S_P+a(S_Q-S_P)+D_P\hspace{2pt}\textrm{exp}\big(a\textrm{log}D_Q-a\textrm{log}D_P\big)$

    Fréchet mean: $TT^H$, where $T=\sum_{i=1}^{k}w_iS_{P_i}+\sum_{i=1}^{k}w_i\textrm{log}D_{P_i}$

    Fisher

    The Fisher metric, also known as the affine-invariant, natural or Fisher-Rao metric, among other names, has a paramount importance for the P manifold, standing out as the natural choice both from the perspective of differential geometry and information geometry. Endowed with the Fisher metric the manifold P is Riemannian, has nonpositive curvature and is symmetric. This metric verifies all three invariances we have considered.

    Distance² to $I$: $∥\textrm{log}(P)∥^2$
    Distance²: $∥\textrm{log}(P^{-1/2}QP^{-1/2})∥^2$
    geodesic: $P^{1/2} \big(P^{-1/2} Q P^{-1/2}\big)^a P^{1/2}$

    Fréchet mean: it does not have a closed-form solution in general. The solution is the unique positive definite matrix $G$ satisfying (Bhatia and Holbrook, 2006; Moakher, 2005)🎓:

    $\sum_{i=1}^{k}w_i\textrm{log}\big(G^{-1/2} P_i G^{-1/2}\big)=0.$

    For estimating it, PosDefManifold implements the well-known gradient descent algorithm, resulting in iterations:

    $G ←G^{1/2}\textrm{exp}\big(\sum_{i=1}^{k}w_i\textrm{log}(G^{-1/2} P_i G^{-1/2})\big)G^{1/2}.$

    Alternatively, and more efficiently, one can ask for an approximate solution invoking the MPM algorithm (Congedo et al., 2017b)🎓, which is also implemented (in order to estimate the geometric mean use function powerMean with parameter $p=0$ or with a very small value of $p$).
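
    A sketch of both estimators; the iterative solvers of PosDefManifold return the mean together with the number of iterations and the attained convergence:

    using PosDefManifold
    𝐏 = ℍVector([randP(10) for i = 1:20])
    G, iter, conv = geometricMean(𝐏)      # gradient descent algorithm
    H, iter, conv = powerMean(𝐏, 0.001)   # MPM with a very small p; H ≈ G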

    This mean is known under many different names (Fisher, Rao, Fisher-Rao, Pusz-Woronowicz, Cartan, Fréchet, Karcher, geometric...). The ‘centrality’ of this mean among a wide family of divergence-based means can be appreciated in Fig. 4 of Cichocki et al. (2015)🎓.

    The geometric mean $G$ of two matrices $P$ and $Q$ is denoted $\gamma(P, Q, \frac{1}{2})$. Currently it is an object of intense study because of its interesting mathematical properties. For instance,

    • it is the unique solution to Riccati equation $GQ^{-1}G=P$
    • it is equal to $F^{-H}D_1^{1/2}D_2^{1/2}F^{-1}$ for whatever joint diagonalizer $F$ of $P$ and $Q$, i.e., for whatever matrix $F$ satisfying $F^HPF=D_1$ and $F^HQF=D_2$, with $D_1$, $D_2$ non-singular diagonal matrices (Congedo et al., 2015)🎓.
    • it enjoys all 10 properties of means postulated in the seminal work of Ando et al. (2010)🎓.

    When $P$ and $Q$ commute, the Fisher mean of two matrices reduces to $P^{1/2}Q^{1/2}$, which in this case is also the log-Euclidean mean $\textrm{exp}\big(\frac{1}{2}\textrm{log}P + \frac{1}{2}\textrm{log}Q\big)$.

    We denote the Fisher geodesic equation as $\gamma(P, Q, a)$. Note that $\gamma(I, P, a)=P^a$ and $\gamma(P, I, a)=P^{b}$, where $b=1-a$.

    Fisher geodesic equation verifies $\gamma(P, Q, a)=\gamma(Q, P, b)$ and $(\gamma(P, Q, a))^{-1}=\gamma(P^{-1}, Q^{-1}, a)$.

    An interesting property of the Fisher metric is that using its geodesic equation we can extrapolate positive matrices, always remaining in P. That is, using any real value of $a$ :

    • with $0 < a < 1$ we move toward $Q$ (attraction),
    • with $a > 1$ we move over and beyond $Q$ (extrapolation) and
    • with $a< 0$ we move back away from $Q$ (repulsion).

    Something similar can be done using the log Cholesky metric as well.

    power means

    The arithmetic, harmonic and geometric mean we have encountered are all members of the 1-parameter family of power means (with parameter $p∊[-1, 1]$) introduced by Lim and Palfia (2012)🎓 to generalize the concept of power means of scalars (also known as Hölder means or generalized means). The family of power means $G$ with parameter $p$ satisfies equation

    $G=\sum_{i=1}^{k}w_i\gamma(G, P_i, p)$,

    where $\gamma(G, P_i, p)$ is the Fisher geodesic equation we have discussed here above talking about the Fisher metric. In particular:

    • with $p=-1$ this is the harmonic mean (see the inverse Euclidean metric)
    • with $p=+1$ this is the arithmetic mean (see the Euclidean metric)
    • at the limit of $p$ evaluated at zero from both side this is the geometric mean (see the Fisher metric).

    Thus, the family of power means continuously interpolates between the arithmetic and harmonic mean, passing through the geometric mean.

    Power means are the unique positive definite solution of (Yamazaki, 2019)🎓

    $\sum_{i=1}^{k}w_i\big(G^{-1/2} P_i G^{-1/2}\big)^p=I$.

    All power means enjoy the congruence invariance (hence the rotation invariance), but only the geometric mean enjoys also the inversion invariance.

    The power mean with $p=\frac{1}{2}$ is the solution of the Fréchet mean problem using the following divergence (Bhatia, Gaubert and Jain, 2019)🎓

    $δ^2(P,Q)=\textrm{tr}(P+Q)-2\textrm{tr}\gamma(P, Q, \frac{1}{2}) = \textrm{tr}(\textrm{arithm. mean}(P, Q)) - \textrm{tr}(\textrm{geom. mean}(P, Q)).$
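
    In code, a minimal sketch (powerMean covers the whole family for $p∊[-1, 1]$):

    using PosDefManifold
    𝐏 = ℍVector([randP(6) for i = 1:8])
    G, iter, conv = powerMean(𝐏, 0.5)   # power mean with p = 1/2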

    generalized means

    When the matrices in the set all pairwise commute, it has been proved in Lim and Palfia (2012, see Property 1, p. 1502) 🎓 that the power means we have just seen reduce to

    $\big(\sum_{i=1}^{k}w_iP_i^p\big)^{1/p}$,

    which are the straightforward extension of scalar power means (see generalized means) to matrices. As usual, such straightforward extensions work well in commuting algebra, but not in general. See for example the case of the mean obtained using the log Euclidean metric, which is the straightforward extension to matrices of the scalar geometric mean, but is not the matrix geometric mean, unless the matrices all pairwise commute.

    Both the generalized means and the power means have a parameter $p∊[-1, 1]$. For the latter, the solution is implemented via the fixed-point MPM algorithm (Congedo et al., 2017b)🎓.

    modified Bhattacharyya mean

    If matrices $P_1, P_2,...,P_k$ all pair-wise commute, the special case $p=\frac{1}{2}$ yields the following instance of power means (and of generalized means):

    $\big(\sum_{i=1}^{k}w_iP_i^{1/2}\big)^{1/2}$.

    This mean has been proposed in a different context by Moakher (2012)🎓 as a modified Bhattacharyya mean, since it is a modification of the Bhattacharyya mean we will encounter next under the name logdet zero. It is worth noting that in commuting algebra Moakher’s mean also corresponds to the mean obtained with the Wasserstein metric.

    logdet zero

    The logdet zero divergence, also known as the square of the Bhattacharyya divergence (Mohaker, 2013)🎓, Stein divergence (Harandi et al., 2016)🎓, symmetrized Jensen divergence, the S-divergence (Sra, 2016)🎓 or the log determinant α-divergence (with α=0, Chebby and Moakher, 2012 🎓) is a Jensen-Bregman symmetric divergence enjoying all three invariances we have listed.

    Its square root has been shown to be a distance (Sra, 2016)🎓. It behaves very similarly to the Fisher metric at short distances (Moakher, 2012; Sra, 2016; Cichocki et al., 2015; Harandi et al., 2016) 🎓 and the mean of two matrices in P is the same as the Fisher mean (Harandi et al., 2016) 🎓. Thus, it has often been used instead of the Fisher metric because it allows more efficient calculations. In fact, the calculation of this distance requires only three Cholesky decompositions, whereas the computation of the Fisher distance involves extracting generalized eigenvalues.

    distance² to $I$distance²
    $\textrm{logdet}\frac{1}{2}(P+I)-\frac{1}{2}\textrm{logdet}(P)$$\textrm{logdet}\frac{1}{2}(P+Q)-\frac{1}{2}\textrm{logdet}(PQ)$

    geodesic: we use the Fréchet mean with appropriate weights.

    Fréchet mean: the solution is the unique positive definite matrix $G$ satisfying

    $\sum_{i=1}^{k}w_i\big(\frac{1}{2}P_i+\frac{1}{2}G\big)^{-1}=G^{-1}$.

    For estimating it PosDefManifold implements the fixed-point iterations (Moakher, 2012, p315)🎓:

    $G ← \frac{k}{2}\big(\sum_{i=1}^{k}w_i(P_i+G)^{-1}\big)^{-1}$.

    The logdet zero divergence between $P$ and $Q$ can also be written as the log-determinant of their arithmetic mean minus the log-determinant of their geometric mean (Moakher, 2012)🎓, which thus defines a possible extension to matrices of the useful concept of Wiener entropy.

    logdet α

    The log determinant $α$-divergence family for $α∊[-1…1]$ (Chebby and Moakher, 2012)🎓 allows

    • the logdet zero mean for $α=0$,
    • the left Kullback-Leibler mean for $α=-1$ (which is the harmonic mean)
    • the right Kullback-Leibler mean for $α=1$ (which is the arithmetic mean).

    We do not consider the left and right Kullback-Leibler divergences because the related means are trivially the arithmetic and harmonic one (Moakher, 2012). As per the symmetrized Kullback-Leibler divergence, this is known as Jeffrey divergence and will be considered next. The log determinant $α$-divergence family of means is not implemented in PosDefManifold (besides the special cases $α=(-1, 0, 1)$, since the family of power means are implemented.

    Jeffrey

    This is a Jensen-Bregman symmetric divergence, also known as the symmetrized Kullback-Leibler divergence (see logdet α) (Faraki et al., 2015)🎓. It enjoyes all three invariances we have listed.

    distance² to $I$distance²
    $\frac{1}{2}\textrm{tr} \big(P+P^{-1}\big)-n$$\frac{1}{2}\textrm{tr}(Q^{-1}P+P^{-1}Q)-n$

    geodesic: we use the Fréchet mean with appropriate weights.

    Fréchet mean: $A^{1/2}\big(A^{-1/2}HA^{-1/2}\big)^{1/2}A^{1/2}$, where $A$ is the arithmetic mean (see Euclidean metric) and $H$ is the harmonic mean (see inverse Euclidean metric). Thus, the weighted Fréchet mean is the geometric mean (see Fisher metric) of the arithmetic and harmonic mean (Moakher, 2012)🎓.

    Note that this is the geometric mean only for $k=2$, that is, for scalars, but not in general for matrices, the geometric mean is the geometric mean of the arithmetic mean and harmonic mean (the only metric inducing the geometric mean in general is the Fisher mean).

    Von Neumann

    The Von Neumann divergence is a Jensen-Bregman symmetric divergence (Sra, 2016; Taghia et al., 2019)🎓. It enjoyes only the rotation invariance.

    distance² to $I$distance²
    $\frac{1}{2}\textrm{tr}(P\textrm{log}P-\textrm{log}P)$$\frac{1}{2}\textrm{tr}\big(P(\textrm{log}P-\textrm{log}Q)+Q(\textrm{log}Q-\textrm{log}P)\big)$

    The geodesic and weighted Fréchet mean for this metric are not available.

    Wasserstein

    This is an extension to matrices of the Hellinger divergence for vectors and is also known as the Bures divergence in quantum physics, where it is applied on density matrices (unit trace positive-definite matrices). It enjoyes only the rotation invariance. Endowed with the Wasserstein metric the manifold P has a Riemannian geometry of nonnegative curvature. See ( Bhatia et al., 2019a; Bhatia et al., 2019b)🎓.

    distance² to $I$distance²
    $\textrm{tr}(P+I)-2\textrm{tr}(P^{1/2})$$\textrm{tr}(P+Q) -2\textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2}$
    geodesic
    $b^2P+a^2Q +ab\big[(PQ)^{1/2} +(QP)^{1/2}\big]$

    The quantity $\textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2}$ is known in quantum physics as the fidelity of $P$ and $Q$ when those are density matrices (unit-trace positive definite matrices).

    Fréchet mean: the solution is the unique positive definite matrix $G$ satisfying (Agueh and Carlier, 2011) 🎓

    $G=\sum_{i=1}^{k}w_i\big( G^{1/2} P_i G^{1/2}\big)^{1/2}$.

    For estimating it, PosDefManifold implements the fixed-point algorithm of Álvarez-Esteban et al. (2016)🎓, giving iterations:

    $G ← G^{-1/2} \big(\sum_{i=1}^{k} w_i(G^{1/2}P_i G^{1/2})^{1/2}\big)^2 G^{-1/2}$

    In the special case when the matrices all pair-wise commute, the Wasserstein mean is equal to the instance of power means and generalized means with $p=\frac{1}{2}$ (Bhatia, Jain and Lim, 2019b)🎓, that is, to the modified Bhattacharyya mean.

    In the special case $k$=2 and equal weight the mean is $W=\frac{1}{4}\big(P+Q+(PQ) ^{1/2}+(QP)^{1/2}\big)$.

    🎓

    References

    M. Agueh, G. Carlier (2011) Barycenters in the Wasserstein space, SIAM J. Mat. Anal. Appl. 43, 904-924.

    P. C. Álvarez-Esteban, E. del Barrio, J.A. Cuesta-Albertos, C. Matrána (2016) A fixed-point approach to barycenters in Wasserstein space, Journal of Mathematical Analysis and Applications, 441(2), 744-762.

    T. Ando, C.-K. Li, R. Mathias (2004) Geometric means, Linear Algebra and its Applications, 385(1), 305-334.

    V. Arsigny, P. Fillard, X. Pennec, N. Ayache (2007) Geometric means in a novel vector space structure on symmetric positive-definite matrices, SIAM journal on matrix analysis and applications, 29(1), 328-347.

    A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2012) Multi-class Brain Computer Interface Classification by Riemannian Geometry, IEEE Transactions on Biomedical Engineering, 59(4), 920-928.

    A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2013) Classification of covariance matrices using a Riemannian-based kernel for BCI applications, Neurocomputing, 112, 172-178.

    R. Bhatia (2007) Positive Definite Matrices. Princeton University press.

    R. Bhatia, M. Congedo (2019) Procrustes problems in manifolds of positive definite matrices Linear Algebra and its Applications, 563, 440-445.

    R. Bhatia, S. Gaubert, T. Jain (2019) Matrix versions of the Hellinger distance, arXiv:1901.01378.

    R. Bhatia, J. Holbrook (2006) Riemannian geometry and matrix geometric means, Linear Algebra and its Applications, 413 (2-3), 594-618.

    R. Bhatia, T. Jain (2010) Approximation problems in the Riemannian metric on positive definite matrices, Ann. Funct. Anal., 5(2), 118-126.

    R. Bhatia, T. Jain,Y. Lim (2019a) Inequalities for the Wasserstein mean of positive definite matrices, Linear Algebra and its Applications, in press.

    R. Bhatia, T. Jain, Y. Lim (2019b) On the Bures-Wasserstein distance between positive definite matrices Expositiones Mathematicae, in press.

    Z. Chebbi, M. Moakher (2012) Means of Hermitian positive-definite matrices based on the log-determinant α-divergence function, Linear Algebra and its Applications, 436(7), 1872-1889.

    A. Cichocki, S. Cruces, S-I- Amari (2015) Log-Determinant Divergences Revisited: Alpha-Beta and Gamma Log-Det Divergences, Entropy, 17(5), 2988-3034.

    R.R. Coifman, Y. Shkolnisky, F.J. Sigworth, A. Singer (2008) Graph Laplacian Tomography From Unknown Random Projections, IEEE Transactions on Image Processing, 17(10), 1891-1899.

    M. Congedo, B. Afsari, A. Barachant, M Moakher (2015) Approximate Joint Diagonalization and Geometric Mean of Symmetric Positive Definite Matrices, PLoS ONE 10(4): e0121423.

    M. Congedo, A. Barachant, R. Bhatia R (2017a) Riemannian Geometry for EEG-based Brain-Computer Interfaces; a Primer and a Review, Brain-Computer Interfaces, 4(3), 155-174.

    M. Congedo, A. Barachant, E. Kharati Koopaei (2017b) Fixed Point Algorithms for Estimating Power Means of Positive Definite Matrices, IEEE Transactions on Signal Processing, 65(9), 2211-2220.

    X. Dai, S. Khamis, Y. Zhang, L.S. Davis (2016) Parameterizing region covariance: an efficient way to apply sparse codes on second order statistics, arXiv:1602.02822.

    M. Faraki, M. Harandi, F. Porikli (2015) More About VLAD: A Leap from Euclidean to Riemannian Manifolds, IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston.

    W. Förstner, B. Moonen (1999) A metric for covariance matrices, In Krumm K and Schwarze VS eds. Qho vadis geodesia...?, number 1999.6 in tech. report of the Dep. Of Geodesy and Geoinformatics, p.113–128, Stuttgart University.

    M.T. Harandi, R. Hartley, B. Lovell, C. Sanderson (2016) Sparse coding on symmetric positive definite manifolds using bregman divergences, IEEE transactions on neural networks and learning systems, 27 (6), 1294-1306.

    N.J. Higham (1988) Computing a Nearest Symmetric Positive Semidefinite Matrix Linear Algebra and its Applications, 103, 103-118.

    J. Ho, G. Cheng, H. Salehian, B.C. Vemuri (2013) Recursive Karcher Expectation Estimators and Geometric Law of Large Numbers, Proc. of the AISTATS Conf.

    B. Iannazzo (2016) The geometric mean of two matrices from a computational viewpoint Numerical Linear Algebra with Applications, 23-2, 208-229.

    S. Lafon (2004) Diffusion maps and geometric harmonics, Ph.D. dissertation, Yale University, New Heaven, CT.

    Y. Lim, M. Pálfia (2012) Matrix power means and the Karcher mean, Journal of Functional Analysis, 262(4), 1498-1514.

    Z. Lin (2019) Riemannian Geometry of Symmetric Positive Definite Matrices via Cholesky Decomposition, in press.

    E. Massart, J.M. Hendrickx, P.-A. Absil (2018) Matrix Geometric Meansbased on shuffled inductive sequences Linear Algebra and its Aplications, 252, 334-359.

    M. Moakher (2005) A Differential Geometric Approach to the Geometric Mean of Symmetric Positive-Definite Matrices, SIAM Journal on Matrix Analysis and Applications, 26(3), 735-747.

    M. Moakher (2012) Divergence measures and means of symmetric positive-definite matrices, in D.H Lailaw and A. Vilanova (Eds) "New Developments in the Visualization and Processing of Tensor Fields", Springer, Berlin.

    C. Mostajeran, C. Grussler, R. Sepulchre (2019) Geometric Matrix Midranges arXiv:1907.04188.

    X. Pennec, P. Fillard, N. Ayache (2006) A Riemannian Framework for Tensor Computing, International Journal of Computer Vision, 66(1), 41-66.

    P.L.C. Rodrigues, M. Congedo, C Jutten (2018) Multivariate Time-Series Analysis Via Manifold Learning, in Proc. of the the IEEE Statistical Signal Processing Workshop (SSP 2018), Fribourg-en-Brisgau, Germany.

    S. Sra (2016) Positive definite matrices and the S-divergence, Proc. Amer. Math. Soc., 144, 2787-2797.

    J. Taghia, M. Bånkestad, F. Lindsten, T.B. Schön (2019) Constructing the Matrix Multilayer Perceptron and its Application to the VAE, arXiv:1902.01182v1

    S. Umeyama (1988) An Eigendecomposition Approach to Weighted Graph Matching Problems, IEEE Trans. Pattern. Anal. Mach. Intell., 10(5), 695-703.

    O. Yair, M. Ben-Chen, R. Talmon (2019) Parallel Transport on the Cone Manifold of SPD Matrices for Domain Adaptation IEEE Trans. Sig. Process. 67(7), 1797-1811.

    T. Yamazaki (2019) The Ando-Hiai inequalities for the solution of the generalized Karcher Equation and related results arXiv:1802.06200v2.

    +Intro to Riemannian Geometry · PosDefManifold

    Intro to Riemannian Geometry

    The study of appropriate distance measures for positive definite matrices has recently grown very fast, driven by practical problems in radar data processing, image processing, computer vision, shape analysis, medical imaging (especially diffusion MRI and Brain-Computer Interface), sensor networks, elasticity, mechanics, numerical analysis and machine learning (e.g., see references in Congedo et al., 2017a)🎓.

    In many applications the observed data can be conveniently summarized by positive definite matrices, which are either symmetric positive definite (SPD: real) or Hermitian Positive Definite (HPD: complex). For example, those may be some form of the data covariance matrix in the time, frequency or time-frequency domain, or autocorrelation matrices, kernels, slices of tensors, density matrices, elements of a search space, etc. Positive definite matrices are naturally treated as points on a smooth Riemannian manifold allowing useful operations such as interpolation, smoothing, filtering, approximation, averaging, signal detection and classification. Such operations are the object of the present PosDefManifold library.

    More formally, this Julia library treats operations on the metric space $(P, δ^2)$ of $n×n$ positive definite matrices endowed with a distance or symmetric divergence $δ: P×P→[0, ∞]$. Several matrix distances or matrix divergences $δ$ are considered. Using some of them, the most important one being the Fisher metric, we define a Riemannian manifold. In mathematics, this is the subject of Riemannian geometry and information geometry.

    Note that throughout this library the word 'metric' is used loosely, referring both to the actual Riemannian metric on the tangent space and to the resulting distance, as well as to general symmetric divergences acting on P, regardless of whether they are metrics in the strict sense and whether they induce a Riemannian geometry on P. This is done for convenience of exposition, since in practice the 'metrics' in PosDefManifold may be used interchangeably.

    Riemannian manifolds

    Here are some important definitions:

    A smooth manifold in differential geometry is a topological space that is locally similar to the Euclidean space and has a globally defined differential structure.

    The tangent space at point $G$ is the vector space containing the tangent vectors to all curves on the manifold passing through $G$ (Fig. 1).

    A smooth Riemannian manifold is equipped with an inner product on the tangent space (a Riemannian metric) defined at each point and varying smoothly from point to point. For manifold P the tangent space is the space of symmetric or Hermitian matrices.

    Thus, a Riemannian metric turns the metric space $(P, δ^2)$ into a Riemannian manifold. This is the case, for example, of the Fisher metric, which has a fundamental role in the manifolds of positive definite matrices, and of the Wasserstein metric, fundamental in optimal transport theory.

    Figure 1. Schematic illustration of the Riemannian manifold of positive definite matrices. Left: geodesic joining points $P$ and $Q$, passing through its mid-point (mean) $G$ (green curve), tangent space at point $G$ with tangent vectors to the geodesic from $G$ to $P$ and from $G$ to $Q$ (blue arrowed lines) and distance $δ(G, Q)$. Right: the center of mass (also named mean) $G$ of points $P_1,…,P_4$, defined as the point minimizing the sum of the four squared distances $δ²(G, P_i)$, for $i={1,…,4}$.

    geodesic

    The key object in the P manifold is the geodesic, loosely defined as the shortest path joining two points $P$ and $Q$ on the manifold, analogous to straight lines in Euclidean space (Fig. 1). The geodesic equation with arclength $0≤a≤1$ is the equation of the points along the path, denoted $\gamma(P, Q, a)$, where with $a=0$ we stay at $P$ and with $a=1$ we move all the way to $Q$. The points along the geodesic in between $P$ and $Q$ $(0<a<1)$ can be understood as weighted means of $P$ and $Q$. For example, the geodesic equation according to the Euclidean metric is $(1-a)P + aQ$, which is the traditional way to define weighted means. With the metrics we consider here, geodesics are unique and always exist. Furthermore, as we will see, using the Fisher metric those geodesics extend indefinitely, i.e., they are defined and always remain positive definite for $-∞<a<∞$.
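    For example, the Euclidean geodesic can be sketched in a few lines of plain Julia, using only the standard LinearAlgebra module (a sketch, not this library's own functions):

```julia
using LinearAlgebra

# two random SPD matrices (adding I keeps them well-conditioned)
A, B = randn(4, 4), randn(4, 4)
P, Q = Hermitian(A * A' + I), Hermitian(B * B' + I)

# Euclidean geodesic: γ(P, Q, a) = (1-a)P + aQ
euclideanGeodesic(P, Q, a) = Hermitian((1 - a) * P + a * Q)

euclideanGeodesic(P, Q, 0.0) ≈ P   # true: with a=0 we stay at P
euclideanGeodesic(P, Q, 1.0) ≈ Q   # true: with a=1 we reach Q
euclideanGeodesic(P, Q, 0.5)       # the arithmetic mean of P and Q
```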

    distance

    The length of the geodesic (at constant velocity) between two points gives the distance $δ(P, Q)$. The distance is always real, non-negative and equal to zero if and only if $P=Q$.

    distance from the origin

    In contrast to a Euclidean space, the origin of the P manifold endowed with the Fisher metric is not $0_n$, but $I_n$, the identity matrix of dimension $n×n$. The distance between a point $P$ and the origin, i.e., $δ(P, I)$, is analogous therein to the length of vectors in Euclidean space. This Riemannian manifold is symmetric around $I_n$, i.e., $δ(P, I)=δ(P^{-1}, I)$ and $δ(P, Q)=δ(P^{-1}, Q^{-1})$. This will be made more precise when we talk about invariances.

    mean

    The mid-point on the geodesic joining $P$ and $Q$ is named the mean. Using the Euclidean metric this is the arithmetic mean of $P$ and $Q$ and using the inverse Euclidean metric this is their harmonic mean. As we will see, those are straightforward extensions of their scalar counterparts. Using the Fisher metric, the mid-point of the geodesic joining $P$ and $Q$ allows the proper generalization to matrices of the scalars' geometric mean. The other metrics allow other definitions of means (see below).

    Fréchet mean

    Using Fréchet's variational approach we can extend to positive-definite matrices the concept of weighted mean of a set of scalars; as the mid-point $G$ on the geodesic joining $P$ and $Q$ is the minimizer of $δ^2(P, G)+δ^2(Q, G)$, so the mean $G$ of points $P_1, P_2,...,P_k$ is the matrix $G$ verifying

    $\textrm{argmin}_{G}\sum_{i=1}^{k}δ^2(P_i,G).$

    Thus, every metric induces a distance (or divergence) function, which, in turn, induces a mean.

    invariances

    An important characteristic of metrics is that they may induce invariance properties on the distance, which are in turn inherited by the mean.

    Let us denote shortly by $\{P_i\}$ the set $\{P_1,...,P_k\}$, where $i=\{1,...,k\}$, and by $G\{P_i\}$ the Fréchet mean of the set (in this section we drop the weights to keep the notation short). The most important invariance properties are:

    invariance | effect on distance $δ(P,Q)$ | effect on mean $G\{P_i\}$
    rotation | $δ(P,Q)=δ(U^HPU,U^HQU)$ | $G\{U^HP_iU\}=U^HG\{P_i\}U$
    affinity | $δ(P,Q)=δ(B^HPB,B^HQB)$ | $G\{B^HP_iB\}=B^HG\{P_i\}B$
    inversion | $δ(P,Q)=δ(P^{-1},Q^{-1})$ | $G\{P_i^{-1}\}=G^{-1}\{P_i\}$

    for any unitary $U$ and non-singular $B$.

    The affine invariance implies the rotation invariance and is also named congruence invariance.
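    As a quick numerical illustration, here is a sketch in plain Julia verifying the affinity and inversion invariances of the Fisher distance, written out directly from its formula given later in this page:

```julia
using LinearAlgebra

# Fisher distance: δ(P, Q) = ‖log(P^{-1/2} Q P^{-1/2})‖ (see the Fisher metric below)
δFisher(P, Q) = (W = inv(sqrt(P)); norm(log(Hermitian(W * Q * W))))

A, B = randn(4, 4), randn(4, 4)
P, Q = Hermitian(A * A' + I), Hermitian(B * B' + I)
C = randn(4, 4) + 4I   # non-singular with probability 1

δFisher(P, Q) ≈ δFisher(Hermitian(C' * P * C), Hermitian(C' * Q * C))  # affinity: true
δFisher(P, Q) ≈ δFisher(inv(P), inv(Q))                                # inversion: true
```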

    metrics

    We are interested in distance or divergence functions; the difference between the two is that a divergence does not need to be symmetric nor to satisfy the triangle inequality. Note that in PosDefManifold we consider only distances and symmetric divergences, since those are of greater interest in practice. One can find several distances and divergences in the literature and they often turn out to be related to each other, see for example (Chebbi and Moakher, 2012; Cichocki et al., 2015; Sra, 2016)🎓. Ten of them are implemented in PosDefManifold and two of them are Riemannian metrics (the Fisher and Wasserstein metrics, as we have said). In this section we give a complete list of the expressions for their induced

    • distance of a point $P$ from the origin,
    • distance between two points $P$ and $Q$,
    • geodesic joining $P$ to $Q$ (hence the weighted means of $P$ and $Q$)
    • weighted Fréchet mean $G(P,w)$ of a set of $k>2$ points $\{P_1,...,P_k\}$ with associated real non-negative weights $\{w_1,...,w_k\}$ summing up to 1.
    Nota Bene

    In the following, the weights $\{w_1,...,w_k\}$ are always supposed to sum up to 1, superscript $H$ indicates the conjugate transpose (or just the transpose if the matrix is real) and, if $a$ is the arclength of a geodesic, we define for convenience $b=1-a$.

    Euclidean

    This is the classical Euclidean distance leading to the usual arithmetic mean. In general this metric is not well adapted to the P manifold. It verifies only the rotation invariance; however, the mean also verifies the congruence invariance.

    distance² to $I$ | distance²
    $∥P-I∥^2$ | $∥P-Q∥^2$
    geodesic | Fréchet mean
    $bP + aQ$ | $\sum_{i=1}^{k}w_i P_i$

    inverse Euclidean

    This is the classical harmonic distance leading to the harmonic mean. It verifies only the rotation invariance; however, the mean also verifies the congruence invariance.

    distance² to $I$ | distance²
    $∥P^{-1}-I∥^2$ | $∥P^{-1}-Q^{-1}∥^2$
    geodesic | Fréchet mean
    $\big(bP^{-1} + aQ^{-1}\big)^{-1}$ | $\big(\sum_{i=1}^{k}w_i P_i^{-1}\big)^{-1}$

    Cholesky Euclidean

    This is a very simple metric that has been proposed as an attempt to improve on the Euclidean one. It is rarely used (see for example Dai et al., 2016)🎓. It does not verify any invariance. Let $L_P$ be the lower triangular Cholesky factor of $P$, then

    distance² to $I$ | distance²
    $∥L_P-I∥^2$ | $∥L_P-L_Q∥^2$
    geodesic | Fréchet mean
    $(bL_P+aL_Q)(bL_{P}+aL_{Q})^H$ | $\big(\sum_{i=1}^{k}w_i L_{P_i}\big)\big(\sum_{i=1}^{k}w_i L_{P_i}\big)^H$
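    For instance, a sketch of this weighted Fréchet mean computed directly from the Cholesky factors (randSPD is a hypothetical helper defined just for this example):

```julia
using LinearAlgebra

randSPD(n) = (A = randn(n, n); Hermitian(A * A' + I))   # helper for this sketch
Pset = [randSPD(4) for _ = 1:5]
w = fill(1/5, 5)                                        # weights summing up to 1

# weighted Cholesky Euclidean mean: (Σᵢ wᵢ L_{Pᵢ})(Σᵢ wᵢ L_{Pᵢ})ᴴ
T = sum(w[i] * cholesky(Pset[i]).L for i = 1:5)
G = Hermitian(T * T')
```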

    log Euclidean

    If matrices $\{P_1,...,P_k\}$ all pair-wise commute, then this metric coincides with the Fisher metric. See (Arsigny et al., 2007 ; Bhatia et al., 2019a)🎓. It enjoys the rotation and inversion invariance. The log-Euclidean distance to $I$ is the same as per the Fisher metric. This mean has the same determinant as the Fisher mean, and trace equal or superior to the trace of the Fisher mean. A minimum trace log Euclidean mean approximating well the Fisher mean has been proposed in Congedo et al. (2015)🎓.

    distance² to $I$ | distance²
    $∥\textrm{log}(P)∥^2$ | $∥\textrm{log}(P)-\textrm{log}(Q)∥^2$
    geodesic | Fréchet mean
    $\textrm{exp}\big(b\textrm{log}P + a\textrm{log}Q\big)$ | $\textrm{exp}\big(\sum_{i=1}^{k}w_i\hspace{1pt}\textrm{log}P_i\big)$
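    A sketch of the log-Euclidean Fréchet mean in plain Julia, checking also that for commuting (here diagonal) matrices it coincides with the Fisher mean $P^{1/2}Q^{1/2}$:

```julia
using LinearAlgebra

# weighted log-Euclidean mean: exp(Σᵢ wᵢ log(Pᵢ))
logEuclideanMean(Pset, w) =
    Hermitian(exp(sum(w[i] * log(Pset[i]) for i in eachindex(Pset))))

# two commuting (diagonal) SPD matrices
P = Hermitian(Matrix(Diagonal([1.0, 4.0, 9.0])))
Q = Hermitian(Matrix(Diagonal([2.0, 3.0, 5.0])))

logEuclideanMean([P, Q], [0.5, 0.5]) ≈ sqrt(P) * sqrt(Q)   # true
```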

    log Cholesky

    It is a recently proposed distance in P. Like the Cholesky Euclidean metric here above, it exploits the diffeomorphism between matrices in P and their Cholesky factor, such that $L_PL_P^H=P$, thanks to the fact that the Cholesky factor is unique and that the map is smooth (Lin, 2019)🎓. The mean has the same determinant as the Fisher and log-Euclidean mean.

    Let $L_X$,$S_X$ and $D_X$ be the lower triangle, the strictly lower triangle and the diagonal part of $X$, respectively (hence, $S_X+D_X=L_X$), then

    distance² to $I$ | distance²
    $∥S_P-I∥^2+∥\textrm{log}D_P∥^2$ | $∥S_P-S_Q∥^2+∥\textrm{log}D_P-\textrm{log}D_Q∥^2$

    geodesic: $S_P+a(S_Q-S_P)+D_P\hspace{2pt}\textrm{exp}\big(a\textrm{log}D_Q-a\textrm{log}D_P\big)$

    Fréchet mean: $TT^H$, where $T=\sum_{i=1}^{k}w_iS_{P_i}+\textrm{exp}\big(\sum_{i=1}^{k}w_i\textrm{log}D_{P_i}\big)$
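    A sketch of the log-Cholesky geodesic in plain Julia, following the expression given above (the interpolation acts on the Cholesky factors and the result is mapped back to P as $TT^H$):

```julia
using LinearAlgebra

function logCholeskyGeodesic(P, Q, a)
    Lp, Lq = cholesky(P).L, cholesky(Q).L
    Sp, Sq = tril(Lp, -1), tril(Lq, -1)      # strictly lower triangular parts
    Dp, Dq = Diagonal(Lp), Diagonal(Lq)      # diagonal parts
    T = Sp + a * (Sq - Sp) + Dp * exp(a * (log(Dq) - log(Dp)))
    Hermitian(T * T')                        # map the factor back to P
end

A, B = randn(4, 4), randn(4, 4)
P, Q = Hermitian(A * A' + I), Hermitian(B * B' + I)
logCholeskyGeodesic(P, Q, 0.0) ≈ P   # true
logCholeskyGeodesic(P, Q, 1.0) ≈ Q   # true
```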

    Fisher

    The Fisher metric, also known as the affine-invariant, natural and Fisher-Rao metric, among other names, has a paramount importance for the P manifold, standing out as the natural choice both from the perspective of differential geometry and information geometry. Endowed with the Fisher metric the manifold P is Riemannian, has nonpositive curvature and is symmetric. This metric verifies all three invariances we have considered.

    distance² to $I$ | distance²
    $∥\textrm{log}(P)∥^2$ | $∥\textrm{log}(P^{-1/2}QP^{-1/2})∥^2$
    geodesic
    $P^{1/2} \big(P^{-1/2} Q P^{-1/2}\big)^a P^{1/2}$

    Fréchet mean: it does not have a closed-form solution in general. The solution is the unique positive definite matrix $G$ satisfying (Bhatia and Holbrook, 2006; Moakher, 2005)🎓

    $\sum_{i=1}^{k}w_i\textrm{log}\big(G^{-1/2} P_i G^{-1/2}\big)=0.$

    For estimating it, PosDefManifold implements the well-known gradient descent algorithm, resulting in iterations:

    $G ←G^{1/2}\textrm{exp}\big(\sum_{i=1}^{k}w_i\textrm{log}(G^{-1/2} P_i G^{-1/2})\big)G^{1/2}.$

    Alternatively, and more efficiently, one can ask for an approximate solution invoking the MPM algorithm (Congedo et al., 2017b)🎓, which is also implemented (in order to estimate the geometric mean use function powerMean with parameter $p=0$ or with a very small value of $p$).
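    For reference, a minimal sketch of the gradient-descent iteration above in plain Julia (not the library's optimized implementation):

```julia
using LinearAlgebra

function fisherMean(Pset, w; tol = 1e-9, maxiter = 200)
    G = Hermitian(sum(w[i] * Pset[i] for i in eachindex(Pset)))  # init: arithmetic mean
    for _ = 1:maxiter
        R = sqrt(G); Ri = inv(R)                                 # G^{1/2}, G^{-1/2}
        S = Hermitian(sum(w[i] * log(Hermitian(Ri * Pset[i] * Ri))
                          for i in eachindex(Pset)))
        G = Hermitian(R * exp(S) * R)
        norm(S) < tol && break   # the sum of logs vanishes at the solution
    end
    return G
end
```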

    This mean is known under many different names (Fisher, Rao, Fisher-Rao, Pusz-Woronowicz, Cartan, Fréchet, Karcher, geometric....). The ‘centrality’ of this mean among a wide family of divergence-based means can be appreciated in Fig. 4 of Cichocki et al. (2015)🎓.

    The geometric mean $G$ of two matrices $P$ and $Q$ is denoted $\gamma(P, Q, \frac{1}{2})$. Currently it is an object of intense study because of its interesting mathematical properties. For instance,

    • it is the unique solution to Riccati equation $GQ^{-1}G=P$
    • it is equal to $F^{-H}D_1^{1/2}D_2^{1/2}F^{-1}$ for whatever joint diagonalizer $F$ of $P$ and $Q$, i.e., for whatever matrix $F$ satisfying $F^HPF=D_1$ and $F^HQF=D_2$, with $D_1$, $D_2$ non-singular diagonal matrices (Congedo et al., 2015)🎓.
    • it enjoys all 10 properties of means postulated in the seminal work of Ando, Li and Mathias (2004)🎓.

    When $P$ and $Q$ commute, the Fisher mean of two matrices reduces to $P^{1/2}Q^{1/2}$, which in this case is indeed the log-Euclidean mean $\textrm{exp}\big(\frac{1}{2}\textrm{log}P + \frac{1}{2}\textrm{log}Q\big)$.

    We denote the Fisher geodesic equation as $\gamma(P, Q, a)$. Note that $\gamma(I, P, a)=P^a$ and $\gamma(P, I, a)=P^{b}$, where $b=1-a$.

    The Fisher geodesic equation verifies $\gamma(P, Q, a)=\gamma(Q, P, b)$ and $(\gamma(P, Q, a))^{-1}=\gamma(P^{-1}, Q^{-1}, a)$.

    An interesting property of the Fisher metric is that using its geodesic equation we can extrapolate positive matrices, always remaining in P. That is, using any real value of $a$:

    • with $0 < a < 1$ we move toward $Q$ (attraction),
    • with $a > 1$ we move over and beyond $Q$ (extrapolation) and
    • with $a < 0$ we move back away from $Q$ (repulsion).

    Something similar can be done using the log Cholesky metric as well.
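    For example, writing out the Fisher geodesic equation given above, one can check numerically that extrapolation and repulsion never leave P:

```julia
using LinearAlgebra

# Fisher geodesic: γ(P, Q, a) = P^{1/2} (P^{-1/2} Q P^{-1/2})^a P^{1/2}
function fisherGeodesic(P, Q, a)
    R = sqrt(P); Ri = inv(R)
    Hermitian(R * Hermitian(Ri * Q * Ri)^a * R)
end

A, B = randn(4, 4), randn(4, 4)
P, Q = Hermitian(A * A' + I), Hermitian(B * B' + I)

all(eigvals(fisherGeodesic(P, Q,  3.0)) .> 0)   # true: extrapolation stays in P
all(eigvals(fisherGeodesic(P, Q, -2.0)) .> 0)   # true: repulsion stays in P
```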

    power means

    The arithmetic, harmonic and geometric means we have encountered are all members of the 1-parameter family of power means (with parameter $p∊[-1, 1]$) introduced by Lim and Pálfia (2012)🎓 to generalize the concept of power means of scalars (also known as Hölder means or generalized means). The family of power means $G$ with parameter $p$ satisfies the equation

    $G=\sum_{i=1}^{k}w_i\gamma(G, P_i, p)$,

    where $\gamma(G, P_i, p)$ is the Fisher geodesic equation we have discussed above when presenting the Fisher metric. In particular:

    • with $p=-1$ this is the harmonic mean (see the inverse Euclidean metric)
    • with $p=+1$ this is the arithmetic mean (see the Euclidean metric)
    • in the limit as $p$ approaches zero from either side, this is the geometric mean (see the Fisher metric).

    Thus, the family of power means continuously interpolates between the arithmetic and harmonic mean, passing through the geometric mean.

    Power means are the unique positive definite solution of (Yamazaki, 2019)🎓

    $\sum_{i=1}^{k}w_i\big(G^{-1/2} P_i G^{-1/2}\big)^p=I$.

    All power means enjoy the congruence invariance (hence the rotation invariance), but only the geometric mean also enjoys the inversion invariance.

    The power mean with $p=\frac{1}{2}$ is the solution of the Fréchet mean problem using the following divergence (Bhatia, Gaubert and Jain, 2019)🎓

    $δ^2(P,Q)=\textrm{tr}(P+Q)-2\textrm{tr}\gamma(P, Q, \frac{1}{2}) = \textrm{tr}(\textrm{arithm. mean}(P, Q)) - \textrm{tr}(\textrm{geom. mean}(P, Q)).$

    generalized means

    When the matrices in the set all pairwise commute, it has been proved in Lim and Pálfia (2012, see Property 1, p. 1502)🎓 that the power means we have just seen reduce to

    $\big(\sum_{i=1}^{k}w_iP_i^p\big)^{1/p}$,

    which are the straightforward extension of scalar power means (see generalized means) to matrices. As usual, such straightforward extensions work well in commuting algebra, but not in general. See for example the case of the mean obtained using the log Euclidean metric, which is the straightforward extension to matrices of the scalar geometric mean, but is not the matrix geometric mean, unless the matrices all pairwise commute.

    Both the generalized means and the power means have a parameter $p∊[-1, 1]$. For the latter, the solution is implemented via the fixed-point MPM algorithm (Congedo et al., 2017b)🎓.
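    For instance, a usage sketch (randP is documented later in this manual; we assume here that powerMean returns a 3-tuple holding the mean, the number of iterations and the attained convergence; check the function documentation for the exact signature):

```julia
using LinearAlgebra, PosDefManifold

Pset = randP(5, 20)                    # 20 random 5x5 SPD matrices (an ℍVector)
G, iter, conv = powerMean(Pset, 0.5)   # power mean with p = 1/2
H, _, _ = powerMean(Pset, -1.0)        # p = -1: the harmonic mean
```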

    modified Bhattacharyya mean

    If matrices $P_1, P_2,...,P_k$ all pair-wise commute, the special case $p=\frac{1}{2}$ yields the following instance of power means (and of generalized means):

    $\big(\sum_{i=1}^{k}w_iP_i^{1/2}\big)^{1/2}$.

    This mean has been proposed in a different context by Moakher (2012)🎓 as a modified Bhattacharyya mean, since it is a modification of the Bhattacharyya mean we will encounter next under the name logdet zero. It is worth noting that in commuting algebra Moakher’s mean also corresponds to the mean obtained with the Wasserstein metric.

    logdet zero

    The logdet zero divergence, also known as the square of the Bhattacharyya divergence (Moakher, 2012)🎓, Stein divergence (Harandi et al., 2016)🎓, symmetrized Jensen divergence, S-divergence (Sra, 2016)🎓 or log determinant α-divergence with α=0 (Chebbi and Moakher, 2012)🎓, is a Jensen-Bregman symmetric divergence enjoying all three invariances we have listed.

    Its square root has been shown to be a distance (Sra, 2016)🎓. It behaves very similarly to the Fisher metric at short distances (Moakher, 2012; Sra, 2016; Cichocki et al., 2015; Harandi et al., 2016) 🎓 and the mean of two matrices in P is the same as the Fisher mean (Harandi et al., 2016) 🎓. Thus, it has often been used instead of the Fisher metric because it allows more efficient calculations. In fact, the calculation of this distance requires only three Cholesky decompositions, whereas the computation of the Fisher distance involves extracting generalized eigenvalues.

    distance² to $I$ | distance²
    $\textrm{logdet}\frac{1}{2}(P+I)-\frac{1}{2}\textrm{logdet}(P)$ | $\textrm{logdet}\frac{1}{2}(P+Q)-\frac{1}{2}\textrm{logdet}(PQ)$

    geodesic: we use the Fréchet mean with appropriate weights.

    Fréchet mean: the solution is the unique positive definite matrix $G$ satisfying

    $\sum_{i=1}^{k}w_i\big(\frac{1}{2}P_i+\frac{1}{2}G\big)^{-1}=G^{-1}$.

    For estimating it, PosDefManifold implements the fixed-point iterations (Moakher, 2012, p. 315)🎓:

    $G ← \frac{1}{2}\big(\sum_{i=1}^{k}w_i(P_i+G)^{-1}\big)^{-1}$.
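    A sketch of this fixed-point iteration in plain Julia, initialized with the arithmetic mean:

```julia
using LinearAlgebra

function logdet0Mean(Pset, w; tol = 1e-9, maxiter = 200)
    G = Hermitian(sum(w[i] * Pset[i] for i in eachindex(Pset)))  # init: arithmetic mean
    for _ = 1:maxiter
        Gnew = Hermitian(inv(sum(w[i] * inv(Pset[i] + G) for i in eachindex(Pset))) / 2)
        converged = norm(Gnew - G) / norm(G) < tol
        G = Gnew
        converged && break
    end
    return G
end
```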

    The logdet zero divergence between $P$ and $Q$ can also be written as the log-determinant of their arithmetic mean minus the log-determinant of their geometric mean (Moakher, 2012)🎓, which thus defines a possible extension to matrices of the useful concept of Wiener entropy.

    logdet α

    The log determinant $α$-divergence family for $α∊[-1, 1]$ (Chebbi and Moakher, 2012)🎓 yields

    • the logdet zero mean for $α=0$,
    • the left Kullback-Leibler mean for $α=-1$ (which is the harmonic mean)
    • the right Kullback-Leibler mean for $α=1$ (which is the arithmetic mean).

    We do not consider the left and right Kullback-Leibler divergences because the related means are trivially the harmonic and arithmetic ones (Moakher, 2012)🎓. As for the symmetrized Kullback-Leibler divergence, this is known as the Jeffrey divergence and will be considered next. The log determinant $α$-divergence family of means is not implemented in PosDefManifold (besides the special cases $α=(-1, 0, 1)$), since the family of power means is implemented.

    Jeffrey

    This is a Jensen-Bregman symmetric divergence, also known as the symmetrized Kullback-Leibler divergence (see logdet α) (Faraki et al., 2015)🎓. It enjoys all three invariances we have listed.

    distance² to $I$ | distance²
    $\frac{1}{2}\textrm{tr} \big(P+P^{-1}\big)-n$ | $\frac{1}{2}\textrm{tr}(Q^{-1}P+P^{-1}Q)-n$

    geodesic: we use the Fréchet mean with appropriate weights.

    Fréchet mean: $A^{1/2}\big(A^{-1/2}HA^{-1/2}\big)^{1/2}A^{1/2}$, where $A$ is the arithmetic mean (see Euclidean metric) and $H$ is the harmonic mean (see inverse Euclidean metric). Thus, the weighted Fréchet mean is the geometric mean (see Fisher metric) of the arithmetic and harmonic mean (Moakher, 2012)🎓.

    Note that this is the geometric mean only for scalars and $k=2$: for two scalars $p$ and $q$ one has $\frac{p+q}{2}\cdot\frac{2pq}{p+q}=pq$, so the geometric mean of the arithmetic and harmonic means equals the geometric mean itself, but this does not hold for matrices in general (the only metric inducing the geometric mean in general is the Fisher metric).
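    A sketch of this closed form in plain Julia:

```julia
using LinearAlgebra

# Jeffrey Fréchet mean: the geometric mean of the arithmetic (A) and harmonic (H) means
function jeffreyMean(Pset, w)
    A = Hermitian(sum(w[i] * Pset[i] for i in eachindex(Pset)))            # arithmetic
    H = Hermitian(inv(sum(w[i] * inv(Pset[i]) for i in eachindex(Pset))))  # harmonic
    R = sqrt(A); Ri = inv(R)
    Hermitian(R * sqrt(Hermitian(Ri * H * Ri)) * R)  # A^{1/2}(A^{-1/2}HA^{-1/2})^{1/2}A^{1/2}
end
```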

    Von Neumann

    The Von Neumann divergence is a Jensen-Bregman symmetric divergence (Sra, 2016; Taghia et al., 2019)🎓. It enjoys only the rotation invariance.

    distance² to $I$ | distance²
    $\frac{1}{2}\textrm{tr}(P\textrm{log}P-\textrm{log}P)$ | $\frac{1}{2}\textrm{tr}\big(P(\textrm{log}P-\textrm{log}Q)+Q(\textrm{log}Q-\textrm{log}P)\big)$

    The geodesic and weighted Fréchet mean for this metric are not available.

    Wasserstein

    This is an extension to matrices of the Hellinger divergence for vectors and is also known as the Bures divergence in quantum physics, where it is applied to density matrices (unit-trace positive definite matrices). It enjoys only the rotation invariance. Endowed with the Wasserstein metric, the manifold P has a Riemannian geometry of nonnegative curvature. See (Bhatia et al., 2019a; Bhatia et al., 2019b)🎓.

    distance² to $I$ | distance²
    $\textrm{tr}(P+I)-2\textrm{tr}(P^{1/2})$ | $\textrm{tr}(P+Q)-2\textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2}$
    geodesic
    $b^2P+a^2Q+ab\big[(PQ)^{1/2}+(QP)^{1/2}\big]$

    The quantity $\textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2}$ is known in quantum physics as the fidelity of $P$ and $Q$ when those are density matrices.

    Fréchet mean: the solution is the unique positive definite matrix $G$ satisfying (Agueh and Carlier, 2011) 🎓

    $G=\sum_{i=1}^{k}w_i\big( G^{1/2} P_i G^{1/2}\big)^{1/2}$.

    For estimating it, PosDefManifold implements the fixed-point algorithm of Álvarez-Esteban et al. (2016)🎓, giving iterations:

    $G ← G^{-1/2} \big(\sum_{i=1}^{k} w_i(G^{1/2}P_i G^{1/2})^{1/2}\big)^2 G^{-1/2}$

    In the special case when the matrices all pair-wise commute, the Wasserstein mean is equal to the instance of power means and generalized means with $p=\frac{1}{2}$ (Bhatia, Jain and Lim, 2019b)🎓, that is, to the modified Bhattacharyya mean.

    In the special case $k=2$ and equal weights, the mean is $W=\frac{1}{4}\big(P+Q+(PQ)^{1/2}+(QP)^{1/2}\big)$.
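    A sketch of this fixed-point algorithm in plain Julia, initialized with the arithmetic mean:

```julia
using LinearAlgebra

function wassersteinMean(Pset, w; tol = 1e-9, maxiter = 200)
    G = Hermitian(sum(w[i] * Pset[i] for i in eachindex(Pset)))  # init: arithmetic mean
    for _ = 1:maxiter
        R = sqrt(G); Ri = inv(R)                                 # G^{1/2}, G^{-1/2}
        S = sum(w[i] * sqrt(Hermitian(R * Pset[i] * R)) for i in eachindex(Pset))
        Gnew = Hermitian(Ri * S * S * Ri)                        # G^{-1/2} (Σ ...)² G^{-1/2}
        converged = norm(Gnew - G) / norm(G) < tol
        G = Gnew
        converged && break
    end
    return G
end
```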

    🎓

    References

    M. Agueh, G. Carlier (2011) Barycenters in the Wasserstein space, SIAM J. Mat. Anal. Appl. 43, 904-924.

    P.C. Álvarez-Esteban, E. del Barrio, J.A. Cuesta-Albertos, C. Matrán (2016) A fixed-point approach to barycenters in Wasserstein space, Journal of Mathematical Analysis and Applications, 441(2), 744-762.

    T. Ando, C.-K. Li, R. Mathias (2004) Geometric means, Linear Algebra and its Applications, 385(1), 305-334.

    V. Arsigny, P. Fillard, X. Pennec, N. Ayache (2007) Geometric means in a novel vector space structure on symmetric positive-definite matrices, SIAM journal on matrix analysis and applications, 29(1), 328-347.

    A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2012) Multi-class Brain Computer Interface Classification by Riemannian Geometry, IEEE Transactions on Biomedical Engineering, 59(4), 920-928.

    A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2013) Classification of covariance matrices using a Riemannian-based kernel for BCI applications, Neurocomputing, 112, 172-178.

    R. Bhatia (2007) Positive Definite Matrices. Princeton University Press.

    R. Bhatia, M. Congedo (2019) Procrustes problems in manifolds of positive definite matrices, Linear Algebra and its Applications, 563, 440-445.

    R. Bhatia, S. Gaubert, T. Jain (2019) Matrix versions of the Hellinger distance, arXiv:1901.01378.

    R. Bhatia, J. Holbrook (2006) Riemannian geometry and matrix geometric means, Linear Algebra and its Applications, 413 (2-3), 594-618.

    R. Bhatia, T. Jain (2010) Approximation problems in the Riemannian metric on positive definite matrices, Ann. Funct. Anal., 5(2), 118-126.

    R. Bhatia, T. Jain, Y. Lim (2019a) Inequalities for the Wasserstein mean of positive definite matrices, Linear Algebra and its Applications, in press.

    R. Bhatia, T. Jain, Y. Lim (2019b) On the Bures-Wasserstein distance between positive definite matrices, Expositiones Mathematicae, in press.

    Z. Chebbi, M. Moakher (2012) Means of Hermitian positive-definite matrices based on the log-determinant α-divergence function, Linear Algebra and its Applications, 436(7), 1872-1889.

    A. Cichocki, S. Cruces, S.-I. Amari (2015) Log-Determinant Divergences Revisited: Alpha-Beta and Gamma Log-Det Divergences, Entropy, 17(5), 2988-3034.

    R.R. Coifman, Y. Shkolnisky, F.J. Sigworth, A. Singer (2008) Graph Laplacian Tomography From Unknown Random Projections, IEEE Transactions on Image Processing, 17(10), 1891-1899.

    M. Congedo, B. Afsari, A. Barachant, M. Moakher (2015) Approximate Joint Diagonalization and Geometric Mean of Symmetric Positive Definite Matrices, PLoS ONE, 10(4), e0121423.

    M. Congedo, A. Barachant, R. Bhatia (2017a) Riemannian Geometry for EEG-based Brain-Computer Interfaces; a Primer and a Review, Brain-Computer Interfaces, 4(3), 155-174.

    M. Congedo, A. Barachant, E. Kharati Koopaei (2017b) Fixed Point Algorithms for Estimating Power Means of Positive Definite Matrices, IEEE Transactions on Signal Processing, 65(9), 2211-2220.

    X. Dai, S. Khamis, Y. Zhang, L.S. Davis (2016) Parameterizing region covariance: an efficient way to apply sparse codes on second order statistics, arXiv:1602.02822.

    M. Faraki, M. Harandi, F. Porikli (2015) More About VLAD: A Leap from Euclidean to Riemannian Manifolds, IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston.

    W. Förstner, B. Moonen (1999) A metric for covariance matrices, in K. Krumm and V.S. Schwarze (Eds.) Quo vadis geodesia...?, number 1999.6 in tech. report of the Dep. of Geodesy and Geoinformatics, p. 113-128, Stuttgart University.

    M.T. Harandi, R. Hartley, B. Lovell, C. Sanderson (2016) Sparse coding on symmetric positive definite manifolds using Bregman divergences, IEEE Transactions on Neural Networks and Learning Systems, 27(6), 1294-1306.

    N.J. Higham (1988) Computing a Nearest Symmetric Positive Semidefinite Matrix, Linear Algebra and its Applications, 103, 103-118.

    J. Ho, G. Cheng, H. Salehian, B.C. Vemuri (2013) Recursive Karcher Expectation Estimators and Geometric Law of Large Numbers, Proc. of the AISTATS Conf.

    B. Iannazzo (2016) The geometric mean of two matrices from a computational viewpoint, Numerical Linear Algebra with Applications, 23(2), 208-229.

    S. Lafon (2004) Diffusion maps and geometric harmonics, Ph.D. dissertation, Yale University, New Haven, CT.

    Y. Lim, M. Pálfia (2012) Matrix power means and the Karcher mean, Journal of Functional Analysis, 262(4), 1498-1514.

    Y. Lim, M. Pálfia (2019) Strong law of large numbers for the L1-Karcher mean, arXiv:1912.09295.

    Z. Lin (2019) Riemannian Geometry of Symmetric Positive Definite Matrices via Cholesky Decomposition, in press.

    E. Massart, J.M. Hendrickx, P.-A. Absil (2018) Matrix Geometric Means based on shuffled inductive sequences, Linear Algebra and its Applications, 252, 334-359.

    M. Moakher (2005) A Differential Geometric Approach to the Geometric Mean of Symmetric Positive-Definite Matrices, SIAM Journal on Matrix Analysis and Applications, 26(3), 735-747.

    M. Moakher (2012) Divergence measures and means of symmetric positive-definite matrices, in D.H. Laidlaw and A. Vilanova (Eds.) "New Developments in the Visualization and Processing of Tensor Fields", Springer, Berlin.

    C. Mostajeran, C. Grussler, R. Sepulchre (2019) Geometric Matrix Midranges, arXiv:1907.04188.

    X. Pennec, P. Fillard, N. Ayache (2006) A Riemannian Framework for Tensor Computing, International Journal of Computer Vision, 66(1), 41-66.

    P.L.C. Rodrigues, M. Congedo, C. Jutten (2018) Multivariate Time-Series Analysis Via Manifold Learning, in Proc. of the IEEE Statistical Signal Processing Workshop (SSP 2018), Freiburg im Breisgau, Germany.

    S. Sra (2016) Positive definite matrices and the S-divergence, Proc. Amer. Math. Soc., 144, 2787-2797.

    J. Taghia, M. Bånkestad, F. Lindsten, T.B. Schön (2019) Constructing the Matrix Multilayer Perceptron and its Application to the VAE, arXiv:1902.01182v1.

    S. Umeyama (1988) An Eigendecomposition Approach to Weighted Graph Matching Problems, IEEE Trans. Pattern. Anal. Mach. Intell., 10(5), 695-703.

    O. Yair, M. Ben-Chen, R. Talmon (2019) Parallel Transport on the Cone Manifold of SPD Matrices for Domain Adaptation, IEEE Trans. Sig. Process., 67(7), 1797-1811.

    T. Yamazaki (2019) The Ando-Hiai inequalities for the solution of the generalized Karcher equation and related results, arXiv:1802.06200v2.

    diff --git a/docs/build/linearAlgebra/index.html b/docs/build/linearAlgebra/index.html index c9ed724..83ab853 100644 --- a/docs/build/linearAlgebra/index.html +++ b/docs/build/linearAlgebra/index.html @@ -1,6 +1,6 @@ -linearAlgebra.jl · PosDefManifold

    +linearAlgebra.jl · PosDefManifold

    linearAlgebra.jl

    This unit contains linear algebra functions useful in relation to the Riemannian geometry of the manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. In Julia those are Hermitian matrices, see typecasting matrices.

    In general they take a matrix as input (some may take other arrays as input) and are divided into nine categories depending on what kind of functions they are and what they give as output:

    Category | Output
    1. Utilities | - - -
    2. Matrix normalizations and approximations | matrix
    3. Boolean functions of matrices | matrix
    4. Scalar functions of matrices | scalar
    5. Diagonal functions of matrices | diagonal matrix
    6. Unitary functions of matrices | orthogonal/unitary matrix
    7. Matrix function of matrices | matrix
    8. Spectral decompositions of positive matrices | spectral function of input
    9. Decompositions involving triangular matrices | triangular matrix

    Utilities

    Function | Description
    typeofMatrix, typeofMat | Return the type of the matrix argument
    typeofVector, typeofVec | Return the type of the matrix vector argument
    dim | Length of the dimensions of matrices and vectors of matrices
    remove | Remove one or more elements from a vector or one or more columns or rows from a matrix
    isSquare | Return true if the matrix argument is square, false otherwise

    PosDefManifold.typeofMatrixFunction
    function typeofMatrix(
    +array::Union{AnyMatrix, AnyMatrixVector, AnyMatrixVector₂})

    alias: typeofMat

    Return the type of a matrix, either Hermitian, Diagonal, LowerTriangular, or Matrix. Argument array may be a matrix of one of these types, but also one of the following:

    ℍVector, ℍVector₂, 𝔻Vector, 𝔻Vector₂, 𝕃Vector, 𝕃Vector₂, 𝕄Vector, 𝕄Vector₂.

    Those are Array of Matrices types. See also aliases for the symbols , 𝔻, 𝕃 and 𝕄.

    Note that this function is different from Julia function typeof, which returns the concrete type (see example below), thus cannot be used for typecasting matrices.

    Examples

    using LinearAlgebra, PosDefManifold
     P=randP(3) # generate a 3x3 Hermitian matrix
     typeofMatrix(P) # returns `Hermitian`
     typeof(P) # returns `Hermitian{Float64,Array{Float64,2}}`
    @@ -11,16 +11,16 @@
     
     Pset=randP(3, 4) # generate a set of 4 3x3 Hermitian matrix
     # Pset is an ℍVector type
    -typeofMatrix(Pset) # again returns `Hermitian`
    source
    PosDefManifold.typeofVectorFunction
    function typeofVector(
    -array::Union{AnyMatrix, AnyMatrixVector, AnyMatrixVector₂})

    alias: typeofVec

    Return the type of a Vector, either HermitianVector, DiagonalVector, LowerTriangularVector, or MatrixVector. The aliases of those are, respectvely, ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector. Argument array may be a vector of one of these types, but also one of the following:

    , 𝔻, 𝕃 and 𝕄, ℍVector₂, 𝔻Vector₂, 𝕃Vector₂, 𝕄Vector₂.

    See aliases for the symbols , 𝔻, 𝕃 and 𝕄. The last four are Array of Matrices types.

    Note that this function is different from Julia function typeof only in that it returns the vector type also if array is not of the ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector type.

    Examples

    using LinearAlgebra, PosDefManifold
    +typeofMatrix(Pset) # again returns `Hermitian`
    source
    PosDefManifold.typeofVectorFunction
    function typeofVector(
    +array::Union{AnyMatrix, AnyMatrixVector, AnyMatrixVector₂})

    alias: typeofVec

    Return the type of a Vector, either HermitianVector, DiagonalVector, LowerTriangularVector, or MatrixVector. The aliases of those are, respectively, ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector. Argument array may be a vector of one of these types, but also one of the following:

    , 𝔻, 𝕃 and 𝕄, ℍVector₂, 𝔻Vector₂, 𝕃Vector₂, 𝕄Vector₂.

    See aliases for the symbols , 𝔻, 𝕃 and 𝕄. The last four are Array of Matrices types.

    Note that this function is different from Julia function typeof only in that it returns the vector type also if array is not of the ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector type.

    Examples

    using LinearAlgebra, PosDefManifold
     P=randP(3, 4) # generate 4 3x3 Hermitian matrices
     typeofVector(P) # returns `Array{Hermitian,1}`
     typeof(P) # also returns `Array{Hermitian,1}`
     
     typeofVector(P[1]) # returns `Array{Hermitian,1}`
    +typeof(P[1]) # returns `Hermitian{Float64,Array{Float64,2}}`
    source
    PosDefManifold.dimFunction
    (1) function dim(X::AnyMatrix, [d])
     (2) function dim(vector::AnyMatrixVector, [d])
    +(3) function dim(vector₂::AnyMatrixVector₂, [d])

    (1) $X$ is a real or complex Matrix, Diagonal, LowerTriangular or Hermitian matrix. Return a 2-tuple containing the dimensions of $X$, which is two times the same dimension for all possible types of $X$ with the exception of the Matrix type, which can be rectangular. Optionally you can specify a dimension (1 or 2) to get just the length of that dimension.

    (2) vector is an 𝕄Vector, 𝔻Vector, 𝕃Vector or ℍVector type (see AnyMatrixVector type). Return a 3-tuple containing the number of matrices it holds (dimension 1) and their dimensions (dimension 2 and 3). Optionally you can specify a dimension (1, 2, or 3) to get just the length of that dimension.

    (3) vector₂ is an 𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂ or ℍVector₂ type (see AnyMatrixVector type). Return a 4-tuple containing

    • the number of vectors of matrices it holds (dimension 1),
    • a vector holding the number of matrices in each vector of matrices (dimensions 2),
    • the two dimensions of the matrices (dimension 3 and 4).

    Optionally you can specify a dimension (1, 2, 3 or 4) to get just the length of that dimension.

    vector and vector₂ are Array of Matrices types. See also aliases for the symbols , 𝔻, 𝕃 and 𝕄.

    Nota Bene

    If you specify a dimension and this is out of the valid range, the function returns zero.

    Both the vector (2) and the vector₂ (3) objects are meant to hold matrices living in the same manifold, therefore it is assumed that all matrices they hold are of the same dimension. The dimensions of the matrices are retrieved from

    • the first matrix in vector(2),
    • the first matrix in the first vector of vector₂(3).

    This function replaces Julia's size function, which cannot be used to retrieve the dimensions of matrix vectors. It is not possible to overload the size function for matrix vectors since this causes problems to other Julia functions.

    Examples

    using LinearAlgebra, PosDefManifold
     # (1)
     M=randn(3, 4) # generate a 3x4 `Matrix`
     dim(M) # returns (3, 4)
    @@ -50,12 +50,23 @@
     dim(A, 5) # out of range: return 0
     
     # note: to create an ℍVector₂ object holding k ℍVector objects use
    +sets=ℍVector₂(undef, k) # and then fill them
    source
    PosDefManifold.removeFunction
    function remove(X::Union{Vector, Matrix}, what::Union{Int, Vector{Int}};
    +				dims=1)

    Remove one or more elements from a vector or one or more columns or rows from a matrix.

    If X is a Matrix, dims=1 (default) removes rows, dims=2 removes columns.

    If X is a Vector, dims has no effect.

    The second argument is either an integer or a vector of integers.

    Examples

    a=randn(5)
    +b=remove(a, 2)
    +b=remove(a, collect(1:3)) # remove elements 1 to 3
    +A=randn(3, 3)
    +B=remove(A, 2)
    +B=remove(A, 2; dims=2)
    +A=randn(5, 5)
    +B=remove(A, collect(1:2:5)) # remove rows 1, 3 and 5
    +C=remove(A, [1, 4])
    +A=randn(10, 10)
    +A=remove(A, [collect(2:3); collect(8:10)]; dims=2)
    source

    Matrix normalizations and approximations

    Function | Description
    det1 | Normalize the determinant
    tr1 | Normalize the trace
    nearestPosDef | Nearest Symmetric/Hermitian Positive Semi-definite matrix
    nearestOrthogonal, nearestOrth | Nearest Orthogonal matrix
    normalizeCol! | Normalize one or more columns

    PosDefManifold.det1Function
    function det1(X::AnyMatrix; <tol::Real=0.>)

    Return the argument matrix $X$ normalized so as to have unit determinant. For square positive definite matrices this is the best approximant from the set of matrices in the special linear group - see Bhatia and Jain (2014)🎓.

    $X$ can be a real or complex Diagonal, LowerTriangular, Matrix, or Hermitian matrix. (see AnyMatrix type)

    If the determinant is not greater than tol (which defaults to zero) a warning is printed and $X$ is returned.

    Nota Bene

    This function is meant for positive definite matrices. Julia may throw an error while computing the determinant if the matrix is defective.

    See Julia det function.

    See also: tr1.

    Examples

    using LinearAlgebra, PosDefManifold
     P=randP(5) # generate a random real positive definite matrix 5x5
     Q=det1(P)
     det(Q) # must be 1
     # using a tolerance
    +Q=det1(P; tol=1e-12)
    source
    PosDefManifold.tr1Function
    tr1(X::AnyMatrix; tol::Real=0.)

    Return the argument matrix $X$ normalized so as to have unit trace.

$X$ can be a real or complex Diagonal, LowerTriangular, Matrix or Hermitian matrix (see AnyMatrix type). Its trace must be real. If the absolute value of its imaginary part is greater than tol (which defaults to zero) a warning is printed and $X$ is returned. Also, if the trace is not greater than tol a warning is printed and $X$ is returned.

    See: Julia trace function.

    See also: tr, det1.

    Examples

    using LinearAlgebra, PosDefManifold
     
     P=randP(5) # generate a random real positive definite matrix 5x5
     Q=tr1(P)
 tr(Q)  # must be 1
     
 Pc=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5
     Qc=tr1(Pc)
tr(Qc)  # must be 1
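
Unit-trace normalization is simply division by the trace, which can be checked on the matrices created above (a minimal sketch):

Qc ≈ Pc/tr(Pc) ? println(" ⭐ ") : println(" ⛔ ")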
    source
    PosDefManifold.nearestPosDefFunction
    nearestPosDef(X::Union{𝔻, 𝕄}; tol::Real=0.)

Return the nearest symmetric/Hermitian positive semi-definite matrix of a diagonal or of an arbitrary square matrix X according to the Frobenius norm. If the eigenvalues of the symmetric part of X are all non-negative, the result is positive definite and will be flagged as Hermitian, otherwise it is positive semi-definite and will not be flagged. The nearest matrix is given by

    $(Y+H)/2$

    where

    $Y=(X+X^H)/2$

    is the symmetric part of $X$, and $H$ is the symmetric polar factor of $Y$. See Higham(1988)🎓 for details and for the way it is computed.

    See also: det1, procrustes.

    Examples

    using LinearAlgebra, PosDefManifold
     X=randn(5, 5) # generate an arbitrary 5x5 matrix
     S=nearestPosDef(X)
     
     P=randP(5) # generate a random real positive definite 5x5 matrix
     S=nearestPosDef(Matrix(P)) # typecasting an Hermitian matrix as a `Matrix`
     # Since P is a positive definite matrix S must be equal to P
S ≈ P ? println(" ⭐ ") : println(" ⛔ ")
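
The construction described above can be reproduced step by step (a minimal sketch; Y and H mirror the formula, with the symmetric polar factor obtained from the SVD):

X=randn(5, 5)
Y=(X+X')/2                 # symmetric part of X
W=svd(Y)
H=W.V*Diagonal(W.S)*W.Vt   # symmetric polar factor of Y
nearestPosDef(X) ≈ (Y+H)/2 ? println(" ⭐ ") : println(" ⛔ ")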
    source
    PosDefManifold.nearestOrthogonalFunction
    nearestOrthogonal(X::AnyMatrix)

    alias: nearestOrth

    Return the nearest orthogonal matrix of a square Hermitian, LowerTriangular, Diagonal or generic Matrix X (see AnyMatrix type). This is given by

    $UV^H$,

    where

$\textrm{SVD}(X)=UΛV^H$.

    If X is Diagonal, return X.

    See also: nearestPosDef, procrustes.

    Examples

    using PosDefManifold
U=nearestOrth(randn(5, 5))
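
A quick check (a minimal sketch): the result is orthogonal and matches the SVD construction given above.

using LinearAlgebra
X=randn(5, 5)
U=nearestOrth(X)
U'*U ≈ I ? println(" ⭐ ") : println(" ⛔ ")
W=svd(X)
U ≈ W.U*W.Vt ? println(" ⭐ ") : println(" ⛔ ")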
    source
    PosDefManifold.normalizeCol!Function
    (1) normalizeCol!(X::𝕄{T}, j::Int)
     (2) normalizeCol!(X::𝕄{T}, j::Int, by::Number)
     (3) normalizeCol!(X::𝕄{T}, range::UnitRange)
     (4) normalizeCol!(X::𝕄{T}, range::UnitRange, by::Number)
for all above: where T<:RealOrComplex

    Given a Matrix type $X$ comprised of real or complex elements,

    • (1) normalize the $j^{th}$ column to unit norm
    • (2) divide the elements of the $j^{th}$ column by number $by$
    • (3) normalize the columns in $range$ to unit norm
    • (4) divide the elements of columns in $range$ by number $by$.

    $by$ is a number of abstract supertype Number. It should be an integer, real or complex number. For efficiency, it should be of the same type as the elements of $X$.

    $range$ is a UnitRange type.

    Methods (1) and (3) call the BLAS.nrm2 routine for computing the norm of concerned columns. See Threads.

    Nota Bene

    Julia does not allow normalizing the columns of Hermitian matrices. If you want to call this function for an Hermitian matrix see typecasting matrices.

    See norm and also randn for the example below.

    See also: colNorm, colProd.

    Examples

    using PosDefManifold
     X=randn(10, 20)
 normalizeCol!(X, 2)                  # (1) normalize column 2
 normalizeCol!(X, 2, 10.0)            # (2) divide column 2 by 10.0
 normalizeCol!(X, 2:4)                # (3) normalize columns 2 to 4
 X=randn(ComplexF64, 10, 20)
 normalizeCol!(X, 3)                  # (1) normalize column 3
normalizeCol!(X, 3:6, (2.0 + 0.5im)) # (4) divide columns 3 to 6 by (2.0 + 0.5im)
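
After method (1) the column has unit norm, which can be checked with colNorm (a minimal sketch):

X=randn(10, 20)
normalizeCol!(X, 2)
colNorm(X, 2) ≈ 1.0 ? println(" ⭐ ") : println(" ⛔ ")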
    source

    Boolean functions of matrices

Function  Description
ispos     Check whether a real vector or diagonal matrix is comprised of all positive elements
    PosDefManifold.isposFunction
        (1) ispos(λ::Vector{T};
     	<
     	tol::Real=0,
     	rev=true,
 	🔔=true,
 	msg::String="">)
 
         (2) ispos(Λ::𝔻{T};
     	< same optional keyword arguments as in (1) > )
     
	for all above: where T<:Real

    Return $true$ if all numbers in (1) real vector $λ$ or in (2) real Diagonal matrix $Λ$ are not inferior to $tol$, otherwise return $false$. This is used, for example, in spectral functions to check that all eigenvalues are positive.

    Nota Bene

    $tol$ defaults to the square root of Base.eps of the type of $λ$ (1) or $Λ$ (2). This corresponds to requiring positivity beyond about half of the significant digits.

    The following are <optional keyword arguments>:

• If $rev=true$ the (1) elements in $λ$ or (2) the diagonal elements in $Λ$ will be checked in reverse order. This allows a very fast check when the elements are sorted and it is known from which end it is best to start checking.

    If the result is $false$:

    • if $🔔=true$ a bell character will be printed. In most systems this will ring a bell on the computer.
    • if string $msg$ is provided, a warning will print $msg$ followed by:

    "at position pos", where pos is the position where the first non-positive element has been found.

    Examples

    using PosDefManifold
a=[1, 0, 2, 8]
ispos(a, msg="non-positive element found")
     
# it will print:
# ┌ Warning: non-positive element found at position 2
# └ @ [here Julia will point to the line of code issuing the warning]
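
Method (2) works the same way on a real Diagonal matrix (a minimal sketch):

using LinearAlgebra
ispos(Diagonal([1.0, 2.0, 3.0])) ? println(" ⭐ ") : println(" ⛔ ")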
    source

    Scalar functions of matrices

Function           Description
colProd            Sum of products of the elements in two columns
sumOfSqr, ss       Sum of squares of all elements or of specified columns
sumOfSqrDiag, ssd  Sum of squares of the diagonal elements
colNorm            Euclidean norm of a column
sumOfSqrTril, sst  Sum of squares of the lower triangle elements up to a given underdiagonal
tr                 Fast trace of the product of two Hermitian matrices
quadraticForm, qf  Fast quadratic form
fidelity           (Quantum) Fidelity of two positive matrices

    PosDefManifold.colProdFunction
    (1) colProd(X::Union{𝕄{T}, ℍ{T}}, j::Int, l::Int)
     (2) colProd(X::Union{𝕄{T}, ℍ{T}}, Y::Union{𝕄{T}, ℍ{T}}, j::Int, l::Int)
for all above: where T<:RealOrComplex

    (1) Given a real or complex Matrix or Hermitian matrix $X$, return the dot product of the $j^{th}$ and $l^{th}$ columns, defined as,

    $\sum_{i=1}^{r} \big(x_{ij}^*x_{il}\big),$

    where $r$ is the number of rows of $X$ and $^*$ denotes complex conjugate (nothing if the matrix is real).

    (2) Given real or complex Matrix or Hermitian matrices $X$ and $Y$, return the dot product of the $j^{th}$ column of $X$ and the $l^{th}$ column of $Y$, defined as,

    $\sum_{i=1}^{r} \big(x_{ij}^*y_{il}\big),$

    where $r$ is the number of rows of $X$ and of $Y$ and $^*$ is as above.

    Nota Bene

$X$ and $Y$ may have a different number of columns, but must have the same number of rows.

    Arguments $j$ and $l$ must be positive integers in range

    • (1) j,l in 1:size(X, 2),
    • (2) j in 1:size(X, 2), l in 1:size(Y, 2).

    See also: normalizeCol!, colNorm.

    Examples

    using PosDefManifold
     X=randn(10, 20)
     p=colProd(X, 1, 3)
     Y=randn(10, 30)
q=colProd(X, Y, 2, 25)
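
The dot-product definition above can be checked directly (a minimal sketch):

using LinearAlgebra
p ≈ dot(X[:, 1], X[:, 3]) ? println(" ⭐ ") : println(" ⛔ ")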
    source
    PosDefManifold.sumOfSqrFunction
    (1) sumOfSqr(A::Array)
     (2) sumOfSqr(H::ℍ{T})
     (3) sumOfSqr(L::𝕃{T})
     (4) sumOfSqr(D::𝔻{T})
     (5) sumOfSqr(X::Union{𝕄{T}, ℍ{T}}, j::Int)
     (6) sumOfSqr(X::Union{𝕄{T}, ℍ{T}}, range::UnitRange)
for (1)-(6) above: where T<:RealOrComplex

    alias: ss

    Return

    • (1) the sum of squares of the elements in an array $A$ of any dimensions.
    • (2) as in (1), but for an Hermitian matrix $H$, using only the lower triangular part.
    • (3) as in (1), but for a LowerTriangular matrix $L$.
    • (4) as in (1), but for a Diagonal matrix $D$ (sum of squares of diagonal elements).
• (5) the sum of squares of the $j^{th}$ column of a Matrix or Hermitian $X$.
• (6) the sum of squares of the columns of a Matrix or Hermitian $X$ in a given range.

    All methods support real and complex matrices.

    Only method (1) works for arrays of any dimensions.

    Methods (1)-(4) return the square of the Frobenius norm.

For method (5), $j$ is a positive integer in range 1:size(X, 2).

    For method (6), $range$ is a UnitRange type.

    See also: colNorm, sumOfSqrDiag, sumOfSqrTril.

    Examples

    using PosDefManifold
     X=randn(10, 20)
 sum2=sumOfSqr(X)        # (1) sum of squares of all elements
 sum2=sumOfSqr(X, 1)     # (5) sum of squares of elements in column 1
sum2=sumOfSqr(X, 2:4)   # (6) sum of squares of elements in columns 2 to 4
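
A quick check of the Frobenius-norm relation stated above (a minimal sketch):

using LinearAlgebra
sumOfSqr(X) ≈ norm(X)^2 ? println(" ⭐ ") : println(" ⛔ ")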
    source
    PosDefManifold.sumOfSqrDiagFunction
    sumOfSqrDiag(X::AnyMatrix)

    alias: ssd

Sum of squares of the diagonal elements in real or complex Matrix, Diagonal, Hermitian or LowerTriangular matrix $X$. If $X$ is rectangular (which is possible only if it is of the Matrix type), the main diagonal is considered.

    See AnyMatrix type

    See also: sumOfSqr, sumOfSqrTril.

    Examples

    using LinearAlgebra, PosDefManifold
     X=randn(10, 20)
     sumDiag2=sumOfSqrDiag(X) # (1)
sumDiag2=sumOfSqrDiag(𝔻(X)) # (2)
# 𝔻=LinearAlgebra.Diagonal is declared in the main module
    source
    PosDefManifold.colNormFunction
    colNorm(X::Union{𝕄{T}, ℍ{T}}, j::Int) where T<:RealOrComplex

    Given a real or complex Matrix or Hermitian matrix $X$, return the Euclidean norm of its $j^{th}$ column.

    This function calls the BLAS.nrm2 routine. See Threads.

    See also: normalizeCol!, colProd, sumOfSqr.

    Examples

    using PosDefManifold
     X=randn(10, 20)
normOfSecondColumn=colNorm(X, 2)
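
This agrees with the Euclidean norm of the column (a minimal sketch):

using LinearAlgebra
normOfSecondColumn ≈ norm(X[:, 2]) ? println(" ⭐ ") : println(" ⛔ ")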
    source
    PosDefManifold.sumOfSqrTrilFunction
    sumOfSqrTril(X::AnyMatrix, k::Int=0)

    alias: sst

    Given a real or complex Matrix, Diagonal, Hermitian or LowerTriangular matrix $X$ (see AnyMatrix type), return the sum of squares of the elements in its lower triangle up to the $k^{th}$ underdiagonal.

    Matrix $X$ may be rectangular.

    $k$ must be in range

• 1-size(X, 1):c-1 for $X$ Matrix, Diagonal or Hermitian, where c=size(X, 2),
    • 1-size(X, 1):0 for $X$ LowerTriangular.

    For $X$ Diagonal the result is

    • $0$ if $k<0$,
    • the sum of the squares of the diagonal elements otherwise.

See Julia tril(M, k::Integer) function for numbering of diagonals.

    See also: sumOfSqr, sumOfSqrDiag.

    Examples

    using PosDefManifold
     A=[4. 3.; 2. 5.; 1. 2.]
     #3×2 Array{Float64,2}:
     # 4.0  3.0
 # 2.0  5.0
 # 1.0  2.0
 
 s=sumOfSqrTril(A, -1)
     # 9.0 = 1²+2²+2²
     
     s=sumOfSqrTril(A, 0)
# 50.0 = 1²+2²+2²+4²+5²
    source
    LinearAlgebra.trFunction
    (1) tr(P::ℍ{T}, Q::ℍ{T})
     (2) tr(P::ℍ{T}, M::𝕄{T})
     (3) tr(D::𝔻{T}, H::Union{ℍ{T}, 𝕄{T}})
     (4) tr(H::Union{ℍ{T}, 𝕄{T}}, D::𝔻{T})
for all above: where T<:RealOrComplex

Given (1) two Hermitian positive definite matrices $P$ and $Q$, return the trace of the product $PQ$. This is real even if $P$ and $Q$ are complex.

    $P$ must always be flagged as Hermitian. See typecasting matrices.

In (2) the second argument is a Matrix object $M$, in which case return

• a real trace if the product $PM$ is real or if it has all positive real eigenvalues.
• a complex trace if the product $PM$ is not real and has complex eigenvalues.

    Methods (3) and (4) return the trace of the product $DH$ or $HD$, where $D$ is a Diagonal matrix and $H$ an $Hermitian$ or $Matrix$ object. The result is of the same type as the input matrices.

    For all methods all arguments must be of the same type.

    Math

Let $P$ and $Q$ be Hermitian matrices; using the properties of the trace (e.g., the cyclic property and the similarity invariance), you can use this function to quickly compute the trace of several expressions. For example:

    $\textrm{tr}(PQ)=\textrm{tr}(P^{1/2}QP^{1/2})$

    and

    $\textrm{tr}(PQP)=\textrm{tr}(P^{2}Q)$ (see example below).

    See: trace.

    See also: DiagOfProd, tr1.

    Examples

    using PosDefManifold
     P=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5
     Q=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5
     tr(P, Q) ≈ tr(P*Q) ? println(" ⭐ ") : println(" ⛔ ")
     tr(P, Q) ≈ tr(sqrt(P)*Q*sqrt(P)) ? println(" ⭐ ") : println(" ⛔ ")
tr(sqr(P), Q) ≈ tr(P*Q*P) ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.quadraticFormFunction
    (1) quadraticForm(v::Vector{T}, P::ℍ{T}) where T<:Real
     (2) quadraticForm(v::Vector{T}, L::𝕃{T}) where T<:Real
     (3) quadraticForm(v::Vector{T}, X::𝕄{T}, forceLower::Bool=false) where T<:Real
(4) quadraticForm(v::Vector{S}, X::Union{𝕄{S}, ℍ{S}, 𝕃{S}}) where S<:Complex

    alias: qf

    (1) Given a real vector $v$ and a real Hermitian matrix $P$, compute the quadratic form

    $v^TPv$,

    where the superscript T denotes transpose. It uses only the lower triangular part of $P$.

    (2) As in (1), given a real vector $v$ and a LowerTriangular matrix $L$.

    (3) As in (1), given a real vector $v$ and a real generic Matrix $M$, if forceLower=true. If forceLower=false, the product $v^TMv$ is evaluated instead using the whole matrix $M$.

(4) Quadratic form $v^HPv$, where superscript H denotes complex conjugate and transpose, for a complex vector v and a complex Matrix, LowerTriangular or Hermitian matrix. The whole matrix is used.

    Math

    For $v$ and $X$ real and $X$ symmetric, the quadratic form is

    $\sum_i(v_i^2x_{ii})+\sum_{i>j}(2v_iv_jx_{ij})$.

For $L$ lower triangular it is

    $\sum_i(v_i^2x_{ii})+\sum_{i>j}(v_iv_jx_{ij})$.

These formulas are used in methods (1), (2) and (3).

    Examples

    using PosDefManifold
     P=randP(5) # generate a random real positive definite matrix 5x5
     v=randn(5)
     q1=quadraticForm(v, P) # or q1=qf(v, P)
     q2=v'*P*v
q1 ≈ q2 ? println(" ⭐ ") : println(" ⛔ ")
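
The symmetric-case formula given in Math above can also be evaluated term by term (a minimal sketch):

q3=sum(v[i]^2*P[i, i] for i=1:5) + sum(2*v[i]*v[j]*P[i, j] for i=2:5 for j=1:i-1)
q3 ≈ q1 ? println(" ⭐ ") : println(" ⛔ ")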
    source
    PosDefManifold.fidelityFunction
    fidelity(P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex

Given two positive definite Hermitian matrices $P$ and $Q$, return their fidelity:

$\textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2}.$

    This is used in quantum physics and is related to the Wasserstein metric. See for example Bhatia, Jain and Lim (2019b)🎓.

    Examples

    using PosDefManifold
     P=randP(5);
     Q=randP(5);
f=fidelity(P, Q)
    source

    Diagonal functions of matrices

Function         Description
fDiag, 𝑓𝔻        Element-wise functions of matrix diagonals
DiagOfProd, dop  Diagonal of the product of two matrices

    PosDefManifold.fDiagFunction
    fDiag(func::Function, X::AnyMatrix, k::Int=0)

    alias: 𝑓𝔻

Applies function func element-wise to the elements of the $k^{th}$ diagonal of a real or complex Diagonal, LowerTriangular, Matrix or Hermitian matrix $X$ and returns a diagonal matrix with these elements. $X$ must be square in all cases, except for the 𝕄=Matrix type argument, in which case it may be of dimension r⋅c, with r ≠ c.

See Julia tril(M, k::Integer) function for numbering of diagonals.

By default the main diagonal is considered.

    • If $X$ is Diagonal, $k$ is set automatically to zero (main diagonal).
    • If $X$ is LowerTriangular, $k$ cannot be positive.

    Note that if $X$ is rectangular the dimension of the result depends on the size of $X$ and on the chosen diagonal. For example,

• r ≠ c and $k$=0 (main diagonal), the result will be of dimension min(r,c)⋅min(r,c),
    • $X$ 3⋅4 and $k=-1$, the result will be 2⋅2,
    • $X$ 3⋅4 and $k=1$, the result will be 3⋅3, etc.
    Nota Bene

    The function func must support the func. syntax and therefore must be able to apply element-wise to the elements of the chosen diagonal (this includes anonymous functions). If the input matrix is complex, the function func must be able to support complex arguments.

    See also: DiagOfProd, tr.

    Examples

    using PosDefManifold
     P=randP(5) # use P=randP(ComplexF64, 5) for generating an Hermitian matrix
     
     # diagonal matrix with the inverse of the first sub-diagonal of P
 D=fDiag(inv, P, -1)
 
 Λ, U=evd(P) # Λ is the Diagonal matrix of the eigenvalues of P
 # diagonal matrix with the log of the eigenvalues
     Δ=fDiag(log, Λ)
     
     # using an anonymous function for the square of the eigenvalues
Δ=fDiag(x->x^2, Λ)
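
The dimension rules for rectangular input listed above can be checked directly (a minimal sketch):

X=randn(3, 4)
size(fDiag(identity, X))     # (3, 3): main diagonal
size(fDiag(identity, X, -1)) # (2, 2): first sub-diagonal
size(fDiag(identity, X, 1))  # (3, 3): first super-diagonal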
    source
    PosDefManifold.DiagOfProdFunction
    DiagOfProd(P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex

    alias: dop

Return the Diagonal matrix holding the diagonal of the product $PQ$ of two Hermitian matrices P and Q. Only the diagonal part of the product is computed.

    See also: tr, fDiag.

    Examples

    using PosDefManifold, LinearAlgebra
     P, Q=randP(5), randP(5)
DiagOfProd(P, Q)≈Diagonal(P*Q) ? println("⭐ ") : println("⛔ ")
    source

    Unitary functions of matrices

Function  Description
mgs       Modified Gram-Schmidt orthogonalization

    PosDefManifold.mgsFunction
    mgs(X::𝕄{T}, numCol::Int=0) where T<:RealOrComplex

    Modified (stabilized) Gram-Schmidt orthogonalization of the columns of square or tall matrix $X$, which can be comprised of real or complex elements. The orthogonalized $X$ is returned by the function. $X$ is not changed.

All columns are orthogonalized by default. If instead argument numCol is provided, then only the first numCol columns of $X$ are orthogonalized. In this case only the first numCol columns will be returned.

    Examples

    using LinearAlgebra, PosDefManifold
     X=randn(10, 10);
     U=mgs(X)        # result is 10⋅10
     U=mgs(X, 3)     # result is 10⋅3
     U'*U ≈ I ? println(" ⭐ ") : println(" ⛔ ")
 # Julia also understands:
U'U ≈ I ? println(" ⭐ ") : println(" ⛔ ")
    source

    Matrix function of matrices

Function          Description
fVec              General function for multi-threaded computation of means and sums of matrix vectors
congruence, cong  Compute congruent transformations

    PosDefManifold.fVecFunction
    	(1) fVec(f::Function, 𝐏::AnyMatrixVector;
     	<
     	w::Vector=[],
     	✓w=false,
 	allocs=[]
     	>
     
     	(2) fVec(f::Function, g::Function, 𝐏::AnyMatrixVector;
	< same optional keyword arguments in (1) >)

    Given a 1d array $𝐏={P_1,...,P_k}$ of $k$ matrices of the 𝕄Vector type, 𝔻Vector type, 𝕃Vector type or ℍVector type and an optional non-negative real weights vector $w={w_1,...,w_k}$, return expression

    $(1)\hspace{6pt}f_{i=1}^{k}(w_iP_i)$,

    or

    $(2)\hspace{6pt}f_{i=1}^{k}(w_ig(P_i))$,

where $f$ is either the mean or the sum standard Julia function and $g$ is any matrix function applying to each matrix $P_k$, such as exp, log, sqrt, etc., including anonymous functions.

    This function is multi-threaded. It works by partitioning the $k$ operations required by the $f$ function in several groups, passing each group to a separate thread and combining the result of the intermediate operations. This function allows a gain in computational time only when the number of matrices (1) and/or their size (2) is high. Use mean and sum otherwise. The maximal gain is obtained when the number of matrices in 𝐏 is an exact multiple of the number of threads Julia is instructed to use. For this latter, see Threads.

    !!! note "Nota Bene"

 Contrary to the Julia `mean` and `sum` functions (v 1.1.0), the `fVec` function
 returns a matrix of the same type as the matrices in ``𝐏``.

<optional keyword argument> allocs allows to pass pre-allocated memory for holding the intermediate result of each thread. Argument allocs must be a vector of as many matrices as threads, where the matrices have the same dimension as the matrices in $𝐏$ (see the example here below). Using this option is worthwhile only if the size of the matrices is very high and/or when fVec is to be called repeatedly on many vectors of matrices, where the matrices always have the same size, so that one allocation works for all calls.

    If <optional keyword argument> ✓w=true is passed, the weights are normalized so as to sum up to 1, otherwise they are used as they are passed. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time. By default ✓w is false.

    Examples

    using LinearAlgebra, PosDefManifold
     Pset=randP(4, 1000); # generate 1000 positive definite 4x4 matrices
     mean(Pset) # arithmetic mean calling Julia function
     Threads.nthreads() # check how many threads are available
 fVec(mean, Pset) # multi-threaded arithmetic mean
 
 using BenchmarkTools # for the @benchmark macro used below
     # standard Julia function
     @benchmark(mean(log, Pset)) 					# (5.271 s)
     # fVec
@benchmark(fVec(mean, log, Pset))				# (1.540 s)
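
A sketch of pre-allocating thread workspace via the allocs argument; the exact element type required for allocs is an assumption here (it must match the matrices in 𝐏):

Pset=randP(8, 100) # 100 positive definite 8x8 matrices
allocs=[ℍ(zeros(8, 8)) for _=1:Threads.nthreads()] # one workspace matrix per thread (assumed type)
G=fVec(mean, Pset; allocs=allocs)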
    source
    PosDefManifold.congruenceFunction
    (1) congruence(B::AnyMatrix, P::AnyMatrix, matrixType)
     (2) congruence(B::AnyMatrix, 𝐏::AnyMatrixVector, matrixVectorType)
     (3) congruence(B::AnyMatrix, 𝑷::AnyMatrixVector₂, matrixVector₂Type)
(4) congruence(𝐁::AnyMatrixVector, 𝑷::AnyMatrixVector₂, matrixVector₂Type)

    alias: cong

    (1) Return the congruent transformation

    $BPB^H$,

    for $B$ and $P$ any combination of Hermitian, LowerTriangular, Diagonal or general Matrix type.

The result is of the matrixType argument, which must be provided and must be one of these four abstract types (not an instance of them). See aliases for shortening these types using the symbols ℍ, 𝔻, 𝕃 and 𝕄.

    (2) Return a vector of matrices holding the congruent transformations

    $BP_kB^H$,

    for all $k$ matrices in $𝐏={P_1,...,P_k}$, for $B$ and $𝐏$ any combination of matrix type Hermitian, LowerTriangular, Diagonal or Matrix ($B$) and vector of matrices type ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector ($𝐏$). See Array of Matrices types.

    The result is a vector of matrices of the matrixVectorType argument, which must be provided and must be one of the following abstract types: ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector (and not an instance of these types).

    (3) Return a vector of vector of matrices holding the congruent transformations

    $BP_{mk}B^H$,

    for all $m$ vectors of $k[m]$ vectors of matrices in $𝑷$, for $B$ and $𝑷$ any combination of matrix type Hermitian, LowerTriangular, Diagonal or Matrix ($B$) and vector of matrices type ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ and 𝕄Vector₂ ($𝑷$). See Array of Matrices types.

    The result is a vector of vector of matrices of the matrixVector₂Type argument, which must be provided and must be one of the following abstract types: ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ or 𝕄Vector₂ (and not an instance of these types).

    (4) Return a vector of vector of matrices holding the congruent transformations

    $B_iP_{ij}B_j^H$, for $i,j∈[1,...,m]$.

for $𝐁$ holding $m$ matrices and $𝑷$ holding $m$ vectors holding $m$ matrices each. Note that, differently from method (3), here the vectors of $𝑷$ are all of the same length and this is exactly the length of $𝐁$. $𝐁$ and $𝑷$ may be any combination of matrix vector type ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector ($𝐁$) and vector of matrices type ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ and 𝕄Vector₂ ($𝑷$). See Array of Matrices types.

    Note that this function computes the following algebraic expression:

    $\begin{pmatrix} B_1 & \hspace{0.01cm} & 0 \\ \hspace{0.01cm} & \ddots & \hspace{0.01cm} \\ 0 & \hspace{0.01cm} & B_m \end{pmatrix} \begin{pmatrix} C_{11} & \cdots & C_{1m} \\ \vdots & \ddots & \vdots \\ C_{m1} & \cdots & C_{mm} \end{pmatrix} \begin{pmatrix}B_1^T & \hspace{0.01cm} & 0 \\ \hspace{0.01cm} & \ddots & \hspace{0.01cm} \\ 0 & \hspace{0.01cm} & B_m^T\end{pmatrix}$ .

    The result is a vector of vector of matrices of the matrixVector₂Type argument, which must be provided and must be one of the following abstract types: ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ or 𝕄Vector₂ (and not an instance of these types).

    When you pass it to this function, make sure to typecast $𝐁$ as an ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector type if it is not already created as one of these types. See the example here below and typecasting matrices.

    Method (2), (3) and (4) are multi-threaded. See Threads.

    Nota Bene

Types ℍ, 𝔻, 𝕃 or 𝕄 are actually constructors, thus they may modify the result of the congruence(s). This greatly expands the possibilities of this function, but it is your responsibility to pick the right argument matrixType in (1), matrixVectorType in (2) and matrixVector₂Type in (3)-(4). For example, in (1) if $B$ and $P$ are Hermitian, calling cong(B, P, 𝔻) will actually return the diagonal part of $B*P*B'$ and calling cong(B, P, 𝕃) will actually return its lower triangular part. The full congruence can be obtained as an Hermitian matrix by cong(B, P, ℍ) and as a generic matrix object by cong(B, P, 𝕄).

    Examples

    using LinearAlgebra, PosDefManifold
     
     # (1)
     P=randP(3) # generate a 3x3 positive matrix
 # ...
     Qset[2][1]≈M*Pset[2][1]*M' ? println("⭐") : println("⛔")
     Qset[2][4]≈M*Pset[2][4]*M' ? println("⭐") : println("⛔")
     
     # (4)
     Pset1=randP(4, 2); # generate 2 positive definite 4x4 matrices
     Pset2=randP(4, 2);
 # ...
     Qset[1][1]≈U[1]*Pset[1][1]*U[1]' ? println("⭐") : println("⛔")
     Qset[1][2]≈U[1]*Pset[1][2]*U[2]' ? println("⭐") : println("⛔")
     Qset[2][1]≈U[2]*Pset[2][1]*U[1]' ? println("⭐") : println("⛔")
Qset[2][2]≈U[2]*Pset[2][2]*U[2]' ? println("⭐") : println("⛔")
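
The constructor behaviour described in the Nota Bene can be checked directly (a minimal sketch):

B=randP(3); P=randP(3)
cong(B, P, 𝔻) ≈ Diagonal(B*P*B') ? println("⭐") : println("⛔") # diagonal part only
cong(B, P, ℍ) ≈ ℍ(B*P*B') ? println("⭐") : println("⛔")       # full congruence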
    source

    Spectral decompositions of positive matrices

Function                  Description
evd                       Eigenvalue-Eigenvector decomposition of a matrix in $UΛU'=P$ form
frf                       Full-rank factorization of an Hermitian matrix
invfrf                    Inverse of the full-rank factorization of an Hermitian matrix (whitening)
spectralFunctions         Mother function for creating spectral functions of eigenvalues
pow                       Power of a positive matrix for any number of exponents in one pass
invsqrt                   Principal square root inverse (whitening) of a positive matrix
sqr                       Square of a positive matrix
powerIterations, powIter  Power method for estimating any number of eigenvectors and associated eigenvalues

    PosDefManifold.evdFunction
    evd(S::Union{𝕄{T}, ℍ{T}}) where T<:RealOrComplex

    Given a positive semi-definite matrix $S$, returns a 2-tuple $(Λ, U)$, where $U$ is the matrix holding in columns the eigenvectors and $Λ$ is the matrix holding the eigenvalues on the diagonal. This is the output of Julia eigen function in $UΛU'=S$ form.

    As for the eigen function, the eigenvalues and associated eigenvectors are sorted by increasing values of eigenvalues.

    $S$ may be real or complex and may be flagged by Julia as Hermitian (in this case PosDefManifold assumes it is positive definite).

    See typecasting matrices.

    See also: spectralFunctions.

    Examples

    using PosDefManifold
     A=randn(3, 3);
     S=A+A';
 Λ, U=evd(S); # which is equivalent to (Λ, U)=evd(S)
     (U*Λ*U') ≈ S ? println(" ⭐ ") : println(" ⛔ ")
# => UΛU'=S, UΛ=SU, ΛU'=U'S
    source
    PosDefManifold.frfFunction
    frf(P::ℍ{T}) where T<:RealOrComplex

    Full-rank factorization of Hermitian matrix P. It is given by

    $F=UD^{1/2}$,

    where

    $\textrm{EVD}(P)=UDU^{H}$

    is the eigenvalue-eigenvector decomposition of P. It verifies

    $FF^H=P$,

    thus $F^{-1}$ is a whitening matrix.

    See also: invfrf.

    Examples

    using LinearAlgebra, PosDefManifold
     P=randP(3)
     F = frf(P)
F*F'≈P ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.invfrfFunction
    invfrf(P::ℍ{T}) where T<:RealOrComplex

    Inverse of the full-rank factorization of Hermitian matrix P. It is given by

    $F=D^{-1/2}U^H$,

    where

    $\textrm{EVD}(P)=UDU^{H}$

    is the eigenvalue-eigenvector decomposition of P. It verifies

    $FPF^H=I$,

    thus $F$ is a whitening matrix.

    See also: frf.

    Examples

    using LinearAlgebra, PosDefManifold
     P=randP(3)
     F = invfrf(P)
F*P*F'≈I ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.spectralFunctionsFunction
    (1) spectralFunctions(P::ℍ{T}, func) where T<:RealOrComplex
(2) spectralFunctions(D::𝔻{S}, func) where S<:Real

    (1) This is the mother function for all spectral functions of eigenvalues implemented in this library, which are:

    • pow (power),
• invsqrt (inverse square root).

    The function sqr (square) does not use it, as it can be obtained more efficiently by simple multiplication.

You can use this function if you need another spectral function of eigenvalues besides those, and those already implemented in the standard package LinearAlgebra. In general, you won't call it directly.

    func is the function that will be applied on the eigenvalues.

    $P$ must be flagged as Hermitian. See typecasting matrices. It must be a positive definite or positive semi-definite matrix, depending on func.

    A special method is provided for real Diagonal matrices (2).

    Nota Bene

The function func must support the func. syntax and therefore must be able to apply element-wise to the eigenvalues (this includes anonymous functions).

    Maths

The definition of spectral functions for a positive definite matrix $P$ is as follows:

    $f\big(P\big)=Uf\big(Λ\big)U^H,$

    where $U$ is the matrix holding in columns the eigenvectors of $P$, $Λ$ is the matrix holding on diagonal its eigenvalues and $f$ is a function applying element-wise to the eigenvalues.

    See also: evd.

    Examples

    using LinearAlgebra, PosDefManifold
     n=5
     P=randP(n) # P=randP(ComplexF64, 5) to generate an Hermitian complex matrix
     noise=0.1;
     Q=spectralFunctions(P, x->x+noise) # add white noise to the eigenvalues
tr(Q)-tr(P) ≈ noise*n ? println(" ⭐ ") : println(" ⛔ ")
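
The definition given in Maths can be written out explicitly using evd (a minimal sketch):

Λ, U=evd(P)              # Λ holds the eigenvalues on its diagonal
Q2=ℍ(U*(Λ+noise*I)*U')   # U f(Λ) U^H with f(x)=x+noise
Q ≈ Q2 ? println(" ⭐ ") : println(" ⛔ ")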
    source
    PosDefManifold.powFunction
    (1) pow(P::ℍ{T}, args...) where T<:RealOrComplex
(2) pow(D::𝔻{S}, args...) where S<:Real

    (1) Given a positive semi-definite Hermitian matrix $P$, return the power $P^{r_1}, P^{r_2},...$ for any number of exponents $r_1, r_2,...$. It returns a tuple comprising as many elements as arguments passed after $P$.

    $P$ must be flagged as Hermitian. See typecasting matrices.

    $arg1, arg2,...$ are real numbers.

    A special method is provided for real Diagonal matrices (2).

    See also: invsqrt.

    Examples

    using LinearAlgebra, PosDefManifold
     P=randP(5);     # use P=randP(ComplexF64, 5) for generating an Hermitian matrix
     Q=pow(P, 0.5);            # =>  QQ=P
     Q, W=pow(P, 0.5, -0.5);
     W*P*W ≈ I ? println(" ⭐ ") : println(" ⛔ ")
     Q*Q ≈ P ? println(" ⭐ ") : println(" ⛔ ")
     R, S=pow(P, 0.3, 0.7);
R*S ≈ P ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.invsqrtFunction
(1) invsqrt(P::ℍ{T}) where T<:RealOrComplex
(2) invsqrt(D::𝔻{S}) where S<:Real

    Given a positive definite Hermitian matrix $P$, compute the inverse of the principal square root $P^{-1/2}$.

    $P$ must be flagged as Hermitian. See typecasting matrices.

    A special method is provided for real Diagonal matrices (2).

    Maths

    The principal square root of a positive definite matrix $P$ is the only symmetric (if $P$ is real) or Hermitian (if $P$ is complex) square root. Its inverse $P^{-1/2}$ is also named the whitening or sphering matrix since$P^{-1/2}PP^{-1/2}=I$.

    See: typecasting matrices.

    See also: pow.

    Examples

    using LinearAlgebra, PosDefManifold
     P=randP(ComplexF64, 5);
     Q=invsqrt(P);
 Q*P*Q ≈ I ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.sqrFunction
    (1) sqr(P::ℍ{T}) where T<:RealOrComplex
(2) sqr(X::Union{𝕄{T}, 𝕃{T}, 𝔻{S}}) where T<:RealOrComplex where S<:Real

    (1) Given a positive semi-definite Hermitian matrix $P$, compute its square $P^{2}$.

    $P$ must be flagged as Hermitian. See typecasting matrices.

A method is also provided for generic matrices of the Matrix type, LowerTriangular matrices and real Diagonal matrices (2). The output is of the same type as the input.

    See also: pow.

    Examples

    using PosDefManifold
     P=randP(5);
     P²=sqr(P);  # =>  P²=PP
 sqrt(P²) ≈ P ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.powerIterationsFunction
    powerIterations(H::Union{ℍ{T}, 𝕄{T}}, q::Int;
     <
     evalues=false,
     tol::Real=0,
 maxiter::Int=300,
     verbose=false>) where T<:RealOrComplex
     
     powerIterations(L::𝕃{S}, q::Int;
< same optional keyword arguments in (1)>) where S<:Real

    alias: powIter

    (1) Compute the $q$ eigenvectors associated to the $q$ largest (real) eigenvalues of real or complex Hermitian or Matrix $H$ using the power iterations + Gram-Schmidt orthogonalization as suggested by Strang. The eigenvectors are returned with the same type as the elements of $H$.

    $H$ must have real eigenvalues, that is, it must be a symmetric matrix if it is real or an Hermitian matrix if it is complex.

    (2) as in (1), but using only the LowerTriangular view $L$ of a matrix. This option is available only for real matrices (see below).

    The following are <optional keyword arguments>:

• tol is the tolerance for the convergence of the power method (see below),
• maxiter is the maximum number of iterations allowed for the power method,
• if verbose=true, the convergence of all iterations will be printed,
• if evalues=true, return the 4-tuple (Λ, U, iterations, convergence),
• if evalues=false, return the 3-tuple (U, iterations, convergence).
    Nota Bene

Unlike the evd function, the eigenvectors and eigenvalues are sorted by decreasing order of the eigenvalues.

    If $H$ is Hermitian and real, only its lower triangular part is used for computing the power iterations, like in (2). In this case the BLAS.symm routine is used. Otherwise the BLAS.gemm routine is used. See Threads.

    $tol$ defaults to 100 times the square root of Base.eps of the type of $H$. This corresponds to requiring the relative convergence criterion over two successive iterations to vanish for about half the significant digits minus 2.
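For instance, the default tolerance can be reproduced by hand and passed explicitly; a minimal sketch (the computation of defaultTol below only illustrates the stated rule, it is not taken from the function's code):

 using LinearAlgebra, PosDefManifold
 H=randP(20)
 defaultTol=100*√eps(real(eltype(H))) # here: 100*√eps(Float64)
 U, iterations, convergence=powIter(H, 3; tol=defaultTol, verbose=true)
 convergence <= defaultTol ? println(" ⭐ ") : println(" ⛔ ")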

    See also: mgs.

    Examples

    using LinearAlgebra, PosDefManifold
     # Generate an Hermitian (complex) matrix
     H=randP(ComplexF64, 10);
     # 3 eigenvectors and eigenvalues
 U, iterations, convergence=powIter(H, 3, verbose=true)
     
     # passing a `LowerTriangular` object (must be a real matrix in this case)
     L=𝕃(randP(10))
 Λ, U, iterations, convergence=powIter(L, 3, evalues=true)
    source

    Decompositions involving triangular matrices

Function   Description
choL       Lower triangular factor of Cholesky decomposition
choInv     Lower triangular factor of Cholesky decomposition and its inverse in one pass
choInv!    As choInv, but destroying the input matrix

    PosDefManifold.choLFunction
    (1) choL(P::ℍ{T}) where T<:RealOrComplex
(2) choL(D::𝔻{S}) where S<:Real

(1) Given a real or complex positive definite Hermitian matrix $P$, return the Cholesky lower triangular factor $L$ such that $LL^H=P$. To obtain $L^H$ or both $L$ and $L^H$, use instead Julia's cholesky function.

    On output, $L$ is of type LowerTriangular.

    (2) For a real Diagonal matrix $D$, return $D^{1/2}$.

    See also: choInv.

    Examples

    using PosDefManifold
 P=randP(5);
 L=choL(P);
 L*L' ≈ P ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.choInvFunction
choInv(P::AbstractArray{T};
       kind::Symbol = :LLt, tol::Real = √eps(T)) where T<:RealOrComplex

    For a real or complex positive definite matrix $P$, let $P=LL^H$ be its Cholesky decomposition and $P=L_1DL_1^H$ the related LDLt decomposition. In the above, $L$ is a lower triangular matrix, $D$ a positive-definite diagonal matrix and $L_1$ a unit lower triangular matrix. Return:

• if kind is :LLt (default), the 2-tuple $L$, $L^{-H}$
• if kind is :LDLt, the 3-tuple $L_1$, $D$, $L_1^{-H}$.

These are obtained in one pass and, for small matrices, this is faster than calling Julia's cholesky function and inverting the lower factor, unless you set

 BLAS.set_num_threads(1).

    Input matrix P may be of type Matrix or Hermitian. Since only the lower triangle is used, P may also be a LowerTriangular view of a positive definite matrix. If P is real, it can also be of the Symmetric type.

The algorithm is a multiplicative Gaussian elimination. If run to completion, the input matrix P holds the identity at the end. Only the lower part of P is required.

Notes: Output $L^{-H}$ is an inverse square root (whitening matrix) of $P$, since $L^{-1}PL^{-H}=I$. It therefore yields the inversion of $P$ as $P^{-1}=L^{-H}L^{-1}$. It is the fastest whitening matrix to compute; however, it yields poor numerical precision, especially for large matrices.

The following relations hold:

    • $L=PL^{-H}$
    • $L^{H}=L^{-1}P$
    • $L^{-H}=P^{-1}L$
    • $L^{-1}=L^{H}P^{-1}$.

    We also have

    • $L^{H}L=L^{-1}P^{2}L^{-H}=UPU^H$, with $U$ orthogonal (see below) and
    • $L^{-1}L^{-H}=L^{H}P^{-2}L=UP^{-1}U^H$.

    $LL^{H}$ and $L^{H}L$ are unitarily similar, that is,

    $ULL^{H}U^H=L^{H}L$,

    where $U=L^{-1}P^{1/2}$, with $P^{1/2}=H$ the principal (unique symmetric) square root of $P$. This is seen writing $PP^{-1}=HHL^{-H}L^{-1}$; multiplying both sides on the left by $L^{-1}$ and on the right by $L$ we obtain

    $L^{-1}PP^{-1}L=L^{-1}HHL^{-H}=I=(L^{-1}H)(L^{-1}H)^H$

    and since $L^{-1}H$ is square it must be unitary.

    From these expressions we have

    • $H=LU=U^HL^H$
    • $L=HU^H$
    • $H^{-1}=U^HL^{-1}$
    • $L^{-1}=UH^{-1}$.

    $U$ is the polar factor of $L^{H}$, i.e., $L^{H}=UH$, since $LL^{H}=HU^HUH^H=H^2=P$.

From $L^{H}L=UPU^H$ we have $L^{H}LU=UP=ULL^{H}$ and from $U=L^{-1}H$ we have $L=HU^H$.
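These relations are easy to check numerically; a minimal sketch (the variable names are arbitrary, not part of the API):

 using LinearAlgebra, PosDefManifold
 P=randP(4)
 L, Li=choInv(P)                                    # L and L⁻ᴴ
 L ≈ P*Li ? println(" ⭐ ") : println(" ⛔ ")        # L=PL⁻ᴴ
 L' ≈ inv(L)*P ? println(" ⭐ ") : println(" ⛔ ")   # Lᴴ=L⁻¹P
 Li ≈ inv(P)*L ? println(" ⭐ ") : println(" ⛔ ")   # L⁻ᴴ=P⁻¹L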

    See also: choInv!, choL.

    Examples

using LinearAlgebra, PosDefManifold
 n, t = 800, 6000
 etol = 1e-9
 Z=randn(t, n)
 Y=Z'*Z
 Yi=inv(Y)

 A, B=choInv!(copy(Y))
 norm(A*A'-Y)/√n < etol ? println(" ⭐ ") : println(" ⛔ ")
 norm(B*B'-Yi)/√n < etol ? println(" ⭐ ") : println(" ⛔ ")

 A, D, B=choInv!(copy(Y); kind=:LDLt)
 norm(Y-A*D*A')/√n < etol ? println(" ⭐ ") : println(" ⛔ ")
 norm(Yi-B*inv(D)*B')/√n < etol ? println(" ⭐ ") : println(" ⛔ ")

 # repeat the test for complex matrices
 Z=randn(ComplexF64, t, n)
 Y=Z'*Z
 Yi=inv(Y)

 A, B=choInv!(copy(Y))
 norm(A*A'-Y)/√n < etol ? println(" ⭐ ") : println(" ⛔ ")
 norm(B*B'-Yi)/√n < etol ? println(" ⭐ ") : println(" ⛔ ")

 A, D, B=choInv!(copy(Y); kind=:LDLt)
 norm(Y-A*D*A')/√n < etol ? println(" ⭐ ") : println(" ⛔ ")
 norm(Yi-B*inv(D)*B')/√n < etol ? println(" ⭐ ") : println(" ⛔ ")
    source
    PosDefManifold.choInv!Function
choInv!(P::AbstractArray{T};
        kind::Symbol = :LLt, tol::Real = √eps(T)) where T<:RealOrComplex

The same as choInv, but it destroys the input matrix. This function does not require copying the input matrix, thus it is slightly faster.

    source
diff --git a/docs/build/riemannianGeometry/index.html b/docs/build/riemannianGeometry/index.html
index 6412f6e..e8cb7e3 100644
--- a/docs/build/riemannianGeometry/index.html
+++ b/docs/build/riemannianGeometry/index.html

    riemannianGeometry.jl

    This is the fundamental unit of PosDefManifold. It contains functions for manipulating points in the Riemannian manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. In Julia those are Hermitian matrices, see typecasting matrices.

The functions are divided into six categories:

Category                      Output
1. Geodesic equations         interpolation, extrapolation, weighted mean of two matrices, ...
2. Distances                  length of geodesics
3. Graphs and Laplacians      inter-distance matrices, spectral embedding, eigenmaps, ...
4. Means                      mid-points of geodesics, Fréchet means of several points, midrange, ...
5. Tangent Space operations   maps from the manifold to the tangent space and vice versa, parallel transport, ...
6. Procrustes problems        data matching, transfer learning (domain adaptation), ...

    Geodesic equations

Function   Description
geodesic   Geodesic equations (weighted mean of two positive definite matrices) for any metric

    PosDefManifold.geodesicFunction
    (1) geodesic(metric::Metric, P::ℍ{T}, Q::ℍ{T}, a::Real) where T<:RealOrComplex
(2) geodesic(metric::Metric, D::𝔻{S}, E::𝔻{S}, a::Real) where S<:Real

(1) Move along the geodesic from point $P$ to point $Q$ (two positive definite matrices) with arc length $0≤a≤1$, using the specified metric, of type Metric::Enumerated type.

    For all metrics,

    • with $a=0$ we stay at $P$,
    • with $a=1$ we move up to $Q$,
    • with $a=1/2$ we move to the mid-point of $P$ and $Q$ (mean).

    Using the Fisher metric, argument $a$ can be any real number, for instance:

    • with $0<a<1$ we move toward $Q$ (attraction),
    • with $a>1$ we move over and beyond $Q$ (extrapolation),
• with $a<0$ we move back away from $Q$ (repulsion).

    $P$ and $Q$ must be flagged by julia as Hermitian. See typecasting matrices.

The Fisher geodesic move is computed by the Cholesky-Schur algorithm given in Eq. 4.2 by Iannazzo (2016)🎓. If $Q=I$, the Fisher geodesic move is simply $P^a$ (no need to call this function).

    Nota Bene

For the logdet zero and Jeffrey metrics no closed-form expression for the geodesic is available to the best of the authors' knowledge, so in these cases the geodesic is found as the weighted mean using the mean function. For the Von Neumann metric not even an expression for the mean is available, so in this case the geodesic is not provided and a warning is printed.

    (2) Like in (1), but for two real positive definite diagonal matrices $D$ and $E$.

    Maths

    For points $P$, $Q$ and arclength $a$, letting $b=1-a$, the geodesic equations for the supported metrics are:

Metric         geodesic equation
Euclidean      $bP + aQ$
invEuclidean   $\big(bP^{-1} + aQ^{-1}\big)^{-1}$
ChoEuclidean   $TT^*$, where $T=bL_P + aL_Q$
logEuclidean   $\text{exp}\big(b\hspace{2pt}\text{log}(P) + a\hspace{2pt}\text{log}(Q)\big)$
logCholesky    $TT^*$, where $T=S_P+a(S_Q-S_P)+D_P\hspace{2pt}\text{exp}\big(a(\text{log}D_Q-\text{log}D_P)\big)$
Fisher         $P^{1/2} \big(P^{-1/2} Q P^{-1/2}\big)^a P^{1/2}$
logdet0        uses the weighted mean algorithm logdet0Mean
Jeffrey        uses the weighted mean function mean
VonNeumann     N.A.
Wasserstein    $b^2P+a^2Q +ab\big[(PQ)^{1/2} +(QP)^{1/2}\big]$

    legend: $L_X$, $S_X$ and $D_X$ are the Cholesky lower triangle of $X$, its strictly lower triangular part and diagonal part, respectively (hence, $S_X+D_X=L_X$, $L_XL_X^*=X$).
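The Fisher row, for instance, can be checked against the pow function; a sketch (illustrative only, not part of the API):

 using LinearAlgebra, PosDefManifold
 P=randP(5)
 Q=randP(5)
 a=0.3
 G=geodesic(Fisher, P, Q, a)
 R, Ri=pow(P, 0.5, -0.5)             # P^{1/2} and P^{-1/2}
 F=ℍ(R*pow(ℍ(Ri*Q*Ri), a)*R)         # the closed form in the table above
 F ≈ G ? println(" ⭐ ") : println(" ⛔ ")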

    See also: mean.

    Examples

    using PosDefManifold
     P=randP(10)
     Q=randP(10)
     # Wasserstein mean
     M=geodesic(Wasserstein, P, Q, 0.5)
 # extrapolate using the Fisher metric
 E=geodesic(Fisher, P, Q, 2)
    source

    Distances

Function                 Description
distanceSqr, distance²   Squared distance between positive definite matrices
distance                 Distance between positive definite matrices


    PosDefManifold.distanceSqrFunction
    (1) distanceSqr(metric::Metric, P::ℍ{T}) where T<:RealOrComplex
     (2) distanceSqr(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex
     (3) distanceSqr(metric::Metric, D::𝔻{S}) where S<:Real
(4) distanceSqr(metric::Metric, D::𝔻{S}, E::𝔻{S}) where S<:Real

    alias: distance²

(1) Return $δ^2(P, I)$, the square of the distance (or divergence) of positive definite matrix $P$ from the identity matrix. See distance from the origin.

    (2) Return $δ^2(P, Q)$, the square of the distance (or divergence) between two positive definite matrices $P$ and $Q$. See distance.

    In both cases the distance function $δ$ is induced by the argument metric of type Metric::Enumerated type.

    $P$ in (1) and $P$, $Q$ in (2) must be flagged by julia as Hermitian. See typecasting matrices.

    (3) and (4) are specialized methods of (1) and (2), respectively, for real positive definite Diagonal matrices. See ℍVector type and 𝔻Vector type.

    Maths

    For point $P$ the squared distances from the identity for the supported metrics are:

Metric         Squared Distance from the identity
Euclidean      $∥P-I∥^2$
invEuclidean   $∥P^{-1}-I∥^2$
ChoEuclidean   $∥L_P-I∥^2$
logEuclidean   $∥\textrm{log}P∥^2$
logCholesky    $∥S_P∥^2+∥\textrm{log}D_P∥^2$
Fisher         $∥\textrm{log}P∥^2$
logdet0        $\textrm{logdet}\frac{1}{2}(P+I) - \frac{1}{2}\textrm{logdet}(P)$
Jeffrey        $\frac{1}{2}\textrm{tr}(P+P^{-1})-n$
VonNeumann     $\frac{1}{2}\textrm{tr}(P\textrm{log}P-\textrm{log}P)$
Wasserstein    $\textrm{tr}(P+I) -2\textrm{tr}(P^{1/2})$

    For points $P$ and $Q$ their squared distances for the supported metrics are:

Metric         Squared Distance
Euclidean      $∥P-Q∥^2$
invEuclidean   $∥P^{-1}-Q^{-1}∥^2$
ChoEuclidean   $∥ L_P - L_Q ∥^2$
logEuclidean   $∥\textrm{log}P-\textrm{log}Q∥^2$
logCholesky    $∥S_P-S_Q∥^2+∥\textrm{log}D_P-\textrm{log}D_Q∥^2$
Fisher         $∥\textrm{log}(P^{-1/2}QP^{-1/2})∥^2$
logdet0        $\textrm{logdet}\frac{1}{2}(P+Q) - \frac{1}{2}\textrm{logdet}(PQ)$
Jeffrey        $\frac{1}{2}\textrm{tr}(Q^{-1}P+P^{-1}Q)-n$
VonNeumann     $\frac{1}{2}\textrm{tr}(P\textrm{log}P-P\textrm{log}Q+Q\textrm{log}Q-Q\textrm{log}P)$
Wasserstein    $\textrm{tr}(P+Q) -2\textrm{tr}(P^{1/2}QP^{1/2})^{1/2}$

    legend: $L_X$, $S_X$ and $D_X$ are the Cholesky lower triangle of $X$, its strictly lower triangular part and diagonal part, respectively (hence, $S_X+D_X=L_X$, $L_XL_X^*=X$).
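As a quick numerical check of the Euclidean and logEuclidean rows (a sketch; the norms are Frobenius norms):

 using LinearAlgebra, PosDefManifold
 P=randP(10)
 Q=randP(10)
 distanceSqr(Euclidean, P, Q) ≈ norm(P-Q)^2 ? println(" ⭐ ") : println(" ⛔ ")
 distanceSqr(logEuclidean, P, Q) ≈ norm(log(P)-log(Q))^2 ? println(" ⭐ ") : println(" ⛔ ")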

    See also: distanceSqrMat.

    Examples (1)

    using PosDefManifold
     P=randP(10)
     d=distanceSqr(Wasserstein, P)
     e=distanceSqr(Fisher, P)
     metric=Metric(Int(logdet0)) # or metric=logdet0
     s=string(metric) # check what is the current metric
 f=distance²(metric, P) # using the alias distance²

    Examples (2)

    using PosDefManifold
     P=randP(10)
     Q=randP(10)
     d=distanceSqr(logEuclidean, P, Q)
 e=distance²(Jeffrey, P, Q)
    source
    PosDefManifold.distanceFunction
    (1) distance(metric::Metric, P::ℍ{T}) where T<:RealOrComplex
     (2) distance(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex
     (3) distance(metric::Metric, D::𝔻{S}) where S<:Real
(4) distance(metric::Metric, D::𝔻{S}, E::𝔻{S}) where S<:Real

    (1) Return $δ(P, I)$, the distance between positive definite matrix $P$ and the identity matrix.

    (2) Return $δ(P, Q)$, the distance between positive definite matrices $P$ and $Q$.

    (3) and (4) are specialized methods of (1) and (2), respectively, for real positive definite Diagonal matrices.

This is the square root of distanceSqr and is invoked with the same syntax.

    See also: distanceMat.
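For instance, since this is the square root of distanceSqr, the following minimal sketch should print a star:

 using PosDefManifold
 P=randP(10)
 Q=randP(10)
 d=distance(Fisher, P, Q)
 d ≈ √distanceSqr(Fisher, P, Q) ? println(" ⭐ ") : println(" ⛔ ")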

    source

    Graphs and Laplacians

    FunctionDescription
    distanceSqrMat, distance²MatLower triangular matrix of all squared inter-distances
    distanceMatLower triangular matrix of all inter-distances
    laplacianLaplacian of a squared inter-distances matrix
    laplacianEigenMaps, laplacianEMEigen maps (eigenvectors) of a Laplacian
    spectralEmbedding, spEmbSpectral Embedding (the above functions run in series)


    PosDefManifold.distanceSqrMatFunction
        (1) distanceSqrMat(metric::Metric, 𝐏::ℍVector;
         <⏩=true>)
     
         (2) distanceSqrMat(type::Type{T}, metric::Metric, 𝐏::ℍVector;
    <⏩=true>) where T<:AbstractFloat

    alias: distance²Mat

Given a 1d array $𝐏$ of $k$ positive definite matrices ${P_1,...,P_k}$ of ℍVector type, create the $k⋅k$ real LowerTriangular matrix comprising elements $δ^2(P_i, P_j)\textrm{, for all }i≥j$.

    This is the lower triangular matrix holding all squared inter-distances (zero on diagonal), using the specified metric, of type Metric::Enumerated type, giving rise to distance function $δ$. See distanceSqr.

    Only the lower triangular part is computed in order to optimize memory use.

    By default, the result matrix is of type Float32. The type can be changed to another real type using method (2).

    <optional keyword arguments>:

    • if ⏩=true (default) the computation of inter-distances is multi-threaded.
    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    See: distance.

    See also: laplacian, laplacianEigenMaps, spectralEmbedding.

    Examples

    using PosDefManifold
     # Generate a set of 8 random 10x10 SPD matrices
     Pset=randP(10, 8) # or, using unicode: 𝐏=randP(10, 8)
     # Compute the squared inter-distance matrix according to the log Euclidean metric.
 Δ²=distanceSqrMat(logEuclidean, Pset)
 
 # Return a matrix of type Float64
     Δ²64=distanceSqrMat(Float64, logEuclidean, Pset)
     
     # Get the full matrix of inter-distances
 fullΔ²=Hermitian(Δ², :L)
    source
    PosDefManifold.distanceMatFunction
        (1) distanceMat(metric::Metric, 𝐏::ℍVector;
         <⏩=true>)
     
         (2) distanceMat(type::Type{T}, metric::Metric, 𝐏::ℍVector;
    <⏩=true>) where T<:AbstractFloat

Given a 1d array $𝐏$ of $k$ positive definite matrices ${P_1,...,P_k}$ of ℍVector type, create the $k⋅k$ real LowerTriangular matrix comprising elements $δ(P_i, P_j)\textrm{, for all }i≥j$.

    This is the lower triangular matrix holding all inter-distances (zero on diagonal), using the specified metric, of type Metric::Enumerated type, giving rise to distance $δ$. See distance.

    Only the lower triangular part is computed in order to optimize memory use.

    By default, the result matrix is of type Float32. The type can be changed to another real type using method (2).

    The elements of this matrix are the square root of distanceSqrMat.

    <optional keyword arguments>:

    • if ⏩=true the computation of inter-distances is multi-threaded.
    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    See: distance.

    Examples

    using PosDefManifold
     # Generate a set of 4 random 10x10 SPD matrices
     Pset=randP(10, 4) # or, using unicode: 𝐏=randP(10, 4)
     Δ=distanceMat(Fisher, Pset)
 # Return a matrix of type Float64
     Δ64=distanceMat(Float64, Fisher, Pset)
     
     # Get the full matrix of inter-distances
 fullΔ=Hermitian(Δ, :L)
    source
    PosDefManifold.laplacianFunction
    laplacian(Δ²::𝕃{S}, epsilon::Real=0;
          <densityInvariant=false>) where S<:Real

    Given a LowerTriangular matrix of squared inter-distances $Δ^2$, return the lower triangular part of the so-called normalized Laplacian or density-invariant normalized Laplacian, which in both cases is a symmetric Laplacian. The elements of the Laplacian are of the same type as the elements of $Δ^2$. The result is a LowerTriangular matrix.

    The definition of Laplacian given by Lafon (2004)🎓 is implemented:

First, a Gaussian radial basis function, known as the Gaussian kernel or heat kernel, is applied to all elements of $Δ^2$, as follows:

    $W_{ij} = exp\bigg(\frac{\displaystyle{-Δ^2_{ij}}}{\displaystyle{2ε}}\bigg)$,

    where $ε$ is the bandwidth of the kernel.

    If <optional keyword argument> densityInvariant=true is used, then the density-invariant transformation is applied

$W \leftarrow E^{-1}WE^{-1}$

    where $E$ is the diagonal matrix holding on the main diagonal the sum of the rows (or columns) of $W$.

    Finally, the normalized Laplacian (density-invariant or not) is defined as

    $Ω = D^{-1/2}WD^{-1/2}$,

    where $D$ is the diagonal matrix holding on the main diagonal the sum of the rows (or columns) of $W$.
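Putting these steps together, the Laplacian can also be formed by hand and compared with the output of this function; a sketch, assuming the function implements exactly the formulas above (no density-invariant step; the bandwidth is passed explicitly to both):

 using LinearAlgebra, PosDefManifold
 Pset=randP(10, 4)
 Δ²=distanceSqrMat(Float64, Fisher, Pset)
 ε=1.0                                        # an arbitrary bandwidth
 W=exp.(-Hermitian(Δ², :L)./(2ε))             # Gaussian (heat) kernel on the full matrix
 Dmh=Diagonal(1.0./sqrt.(vec(sum(W, dims=2)))) # D^{-1/2}
 Ω=Dmh*W*Dmh                                  # normalized Laplacian (full, symmetric)
 𝕃(Ω) ≈ laplacian(Δ², ε) ? println(" ⭐ ") : println(" ⛔ ")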

    If you do not provide argument epsilon, the bandwidth $ε$ is set to the median of the elements of squared distance matrix $Δ^2_{ij}$. Another educated guess is the dimension of the original data, that is, the data that has been used to compute the squared distance matrix. For positive definite matrices this is $n(n-1)/2$, where $n$ is the dimension of the matrices. Still another is the dimension of the ensuing spectralEmbedding space. Keep in mind that by tuning the epsilon parameter (which must be positive) you can control both the rate of compression of the embedding space and the spread of points in the embedding space. See Coifman et al. (2008)🎓 for a discussion on $ε$.

    Nota Bene

    The Laplacian as here defined can be requested for any input matrix of squared inter-distances, for example, those obtained on scalars or on vectors using appropriate metrics. In any case, only the lower triangular part of the Laplacian is taken as input. See typecasting matrices.

    See also: distanceSqrMat, laplacianEigenMaps, spectralEmbedding.

    Examples

using Statistics, PosDefManifold
     # Generate a set of 4 random 10x10 SPD matrices
     Pset=randP(10, 4) # or, using unicode: 𝐏=randP(10, 4)
     Δ²=distanceSqrMat(Fisher, Pset)
 Ω=laplacian(Δ²)
 
 # Use a different bandwidth ε
 r=size(Δ², 1)
     myεFactor=0.1
     med=Statistics.median([Δ²[i, j] for j=1:r-1 for i=j+1:r])
     ε=2*myεFactor*med
 Ω=laplacian(Δ², ε; densityInvariant=true)
    source
    PosDefManifold.laplacianEigenMapsFunction
        laplacianEigenMaps(Ω::𝕃{S}, q::Int;
         <
         tol::Real=0.,
         maxiter::Int=300,
    verbose=false >) where S<:Real

    alias: laplacianEM

Given the lower triangular part of a Laplacian $Ω$ (see laplacian), return the eigen maps in $q$ dimensions, i.e., the $q$ eigenvectors of the Laplacian associated with the largest $q$ eigenvalues, excluding the first (which is always equal to 1.0). The eigenvectors are of the same type as $Ω$. They are all divided element-wise by the first eigenvector (see Lafon, 2004 🎓).

    The eigenvectors of the Laplacian are computed by the power iterations+modified Gram-Schmidt method (see powerIterations), allowing the execution of this function for Laplacian matrices of very large size.

    Return the 4-tuple $(Λ, U, iterations, convergence)$, where:

    • $Λ$ is a $q⋅q$ diagonal matrix holding on diagonal the eigenvalues corresponding to the $q$ dimensions of the Laplacian eigen maps,
• $U$ holds in columns the $q$ eigenvectors, i.e., the $q$ coordinates of the points in the embedding space,
    • $iterations$ is the number of iterations executed by the power method,
    • $convergence$ is the convergence attained by the power method.

Using the notion of Laplacian, spectral embedding seeks a low-dimension representation of the data emphasizing local neighborhood information while neglecting long-distance information. The embedding is non-linear, however the embedding space is Euclidean. The eigenvectors in $U$ hold the coordinates of the points in the embedding space (typically two- or three-dimensional for plotting, or more for clustering). Spectral embedding is used for plotting data in low dimension, clustering, imaging, classification, following trajectories over time or other dimensions, and much more. For examples of applications see Rodrigues et al. (2018) 🎓 and references therein.

    Arguments:

    • $Ω$ is a real LowerTriangular normalized Laplacian obtained by the laplacian function,
    • $q$ is the dimension of the Laplacian eigen maps;
    • The following are <optional keyword arguments> for the power iterations:
      • tol is the tolerance for convergence (see below),
      • maxiter is the maximum number of iterations allowed,
      • if verbose is true, the convergence at all iterations will be printed.
    Nota Bene

    The maximum value of $q$ that can be requested is $n-1$, where $n$ is the size of the Laplacian. In general, $q=2$ or $q=3$ is requested.

    $tol$ defaults to the square root of Base.eps of the (real) type of $Ω$. This corresponds to requiring equality for the convergence criterion over two successive power iterations of about half of the significant digits.

    See also: distanceSqrMat, laplacian, spectralEmbedding.

    Examples

    using PosDefManifold
     # Generate a set of 4 random 10x10 SPD matrices
     Pset=randP(10, 4)
     Δ²=distanceSqrMat(Fisher, Pset)
     Ω=laplacian(Δ²)
     evalues, maps, iterations, convergence=laplacianEM(Ω, 2)
     evalues, maps, iterations, convergence=laplacianEM(Ω, 2; verbose=true)
 evalues, maps, iterations, convergence=laplacianEM(Ω, 2; verbose=true, maxiter=500)
    source
    PosDefManifold.spectralEmbeddingFunction
        (1) spectralEmbedding(metric::Metric, 𝐏::ℍVector, q::Int, epsilon::Real=0;
         <
         tol::Real=0.,
         maxiter::Int=300,
     densityInvariant=false,
     verbose=false,
         ⏩=true >)
     
         (2) spectralEmbedding(type::Type{T}, metric::Metric, 𝐏::ℍVector, q::Int, epsilon::Real=0;
    < same optional keyword arguments as in (1) >) where T<:Real

    alias: spEmb

    Given a 1d array $𝐏$ of $k$ positive definite matrices ${P_1,...,P_k}$ (real or complex), compute its eigen maps in $q$ dimensions.

This function runs, one after the other, the functions distanceSqrMat (compute the squared inter-distance matrix), laplacian (compute the normalized Laplacian) and laplacianEigenMaps (get the eigen maps), as sketched below.

    By default all computations above are done with Float32 precision. Another real type can be requested using method (2), where the type argument is defined.
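Under this description, the all-in-one call and the explicit pipeline compute the same embedding, up to numerical convergence and the sign of the eigenvectors (a sketch):

 using PosDefManifold
 Pset=randP(10, 6)
 q=2
 # all-in-one
 evalues, maps, iterations, convergence=spEmb(Fisher, Pset, q)
 # explicit pipeline
 Δ²=distanceSqrMat(Fisher, Pset)
 Ω=laplacian(Δ²)
 evalues2, maps2, iterations2, convergence2=laplacianEM(Ω, q)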

    Return the 4-tuple (Λ, U, iterations, convergence), where:

    • $Λ$ is a $q⋅q$ diagonal matrix holding on diagonal the eigenvalues corresponding to the $q$ dimensions of the Laplacian eigen maps,
• $U$ holds in columns the $q$ eigenvectors, i.e., the $q$ coordinates of the points in the embedding space,
    • $iterations$ is the number of iterations executed by the power method,
    • $convergence$ is the convergence attained by the power method.

    Arguments:

    • metric is the metric of type Metric::Enumerated type used for computing the inter-distances,
    • $𝐏$ is a 1d array of $k$ positive matrices of ℍVector type,
    • $q$ is the dimension of the Laplacian eigen maps,
    • $epsilon$ is the bandwidth of the Laplacian (see laplacian);
• The following <optional keyword argument> applies for computing the inter-distances:
      • if ⏩=true (default) the computation of inter-distances is multi-threaded.
• The following <optional keyword argument> applies to the computation of the Laplacian by the laplacian function:
      • if densityInvariant=true the density-invariant Laplacian is computed (see laplacian).
    • The following are <optional keyword arguments> for the power method iterative algorithm invoked by laplacianEigenMaps:
      • tol is the tolerance for convergence of the power method (see below),
      • maxiter is the maximum number of iterations allowed for the power method,
      • if verbose=true the convergence at all iterations will be printed;
    Nota Bene

$tol$ defaults to the square root of Base.eps of the Float32 type (1) or of the type passed as argument (2). This corresponds to requiring equality for the convergence criterion over two successive power iterations of about half of the significant digits.

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    See also: distanceSqrMat, laplacian, laplacianEigenMaps.

    Examples

    using PosDefManifold
     # Generate a set of k random 10x10 SPD matrices
     k=10
     Pset=randP(10, k)
 evalues, maps, iter, conv=spEmb(Fisher, Pset, 2)
 
 using Plots
     # try a different value of epsilon
     evalues, maps, iter, conv=spEmb(Fisher, Pset, k-1, 0.01; maxiter=1000)
     plot(maps[:, 1], maps[:, 2], seriestype=:scatter, title="Spectral Embedding", label="Pset")
 # see the example in the laplacian function for more on this
    source

    Means

Function                 Description
mean                     Weighted Fréchet mean (wFm) of a scalar or matrix set using any metric
means                    As above for several sets at once
generalizedMean          Generalized wFm of a matrix set
geometricMean, gMean     wFm of a matrix set minimizing the dispersion according to the Fisher metric (iterative)
geometricpMean, gpMean   Robust wFm of a matrix set minimizing the p-dispersion according to the Fisher metric (iterative)
logdet0Mean, ld0Mean     wFm of a matrix set according to the logdet0 metric (iterative)
wasMean                  wFm of a matrix set according to the Wasserstein metric (iterative)
powerMean                Power wFm of a matrix set (iterative)
inductiveMean, indMean   Recursive Fréchet mean of a matrix set (constructive)
midrange                 Geometric midrange of two matrices

    Statistics.meanFunction
        (1) mean(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex
     
         (2) mean(metric::Metric, D::𝔻{T}, E::𝔻{T}) where T<:Real
     
     (3) mean(metric::Metric, 𝐏::ℍVector;
     <
     w::Vector=[],
     ✓w=true,
     init=nothing,
     tol::Real=0.,
     verbose=false,
             ⏩=true >)
     
         (4) mean(metric::Metric, 𝐃::𝔻Vector;
    < same optional keyword arguments as in (3) >)

    (1) Mean of two positive definite matrices, passed in arbitrary order as arguments $P$ and $Q$, using the specified metric of type Metric::Enumerated type. The order is arbitrary as all metrics implemented in PosDefManifold are symmetric. This is the midpoint of the geodesic. For the weighted mean of two positive definite matrices use instead the geodesic function. $P$ and $Q$ must be flagged as Hermitian. See typecasting matrices.

    (2) Like in (1), but for two real diagonal positive definite matrices $D$ and $E$.

(3) Fréchet mean of a 1d array $𝐏$ of $k$ positive definite matrices $𝐏={P_1,...,P_k}$ of ℍVector type, with optional non-negative real weights $w={w_1,...,w_k}$ and using the specified metric as in (1).

(4) Fréchet mean of a 1d array $𝐃$ of $k$ positive definite matrices $𝐃={D_1,...,D_k}$ of 𝔻Vector type, with optional non-negative real weights $w={w_1,...,w_k}$ and using the specified metric as in (1).

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted mean.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.

Adopting the Fisher, logdet0 and Wasserstein metric in (3) and the logdet0 metric in (4), the mean is computed by means of an iterative algorithm. A particular initialization for these algorithms can be provided passing an Hermitian matrix as <optional keyword argument> init. The convergence for these algorithms is required with a tolerance given by <optional keyword argument> tol. If verbose=true, the convergence attained at each iteration is printed. Other information, such as whether the algorithm has diverged, is also printed. For more options in computing these means call directly functions geometricMean, logdet0Mean and wasMean, which are called hereby. For the meaning of the tol default value see the documentation of these functions. See also the robust mean function geometricpMean, which cannot be called from here. Notice that arguments init and tol have an effect only for the aforementioned metrics in methods (3) and (4).

    For (3) and (4), if ⏩=true (default), the computation of the mean is multi-threaded for all metrics.

    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    Math

    The Fréchet mean of a set of $k$ matrices ${P_1, P_2,..., P_k}$ weighted by ${w_1, w_2,..., w_k}:\sum_{i=1}^{k}w_i=1$ for the supported metrics are, for those with closed form expression:

Metric         weighted Fréchet mean
Euclidean      $\sum_{i=1}^{k}w_i P_i$
invEuclidean   $\big(\sum_{i=1}^{k}w_i P_i^{-1}\big)^{-1}$
ChoEuclidean   $TT^*$, where $T=\sum_{i=1}^{k}w_iL_{P_i}$
logEuclidean   $\textrm{exp}\big(\sum_{i=1}^{k}w_i\hspace{1pt} \textrm{log}P_i \big)$
logCholesky    $TT^*$, where $T=\sum_{i=1}^{k}(w_iS_{P_i})+\sum_{i=1}^{k}(w_i\textrm{log}D_{P_i})$
Jeffrey        $A^{1/2}\big(A^{-1/2}HA^{-1/2}\big)^{1/2}A^{1/2}$

    and for those that are found by an iterative algorithm and that verify an equation:

Metric        equation verified by the weighted Fréchet mean
Fisher        $\sum_{i=1}^{k}w_i\textrm{log}\big(G^{-1/2} P_i G^{-1/2}\big)=0$
logdet0       $\sum_{i=1}^{k}w_i\big(\frac{1}{2}P_i+\frac{1}{2}G\big)^{-1}=G^{-1}$
VonNeumann    N.A.
Wasserstein   $G=\sum_{i=1}^{k}w_i\big( G^{1/2} P_i G^{1/2}\big)^{1/2}$

    legend: $L_X$, $S_X$ and $D_X$ are the Cholesky lower triangle of $X$, its strictly lower triangular part and diagonal part, respectively (hence, $S_X+D_X=L_X$, $L_XL_X^*=X$). $A$ and $H$ are the weighted arithmetic and weighted harmonic mean, respectively.
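The invEuclidean (harmonic) row, for instance, is easy to verify in the unweighted case, where $w_i=1/k$ (a sketch):

 using LinearAlgebra, PosDefManifold
 Pset=randP(5, 4)                     # k=4 matrices
 G=mean(invEuclidean, Pset)           # unweighted Fréchet mean
 H=inv(sum(inv, Pset)/length(Pset))   # (k⁻¹ Σᵢ Pᵢ⁻¹)⁻¹, the harmonic mean
 G ≈ H ? println(" ⭐ ") : println(" ⛔ ")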

    See: geodesic, mean, Fréchet mean.

    Examples

    using LinearAlgebra, Statistics, PosDefManifold
    +    < same optional keyword arguments as in (3) >)

    (1) Mean of two positive definite matrices, passed in arbitrary order as arguments $P$ and $Q$, using the specified metric of type Metric::Enumerated type. The order is arbitrary as all metrics implemented in PosDefManifold are symmetric. This is the midpoint of the geodesic. For the weighted mean of two positive definite matrices use instead the geodesic function. $P$ and $Q$ must be flagged as Hermitian. See typecasting matrices.

    (2) Like in (1), but for two real diagonal positive definite matrices $D$ and $E$.

    (3) Fréchet mean of a 1d array $𝐏$ of $k$ positive definite matrices $𝐏={P_1,...,P_k}$ of ℍVector type, with optional non-negative real weights $w={w_1,...,w_k}$, using the specified metric as in (1).

    (4) Fréchet mean of a 1d array $𝐃$ of $k$ real positive definite diagonal matrices $𝐃={D_1,...,D_k}$ of 𝔻Vector type, with optional non-negative real weights $w={w_1,...,w_k}$, using the specified metric as in (1).

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted mean.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.

    Adopting the Fisher, logdet0 and Wasserstein metrics in (3) and the logdet0 metric in (4), the mean is computed by means of an iterative algorithm. A particular initialization for these algorithms can be provided by passing an Hermitian matrix as <optional keyword argument> init. The convergence for these algorithms is required with a tolerance given by <optional keyword argument> tol. If verbose=true, the convergence attained at each iteration is printed. Other information, such as whether the algorithm has diverged, is also printed. For more options in computing these means call directly functions geometricMean, logdet0Mean and wasMean, which are called hereby. For the meaning of the tol default value see the documentation of these functions. See also the robust mean function geometricpMean, which cannot be called from here. Notice that arguments init and tol have an effect only for the aforementioned metrics in methods (3) and (4).
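    A minimal sketch of these options (the weights and the initialization below are arbitrary illustration values):

    using LinearAlgebra, Statistics, PosDefManifold
    Pset=randP(3, 4) # 4 random 3x3 SPD matrices
    w=[1., 2., 3., 4.] # non-negative weights
    G=mean(Fisher, Pset; w=w) # weighted mean, weights normalized internally
    wn=w./sum(w) # normalize the weights once...
    G2=mean(Fisher, Pset; w=wn, ✓w=false) # ...and reuse them as they are
    G3=mean(Fisher, Pset; init=mean(logEuclidean, Pset), verbose=true)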

    For (3) and (4), if ⏩=true (default), the computation of the mean is multi-threaded for all metrics.

    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    Math

    The weighted Fréchet mean of a set of $k$ matrices ${P_1, P_2,..., P_k}$ with weights ${w_1, w_2,..., w_k}$, $\sum_{i=1}^{k}w_i=1$, is given below for the supported metrics. For those with a closed-form expression:

    Metric        weighted Fréchet mean
    Euclidean     $\sum_{i=1}^{k}w_i P_i$
    invEuclidean  $\big(\sum_{i=1}^{k}w_i P_i^{-1}\big)^{-1}$
    ChoEuclidean  $TT^*$, where $T=\sum_{i=1}^{k}w_i L_{P_i}$
    logEuclidean  $\textrm{exp}\big(\sum_{i=1}^{k}w_i\hspace{1pt}\textrm{log}P_i\big)$
    logCholesky   $TT^*$, where $T=\sum_{i=1}^{k}(w_i S_{P_i})+\sum_{i=1}^{k}(w_i\textrm{log}D_{P_i})$
    Jeffrey       $A^{1/2}\big(A^{-1/2}HA^{-1/2}\big)^{1/2}A^{1/2}$

    and for those that are found by an iterative algorithm and that verify an equation:

    Metric        equation verified by the weighted Fréchet mean
    Fisher        $\sum_{i=1}^{k}w_i\textrm{log}\big(G^{-1/2} P_i G^{-1/2}\big)=0$
    logdet0       $\sum_{i=1}^{k}w_i\big(\frac{1}{2}P_i+\frac{1}{2}G\big)^{-1}=G^{-1}$
    VonNeumann    N.A.
    Wasserstein   $G=\sum_{i=1}^{k}w_i\big(G^{1/2} P_i G^{1/2}\big)^{1/2}$

    legend: $L_X$, $S_X$ and $D_X$ are the Cholesky lower triangle of $X$, its strictly lower triangular part and diagonal part, respectively (hence, $S_X+D_X=L_X$, $L_XL_X^*=X$). $A$ and $H$ are the weighted arithmetic and weighted harmonic mean, respectively.

    See: geodesic, mean, Fréchet mean.

    Examples

    using LinearAlgebra, Statistics, PosDefManifold
     # Generate 2 random 3x3 SPD matrices
     P=randP(3)
     Q=randP(3)
     using BenchmarkTools
     Pset=randP(20, 160)
     @benchmark(mean(logEuclidean, Pset; ⏩=false)) # single-threaded
@benchmark(mean(logEuclidean, Pset)) # multi-threaded
    source
    mean(metric::Metric, ν::Vector{T}) where T<:RealOrComplex

    Mean of $k$ real or complex scalars, using the specified metric of type Metric::Enumerated type. Note that using the Fisher, logEuclidean and Jeffrey metrics, the resulting mean is the scalar geometric mean. Note also that the code of this method is in unit statistics.jl, while the code for all the others is in unit riemannianGeometry.jl.

    Examples

    using PosDefManifold
     # Generate 10 random numbers distributed as a chi-square with 2 df.
     ν=[randχ²(2) for i=1:10]
     arithmetic_mean=mean(Euclidean, ν)
     geometric_mean=mean(Fisher, ν)
     harmonic_mean=mean(invEuclidean, ν)
harmonic_mean<=geometric_mean<=arithmetic_mean # AGH inequality
    source
    PosDefManifold.means — Function
        (1) means(metric::Metric, 𝒫::ℍVector₂;
         <⏩=true>)
     
         (2) means(metric::Metric, 𝒟::𝔻Vector₂;
    <⏩=true>)

    (1) Given a 2d array $𝒫$ of positive definite matrices as an ℍVector₂ type, compute the Fréchet mean of each of the ℍVector objects held in $𝒫$, using the specified metric of type Metric::Enumerated type. Return the means in a vector of Hermitian matrices, that is, as an ℍVector type.

    (2) Given a 2d array $𝒟$ of real positive definite diagonal matrices as an 𝔻Vector₂ type, compute the Fréchet mean of each of the 𝔻Vector objects held in $𝒟$, using the specified metric of type Metric::Enumerated type. Return the means in a vector of Diagonal matrices, that is, as a 𝔻Vector type.

    The weighted Fréchet mean is not supported by this function.

    If ⏩=true (default) the computation of the means is multi-threaded.

    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    See also: mean.

    Examples

    using PosDefManifold
# Generate a set of 4 random 3x3 SPD matrices
Pset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)
# Generate a set of 40 random 3x3 SPD matrices
Qset=randP(3, 40) # or, using unicode: 𝐐=randP(3, 40)
# listing directly ℍVector objects
means(logEuclidean, ℍVector₂([Pset, Qset])) # or: means(logEuclidean, ℍVector₂([𝐏, 𝐐]))
# note that [𝐏, 𝐐] is actually a ℍVector₂ type object
     
# creating and passing an object of ℍVector₂ type
sets=ℍVector₂(undef, 2) # or: 𝒫=ℍVector₂(undef, 2)
sets[1]=Pset # or: 𝒫[1]=𝐏
sets[2]=Qset # or: 𝒫[2]=𝐐
means(logEuclidean, sets) # or: means(logEuclidean, 𝒫)
     
# going multi-threaded
     
# first, create 20 sets of 200 50x50 SPD matrices
sets=ℍVector₂([randP(50, 200) for i=1:20])
     
# How much computing time do we save?
# (example min time obtained with 4 threads & 4 BLAS threads)
using BenchmarkTools
     
# non multi-threaded, mean with closed-form solution
@benchmark(means(logEuclidean, sets; ⏩=false)) # (6.196 s)
     
# multi-threaded, mean with closed-form solution
@benchmark(means(logEuclidean, sets)) # (1.897 s)
     
sets=ℍVector₂([randP(10, 200) for i=1:10])
     
# non multi-threaded, mean with iterative solution
# wait a bit
@benchmark(means(Fisher, sets; ⏩=false)) # (4.672 s)
     
# multi-threaded, mean with iterative solution
@benchmark(means(Fisher, sets)) # (1.510 s)
    source
    PosDefManifold.generalizedMean — Function
        generalizedMean(𝐏::Union{ℍVector, 𝔻Vector}, p::Real;
         <
         w::Vector=[],
         ✓w=true,
    ⏩=true >)

    Given a 1d array $𝐏={P_1,...,P_k}$ of $k$ positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and optional non-negative real weights vector $w={w_1,...,w_k}$, return the weighted generalized means $G$ with real parameter $p$, that is,

    $G=\big(\sum_{i=1}^{k}w_iP_i^p\big)^{1/p}$.

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted generalized mean

    $G=\big(\sum_{i=1}^{k}P_i^p\big)^{1/p}$.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the weights each time.

    If <optional keyword argument> ⏩=true the computation of the generalized mean is multi-threaded.

    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    The following special cases for parameter $p$ are noteworthy:

    • with $p=1$ the generalized mean is the arithmetic mean (see the Euclidean metric);
    • with $p=-1$ it is the harmonic mean (see the inverse Euclidean metric);
    • at the limit of $p$ evaluated at zero from both sides it is the log Euclidean mean.

    Notice that when the matrices in 𝐏 all pair-wise commute, for instance if the matrices are diagonal, the generalized means coincide with the power means for any $p∈[-1, 1]$ and for $p=0.5$ the generalized mean coincides also with the Wasserstein mean. For this reason the generalized means are used as the default initialization of both the powerMean and wasMean algorithms.
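    For example, a quick numerical check of the $p=±1$ special cases (a sketch):

    using LinearAlgebra, Statistics, PosDefManifold
    Pset=randP(3, 4)
    norm(generalizedMean(Pset, 1) - mean(Euclidean, Pset)) # ≈ 0 (arithmetic mean)
    norm(generalizedMean(Pset, -1) - mean(invEuclidean, Pset)) # ≈ 0 (harmonic mean)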

    See: generalized means.

    See also: powerMean, wasMean, mean.

    Examples

    using LinearAlgebra, Statistics, PosDefManifold
     # Generate a set of 4 random 3x3 SPD matrices
     Pset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)
     
using BenchmarkTools
Pset=randP(20, 160)
@benchmark(generalizedMean(Pset, 0.5; ⏩=false)) # single-threaded
@benchmark(generalizedMean(Pset, 0.5)) # multi-threaded
    source
    PosDefManifold.geometricMean — Function
        geometricMean(𝐏::Union{ℍVector, 𝔻Vector};
         <
         w::Vector=[],
         ✓w=true,
         maxiter::Int=500,
         adaptStepSize::Bool=true,
         verbose=false,
    ⏩=true >)

    alias: gmean

    Given a 1d array $𝐏={P_1,...,P_k}$ of $k$ positive definite matrices of ℍVector type or diagonal matrices of 𝔻Vector type and optional non-negative real weights vector $w={w_1,...,w_k}$, return the 3-tuple $(G, iter, conv)$, where $G$ is the mean according to the Fisher metric and $iter$, $conv$ are the number of iterations and convergence attained by the algorithm. Mean $G$ is the unique positive definite matrix satisfying

    $\sum_{i=1}^{k}w_i\textrm{log}\big(G^{-1/2} P_i G^{-1/2}\big)=0.$

    For estimating it, this function implements the well-known gradient descent algorithm, but with an exponentially decaying step size $ς$, yielding iterations

    $G ←G^{1/2}\textrm{exp}\big(ς\sum_{i=1}^{k}w_i\textrm{log}(G^{-1/2} P_i G^{-1/2})\big)G^{1/2}.$
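    One such iteration, written out under the assumption of uniform weights and fixed step size $ς=1$ (a sketch, not the package's implementation):

    using LinearAlgebra, Statistics, PosDefManifold
    Pset=randP(3, 5)
    k=length(Pset); w=fill(1/k, k)
    G=mean(logEuclidean, Pset) # default initialization
    Gh=sqrt(G); Gih=inv(Gh) # G^½ and G^-½
    M=sum(w[i]*log(Hermitian(Gih*Pset[i]*Gih)) for i=1:k)
    G=Hermitian(Gh*exp(M)*Gh) # one gradient-descent step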

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted geometric mean.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.

    The following are more <optional keyword arguments>:

    • init is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used.
    • tol is the tolerance for the convergence (see below).
    • maxiter is the maximum number of iterations allowed.
    • if verbose=true, the convergence attained at each iteration and the step size $ς$ are printed. Also, a warning is printed if convergence is not attained.
    • if ⏩=true the iterations are multi-threaded (see below).
    • if adaptStepSize=false the step size $ς$ is fixed to 1 at all iterations.

    If the input is a 1d array of $k$ real positive definite diagonal matrices the solution is available in closed-form as the log Euclidean mean, hence the <optional keyword arguments> init, tol and verbose have no effect and the 3-tuple $(G, 1, 0)$ is returned. See the log Euclidean metric.

    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true, a warning is printed indicating the iteration when this happened.

    The exponentially decaying step size features a faster convergence rate as compared to the fixed step size $ς=1$ that is usually adopted. The decay rate is inversely proportional to maxiter, thus increase/decrease maxiter in order to set a slower/faster decay rate. maxiter should not be set too low, though.

    $tol$ defaults to the square root of Base.eps of the nearest real type of the data input $𝐏$. This corresponds to requiring the norm of the matrix equation to be satisfied, divided by the number of elements, to vanish for about half the significant digits.

    See: Fisher metric.

    See also: geometricpMean, powerMean, wasMean, logdet0Mean, mean.

    Examples

    using LinearAlgebra, PosDefManifold
     # Generate a set of 4 random 3x3 SPD matrices
     Pset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)
     
     push!(Pset, G)
     Λ, U, iter, conv=spectralEmbedding(Fisher, Pset, 2; verbose=true)
     plot(U[1:k, 1], U[1:k, 2], seriestype=:scatter, title="Spectral Embedding", label="Pset")
plot!(U[k+1:k+1, 1], U[k+1:k+1, 2], seriestype=:scatter, label="mean")
    source
    PosDefManifold.geometricpMean — Function
        geometricpMean(𝐏::ℍVector, p::Real=0.5;
         <
         w::Vector=[],
         ✓w=true,
         maxiter::Int=500,
         adaptStepSize=true,
         verbose=false,
    ⏩=true >)

    alias: gpmean

    Given a 1d array $𝐏={P_1,...,P_k}$ of $k$ positive definite matrices of ℍVector type, a real parameter $0<p\leq 1$ and optional non-negative real weights vector $w={w_1,...,w_k}$, return the 3-tuple $(G, iter, conv)$, where $G$ is the p-mean, i.e., the mean according to the Fisher metric minimizing the p-dispersion (see below), and $iter$, $conv$ are the number of iterations and convergence attained by the algorithm.

    This function implements the p-dispersion gradient descent algorithm with step-size $ς$ (to be published), yielding iterations

    $G ←G^{1/2}\textrm{exp}\big(ς\sum_{i=1}^{k}pδ^2(G, P_i)^{p-1}w_i\textrm{log}(G^{-1/2} P_i G^{-1/2})\big)G^{1/2}.$

    • if $p=1$ this yields the geometric mean (implemented specifically in geometricMean).
    • if $p=0.5$ this yields the geometric median (default).

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted geometric-p mean.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.

    The following are more <optional keyword arguments>:

    • init is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used.
    • tol is the tolerance for the convergence (see below).
    • maxiter is the maximum number of iterations allowed.
    • if adaptStepSize=true (default) the step size $ς$ for the gradient descent is adapted at each iteration (see below).
    • if verbose=true, the step size and convergence attained at each iteration are printed. Also, a warning is printed if convergence is not attained.
    • if ⏩=true the iterations are multi-threaded (see below).
    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    If the algorithm diverges and verbose is true, a warning is printed indicating the iteration when this happened. This algorithm may temporarily diverge and still reach convergence. Overall, while all other iterative algorithms implemented in PosDefManifold are very stable, this one is not.

    The smaller the parameter $p$ is, the slower and less likely the convergence is. If the algorithm does not converge, try increasing $p$, initializing the algorithm with the output of geometricMean and/or eliminating the outliers from the input set $𝐏$.

    If adaptStepSize is true (default) the step-size $ς$ is adapted at each iteration, otherwise a fixed step size $ς=1$ is used. Adapting the step size in general hastens convergence and improves the convergence behavior.

    $tol$ defaults to the square root of Base.eps of the nearest real type of the data input $𝐏$. This corresponds to requiring the norm of the matrix equation to be satisfied, divided by the number of elements, to vanish for about half the significant digits.

    See: Fisher metric.

    See also: geometricMean, powerMean, wasMean, logdet0Mean, mean.

    Examples

    using LinearAlgebra, PosDefManifold, Plots
     
# This example shows that this algorithm is more robust to outliers
     # as compared to the standard geometric mean algorithm
     using BenchmarkTools
     Pset=randP(20, 120)
@benchmark(geometricpMean(Pset; ⏩=false)) # single-threaded
@benchmark(geometricpMean(Pset)) # multi-threaded
    source
    PosDefManifold.logdet0Mean — Function
        logdet0Mean(𝐏::Union{ℍVector, 𝔻Vector};
         <
         w::Vector=[],
         ✓w=true,
         tol::Real=0.,
         maxiter::Int=500,
         verbose=false,
    ⏩=true >)

    alias: ld0Mean

    Given a 1d array $𝐏={P_1,...,P_k}$ of $k$ positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and optional non-negative real weights vector $w={w_1,...,w_k}$, return the 3-tuple $(G, iter, conv)$, where $G$ is the mean according to the logdet zero metric and $iter$, $conv$ are the number of iterations and convergence attained by the algorithm. Mean $G$ is the unique positive definite matrix satisfying

    $\sum_{i=1}^{k}w_i\big(\frac{1}{2}P_i+\frac{1}{2}G\big)^{-1}-G^{-1}=0$.

    For estimating it, this function implements the fixed-point iteration algorithm suggested by (Moakher, 2012, p315)🎓, yielding iterations

    $G ← \frac{1}{2}\big(\sum_{i=1}^{k}w_i(P_i+G)^{-1}\big)^{-1}$.
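    One step of this fixed-point iteration, assuming uniform weights (a sketch, not the package's implementation):

    using LinearAlgebra, Statistics, PosDefManifold
    Pset=randP(3, 5)
    k=length(Pset); w=fill(1/k, k)
    G=mean(logEuclidean, Pset) # initialization
    G=Hermitian(inv(sum(w[i]*inv(Pset[i]+G) for i=1:k))/2) # one fixed-point step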

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted logdet zero mean.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.

    The following are more <optional keyword arguments>:

    • init is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used.
    • tol is the tolerance for the convergence (see below).
    • maxiter is the maximum number of iterations allowed.
    • if verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.
    • if ⏩=true the iterations are multi-threaded (see below).
    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true, a warning is printed indicating the iteration when this happened.

    $tol$ defaults to 100 times the square root of Base.eps of the nearest real type of the data input $𝐏$. This corresponds to requiring the square root of the relative convergence criterion over two successive iterations to vanish for about half the significant digits minus 2.

    See: logdet zero metric, modified Bhattacharyya mean.

    See also: powerMean, wasMean, logdet0Mean, mean.

    Examples

    using LinearAlgebra, PosDefManifold
     # Generate a set of 4 random 3x3 SPD matrices
     Pset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)
     
     using BenchmarkTools
     Pset=randP(20, 120)
     @benchmark(logdet0Mean(Pset; ⏩=false)) # single-threaded
@benchmark(logdet0Mean(Pset)) # multi-threaded
    source
    PosDefManifold.wasMean — Function
        wasMean(𝐏::Union{ℍVector, 𝔻Vector};
         <
         w::Vector=[],
         ✓w=true,
         tol::Real=0.,
         maxiter::Int=500,
         verbose=false,
    ⏩=true >)

    Given a 1d array $𝐏={P_1,...,P_k}$ of $k$ positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and optional non-negative real weights vector $w={w_1,...,w_k}$, return the 3-tuple $(G, iter, conv)$, where $G$ is the mean according to the Wasserstein metric and $iter$, $conv$ are the number of iterations and convergence attained by the algorithm. Mean $G$ is the unique positive definite matrix satisfying

    $G=\sum_{i=1}^{k}w_i\big( G^{1/2} P_i G^{1/2}\big)^{1/2}$.

    For estimating it, this function implements the fixed-point iterative algorithm proposed by (Álvarez-Esteban et al., 2016)🎓:

    $G ← G^{-1/2}\big(\sum_{i=1}^{k} w_i(G^{1/2}P_i G^{1/2})^{1/2}\big)^2 G^{-1/2}$.
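    At convergence the mean satisfies the defining fixed-point equation up to the attained convergence; a quick check assuming uniform weights (a sketch):

    using LinearAlgebra, PosDefManifold
    Pset=randP(3, 5)
    G, iter, conv = wasMean(Pset)
    Gh=sqrt(G)
    F=sum(sqrt(Hermitian(Gh*P*Gh)) for P in Pset)/length(Pset)
    norm(G-F) # of the order of the attained convergence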

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted Wasserstein mean.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and they should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.

    The following are more <optional keyword arguments>:

    • init is a matrix to be used as initialization for the mean. If no matrix is provided, the instance of generalized means with $p=0.5$ will be used.
    • tol is the tolerance for the convergence (see below).
    • maxiter is the maximum number of iterations allowed.
    • if verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.
    • if ⏩=true the iterations are multi-threaded (see below).

    If the input is a 1d array of $k$ real positive definite diagonal matrices the solution is available in closed-form as the modified Bhattacharyya mean, hence the <optional keyword arguments> init, tol and verbose have no effect and the 3-tuple $(G, 1, 0)$ is returned. See modified Bhattacharyya mean.

    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true, a warning is printed indicating the iteration when this happened.

    $tol$ defaults to the square root of Base.eps of the nearest real type of the data input $𝐏$. This corresponds to requiring the norm of the matrix equation to be satisfied, divided by the number of elements, to vanish for about half the significant digits.

    See: Wasserstein metric.

    See also: powerMean, wasMean, logdet0Mean, mean.

    Examples

    using LinearAlgebra, PosDefManifold
     # Generate a set of 4 random 3x3 SPD matrices
     Pset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)
     
     using BenchmarkTools
     Pset=randP(20, 120)
     @benchmark(wasMean(Pset; ⏩=false)) # single-threaded
@benchmark(wasMean(Pset)) # multi-threaded
    source
    PosDefManifold.powerMean — Function
        powerMean(𝐏::Union{ℍVector, 𝔻Vector}, p::Real;
         <
         w::Vector=[],
         ✓w=true,
         tol::Real=0.,
         maxiter::Int=500,
         verbose=false,
    ⏩=true >)

    Given a 1d array $𝐏={P_1,...,P_k}$ of $k$ positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type, an optional non-negative real weights vector $w={w_1,...,w_k}$ and a real parameter $p\in[-1, 1]$, return the 3-tuple $(G, iter, conv)$, where $G$ is the power mean of order $p$ of Lim and Palfia (2012) and $iter$, $conv$ are the number of iterations and convergence attained by the algorithm, respectively. Mean $G$ is the unique positive definite matrix satisfying

    $G=\sum_{i=1}^{k}(w_iG\textrm{#}_pP_i)$,

    where $G\textrm{#}_pP_i$ denotes the point at arclength $p$ on the Fisher geodesic joining $G$ to $P_i$ (see geodesic). In particular:

    • with $p=-1$ this is the harmonic mean (see the inverse Euclidean metric),
    • with $p=+1$ this is the arithmetic mean (see the Euclidean metric),
    • at the limit of $p$ evaluated at zero from both sides this is the geometric mean (see Fisher metric).

    For estimating power means for $p\in(-1, 1)$, this function implements the fixed-point iterative algorithm of (Congedo et al., 2017b)🎓. For $p=0$ (geometric mean) this algorithm is run twice, with a small positive and a small negative value of $p$, and the geometric mean of the two resulting means is returned, as suggested in (Congedo et al., 2017b)🎓. This way of estimating the geometric mean of a set of matrices is faster as compared to the usual gradient descent algorithm.
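    For example, the $p=0$ estimate should agree, within tolerance, with the gradient-descent estimate of geometricMean (a sketch):

    using LinearAlgebra, PosDefManifold
    Pset=randP(3, 4)
    G0, iter0, conv0 = powerMean(Pset, 0)
    G1, iter1, conv1 = geometricMean(Pset)
    norm(G0-G1)/norm(G1) # small relative difference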

    If you don't pass a weight vector with <optional keyword argument> $w$, return the unweighted power mean.

    If <optional keyword argument> ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.

    The following are more <optional keyword arguments>:

    • init is a matrix to be used as initialization for the mean. If no matrix is provided, the instance of generalized means with parameter $p$ will be used.
    • tol is the tolerance for the convergence (see below).
    • maxiter is the maximum number of iterations allowed.
    • if verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.
    • if ⏩=true the iterations are multi-threaded.

    If the input is a 1d array of $k$ real positive definite diagonal matrices the solution is available in closed-form as the generalized mean of order $p$, hence the <optional keyword arguments> init, tol and verbose have no effect and the 3-tuple $(G, 1, 0)$ is returned. See generalized means.

    Nota Bene

    Multi-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.

    In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true, a warning is printed indicating the iteration when this happened.

    $tol$ defaults to the square root of Base.eps of the nearest real type of the data input $𝐏$. This corresponds to requiring the norm of the difference between the solutions at two successive iterations, divided by the number of elements in the matrix, to vanish for about half the significant digits.

    (2) Like in (1), but for a 1d array $𝐃={D_1,...,D_k}$ of $k$ real positive definite diagonal matrices of 𝔻Vector type. In this case the solution is available in closed-form, hence the <optional keyword arguments> init, tol and verbose have no effect and the 3-tuple $(G, 1, 0)$ is returned. See generalized means.

    See: power means, generalized means, modified Bhattacharyya mean.

    See also: generalizedMean, wasMean, logdet0Mean, mean.

    Examples

    using LinearAlgebra, PosDefManifold
     # Generate a set of 4 random 3x3 SPD matrices
     Pset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)
     
     using BenchmarkTools
     Pset=randP(20, 120)
     @benchmark(powerMean(Pset, 0.5; ⏩=false)) # single-threaded
@benchmark(powerMean(Pset, 0.5)) # multi-threaded
    source
    PosDefManifold.inductiveMean — Function
    (1) inductiveMean(metric::Metric, 𝐏::ℍVector)
     
(2) inductiveMean(metric::Metric, 𝐏::ℍVector, q::Int, Q::ℍ)

    alias: indMean

    (1) Compute the Fréchet mean of the 1d array $𝐏={P_1,...,P_k}$ of $k$ positive definite matrices of ℍVector type with a law of large numbers inductive procedure (Ho et al., 2013; Lim and Palfia, 2019; Massart et al., 2018)🎓, as follows:

    $G_1=P_1,$

    $G_i=γ(i^{-1}, G_{(i-1)}, P_i), i=2,...,k,$

    where $γ(i^{-1}, G_{(i-1)}, P_i)$ is a step on the geodesic joining $G_{(i-1)}$ to $P_i$ with arclength $i^{-1}$, using the specified metric of type Metric::Enumerated type.
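    Written out, the procedure is just repeated calls to the geodesic function (a sketch, not the package's implementation):

    using PosDefManifold
    Pset=randP(3, 20)
    G=Pset[1]
    for i=2:length(Pset)
        global G=geodesic(Fisher, G, Pset[i], 1/i) # move toward P_i by arclength 1/i
    end
    # G approximates inductiveMean(Fisher, Pset)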

    (2) Like (1), but for the set of matrices $𝐐 ∪ 𝐏$, where it is assumed knowledge only of the set $𝐏$, the mean of $𝐐$ (Hermitian matrix argument Q) and the number of matrices in $𝐐$ (integer argument q). This method can be used, for example, for updating a block on-line algorithm, where $𝐏$ is the incoming block, Q the previous mean estimation and q the cumulative number of matrices on which the mean has been computed on-line.

    For Fréchet means that do not have a closed form expression, this procedure features a computational complexity amounting to less than two iterations of gradient descent or fixed-point algorithms. This comes at the price of an approximation. In fact, the solution is not invariant to permutations of the matrices in array 𝐏 and convergence to the Fréchet mean for finite samples is not ensured (see Lim and Palfia, 2019; Massart et al., 2018)🎓.

    Since the inductive mean uses the geodesic function, it is not available for the Von Neumann metric.

    Examples

    # A set of 100 matrices for which we want to compute the mean
     𝐏=randP(10, 100)
     
     𝐏1=ℍVector(collect(𝐏[i] for i=1:50)) # first 50
     
     # average error
     norm(G-H)/(dim(G, 1)^2)
norm(G2-H)/(dim(G, 1)^2)
    source
    PosDefManifold.midrange — Function
    midrange(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex

    Midrange (average of extremal values) of positive definite matrices $P$ and $Q$. Only the Fisher metric is supported, allowing the so-called geometric midrange. This has been defined in Mostajeran et al. (2019) 🎓 as

    $P * Q = \frac{1}{\sqrt{\lambda_{min}}+\sqrt{\lambda_{max}}}\Big(Q+\sqrt{\lambda_{min}\lambda_{max}}\,P\Big)$,

    where $\lambda_{min}$ and $\lambda_{max}$ are the extremal generalized eigenvalues of $P$ and $Q$.
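    A direct computation from this definition (a sketch; it assumes the generalized eigenvalues are those of the pencil $(P, Q)$):

    using LinearAlgebra, PosDefManifold
    P=randP(3); Q=randP(3)
    λ=eigvals(P, Q) # generalized eigenvalues of the pencil (P, Q) (assumption)
    λmin, λmax = minimum(λ), maximum(λ)
    M=(Q+sqrt(λmin*λmax)*P)/(sqrt(λmin)+sqrt(λmax))
    # compare with midrange(Fisher, P, Q)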

    Examples

    P=randP(3)
     Q=randP(3)
M=midrange(Fisher, P, Q)
    source

    Tangent Space operations

    Function                 Description
    logMap                   Logarithmic map (from manifold to tangent space)
    expMap                   Exponential map (from tangent space to manifold)
    vecP                     Vectorization of matrices in the tangent space
    matP                     Matrization of matrices in the tangent space (inverse of vecP)
    parallelTransport, pt    Parallel transport of tangent vectors and matrices


    PosDefManifold.logMap — Function
    (1) logMap(metric::Metric, P::ℍ{T}, G::ℍ{T})
     
     (2) logMap(metric::Metric, 𝐏::ℍVector, G::ℍ{T})
for all the above: where T<:RealOrComplex

    (1) Logarithmic Map: map a positive definite matrix $P$ from the SPD or Hermitian manifold into the tangent space at base-point $G$ using the Fisher metric.

    $P$ and $G$ must be flagged as Hermitian. See typecasting matrices.

    The map is defined as

    $Log_G(P)=S=G^{1/2}\textrm{log}\big(G^{-1/2}PG^{-1/2}\big)G^{1/2}$.

    metric is a metric of type Metric::Enumerated type.

    The result is an Hermitian matrix.

    (2) Logarithmic map (1) at base-point $G$ at once for $k$ positive definite matrices in 1d array $𝐏={P_1,...,P_k}$ of ℍVector type.

    The result is an ℍVector.

    Nota Bene

    Currently only the Fisher metric is supported for tangent space operations.

    The inverse operation is expMap.

    See also: vecP, parallelTransport.

    Examples

    using PosDefManifold
     (1)
     P=randP(3)
     Q=randP(3)
     (2)
     Pset=randP(3, 4)
     # projecting all matrices in Pset at the base point given by their geometric mean.
Sset=logMap(Fisher, Pset, mean(Fisher, Pset))
    source
    PosDefManifold.expMap — Function
    (1) expMap(metric::Metric, S::ℍ{T}, G::ℍ{T})
     
     (2) expMap(metric::Metric, 𝐒::ℍVector, G::ℍ{T})
for all the above: where T<:RealOrComplex

    (1) Exponential Map: map a tangent vector (a matrix) $S$ from the tangent space at base-point $G$ into the SPD or Hermitian manifold (using the Fisher metric).

    $S$ and $G$ must be flagged as Hermitian. See typecasting matrices.

    The map is defined as

    $Exp_G(S)=P=G^{1/2}\textrm{exp}\big(G^{-1/2}SG^{-1/2}\big)G^{1/2}$.

    metric is a metric of type Metric::Enumerated type.

    The result is an Hermitian matrix.

    (2) Exponential map (1) at base-point $G$ at once for $k$ tangent vectors (matrices) in 1d array $𝐒={S_1,...,S_k}$ of ℍVector type.

    The result is an ℍVector.

    Nota Bene

    Currently only the Fisher metric is supported for tangent space operations.

    The inverse operation is logMap.

    Examples

    (1)
     using PosDefManifold, LinearAlgebra
     P=randP(3)
     Q=randP(3)
 G=mean(Fisher, P, Q)
 # projecting P onto the tangent space at base-point G
 S=logMap(Fisher, P, G)
 # adding the identity to S and projecting back onto the manifold
 P2=expMap(Fisher, ℍ(S+I), G)
 (2)
 Pset=randP(3, 4)
 # projecting all matrices in Pset onto the tangent space at their geometric mean
     G=mean(Fisher, Pset)
     Sset=logMap(Fisher, Pset, G)
     # projecting back onto the manifold
 Pset2=expMap(Fisher, Sset, G)
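Since expMap inverts logMap, a round trip through the tangent space must recover the original matrix; a minimal sketch (illustrative, with an arbitrary base-point):

using PosDefManifold
P=randP(3)
G=randP(3)
S=logMap(Fisher, P, G)   # manifold -> tangent space
P2=expMap(Fisher, S, G)  # tangent space -> manifold
P ≈ P2 ? println(" ⭐ ") : println(" ⛔ ")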
    source
    PosDefManifold.vecPFunction
    vecP(S::Union{ℍ{T}, Symmetric{R}};
     range::UnitRange=1:size(S, 2)) where T<:RealOrComplex where R<:Real

    Vectorize a tangent vector (which is an Hermitian or Symmetric matrix) $S$: mat ↦ vec.

It gives weight $1$ to diagonal elements and $√2$ to off-diagonal elements so as to preserve the norm (Barachant et al., 2012)🎓, such that

    $∥S∥_F=∥vecP(S)∥_F$.

    The result is a vector holding $n(n+1)/2$ elements, where $n$ is the size of $S$.

    $S$ must be flagged as Hermitian or Symmetric. See typecasting matrices.

The reverse operation is provided by matP, which always returns an Hermitian matrix.

If an optional keyword argument range is provided, the vectorization concerns only the rows (or columns, since the input matrix is symmetric or Hermitian) in the range. Note that in this case the operation cannot be reverted by matP; the matrix is then 'stuck' in the tangent space.

    Examples

    using PosDefManifold
     P=randP(3)
     Q=randP(3)
     G=mean(Fisher, P, Q)
 S=logMap(Fisher, P, G)
     # vectorize S
     v=vecP(S)
# vectorize only the first two columns of S
 v=vecP(S; range=1:2)
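The norm-preserving weighting can be verified directly; a minimal sketch (illustrative; any Hermitian matrix will do, a random positive definite one is used here):

using PosDefManifold, LinearAlgebra
S=randP(4)
v=vecP(S)
length(v)==4*(4+1)÷2 ? println(" ⭐ ") : println(" ⛔ ")  # n(n+1)/2 elements
norm(S) ≈ norm(v) ? println(" ⭐ ") : println(" ⛔ ")     # Frobenius norm preserved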
    source
    PosDefManifold.matPFunction
    matP(ς::Vector{T}) where T<:RealOrComplex

Matricize a tangent vector (vector) ς: vec ↦ mat.

This is the function reversing vecP, thus the weighting applied therein is reversed as well.

If $ς=vecP(S)$ and $S$ is a $n⋅n$ Hermitian or Symmetric matrix, $ς$ is a tangent vector of size $n(n+1)/2$. The result of calling matP(ς) is then the $n⋅n$ matrix $S$. $S$ is always returned flagged as Hermitian.

    To Do: This function may be rewritten more efficiently.

    Examples

    using PosDefManifold
     P=randP(3)
     Q=randP(3)
 G=mean(Fisher, P, Q)
 S=logMap(Fisher, P, G)
 # vectorize S
 v=vecP(S)
 # multiply v by a random matrix of size n(n+1)/2
 n=Int(size(S, 1)*(size(S, 1)+1)/2)
     U=randP(n)
     z=U*v
     # Get the point in the tangent space
 S=matP(z)
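A round trip through vecP and matP recovers the tangent vector; a minimal sketch (illustrative):

using PosDefManifold
P=randP(3)
G=randP(3)
S=logMap(Fisher, P, G)  # a tangent vector (an Hermitian matrix)
S2=matP(vecP(S))
S ≈ S2 ? println(" ⭐ ") : println(" ⛔ ")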
    source
    PosDefManifold.parallelTransportFunction
    (1) parallelTransport(S::ℍ{T}, P::ℍ{T}, Q::ℍ{T})
     
     (2) parallelTransport(S::ℍ{T}, P::ℍ{T})
     
     (3) parallelTransport(𝐒::ℍVector, P::ℍ{T}, Q::ℍ{T})
     
     (4) parallelTransport(𝐒::ℍVector, P::ℍ{T})
for all the above: where T<:RealOrComplex

    alias: pt

    (1) Parallel transport of tangent vector $S$ (a matrix) lying on the tangent space at base-point $P$ to the tangent space at base-point $Q$.

    $S$, $P$ and $Q$ must all be Hermitian matrices. Return an Hermitian matrix. The transport is defined as:

    $∥_{(P→Q)}(S)=\big(QP^{-1}\big)^{1/2}S\big(QP^{-1}\big)^{H/2}$.

If $S$ is a positive definite matrix in the manifold (and not a tangent vector) it will be 'transported' from $P$ to $Q$ (see the sketch after this list), amounting to (Yair et al., 2019🎓)

    • project $S$ onto the tangent space at base-point $P$,
    • parallel transport it to the tangent space at base-point $Q$,
    • project it back onto the manifold at base-point $Q$.
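The equivalence between the one-call transport and the three steps above can be checked with a minimal sketch (illustrative; R plays the role of the positive definite matrix being moved):

using PosDefManifold
P=randP(3); Q=randP(3); R=randP(3)
R2=parallelTransport(R, P, Q)   # one-call transport of the positive definite matrix R
Sp=logMap(Fisher, R, P)         # project R onto the tangent space at base-point P
Sq=parallelTransport(Sp, P, Q)  # parallel transport to the tangent space at base-point Q
R3=expMap(Fisher, Sq, Q)        # project back onto the manifold at base-point Q
R2 ≈ R3 ? println(" ⭐ ") : println(" ⛔ ")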

    (2) Parallel transport as in (1), but to the tangent space at base-point the identity matrix.

    The transport reduces in this case to:

    $∥_{(P→I)}(S)=P^{-1/2}SP^{-1/2}$.

    (3) Parallel transport as in (1) at once for $k$ tangent vectors (matrices) in 1d array $𝐒={S_1,...,S_k}$ of ℍVector type.

    (4) Parallel transport as in (2) at once for $k$ tangent vectors (matrices) in 1d array $𝐒={S_1,...,S_k}$ of ℍVector type.

    Nota Bene

    Currently only the Fisher metric is supported for parallel transport.

    See also: logMap, expMap, vecP, matP.

    Examples

    using PosDefManifold
     
     (1)
     P=randP(3)
 Q=randP(3)
 Pset=randP(3, 4)
 G=mean(Fisher, Pset)
     # recenter all matrices so to have mean=I
     Pset2=parallelTransport(Pset, G)
     # check
 mean(Fisher, Pset2) ≈ I ? println(" ⭐ ") : println(" ⛔ ")
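The reduced form of case (2) can also be checked directly; a minimal sketch (illustrative) comparing the transport of a tangent vector at base-point $G$ to the identity with $G^{-1/2}SG^{-1/2}$:

using PosDefManifold
P=randP(3)
G=randP(3)
S=logMap(Fisher, P, G)       # a tangent vector at base-point G
S0=parallelTransport(S, G)   # transport to base-point I
W=invsqrt(G)
S0 ≈ ℍ(W*S*W) ? println(" ⭐ ") : println(" ⛔ ")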
    source

    Procrustes problems

Function | Description
procrustes | Solution to the Procrustes problem in the manifold of positive definite matrices

    PosDefManifold.procrustesFunction
    procrustes(P::ℍ{T}, Q::ℍ{T}, extremum="min") where T<:RealOrComplex

    Given two positive definite matrices $P$ and $Q$, return by default the solution of problem

    $\textrm{argmin}_Uδ(P,U^HQU)$,

    where $U$ varies over the set of unitary matrices and $δ(.,.)$ is a distance or divergence function.

    $U^HQU$ is named in physics the unitary orbit of $Q$.

    If the argument extremum is passed as "max", it returns instead the solution of

    $\textrm{argmax}_Uδ(P,U^HQU)$.

    $P$ and $Q$ must be flagged as Hermitian. See typecasting matrices.

As shown in Bhatia and Congedo (2019)🎓, using each of the Fisher, logdet zero, Wasserstein and Kullback-Leibler divergences (see logdet α), the best approximant to $P$ from the unitary orbit of $Q$ commutes with $P$ and, surprisingly, has the same closed-form expression, namely

    $U_Q^↓U_P^{↓H}$ for the argmin and $U_Q^↑U_P^{↓H}$ for the argmax,

    where $U^↓$ denotes the eigenvector matrix of the subscript argument with eigenvectors in columns sorted by decreasing order of corresponding eigenvalues and $U^↑$ denotes the eigenvector matrix of the subscript argument with eigenvectors in columns sorted by increasing order of corresponding eigenvalues.

The same solutions have long been known, obtained by solving the extremal problem above using the Euclidean metric (Umeyama, 1988).

    The generalized Procrustes problem

$\textrm{argmin}_U\sum_{i=1}^{k}δ(P_i,U^HQ_iU)$

    can be solved using Julia package Manopt.

    Examples

    using PosDefManifold
     P=randP(3)
     Q=randP(3)
     # argmin problem
     U=procrustes(P, Q)
     # argmax problem
 V=procrustes(P, Q, "max")
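The closed-form solution can be reproduced with a standard eigendecomposition; a minimal sketch (illustrative; it assumes distinct eigenvalues, which random matrices have almost surely, and compares the resulting points on the unitary orbit of Q, since the sign ambiguity of eigenvectors cancels in the congruence):

using PosDefManifold, LinearAlgebra
P=randP(3)
Q=randP(3)
U=procrustes(P, Q)
# eigenvector matrices with eigenvalues sorted in decreasing order
Up=reverse(eigvecs(P), dims=2)
Uq=reverse(eigvecs(Q), dims=2)
U2=Uq*Up'
ℍ(U'*Q*U) ≈ ℍ(U2'*Q*U2) ? println(" ⭐ ") : println(" ⛔ ")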
    source
diff --git a/docs/build/search/index.html b/docs/build/search/index.html index 95b3348..e98f718 100644 --- a/docs/build/search/index.html +++ b/docs/build/search/index.html @@ -1,2 +1,2 @@ (regenerated search-page boilerplate: "Search · PosDefManifold", "Loading search..."; content omitted)
        diff --git a/docs/build/search_index.js b/docs/build/search_index.js index b0bedde..bd5c6e1 100644 --- a/docs/build/search_index.js +++ b/docs/build/search_index.js @@ -1,3 +1,3 @@ var documenterSearchIndex = {"docs": -[{"location":"signalProcessing/#signalProcessing.jl-1","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"","category":"section"},{"location":"signalProcessing/#","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"This unit contains miscellaneous signal processing functions useful in relation to the Riemannian geometry of the manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. In Julia those are Hermitian matrices, see typecasting matrices.","category":"page"},{"location":"signalProcessing/#","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"Function Description\nrandChi², randχ² Generate a random variable distributed as a chi-squared\nrandEigvals, randλ Generate a random vectors of real positive eigenvalues\nrandEigvalsMat, randΛ Generate a random diagonal matrix of real positive eigenvalues\nrandUnitaryMat, randU Generate a random orthogonal or unitary matrix\nrandPosDefMat, randP Generate one or an array of random positive definite matrices\nregularize! Regularize an array of positive definite matrices\ngram Gram matrix of a matrix\ntrade trace and determinant of a matrix as a 2-tuple","category":"page"},{"location":"signalProcessing/#","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"⋅","category":"page"},{"location":"signalProcessing/#","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"randChi²\r\nrandEigvals\r\nrandEigvalsMat\r\nrandUnitaryMat\r\nrandPosDefMat\r\nregularize!\r\ngram\r\ntrade","category":"page"},{"location":"signalProcessing/#PosDefManifold.randChi²","page":"signalProcessing.jl","title":"PosDefManifold.randChi²","text":"randChi²(df::Int)\n\nalias: randχ²\n\nGenerate a random variable distributed as a chi-squared with df degrees of freedom.\n\nIt uses the Wilson–Hilferty transformation for df>=20 - see chi-squared distribution.\n\nExamples\n\nusing Plots, PosDefManifold\nchi=[randχ²(2) for i=1:10000]\nhistogram(chi) # needs Plots package. Check your plots back-end.\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randEigvals","page":"signalProcessing.jl","title":"PosDefManifold.randEigvals","text":" randEigvals(n::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3 >)\n\nalias: randλ\n\nGenerate an n-vector of random real positive eigenvalues. The eigenvalues are generated as in function randΛ(randEigvalsMat), the syntax of which is used.\n\nSee also: randU (randUnitaryMat), randP (randPosDefMat).\n\nExamples\n\nusing Plots, PosDefManifold\nλ=sort(randλ(10), rev=true)\nσ=sort(randλ(10, eigvalsSNR=10), rev=true)\nplot(λ) # needs Plots package. Check your plots back-end.\nplot!(σ) # needs Plots package. 
Check your plots back-end.\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randEigvalsMat","page":"signalProcessing.jl","title":"PosDefManifold.randEigvalsMat","text":" (1) randEigvalsMat(n::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3 >)\n\n (2) randEigvalsMat(n::Int, k::Int;\n < same keyword arguments as in (1) >)\n\nalias: randΛ\n\n(1) Generate an nn diagonal matrix of random real positive eigenvalues.\n\n(2) An array 1d (of 𝔻Vector type) of k matrices of the kind in (1)\n\nThe eigenvalues are generated according to model\n\nλ_i=χ_df^2+ηhspace6pttextrmforhspace2pti=1n\n\nwhere\n\nχ_df^2 (signal term) is randomly distributed as a chi-square with df degrees of freedom,\nη is a white noise term, function of eigvalsSNR, such that\n\ntextrmeigenvalues SNR=mathbbEbig(sum_i=1^nλ_ibig)bignη\n\nThe expected sum mathbbEbig(sum_i=1^nλ_ibig) here above is the expected variance of the signal term, i.e., n(df), since the expectation of a random chi-squared variable is equal to its degrees of freedom.\n\nIf eigvalsSNR=Inf is passed as argument, then η is set to zero, i.e., no white noise is added. In any case eigvalsSNR must be positive.\n\nNote that with the default value of df (df=2) the generating model assumes that the eigenvalues have exponentially decaying variance, which is often observed on real data.\n\nnote: Nota Bene\nThe eigvalsSNR expresses the expected eigenvalues SNR (signal-to-noise ratio), not the real one, and is not expressed in decibels, but as the expected SNR variance ratio.\n\nThis function is used by function randP (randPosDefMat) to generate random positive definite matrices with added white noise in order to emulate eigenvalues observed in real data and to improve the conditioning of the generated matrices with respect to inversion.\n\nSee also: randλ (randEigvals), randU (randUnitaryMat), randP (randPosDefMat), randχ² (randChi²).\n\nExamples\n\nusing PosDefManifold\n# (1)\nn=3;\nU=randU(n);\nΛ=randΛ(n, eigvalsSNR=100)\nP=U*Λ*U' # generate an SPD matrix\nusing LinearAlgebra\nQ=ℍ(U*Λ*U') # generate an SPD matrix and flag it as 'Hermitian'\n\n# (2) generate an array of 10 matrices of simulated eigenvalues\nDvec=randΛ(n, 10)\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randUnitaryMat","page":"signalProcessing.jl","title":"PosDefManifold.randUnitaryMat","text":"(1) randUnitaryMat(n::Int)\n(2) randUnitaryMat(::Type{Complex{T}}, n::Int)\n\naliases: randOrthMat, randU\n\nGenerate a random nn\n\n(1) orthogonal matrix (real)\n(2) unitary matrix (complex)\n\nThe matrices are generated running the modified (stabilized) Gram-Schmidt orthogonalization procedure (mgs) on an nn matrix filled with random Gaussian elements.\n\nSee also: randΛ (randEigvals), randP (randPosDefMat).\n\nExamples\n\nusing PosDefManifold\nn=3;\nX=randU(n)*sqrt(randΛ(n))*randU(n)' # (1) generate a random square real matrix\n\nU=randU(ComplexF64, n);\nV=randU(ComplexF64, n);\nY=U*sqrt(randΛ(n))*V' # (2) generate a random square complex matrix\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randPosDefMat","page":"signalProcessing.jl","title":"PosDefManifold.randPosDefMat","text":" (1) randPosDefMat(n::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3 >)\n\n (2) randPosDefMat(::Type{Complex{T}}, n:: Int;\n < same keyword arguments as in (1) >)\n\n (3) randPosDefMat(n::Int, k::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3,\n SNR::Real=100,\n commuting=false >)\n\n (4) randPosDefMat(::Type{Complex{T}}, n::Int, k::Int;\n 
< same keyword arguments as in (3) >)\n\nalias: randP\n\nGenerate\n\n(1) one random Hermitian positive definite matrix (real) of size nn\n(2) one random Hermitian positive definite matrix (complex) of size nn\n(3) an array 1d (of ℍVector type) of k matrices of the kind in (1)\n(4) an array 1d (of ℍVector type) of k matrices of the kind in (2).\n\nMethods (3) and (4) are multi-threaded. See Threads.\n\nFor (1) and (2) the matrix is generated according to model\n\nUΛU^H+ηI,\n\nwhere U is a random orthogonal (1) or unitary (2) matrix generated by function randU(randUnitaryMat) and Λ, η are a positive definite diagonal matrix and a non-negative scalar depending on df and eigvalsSNR randomly generated calling function randΛ(randEigvalsMat).\n\nFor (3) and (4), if the commuting=true is passed, the k matrices are generated according to model\n\nUΛ_iU^H+ηIhspace8pt, for i=1:k\n\notherwise they are generated according to model\n\n(UΛ_iU^H+ηI)+φ(V_iΔ_iV_i^H+ηI)hspace8pt, for i=1:k Eq.[1]\n\nwhere\n\nU and the V_i are random (3) orthogonal/(4) unitary matrices,\nΛ_i and Δ_i are positive definite diagonal matrices\nη is a non-negative scalar.\n\nAll variables here above are randomly generated as in (1) and (2)\n\nφ is adjusted so as to obtain a desired output SNR (signal-to-noise ratio), which is also an\n\n, such as\n\nSNR=fracdisplaystylesum_i=1^ktextrmtr(UΛ_iU^H+ηI)displaystylesum_i=1^ktextrmtrφ(V_iΔ_iV_i^H+ηI).\n\nnote: Nota Bene\nThe keyword arguments SNR is not expressed in decibels, but as the expected SNR variance ratio. It must be a positive number.\n\nA slightly different version of this model for generating positive definite matrices has been proposed in (Congedo et al., 2017b)[🎓]; in the model of Eq. [1]\n\nUΛ_iU^H is the signal term, where the signal is supposed sharing the same coordinates for all matrices,\nφ(V_iΔ_iV_i^H) is a structured noise term, which is different for all matrices\nηI is a white noise term, with same variance for all matrices.\n\nSee also: the aforementioned paper and randΛ (randEigvalsMat).\n\nExamples\n\nusing PosDefManifold\nR=randP(10, df=10, eigvalsSNR=1000) # 1 SDP Matrix of size 10x10 #(1)\nH=randP(ComplexF64, 5, eigvalsSNR=10) # 1 Hermitian Matrix of size 5x5 # (2)\nℛ=randP(10, 1000, eigvalsSNR=100) # 1000 SPD Matrices of size 10x10 # (3)\nusing Plots\nheatmap(Matrix(ℛ[1]), yflip=true, c=:bluesreds)\nℋ=randP(ComplexF64, 20, 1000) # 1000 Hermitian Matrices of size 20x20 # (4)\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.regularize!","page":"signalProcessing.jl","title":"PosDefManifold.regularize!","text":"(1) regularize!(P::ℍ; )\n(2) regularize!(𝐏::ℍVector; )\n\nAdd white noise to either\n\n(1) a positive definite matrix P of size nn, or\n(2) a 1d array 𝐏 of k positive definite matrices of size nn, of ℍVector type.\n\nThe added noise improves the matrix conditioning with respect to inversion. This is used to avoid numerical errors when decomposing these matrices or when evaluating some functions of their eigevalues such as the log.\n\nA constant value is added to all diagonal elements of (1) P or (2) af all matrices in 𝐏, that is, on output:\n\ntextrm(1)hspace2ptPleftarrow P+ηI\n\ntextrm(2)hspace2pt𝐏_ileftarrow 𝐏_i+ηI hspace2pttextrmforhspace2pt i=1k\n\nThe amount of added noise η is determined by the SNR , which by default is 10000. 
This is such that\n\ntextrm(1)hspace2ptSNR=fracdisplaystyletextrmtr(P)displaystyletextrmtr(ηI)\n\ntextrm(2)hspace2ptSNR=fracdisplaystylesum_i=1^ktextrmtr(𝐏_i)displaystyle khspace1pttextrmtr(ηI)\n\nP in (1) must be flagged as Hermitian. See typecasting matrices.\n\nnote: Nota Bene\nThe keyword argument SNR expresses a SNR (signal-to-noise ratio), and is not expressed in decibels, but as the SNR variance ratio. It must be a positive number. Differently from function randΛrandEigvalsMat, randλrandEigvals and randPrandPosDefMat, the SNR here is not the expected SNR, but the actual SNR.\n\nSee also: randP (randPosDefMat).\n\nExamples\n\n# (1)\nusing LinearAlgebra, Plots, PosDefManifold\nn=3\nU=randU(n)\n# in Q we will write two matrices,\n# the unregularized and regularized matrix side by side\nQ=Matrix{Float64}(undef, n, n*2)\nP=ℍ(U*Diagonal(randn(n).^2)*U') # generate a real 3x3 positive matrix\nfor i=1:n, j=1:n Q[i, j]=P[i, j] end\nregularize!(P, SNR=5)\nfor i=1:n, j=1:n Q[i, j+n]=P[i, j] end # the regularized matrix is on the right\nheatmap(Matrix(Q), yflip=true, c=:bluesreds)\n\n# (2)\n𝐏=[ℍ(U*Diagonal(randn(3).^2)*U') for i=1:5] # 5 real 3x3 positive matrices\nregularize!(𝐏, SNR=1000)\n\nRun a test\n\nusing LinearAlgebra\n𝐏=randP(10, 100, SNR=1000); # 100 real Hermitian matrices\nsignalVar=sum(tr(P) for P in 𝐏);\nregularize!(𝐏, SNR=1000);\nsignalPlusNoiseVar=sum(tr(P) for P in 𝐏);\noutput_snr=signalVar/(signalPlusNoiseVar-signalVar)\n# output_snr should be approx. equal to 1000\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.gram","page":"signalProcessing.jl","title":"PosDefManifold.gram","text":"gram(X::Matrix{T}) where T<:RealOrComplex\n\nGiven a generic data matrix X, comprised of real or complex elements, return the normalized Gram matrix, that is, the covariance matrix of X corrected by sample size, but without subtracting the mean.\n\nThe result is flagged as Hermitian. See typecasting matrices.\n\nnote: Nota Bene\nIf X is wide or square (r<=c) return XX^Hc. If X is tall (r>c) return X^HXr.\n\nExamples\n\nusing PosDefManifold\nX=randn(5, 150);\nG=gram(X) # => G=X*X'/150\nX=randn(100, 2);\nF=gram(X); # => G=X'*X/100\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.trade","page":"signalProcessing.jl","title":"PosDefManifold.trade","text":"trade(P::ℍ{T}) where T<:RealOrComplex\n\nGiven a positive definite matrix P, return as a 2-tuple the trace and the determinant of P. This is used to plot positive matrices in two dimensions (TraDe plots: log(trace/n) vs. log(determinant), see exemple here below).\n\nP must be flagged by julia as Hermitian. See typecasting matrices.\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nt, d=trade(P) # equivalent to (t, d)=trade(P)\n\n# TraDe plot\nusing Plots\nk=100\nn=10\n𝐏=randP(n, k, SNR=1000); # 100 real Hermitian matrices\nx=Vector{Float64}(undef, k)\ny=Vector{Float64}(undef, k)\nfor i=1:k\n x[i], y[i] = trade(𝐏[i])\nend\nx=log.(x./n)\ny=log.(y)\nplot(x, y, seriestype=:scatter)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#linearAlgebra.jl-1","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"This unit contains linear algebra functions useful in relation to the Riemannian geometry of the manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. 
In Julia those are Hermitian matrices, see typecasting matrices.","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"In general they take a matrix as input (some may take other arrays as input) and are divided in eight categories depending on what kind of functions thay are and what they give as output:","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Category Output\n1. Utilities - - -\n2. Matrix normalizations and approximations matrix\n3. Boolean functions of matrices matrix\n4. Scalar functions of matrices scalar\n5. Diagonal functions of matrices diagonal matrix\n6. Unitary functions of matrices orthogonal/unitary matrix\n7. Matrix function of matrices matrix\n8. Spectral decompositions of positive matrices spectral function of input\n9. Decompositions involving triangular matrices triangular matrix","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#Utilities-1","page":"linearAlgebra.jl","title":"Utilities","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\ntypeofMatrix, typeofMat Return the type of the matrix argument\ntypeofVector, typeofVec Return the type of the matrix vector argument\ndim length of the dimensions of matrices and vectors of matrices","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"typeofMatrix\r\ntypeofVector\r\ndim","category":"page"},{"location":"linearAlgebra/#PosDefManifold.typeofMatrix","page":"linearAlgebra.jl","title":"PosDefManifold.typeofMatrix","text":"function typeofMatrix(\narray::Union{AnyMatrix, AnyMatrixVector, AnyMatrixVector₂})\n\nalias: typeofMat\n\nReturn the type of a matrix, either Hermitian, Diagonal, LowerTriangular, or Matrix. Argument array may be a matrix of one of these types, but also one of the following:\n\nℍVector, ℍVector₂, 𝔻Vector, 𝔻Vector₂, 𝕃Vector, 𝕃Vector₂, 𝕄Vector, 𝕄Vector₂.\n\nThose are Array of Matrices types. See also aliases for the symbols ℍ, 𝔻, 𝕃 and 𝕄.\n\nNote that this function is different from Julia function typeof, which returns the concrete type (see example below), thus cannot be used for typecasting matrices.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3) # generate a 3x3 Hermitian matrix\ntypeofMatrix(P) # returns `Hermitian`\ntypeof(P) # returns `Hermitian{Float64,Array{Float64,2}}`\n# typecast P as a `Matrix` M\nM=Matrix(P)\n# typecast M as a matrix of the same type as P and write the result in A\nA=typeofMatrix(P)(M)\n\nPset=randP(3, 4) # generate a set of 4 3x3 Hermitian matrix\n# Pset is an ℍVector type\ntypeofMatrix(Pset) # again returns `Hermitian`\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.typeofVector","page":"linearAlgebra.jl","title":"PosDefManifold.typeofVector","text":"function typeofVector(\narray::Union{AnyMatrix, AnyMatrixVector, AnyMatrixVector₂})\n\nalias: typeofVec\n\nReturn the type of a Vector, either HermitianVector, DiagonalVector, LowerTriangularVector, or MatrixVector. The aliases of those are, respectvely, ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector. 
Argument array may be a vector of one of these types, but also one of the following:\n\nℍ, 𝔻, 𝕃 and 𝕄, ℍVector₂, 𝔻Vector₂, 𝕃Vector₂, 𝕄Vector₂.\n\nSee aliases for the symbols ℍ, 𝔻, 𝕃 and 𝕄. The last four are Array of Matrices types.\n\nNote that this function is different from Julia function typeof only in that it returns the vector type also if array is not of the ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector type.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3, 4) # generate 4 3x3 Hermitian matrix\ntypeofMatrix(P) # returns `Array{Hermitian,1}`\ntypeof(P) # also returns `Array{Hermitian,1}`\n\ntypeofMatrix(P[1]) # returns `Array{Hermitian,1}`\ntypeof(P[1]) # returns `Hermitian{Float64,Array{Float64,2}}`\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.dim","page":"linearAlgebra.jl","title":"PosDefManifold.dim","text":"(1) function dim(X::AnyMatrix, [d])\n(2) function dim(vector::AnyMatrixVector, [d])\n(3) function dim(vector₂::AnyMatrixVector₂, [d])\n\n(1) X is a real or complex Matrix, Diagonal, LowerTriangular or Hermitian matrix. Return a 2-tuple containing the dimensions of X, which is two times the same dimension for all possible types of X with the exception of the Matrix type, which can be rectangular. Optionally you can specify a dimension (1 or 2) to get just the length of that dimension.\n\n(2) vector is an 𝕄Vector, 𝔻Vector, 𝕃Vector or ℍVector type (see AnyMatrixVector type). Return a 3-tuple containing the number of matrices it holds (dimension 1) and their dimensions (dimension 2 and 3). Optionally you can specify a dimension (1, 2, or 3) to get just the length of that dimension.\n\n(3) vector₂ is an 𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂ or ℍVector₂ type (see AnyMatrixVector type). Return a 4-tuple containing\n\nthe number of vectors of matrices it holds (dimension 1),\na vector holding the number of matrices in each vector of matrices (dimensions 2),\nthe two dimensions of the matrices (dimension 3 and 4).\n\nOptionally you can specify a dimension (1, 2, 3 or 4) to get just the length of that dimension.\n\nvector and vector₂ are Array of Matrices types. See also aliases for the symbols ℍ, 𝔻, 𝕃 and 𝕄.\n\nnote: Nota Bene\nIf you specify a dimension and this is out of the valid range, the function returns zero.Both the vector(2) and the vector₂(3) object are meant to hold matrices living in the same manifold, therefore it is assumed that all matrices they holds are of the same dimension. The dimensions of the matrices are retrived fromthe first matrix in vector(2),\nthe first matrix in the first vector of vector₂(3).\n\nThis function replaces Julia size function, which cannot be used to retrive dimension for matrix vectors. 
It is not possible to overload the size function for matrix vectors since this causes problems to other Julia functions.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# (1)\nM=randn(3, 4) # generate a 3x4 `Matrix`\ndim(M) # returns (3, 4)\ndim(M, 1) # returns 3\ndim(M, 2) # returns 4\ndim(M, 3) # out of range: returns 0\n\n# (2)\nPset=randP(3, 4) # generate an ℍVector holding 4 3x3 Hermitian matrices\ndim(Pset) # returns (4, 3, 3)\ndim(Pset, 1) # returns 4\ndim(Pset, 2) # returns 3\ndim(Pset, 3) # returns 3\n\n# (3)\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4)\n# Generate a set of 40 random 4x4 SPD matrices\nQset=randP(3, 40)\nA=ℍVector₂([Pset, Qset])\ndim(A) # return (2, [4, 40], 3, 3)\ndim(A, 1) # return 2\ndim(A, 2) # return [4, 40]\ndim(A, 2)[1] # return 4\ndim(A, 3) # return 3\ndim(A, 4) # return 3\ndim(A, 5) # out of range: return 0\n\n# note: to create an ℍVector₂ object holding k ℍVector objects use\nsets=ℍVector₂(undef, k) # and then fill them\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Matrix-normalizations-and-approximations-1","page":"linearAlgebra.jl","title":"Matrix normalizations and approximations","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\ndet1 Normalize the determinant\ntr1 Normalize the trace\nnearestPosDef Nearest Symmetric/Hermitian Positive Semi-definite matrix\nnearestOrthogonal nearestOrth Nearest Orthogonal matrix\nnormalizeCol! Normalize one or more columns","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"det1\r\ntr1\r\nnearestPosDef\r\nnearestOrthogonal\r\nnormalizeCol!","category":"page"},{"location":"linearAlgebra/#PosDefManifold.det1","page":"linearAlgebra.jl","title":"PosDefManifold.det1","text":"function det1(X::AnyMatrix; )\n\nReturn the argument matrix X normalized so as to have unit determinant. For square positive definite matrices this is the best approximant from the set of matrices in the special linear group - see Bhatia and Jain (2014)🎓.\n\nX can be a real or complex Diagonal, LowerTriangular, Matrix, or Hermitian matrix. (see AnyMatrix type)\n\nIf the determinant is not greater than tol (which defalts to zero) a warning is printed and X is returned.\n\nnote: Nota Bene\nThis function is meant for positive definite matrices. Julia may throws an error while computing the determinant if the matrix is defective.\n\nSee Julia det function.\n\nSee also: tr1.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(5) # generate a random real positive definite matrix 5x5\nQ=det1(P)\ndet(Q) # must be 1\n# using a tolerance\nQ=det1(P; tol=1e-12)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.tr1","page":"linearAlgebra.jl","title":"PosDefManifold.tr1","text":"tr1(X::AnyMatrix; tol::Real=0.)\n\nReturn the argument matrix X normalized so as to have unit trace.\n\nX can be a real or complex Diagonal, LowerTriangular, Matrix or Hermitian matrix (see AnyMatrix type). Its trace must be real. If the absolute value of its imaginary part is greater than tol (which defalts to zero) a warning is printed and X is returned. 
Also, if the trace is not greater than tol a warning is printed and X is returned.\n\nSee: Julia trace function.\n\nSee also: tr, det1.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n\nP=randP(5) # generate a random real positive definite matrix 5x5\nQ=tr1(P)\ntr(Q) # must be 1\n# using a tolerance\nQ=tr1(P; tol=1e-12)\n\nPc=randP(ComplexF64, 5) # generate a random real positive definite matrix 5x5\nQc=tr1(Pc)\ntr(Qc) # must be 1\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.nearestPosDef","page":"linearAlgebra.jl","title":"PosDefManifold.nearestPosDef","text":"nearestPosDef(X::Union{𝔻, 𝕄}; tol::Real=0.)\n\nReturn the nearest symmetric/Hermitian positive semi-definite matrix of a diagonal or of an arbitary square matrix X according to the Frobenius norm. If the eigenvalues of the symmetric part of X are all non-negative, the result is positive definite and will be flagged as Hermitian, otherwise it is positive semi-definite and will not be flagged. The nearest matrix is given by\n\n(Y+H)2\n\nwhere\n\nY=(X+X^H)2\n\nis the symmetric part of X, and H is the symmetric polar factor of Y. See Higham(1988)🎓 for details and for the way it is computed.\n\nSee also: det1, procrustes.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nX=randn(5, 5) # generate an arbitrary 5x5 matrix\nS=nearestPosDef(X)\n\nP=randP(5) # generate a random real positive definite 5x5 matrix\nS=nearestPosDef(Matrix(P)) # typecasting an Hermitian matrix as a `Matrix`\n# Since P is a positive definite matrix S must be equal to P\nS ≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.nearestOrthogonal","page":"linearAlgebra.jl","title":"PosDefManifold.nearestOrthogonal","text":"nearestOrthogonal(X::AnyMatrix)\n\nalias: nearestOrth\n\nReturn the nearest orthogonal matrix of a square Hermitian, LowerTriangular, Diagonal or generic Matrix X (see AnyMatrix type). This is given by\n\nUV^H,\n\nwhere\n\ntextrm(SVD)=UΛV^H.\n\nIf X is Diagonal, return X.\n\nSee also: nearestPosDef, procrustes.\n\nExamples\n\nusing PosDefManifold\nU=nearestOrth(randn(5, 5))\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.normalizeCol!","page":"linearAlgebra.jl","title":"PosDefManifold.normalizeCol!","text":"(1) normalizeCol!(X::𝕄{T}, j::Int)\n(2) normalizeCol!(X::𝕄{T}, j::Int, by::Number)\n(3) normalizeCol!(X::𝕄{T}, range::UnitRange)\n(4) normalizeCol!(X::𝕄{T}, range::UnitRange, by::Number)\nfor all above: where T<:RealOrComplex\n\nGiven a Matrix type X comprised of real or complex elements,\n\n(1) normalize the j^th column to unit norm\n(2) divide the elements of the j^th column by number by\n(3) normalize the columns in range to unit norm\n(4) divide the elements of columns in range by number by.\n\nby is a number of abstract supertype Number. It should be an integer, real or complex number. For efficiency, it should be of the same type as the elements of X.\n\nrange is a UnitRange type.\n\nMethods (1) and (3) call the BLAS.nrm2 routine for computing the norm of concerned columns. See Threads.\n\nnote: Nota Bene\nJulia does not allow normalizing the columns of Hermitian matrices. 
If you want to call this function for an Hermitian matrix see typecasting matrices.\n\nSee norm and also randn for the example below.\n\nSee also: colNorm, colProd.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\nnormalizeCol!(X, 2) # (1) normalize columns 2\nnormalizeCol!(X, 2, 10.0) # (2) divide columns 2 by 10.0\nnormalizeCol!(X, 2:4) # (3) normalize columns 2 to 4\nX=randn(ComplexF64, 10, 20)\nnormalizeCol!(X, 3) # (1) normalize columns 3\nnormalizeCol!(X, 3:6, (2.0 + 0.5im)) # (4) divide columns 3 to 5 by (2.0 + 0.5im)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Boolean-functions-of-matrices-1","page":"linearAlgebra.jl","title":"Boolean functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nispos Check whether a real vector or diagonal matrix are comprised of all positive elements","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"ispos","category":"page"},{"location":"linearAlgebra/#PosDefManifold.ispos","page":"linearAlgebra.jl","title":"PosDefManifold.ispos","text":" (1) ispos(λ::Vector{T};\n\t<\n\ttol::Real=0,\n\trev=true,\n\t🔔=true,\n\tmsg=\"\">)\n\n (2) ispos(Λ::𝔻{T};\n\t< same optional keyword arguments as in (1) > )\n\n\tfor all above: where T<:Real\n\nReturn true if all numbers in (1) real vector λ or in (2) real Diagonal matrix Λ are not inferior to tol, otherwise return false. This is used, for example, in spectral functions to check that all eigenvalues are positive.\n\nnote: Nota Bene\ntol defaults to the square root of Base.eps of the type of λ (1) or Λ (2). This corresponds to requiring positivity beyond about half of the significant digits.\n\nThe following are :\n\nIf rev=true the (1) elements in λ or (2) the diagonal elements\n\nin Λ will be chacked in reverse order. This is done for allowing a very fast check when the elements are sorted and it is known from where is best to start checking.\n\nIf the result is false:\n\nif =true a bell character will be printed. 
In most systems this will ring a bell on the computer.\nif string msg is provided, a warning will print msg followed by:\n\n\"at position pos\", where pos is the position where the first non-positive element has been found.\n\n ## Examples\n using PosDefManifold\n a=[1, 0, 2, 8]\n ispos(a, msg=\"non-positive element found\")\n\n # it will print:\n # ┌ Warning: non-positive element found at position 2\n # └ @ [here julie will point to the line of code issuing the warning]\n\n\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Scalar-functions-of-matrices-1","page":"linearAlgebra.jl","title":"Scalar functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\ncolProd Sum of products of the elements in two columns\nsumOfSqr, ss Sum of squares of all elements or of specified columns\nsumOfSqrDiag, ssd Sum of squares of the diagonal elements\ncolNorm Eucliden norm of a column\nsumOfSqrTril, sst Sum of squares of the lower triangle elements up to a given underdiagonal\ntr Fast trace of the product of two Hermitian matrices\nquadraticForm, qf Fast quadratic form\nfidelity (Quantum) Fidelity of two positive matrices","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"colProd\r\nsumOfSqr\r\nsumOfSqrDiag\r\ncolNorm\r\nsumOfSqrTril\r\ntr\r\nquadraticForm\r\nfidelity","category":"page"},{"location":"linearAlgebra/#PosDefManifold.colProd","page":"linearAlgebra.jl","title":"PosDefManifold.colProd","text":"(1) colProd(X::Union{𝕄{T}, ℍ{T}}, j::Int, l::Int)\n(2) colProd(X::Union{𝕄{T}, ℍ{T}}, Y::Union{𝕄{T}, ℍ{T}}, j::Int, l::Int)\nfor all above: where T<:RealOrComplex\n\n(1) Given a real or complex Matrix or Hermitian matrix X, return the dot product of the j^th and l^th columns, defined as,\n\nsum_i=1^r big(x_ij^*x_ilbig)\n\nwhere r is the number of rows of X and ^* denotes complex conjugate (nothing if the matrix is real).\n\n(2) Given real or complex Matrix or Hermitian matrices X and Y, return the dot product of the j^th column of X and the l^th column of Y, defined as,\n\nsum_i=1^r big(x_ij^*y_ilbig)\n\nwhere r is the number of rows of X and of Y and ^* is as above.\n\nnote: Nota Bene\nX and of Y may have a different number of columns, but must have the same number of rows.\n\nArguments j and l must be positive integers in range\n\n(1) j,l in 1:size(X, 2),\n(2) j in 1:size(X, 2), l in 1:size(Y, 2).\n\nSee also: normalizeCol!, colNorm.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\np=colProd(X, 1, 3)\nY=randn(10, 30)\nq=colProd(X, Y, 2, 25)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sumOfSqr","page":"linearAlgebra.jl","title":"PosDefManifold.sumOfSqr","text":"(1) sumOfSqr(A::Array)\n(2) sumOfSqr(H::ℍ{T})\n(3) sumOfSqr(L::𝕃{T})\n(4) sumOfSqr(D::𝔻{T})\n(5) sumOfSqr(X::Union{𝕄{T}, ℍ{T}}, j::Int)\n(6) sumOfSqr(X::Union{𝕄{T}, ℍ{T}}, range::UnitRange)\nfor (1)-(6) above: where T<:RealOrComplex\n\nalias: ss\n\nReturn\n\n(1) the sum of squares of the elements in an array A of any dimensions.\n(2) as in (1), but for an Hermitian matrix H, using only the lower triangular part.\n(3) as in (1), but for a LowerTriangular matrix L.\n(4) as in (1), but for a Diagonal matrix D (sum of squares of diagonal elements).\n(5) the sum of square of the j^th column of a Matrix or Hermitian X.\n(6) 
the sum of square of the columns of a Matrix or Hermitian X in a given range.\n\nAll methods support real and complex matrices.\n\nOnly method (1) works for arrays of any dimensions.\n\nMethods (1)-(4) return the square of the Frobenius norm.\n\nFor method (5), j is a positive integer in range 1:size(X, 1).\n\nFor method (6), range is a UnitRange type.\n\nSee also: colNorm, sumOfSqrDiag, sumOfSqrTril.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\nsum2=sumOfSqr(X) # (1) sum of squares of all elements\nsum2=sumOfSqr(X, 1) # (2) sum of squares of elements in column 1\nsum2=sumOfSqr(X, 2:4) # (3) sum of squares of elements in column 2 to 4\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sumOfSqrDiag","page":"linearAlgebra.jl","title":"PosDefManifold.sumOfSqrDiag","text":"sumOfSqrDiag(X::AnyMatrix)\n\nalias: ssd\n\nSum of squares of the diagonal elements in real or complex Matrix, Diagonal, Hermitian or LowerTriangular matrix X. If X is rectangular (which can be only if it is of the Matrix type), the main diagonal is considered.\n\nSee AnyMatrix type\n\nSee also: sumOfSqr, sumOfSqrTril.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nX=randn(10, 20)\nsumDiag2=sumOfSqrDiag(X) # (1)\nsumDiag2=sumOfSqrDiag(𝔻(X)) # (2) 𝔻=LinearAlgebra.Diagonal\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.colNorm","page":"linearAlgebra.jl","title":"PosDefManifold.colNorm","text":"colNorm(X::Union{𝕄{T}, ℍ{T}}, j::Int) where T<:RealOrComplex\n\nGiven a real or complex Matrix or Hermitian matrix X, return the Euclidean norm of its j^th column.\n\nThis function calls the BLAS.nrm2 routine. See Threads.\n\nSee also: normalizeCol!, colProd, sumOfSqr.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\nnormOfSecondColumn=colNorm(X, 2)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sumOfSqrTril","page":"linearAlgebra.jl","title":"PosDefManifold.sumOfSqrTril","text":"sumOfSqrTril(X::AnyMatrix, k::Int=0)\n\nalias: sst\n\nGiven a real or complex Matrix, Diagonal, Hermitian or LowerTriangular matrix X (see AnyMatrix type), return the sum of squares of the elements in its lower triangle up to the k^th underdiagonal.\n\nMatrix X may be rectangular.\n\nk must be in range\n\n1-size(X, 1):c-1 for X Matrix, Diagonal or Hermitian,\n1-size(X, 1):0 for X LowerTriangular.\n\nFor X Diagonal the result is\n\n0 if k0,\nthe sum of the squares of the diagonal elements otherwise.\n\nSee julia tril(M, k::Integer) function for numbering of diagonals.\n\nSee also: sumOfSqr, sumOfSqrDiag.\n\nExamples\n\nusing PosDefManifold\nA=[4. 3.; 2. 5.; 1. 2.]\n#3×2 Array{Float64,2}:\n# 4.0 3.0\n# 2.0 5.0\n# 1.0 2.0\n\ns=sumOfSqrTril(A, -1)\n# 9.0 = 1²+2²+2²\n\ns=sumOfSqrTril(A, 0)\n# 50.0 = 1²+2²+2²+4²+5²\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#LinearAlgebra.tr","page":"linearAlgebra.jl","title":"LinearAlgebra.tr","text":"(1) tr(P::ℍ{T}, Q::ℍ{T})\n(2) tr(P::ℍ{T}, M::𝕄{T})\n(3) tr(D::𝔻{T}, H::Union{ℍ{T}, 𝕄{T}})\n(4) tr(H::Union{ℍ{T}, 𝕄{T}}, D::𝔻{T})\nfor all above: where T<:RealOrComplex\n\nGiven (1) two Hermitian positive definite matrix P and Q, return the trace of the product PQ. This is real even if P and Q are complex.\n\nP must always be flagged as Hermitian. 
See typecasting matrices.\n\nIn (2) Q is a Matrix object, in which case return\n\na real trace if the product PQ is real or if it has all positive real eigenvalues.\na complex trace if the product PQ is not real and has complex eigenvalues.\n\nMethods (3) and (4) return the trace of the product DH or HD, where D is a Diagonal matrix and H an Hermitian or Matrix object. The result is of the same type as the input matrices.\n\nFor all methods all arguments must be of the same type.\n\nMath\n\nLet P and Q be Hermitian matrices, using the properties of the trace (e.g., the cyclic property and the similarity invariance) you can use this function to fast compute the trace of several expressions. For example:\n\ntextrmtr(PQ)=textrmtr(P^12QP^12)\n\nand\n\ntextrmtr(PQP)=textrmtr(P^2Q) (see example below).\n\nSee: trace.\n\nSee also: DiagOfProd, tr1.\n\nExamples\n\nusing PosDefManifold\nP=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5\nQ=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5\ntr(P, Q) ≈ tr(P*Q) ? println(\" ⭐ \") : println(\" ⛔ \")\ntr(P, Q) ≈ tr(sqrt(P)*Q*sqrt(P)) ? println(\" ⭐ \") : println(\" ⛔ \")\ntr(sqr(P), Q) ≈ tr(P*Q*P) ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.quadraticForm","page":"linearAlgebra.jl","title":"PosDefManifold.quadraticForm","text":"(1) quadraticForm(v::Vector{T}, P::ℍ{T}) where T<:Real\n(2) quadraticForm(v::Vector{T}, L::𝕃{T}) where T<:Real\n(3) quadraticForm(v::Vector{T}, X::𝕄{T}, forceLower::Bool=false) where T<:Real\n(4) quadraticForm(v::Vector{S}, X::Union{𝕄{S}, ℍ{S}, 𝕃{S}}) where S<:Complex\n\nalias: qf\n\n(1) Given a real vector v and a real Hermitian matrix P, compute the quadratic form\n\nv^TPv,\n\nwhere the superscript T denotes transpose. It uses only the lower triangular part of P.\n\n(2) As in (1), given a real vector v and a LowerTriangular matrix L.\n\n(3) As in (1), given a real vector v and a real generic Matrix M, if forceLower=true. If forceLower=false, the product v^TMv is evaluated instead using the whole matrix M.\n\n(4) Quadratic form v^HPv, where superscript H denotes complex conjugate and transpose, for a complex vector v and a complex Matrix, LowerTrianglar or Hermitian matrix. The whole matrix is used.\n\nMath\n\nFor v and X real and X symmetric, the quadratic form is\n\nsum_i(v_i^2x_ii)+sum_ij(2v_iv_jx_ij).\n\nFor L lower triangular is\n\nsum_i(v_i^2x_ii)+sum_ij(v_iv_jx_ij).\n\nThese formula are used in methods (1), (2) and (3).\n\nExamples\n\nusing PosDefManifold\nP=randP(5) # generate a random real positive definite matrix 5x5\nv=randn(5)\nq1=quadraticForm(v, P) # or q1=qf(v, P)\nq2=v'*P*v\nq1 ≈ q2 ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.fidelity","page":"linearAlgebra.jl","title":"PosDefManifold.fidelity","text":"fidelity(P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\nGiven two positive definte Hermitian matrices P and Q, return their fidelity:\n\ntrbig(P^12QP^12big)^12\n\nThis is used in quantum physics and is related to the Wasserstein metric. 
See for example Bhatia, Jain and Lim (2019b)🎓.\n\nExamples\n\nusing PosDefManifold\nP=randP(5);\nQ=randP(5);\nf=fidelity(P, Q)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Diagonal-functions-of-matrices-1","page":"linearAlgebra.jl","title":"Diagonal functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nfDiag, 𝑓𝔻 Elemen-wise functions of matrix diagonals\nDiagOfProd, dop Diagonal of the product of two matrices","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"fDiag\r\nDiagOfProd","category":"page"},{"location":"linearAlgebra/#PosDefManifold.fDiag","page":"linearAlgebra.jl","title":"PosDefManifold.fDiag","text":"fDiag(func::Function, X::AnyMatrix, k::Int=0)\n\nalias: 𝑓𝔻\n\nApplies function func element-wise to the elements of the k^th diagonal of real or complex Diagonal, LowerTriangular, Matrix or Hermitian matrix X and return a diagonal matrix with these elements. X must be square in all cases, but for the 𝕄=Matrix type argument, in which case it may be of dimension r⋅c, with r ≠ c.\n\nSee julia tril(M, k::Integer) function for numbering of diagonals.\n\nBt default the main diagonal is considered.\n\nIf X is Diagonal, k is set automatically to zero (main diagonal).\nIf X is LowerTriangular, k cannot be positive.\n\nNote that if X is rectangular the dimension of the result depends on the size of X and on the chosen diagonal. For example,\n\nr ≠ c and k=0 (main diagonal), the result will be of dimension min(r,c)⋅min(r,c),\nX 3⋅4 and k=-1, the result will be 2⋅2,\nX 3⋅4 and k=1, the result will be 3⋅3, etc.\n\nnote: Nota Bene\nThe function func must support the func. syntax and therefore must be able to apply element-wise to the elements of the chosen diagonal (this includes anonymous functions).\n\nIf the input matrix is complex, the function `func`\nmust be able to support complex arguments.\n\nSee also: DiagOfProd, tr.\n\nExamples\n\nusing PosDefManifold\nP=randP(5) # use P=randP(ComplexF64, 5) for generating an Hermitian matrix\n\n# diagonal matrix with the inverse of the first sub-diagonal of P\nD=fDiag(inv, P, -1)\n\n(Λ, U) = evd(P) # Λ holds the eigenvalues of P, see evd\n\n# diagonal matrix with the log of the eigenvalues\nΔ=fDiag(log, Λ)\n\n# using an anonymous function for the square of the eigenvalues\nΔ=fDiag(x->x^2, Λ)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.DiagOfProd","page":"linearAlgebra.jl","title":"PosDefManifold.DiagOfProd","text":"DiagOfProd(P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\nalias: dop\n\nReturn the Diagonal matrix holding the diagonal of the product PQ of two Hermitian matrices P and Q. Only the diagoanl part of the product is computed.\n\nSee also: tr, fDiag.\n\nExamples\n\nusing PosDefManifold, LinearAlgebra\nP, Q=randP(5), randP(5)\nDiagOfProd(P, Q)≈Diagonal(P*Q) ? 
println(\"⭐ \") : println(\"⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Unitary-functions-of-matrices-1","page":"linearAlgebra.jl","title":"Unitary functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nmgs Modified Gram-Schmidt orthogonalization","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"mgs","category":"page"},{"location":"linearAlgebra/#PosDefManifold.mgs","page":"linearAlgebra.jl","title":"PosDefManifold.mgs","text":"mgs(X::𝕄{T}, numCol::Int=0) where T<:RealOrComplex\n\nModified (stabilized) Gram-Schmidt orthogonalization of the columns of square or tall matrix X, which can be comprised of real or complex elements. The orthogonalized X is returned by the function. X is not changed.\n\nAll columns are orthogonalized by default. If instead argument numCol is provided, then only the first numCol columns of X are orthogonalized. In this case only the firt numCol columns will be returned.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nX=randn(10, 10);\nU=mgs(X) # result is 10⋅10\nU=mgs(X, 3) # result is 10⋅3\nU'*U ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n# julia undertands also:\nU'U ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Matrix-function-of-matrices-1","page":"linearAlgebra.jl","title":"Matrix function of matrices","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nfVec General function for multi-threaded computation of means and sums of matrix vectors\ncongruence, cong Compute congruent transformations","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"fVec\r\ncongruence","category":"page"},{"location":"linearAlgebra/#PosDefManifold.fVec","page":"linearAlgebra.jl","title":"PosDefManifold.fVec","text":"\t(1) fVec(f::Function, 𝐏::AnyMatrixVector;\n\t<\n\tw::Vector=[],\n\t✓w=false,\n\tallocs=[])\n\t>\n\n\t(2) fVec(f::Function, g::Function, 𝐏::AnyMatrixVector;\n\t< same optional keyword arguments in (1) >)\n\nGiven a 1d array 𝐏=P_1P_k of k matrices of the 𝕄Vector type, 𝔻Vector type, 𝕃Vector type or ℍVector type and an optional non-negative real weights vector w=w_1w_k, return expression\n\n(1)hspace6ptf_i=1^k(w_iP_i),\n\nor\n\n(2)hspace6ptf_i=1^k(w_ig(P_i)),\n\nwhere f is either the mean or the sum standard julia functions and g is whatever matrix function applying to each matrix P_k, such as exp, log,sqrt`, etc, and anonymous functions.\n\nThis function is multi-threaded. It works by partitioning the k operations required by the f function in several groups, passing each group to a separate thread and combining the result of the intermediate operations. This function allows a gain in computational time only when the number of matrices (1) and/or their size (2) is high. Use mean and sum otherwise. The maximal gain is obtained when the number of matrices in 𝐏 is an exact multiple of the number of threads Julia is instructed to use. For this latter, see Threads.\n\n!!! 
note \"Nota Bene\"\n\n Contrarily to Julia `mean` and `sum` function (v 1.1.0) the `fVec` function\n returns a matrix of the same type of the matrices in ``𝐏``.\n\n allocs allows to pass pre-allocated memory for holding the intermediate result of each thread. Argument allocs must be a vector of as many matrices as threads and where the matrices have the same dimension as the the matrices in 𝐏 (see the example here below). Using this option is worthwhile only if the size of the matrices is very high and/or when fVec is to be called repeatedly on many vector of matrices, where the matrices have always the same size, so that one allocation works for all calls.\n\nIf ✓w=true is passed, the weights are normalized so as to sum up to 1, otherwise they are used as they are passed. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time. By default ✓w is false.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nPset=randP(4, 1000); # generate 1000 positive definite 4x4 matrices\nmean(Pset) # arithmetic mean calling Julia function\nThreads.nthreads() # check how many threads are available\nfVec(mean, Pset) # multi-threaded arithmetic mean\n\ninv(mean(inv, Pset)) # Harmonic mean calling Julia function\ninv(fVec(mean, inv, Pset)) # multi-threaded Harmonic mean\n\nexp(mean(log, Pset)) # log Euclidean mean calling Julia function\nexp(fVec(mean, log, Pset)) # multi-threaded log Euclidean mean\n\n# notice that Julia `exp` function has changed the type of the result\n# to `Symmetric`. To obtain an `Hermitian` output use\nℍ(exp(fVec(mean, log, Pset)))\n\nw=(randn(1000)).^2\nw=w./sum(w) \t\t# generate normalized random weights\n\n# weighted arithmetic mean calling Julia function\nsum(Pset[i]*w[i] for i=1:length(w))\n# multi-threaded weighted arithmetic mean\nfVec(sum, Pset, w=w)\n\n# weighted harmonic mean calling Julia function\ninv(sum(inv(Pset[i])*w[i] for i=1:length(w)))\n# multi-threaded weighted harmonic mean\ninv(fVec(sum, inv, Pset, w=w))\n\n# pre-allocating memory\nPset=randP(100, 1000); # generate 1000 positive definite 100x100 matrices\nQset=MatrixVector(repeat([similar(Pset[1])], Threads.nthreads()))\nfVec(mean, log, Pset, allocs=Qset)\n\n# How much computing time we save ?\n# (example min time obtained with 4 threads & 4 BLAS threads)\nusing BenchmarkTools\n# standard Julia function\n@benchmark(mean(log, Pset)) \t\t\t\t\t# (5.271 s)\n# fVec\n@benchmark(fVec(mean, log, Pset))\t\t\t\t# (1.540 s)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.congruence","page":"linearAlgebra.jl","title":"PosDefManifold.congruence","text":"(1) congruence(B::AnyMatrix, P::AnyMatrix, matrixType)\n(2) congruence(B::AnyMatrix, 𝐏::AnyMatrixVector, matrixVectorType)\n(3) congruence(B::AnyMatrix, 𝑷::AnyMatrixVector₂, matrixVector₂Type)\n(4) congruence(𝐁::AnyMatrixVector, 𝑷::AnyMatrixVector₂, matrixVector₂Type)\n\nalias: cong\n\n(1) Return the congruent transformation\n\nBPB^H,\n\nfor B and P any combination of Hermitian, LowerTriangular, Diagonal or general Matrix type.\n\nThe result is of the matrixType argument, which must be provided and must be one of these four abstract type (not an instance of them). 
See aliases for shortening these types using symbols ℍ, 𝔻, 𝕃 and 𝕄.\n\n(2) Return a vector of matrices holding the congruent transformations\n\nBP_kB^H,\n\nfor all k matrices in 𝐏=P_1P_k, for B and 𝐏 any combination of matrix type Hermitian, LowerTriangular, Diagonal or Matrix (B) and vector of matrices type ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector (𝐏). See Array of Matrices types.\n\nThe result is a vector of matrices of the matrixVectorType argument, which must be provided and must be one of the following abstract types: ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector (and not an instance of these types).\n\n(3) Return a vector of vector of matrices holding the congruent transformations\n\nBP_mkB^H,\n\nfor all m vectors of k_m matrices in 𝑷, for B and 𝑷 any combination of matrix type Hermitian, LowerTriangular, Diagonal or Matrix (B) and vector of matrices type ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ and 𝕄Vector₂ (𝑷). See Array of Matrices types.\n\nThe result is a vector of vector of matrices of the matrixVector₂Type argument, which must be provided and must be one of the following abstract types: ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ or 𝕄Vector₂ (and not an instance of these types).\n\n(4) Return a vector of vector of matrices holding the congruent transformations\n\nB_iP_ijB_j^H, for i,j∈[1, m],\n\nfor 𝐁 holding m matrices and 𝑷 holding m vectors holding m matrices each. Note that, unlike in method (3), here the vectors of 𝑷 are all of the same length, which is exactly the length of 𝐁. 𝐁 and 𝑷 may be any combination of matrix vector type ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector (𝐁) and vector of matrices type ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ and 𝕄Vector₂ (𝑷). See Array of Matrices types.\n\nNote that this function computes the following algebraic expression: the block-diagonal matrix with diagonal blocks B_1B_m, times the block matrix holding all the C_ij blocks, times the block-diagonal matrix with diagonal blocks B_1^TB_m^T.\n\nThe result is a vector of vector of matrices of the matrixVector₂Type argument, which must be provided and must be one of the following abstract types: ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ or 𝕄Vector₂ (and not an instance of these types).\n\nWhen you pass it to this function, make sure to typecast 𝐁 as an ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector type if it is not already created as one of these types. See the example here below and typecasting matrices.\n\nMethods (2), (3) and (4) are multi-threaded. See Threads.\n\nnote: Nota Bene\nTypes ℍ, 𝔻, 𝕃 or 𝕄 are actually constructors, thus they may modify the result of the congruence(s). This greatly expands the possibilities of this function, but it is your responsibility to pick the right argument matrixType in (1), matrixVectorType in (2) and matrixVector₂Type in (3)-(4).\nFor example, in (1) if ``B`` and ``P`` are `Hermitian`,\ncalling `cong(B, P, 𝔻)` will actually\nreturn the diagonal part of ``B*P*B'`` and calling `cong(B, P, 𝕃)` will\nactually return its lower triangular part. 
The full congruence can\nbe obtained as an `Hermitian` matrix by `cong(B, P, ℍ)` and as a generic\nmatrix object by `cong(B, P, 𝕄)`.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n\n# (1)\nP=randP(3) # generate a 3x3 positive matrix\nM=randn(3, 3)\nC=cong(M, P, ℍ) # equivalent to C=ℍ(M*P*M')\n\n# (2)\nPset=randP(4, 100); # generate 100 positive definite 4x4 matrices\nM=randn(4, 4)\nQset=cong(M, Pset, ℍVector) # = [M*Pset_1*M',...,M*Pset_k*M'] as an ℍVector type\n\n# recenter the matrices in Pset to their Fisher mean:\nQset=cong(invsqrt(mean(Fisher, Pset)), Pset, ℍVector)\n\n# as a check, the Fisher mean of Qset is now the identity\nmean(Fisher, Qset)≈I ? println(\"⭐\") : println(\"⛔\")\n\n# (3)\nPset1=randP(4, 10); # generate 10 positive definite 4x4 matrices\nPset2=randP(4, 8);\nPset=ℍVector₂([Pset1, Pset2]);\nM=randn(4, 4)\nQset=cong(M, Pset, MatrixVector₂)\nQset[1][1]≈M*Pset[1][1]*M' ? println(\"⭐\") : println(\"⛔\")\nQset[1][5]≈M*Pset[1][5]*M' ? println(\"⭐\") : println(\"⛔\")\nQset[2][1]≈M*Pset[2][1]*M' ? println(\"⭐\") : println(\"⛔\")\nQset[2][4]≈M*Pset[2][4]*M' ? println(\"⭐\") : println(\"⛔\")\n\n\n# (4)\nPset1=randP(4, 2); # generate 2 positive definite 4x4 matrices\nPset2=randP(4, 2);\nPset=ℍVector₂([Pset1, Pset2]);\nU=𝕄Vector([randU(4), randU(4)])\nQset=cong(U, Pset, MatrixVector₂)\nQset[1][1]≈U[1]*Pset[1][1]*U[1]' ? println(\"⭐\") : println(\"⛔\")\nQset[1][2]≈U[1]*Pset[1][2]*U[2]' ? println(\"⭐\") : println(\"⛔\")\nQset[2][1]≈U[2]*Pset[2][1]*U[1]' ? println(\"⭐\") : println(\"⛔\")\nQset[2][2]≈U[2]*Pset[2][2]*U[2]' ? println(\"⭐\") : println(\"⛔\")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Spectral-decompositions-of-positive-matrices-1","page":"linearAlgebra.jl","title":"Spectral decompositions of positive matrices","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nevd Eigenvalue-Eigenvector decomposition of a matrix in UΛU=P form\nfrf Full-rank factorization of an Hermitian matrix\ninvfrf Inverse of the full-rank factorization of an Hermitian matrix (whitening)\nspectralFunctions Mother function for creating spectral functions of eigenvalues\npow Power of a positive matrix for any number of exponents in one pass\ninvsqrt Principal square root inverse (whitening) of a positive matrix\nsqr Square of a positive matrix\npowerIterations, powIter Power method for estimating any number of eigenvectors and associated eigenvalues","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"evd\r\nfrf\r\ninvfrf\r\nspectralFunctions\r\npow\r\ninvsqrt\r\nsqr\r\npowerIterations","category":"page"},{"location":"linearAlgebra/#PosDefManifold.evd","page":"linearAlgebra.jl","title":"PosDefManifold.evd","text":"evd(S::Union{𝕄{T}, ℍ{T}}) where T<:RealOrComplex\n\nGiven a positive semi-definite matrix S, returns a 2-tuple (Λ U), where U is the matrix holding in columns the eigenvectors and Λ is the matrix holding the eigenvalues on the diagonal. 
This is the output of Julia eigen function in UΛU=S form.\n\nAs for the eigen function, the eigenvalues and associated eigenvectors are sorted by increasing values of eigenvalues.\n\nS may be real or complex and may be flagged by Julia as Hermitian (in this case PosDefManifold assumes it is positive definite).\n\nSee typecasting matrices.\n\nSee also: spectralFunctions.\n\nExamples\n\nusing PosDefManifold\nA=randn(3, 3);\nS=A+A';\nΛ, U=evd(S); # which is equivalent to (Λ, U)=evd(S)\n(U*Λ*U') ≈ S ? println(\" ⭐ \") : println(\" ⛔ \")\n# => UΛU'=S, UΛ=SU, ΛU'=U'S\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.frf","page":"linearAlgebra.jl","title":"PosDefManifold.frf","text":"frf(P::ℍ{T}) where T<:RealOrComplex\n\nFull-rank factorization of Hermitian matrix P. It is given by\n\nF=UD^12,\n\nwhere\n\ntextrmEVD(P)=UDU^H\n\nis the eigenvalue-eigenvector decomposition of P. It verifies\n\nFF^H=P,\n\nthus F^-1 is a whitening matrix.\n\nSee also: invfrf.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3)\nF = frf(P)\nF*F'≈P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.invfrf","page":"linearAlgebra.jl","title":"PosDefManifold.invfrf","text":"invfrf(P::ℍ{T}) where T<:RealOrComplex\n\nInverse of the full-rank factorization of Hermitian matrix P. It is given by\n\nF=D^-12U^H,\n\nwhere\n\ntextrmEVD(P)=UDU^H\n\nis the eigenvalue-eigenvector decomposition of P. It verifies\n\nFPF^H=I,\n\nthus F is a whitening matrix.\n\nSee also: frf.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3)\nF = invfrf(P)\nF*P*F'≈I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.spectralFunctions","page":"linearAlgebra.jl","title":"PosDefManifold.spectralFunctions","text":"(1) spectralFunctions(P::ℍ{T}, func) where T<:RealOrComplex\n(2) spectralFunctions(D::𝔻{S}, func) where S<:Real\n\n(1) This is the mother function for all spectral functions of eigenvalues implemented in this library, which are:\n\npow (power),\ninvsqrt (inverse square root).\n\nThe function sqr (square) does not use it, as it can be obtained more efficiently by simple multiplication.\n\nYou can use this function if you need another spectral function of the eigenvalues besides those listed above and those already implemented in the standard package LinearAlgebra. In general, you won't call it directly.\n\nfunc is the function that will be applied on the eigenvalues.\n\nP must be flagged as Hermitian. See typecasting matrices. It must be a positive definite or positive semi-definite matrix, depending on func.\n\nA special method is provided for real Diagonal matrices (2).\n\nnote: Nota Bene\nThe function func must support the func. syntax and therefore must be able to apply element-wise to the eigenvalues (this includes anonymous functions).\n\nMaths\n\nThe definition of spectral functions for a positive definite matrix P is as follows:\n\nfbig(Pbig)=Ufbig(Λbig)U^H\n\nwhere U is the matrix holding in columns the eigenvectors of P, Λ is the matrix holding on diagonal its eigenvalues and f is a function applying element-wise to the eigenvalues.\n\nSee also: evd.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nn=5\nP=randP(n) # P=randP(ComplexF64, 5) to generate an Hermitian complex matrix\nnoise=0.1;\nQ=spectralFunctions(P, x->x+noise) # add white noise to the eigenvalues\ntr(Q)-tr(P) ≈ noise*n ? 
println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.pow","page":"linearAlgebra.jl","title":"PosDefManifold.pow","text":"(1) pow(P::ℍ{T}, args...) where T<:RealOrComplex\n(2) pow(D::𝔻{S}, args...) where S<:Real\n\n(1) Given a positive semi-definite Hermitian matrix P, return the power P^r_1 P^r_2 for any number of exponents r_1 r_2. It returns a tuple comprising as many elements as arguments passed after P.\n\nP must be flagged as Hermitian. See typecasting matrices.\n\narg1 arg2 are real numbers.\n\nA special method is provided for real Diagonal matrices (2).\n\nSee also: invsqrt.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(5); # use P=randP(ComplexF64, 5) for generating an Hermitian matrix\nQ=pow(P, 0.5); # => QQ=P\nQ, W=pow(P, 0.5, -0.5);\nW*P*W ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\nQ*Q ≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\nR, S=pow(P, 0.3, 0.7);\nR*S ≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.invsqrt","page":"linearAlgebra.jl","title":"PosDefManifold.invsqrt","text":"(1) invsqrt(P{T}::ℍ) where T<:RealOrComplex\n(2) invsqrt(D{S}::𝔻) where S<:Real\n\nGiven a positive definite Hermitian matrix P, compute the inverse of the principal square root P^-12.\n\nP must be flagged as Hermitian. See typecasting matrices.\n\nA special method is provided for real Diagonal matrices (2).\n\nMaths\n\nThe principal square root of a positive definite matrix P is the only symmetric (if P is real) or Hermitian (if P is complex) square root. Its inverse P^-12 is also named the whitening or sphering matrix sinceP^-12PP^-12=I.\n\nSee: typecasting matrices.\n\nSee also: pow.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(ComplexF64, 5);\nQ=invsqrt(P);\nQ*P*Q ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sqr","page":"linearAlgebra.jl","title":"PosDefManifold.sqr","text":"(1) sqr(P::ℍ{T}) where T<:RealOrComplex\n(2) sqr(X::Union{𝕄{T}, 𝕃{T}, 𝔻{S}}) where T<:RealOrComplex where S<:Real\n\n(1) Given a positive semi-definite Hermitian matrix P, compute its square P^2.\n\nP must be flagged as Hermitian. See typecasting matrices.\n\nA method is provided also for generic matrices of the Matrix type, LowerTriangular matrices and real Diagonal matrices (2). The output is of the same type as the input.\n\nSee also: pow.\n\nExamples\n\nusing PosDefManifold\nP=randP(5);\nP²=sqr(P); # => P²=PP\nsqrt(P²)≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.powerIterations","page":"linearAlgebra.jl","title":"PosDefManifold.powerIterations","text":"powerIterations(H::Union{ℍ{T}, 𝕄{T}}, q::Int;\n<\nevalues=false,\ntol::Real=0,\nmaxiter::Int=300,\nverbose=false>) where T<:RealOrComplex\n\npowerIterations(L::𝕃{S}, q::Int;\n< same optional keyword arguments in (1)>) where S<:Real\n\nalias: powIter\n\n(1) Compute the q eigenvectors associated to the q largest (real) eigenvalues of real or complex Hermitian or Matrix H using the power iterations + Gram-Schmidt orthogonalization as suggested by Strang. The eigenvectors are returned with the same type as the elements of H.\n\nH must have real eigenvalues, that is, it must be a symmetric matrix if it is real or an Hermitian matrix if it is complex.\n\n(2) as in (1), but using only the LowerTriangular view L of a matrix. 
This option is available only for real matrices (see below).\n\nThe following are the optional keyword arguments:\n\ntol is the tolerance for the convergence of the power method (see below),\nmaxiter is the maximum number of iterations allowed for the power method,\nif verbose=true, the convergence of all iterations will be printed,\nif evalues=true, return the 4-tuple (Λ, U, iterations, convergence),\nif evalues=false, return the 3-tuple (U, iterations, convergence).\n\nnote: Nota Bene\nUnlike with the evd function, the eigenvectors and eigenvalues are sorted by decreasing order of eigenvalues.If H is Hermitian and real, only its lower triangular part is used for computing the power iterations, like in (2). In this case the BLAS.symm routine is used. Otherwise the BLAS.gemm routine is used. See Threads.tol defaults to 100 times the square root of Base.eps of the type of H. This corresponds to requiring the relative convergence criterion over two successive iterations to vanish for about half the significant digits minus 2.\n\nSee also: mgs.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate an Hermitian (complex) matrix\nH=randP(ComplexF64, 10);\n# 3 eigenvectors and eigenvalues\nU, iterations, convergence=powIter(H, 3, verbose=true)\n# all eigenvectors\nΛ, U, iterations, convergence=powIter(H, size(H, 2), evalues=true, verbose=true);\nU'*U ≈ I && U*Λ*U'≈H ? println(\" ⭐ \") : println(\" ⛔ \")\n\n# passing a `Matrix` object\nΛ, U, iterations, convergence=powIter(Matrix(H), 3, evalues=true)\n\n# passing a `LowerTriangular` object (must be a real matrix in this case)\nL=𝕃(randP(10))\nΛ, U, iterations, convergence=powIter(L, 3, evalues=true)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Decompositions-involving-triangular-matrices-1","page":"linearAlgebra.jl","title":"Decompositions involving triangular matrices","text":"","category":"section"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nchoL Lower triangular factor of Cholesky decomposition\nchoInv Lower triangular factor of Cholesky decomposition and its inverse in one pass\nchoInv! as choInv, but destroying the input matrix","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"choL\r\nchoInv\r\nchoInv!","category":"page"},{"location":"linearAlgebra/#PosDefManifold.choL","page":"linearAlgebra.jl","title":"PosDefManifold.choL","text":"(1) choL(P::ℍ{T}) where T<:RealOrComplex\n(2) choL(D::𝔻{S}) where S<:Real\n\n(1) Given a real or complex positive definite Hermitian matrix P, return the Cholesky lower triangular factor L such that LL^H=P. To obtain L^H or both L and L^H, use instead julia function cholesky.\n\nOn output, L is of type LowerTriangular.\n\n(2) For a real Diagonal matrix D, return D^12.\n\nSee also: choInv.\n\nExamples\n\nusing PosDefManifold\nP=randP(5);\nL=choL(P);\nL*L'≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.choInv","page":"linearAlgebra.jl","title":"PosDefManifold.choInv","text":"choInv(P::AbstractArray{T};\n\t kind::Symbol = :LLt, tol::Real = √eps(T)) where T<:RealOrComplex\n\nFor a real or complex positive definite P, let P=LL^H be its Cholesky decomposition and P=L_1DL_1^H the related LDLt decomposition. 
In the above L is a lower triangular matrix, D a positive-definite diagonal matrix and L_1 a unit lower triangular matrix. Return:\n\nif kind=:LLt (default), the 2-tuple L, L^-H,\nif kind=:LDLt, the 3-tuple L_1, D, L_1^-H.\n\nThese are obtained in one pass, which is faster than calling Julia's cholesky function and inverting the lower factor.\n\nInput matrix P may be of type Matrix or Hermitian. Since only the lower triangle is used, P may also be a LowerTriangular view. If P is real it can also be Symmetric.\n\nThe algorithm is a multiplicative Gaussian elimination. If run to completion, the identity matrix is left in the input matrix P. Only the lower part of P is required.\n\nNotes: Output L^-H is an inverse square root (whitening matrix) of P, since L^-1PL^-H=I. It can therefore be used to invert P as P^-1=L^-HL^-1. It is the fastest whitening matrix to compute, however it yields poor numerical precision, especially for large matrices.\n\nThe following relations hold: L=PL^-H, L^H=L^-1P, L^-H=P^-1L, L^-1=L^HP^-1.\n\nWe also have L^HL=L^-1P^2L^-H=L^HPL^-H=L^-1PL=UPU^H, with U orthogonal (see below) and L^-1L^-H=L^HP^-2L=L^HP^-1L^-H=L^-HP^-1L=UP^-1U^H.\n\nLL^H and L^HL are unitarily similar, that is, ULL^HU^H=L^HL, where U=L^-1P^12, with P^12=H the principal (unique symmetric) square root of P. This is seen writing PP^-1=HHL^-HL^-1; multiplying both sides on the left by L^-1 and on the right by L we obtain L^-1PP^-1L=L^-1HHL^-H=I=(L^-1H)(L^-1H)^H and since L^-1H is square it must be unitary.\n\nFrom these expressions we have H=LU=U^HL^H, L=HU^H, H^-1=U^HL^-1, L^-1=UH^-1. U is the polar factor of L^H, i.e., L^H=UH, since LL^H=HU^HUH^H=H^2=P.\n\nFrom L^HL=UPU^H we have L^HLU=UP=ULL^H and from U=L^-1H we have L=HU^H.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(5);\nL, Linv=choInv(P); # Linv is the inverse of L'\nL*L'≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\nLinv'*P*Linv ≈ I ? println(\" ⭐ \") : println(\" ⛔ \") # whitening\n\n\n\n\n\n","category":"function"},
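A hedged sketch of the LDLt form, based solely on the return values documented above:\n\nusing LinearAlgebra, PosDefManifold\nP=randP(5);\n# one-pass LDLt decomposition together with the inverse factor\nL1, D, L1inv=choInv(P; kind=:LDLt);\nL1*D*L1'≈ P ? println(\" ⭐ \") : println(\" ⛔ \")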
{"location":"riemannianGeometry/#riemannianGeometry.jl-1","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"","category":"section"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"This is the fundamental unit of PosDefManifold. It contains functions for manipulating points in the Riemannian manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. In Julia those are Hermitian matrices, see typecasting matrices.","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"The functions are divided into six categories:","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Category Output\n1. Geodesic equations interpolation, extrapolation, weighted mean of two matrices, ...\n2. Distances length of geodesics\n3. Graphs and Laplacians inter-distance matrices, spectral embedding, eigenmaps, ...\n4. Means mid-points of geodesics, Fréchet means of several points, midrange,...\n5. Tangent Space operations maps from the manifold to the tangent space and vice versa, parallel transport,...\n6. Procrustes problems data matching, transfer learning (domain adaptation), ...","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#Geodesic-equations-1","page":"riemannianGeometry.jl","title":"Geodesic equations","text":"","category":"section"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\ngeodesic Geodesic equations (weighted mean of two positive definite matrices) for any metric","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"geodesic","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.geodesic","page":"riemannianGeometry.jl","title":"PosDefManifold.geodesic","text":"(1) geodesic(metric::Metric, P::ℍ{T}, Q::ℍ{T}, a::Real) where T<:RealOrComplex\n(2) geodesic(metric::Metric, D::𝔻{S}, E::𝔻{S}, a::Real) where S<:Real\n\n(1) Move along the geodesic from point P to point Q (two positive definite matrices) with arclength 0≤a≤1, using the specified metric, of type Metric::Enumerated type.\n\nFor all metrics,\n\nwith a=0 we stay at P,\nwith a=1 we move up to Q,\nwith a=1/2 we move to the mid-point of P and Q (mean).\n\nUsing the Fisher metric, argument a can be any real number, for instance:\n\nwith 0<a<1 we move toward Q (attraction),\nwith a>1 we move over and beyond Q (extrapolation),\nwith a<0 we move back away from Q (repulsion).\n\nP and Q must be flagged by julia as Hermitian. See typecasting matrices.\n\nThe Fisher geodesic move is computed by the Cholesky-Schur algorithm given in Eq. 4.2 by Iannazzo(2016)🎓. If Q=I, the Fisher geodesic move is simply P^a (no need to call this function).\n\nnote: Nota Bene\nFor the logdet zero and Jeffrey metric no closed form expression for the geodesic is available to the best of authors' knowledge, so in this case the geodesic is found as the weighted mean using the mean function. 
For the Von Neumann metric not even an expression for the mean is available, so in this case the geodesic is not provided and a warning is printed.\n\n(2) Like in (1), but for two real positive definite diagonal matrices D and E.\n\nMaths\n\nFor points P, Q and arclength a, letting b=1-a, the geodesic equations for the supported metrics are:\n\nMetric geodesic equation\nEuclidean bP + aQ\ninvEuclidean big(bP^-1 + aQ^-1big)^-1\nChoEuclidean TT^*, where T=bL_P + aL_Q\nlogEuclidean textexpbig(bhspace2pttextlog(P) + ahspace2pttextlog(Q)big)\nlogCholesky TT^*, where T=S_P+a(S_Q-S_P)+D_Phspace2pttextexpbig(a(textlogD_Q-textlogD_P)big)\nFisher P^12 big(P^-12 Q P^-12big)^a P^12\nlogdet0 uses weighted mean algorithm logdet0Mean\nJeffrey uses weighted mean mean\nVonNeumann N.A.\nWasserstein b^2P+a^2Q +abbig(PQ)^12 +(QP)^12big\n\nlegend: L_X, S_X and D_X are the Cholesky lower triangle of X, its strictly lower triangular part and diagonal part, respectively (hence, S_X+D_X=L_X, L_XL_X^*=X).\n\nSee also: mean.\n\nExamples\n\nusing PosDefManifold\nP=randP(10)\nQ=randP(10)\n# Wasserstein mean\nM=geodesic(Wasserstein, P, Q, 0.5)\n# extrapolate using the Fisher metric\nE=geodesic(Fisher, P, Q, 2)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Distances-1","page":"riemannianGeometry.jl","title":"Distances","text":"","category":"section"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\ndistanceSqr, distance² Squared distance between positive definite matrices\ndistance Distance between positive definite matrices","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"distanceSqr\r\ndistance","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.distanceSqr","page":"riemannianGeometry.jl","title":"PosDefManifold.distanceSqr","text":"(1) distanceSqr(metric::Metric, P::ℍ{T}) where T<:RealOrComplex\n(2) distanceSqr(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n(3) distanceSqr(metric::Metric, D::𝔻{S}) where S<:Real\n(4) distanceSqr(metric::Metric, D::𝔻{S}, E::𝔻{S}) where S<:Real\n\nalias: distance²\n\n(1) Return δ^2(P I), the square of the distance (or divergence) of positive definite matrix P from the identity matrix. See distance from the origin.\n\n(2) Return δ^2(P Q), the square of the distance (or divergence) between two positive definite matrices P and Q. See distance.\n\nIn both cases the distance function δ is induced by the argument metric of type Metric::Enumerated type.\n\nP in (1) and P, Q in (2) must be flagged by julia as Hermitian. See typecasting matrices.\n\n(3) and (4) are specialized methods of (1) and (2), respectively, for real positive definite Diagonal matrices. 
See ℍVector type and 𝔻Vector type.\n\nMaths\n\nFor point P the squared distances from the identity for the supported metrics are:\n\nMetric Squared Distance from the identity\nEuclidean P-I^2\ninvEuclidean P^-1-I^2\nChoEuclidean L_P-I^2\nlogEuclidean textrmlogP^2\nlogCholesky S_P^2+textrmlogD_P^2\nFisher textrmlogP^2\nlogdet0 textrmlogdetfrac12(P+I) - frac12textrmlogdet(P)\nJeffrey frac12textrmtr(P+P^-1)-n\nVonNeumann frac12textrmtr(PtextrmlogP-textrmlogP)\nWasserstein textrmtr(P+I) -2textrmtr(P^12)\n\nFor points P and Q their squared distances for the supported metrics are:\n\nMetric Squared Distance\nEuclidean P-Q^2\ninvEuclidean P^-1-Q^-1^2\nChoEuclidean L_P - L_Q ^2\nlogEuclidean textrmlogP-textrmlogQ^2\nlogCholesky S_P-S_Q^2+textrmlogD_P-textrmlogD_Q^2\nFisher textrmlog(P^-12QP^-12)^2\nlogdet0 textrmlogdetfrac12(P+Q) - frac12textrmlogdet(PQ)\nJeffrey frac12textrmtr(Q^-1P+P^-1Q)-n\nVonNeumann frac12textrmtr(PtextrmlogP-PtextrmlogQ+QtextrmlogQ-QtextrmlogP)\nWasserstein textrmtr(P+Q) -2textrmtr(P^12QP^12)^12\n\nlegend: L_X, S_X and D_X are the Cholesky lower triangle of X, its strictly lower triangular part and diagonal part, respectively (hence, S_X+D_X=L_X, L_XL_X^*=X).\n\nSee also: distanceSqrMat.\n\nExamples (1)\n\nusing PosDefManifold\nP=randP(10)\nd=distanceSqr(Wasserstein, P)\ne=distanceSqr(Fisher, P)\nmetric=Metric(Int(logdet0)) # or metric=logdet0\ns=string(metric) # check what is the current metric\nf=distance²(metric, P) #using the alias distance²\n\nExamples (2)\n\nusing PosDefManifold\nP=randP(10)\nQ=randP(10)\nd=distanceSqr(logEuclidean, P, Q)\ne=distance²(Jeffrey, P, Q)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.distance","page":"riemannianGeometry.jl","title":"PosDefManifold.distance","text":"(1) distance(metric::Metric, P::ℍ{T}) where T<:RealOrComplex\n(2) distance(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n(3) distance(metric::Metric, D::𝔻{S}) where S<:Real\n(4) distance(metric::Metric, D::𝔻{S}, E::𝔻{S}) where S<:Real\n\n(1) Return δ(P I), the distance between positive definite matrix P and the identity matrix.\n\n(2) Return δ(P Q), the distance between positive definite matrices P and Q.\n\n(3) and (4) are specialized methods of (1) and (2), respectively, for real positive definite Diagonal matrices.\n\nThis is the square root of distanceSqr and is invoked with the same syntax as described therein.\n\nSee also: distanceMat.\n\n\n\n\n\n","category":"function"},
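Since distance shares the syntax of distanceSqr, a minimal usage sketch:\n\nusing PosDefManifold\nP=randP(10)\nQ=randP(10)\nd=distance(Fisher, P) # distance of P from the identity\ne=distance(Fisher, P, Q) # distance between P and Q\ne^2 ≈ distanceSqr(Fisher, P, Q) ? println(\" ⭐ \") : println(\" ⛔ \")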
series)","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"distanceSqrMat\r\ndistanceMat\r\nlaplacian\r\nlaplacianEigenMaps\r\nspectralEmbedding","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.distanceSqrMat","page":"riemannianGeometry.jl","title":"PosDefManifold.distanceSqrMat","text":" (1) distanceSqrMat(metric::Metric, 𝐏::ℍVector;\n <⏩=true>)\n\n (2) distanceSqrMat(type::Type{T}, metric::Metric, 𝐏::ℍVector;\n <⏩=true>) where T<:AbstractFloat\n\nalias: distance²Mat\n\nGiven a 1d array 𝐏 of k positive definite matrices P_1P_k of ℍVector type, create the kk real LowerTriangular matrix comprising elements δ^2(P_i P_j)textrm for all i=j.\n\nThis is the lower triangular matrix holding all squared inter-distances (zero on diagonal), using the specified metric, of type Metric::Enumerated type, giving rise to distance function δ. See distanceSqr.\n\nOnly the lower triangular part is computed in order to optimize memory use.\n\nBy default, the result matrix is of type Float32. The type can be changed to another real type using method (2).\n\n:\n\nif ⏩=true (default) the computation of inter-distances is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.\n\nSee: distance.\n\nSee also: laplacian, laplacianEigenMaps, spectralEmbedding.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 8 random 10x10 SPD matrices\nPset=randP(10, 8) # or, using unicode: 𝐏=randP(10, 8)\n# Compute the squared inter-distance matrix according to the log Euclidean metric.\n# This is much faster as compared to the Fisher metric and in general\n# it is a good approximation.\nΔ²=distanceSqrMat(logEuclidean, Pset)\n\n# return a matrix of type Float64\nΔ²64=distanceSqrMat(Float64, logEuclidean, Pset)\n\n# Get the full matrix of inter-distances\nfullΔ²=Hermitian(Δ², :L)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.distanceMat","page":"riemannianGeometry.jl","title":"PosDefManifold.distanceMat","text":" (1) distanceMat(metric::Metric, 𝐏::ℍVector;\n <⏩=true>)\n\n (2) distanceMat(type::Type{T}, metric::Metric, 𝐏::ℍVector;\n <⏩=true>) where T<:AbstractFloat\n\nGiven a 1d array 𝐏 of k positive definite matrices P_1P_k of ℍVector type, create the kk real LowerTriangular matrix comprising elements δ(P_i P_j)textrm for all i=j.\n\nThis is the lower triangular matrix holding all inter-distances (zero on diagonal), using the specified metric, of type Metric::Enumerated type, giving rise to distance δ. See distance.\n\nOnly the lower triangular part is computed in order to optimize memory use.\n\nBy default, the result matrix is of type Float32. The type can be changed to another real type using method (2).\n\nThe elements of this matrix are the square root of distanceSqrMat.\n\n:\n\nif ⏩=true the computation of inter-distances is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. 
See Threads.\n\nSee: distance.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 4 random 10x10 SPD matrices\nPset=randP(10, 4) # or, using unicode: 𝐏=randP(10, 4)\nΔ=distanceMat(Fisher, Pset)\n\n# return a matrix of type Float64\nΔ64=distanceMat(Float64, Fisher, Pset)\n\n# Get the full matrix of inter-distances\nfullΔ=Hermitian(Δ, :L)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.laplacian","page":"riemannianGeometry.jl","title":"PosDefManifold.laplacian","text":"laplacian(Δ²::𝕃{S}, epsilon::Real=0;\n <densityInvariant=false>) where S<:Real\n\nGiven a LowerTriangular matrix of squared inter-distances Δ^2, return the lower triangular part of the so-called normalized Laplacian or density-invariant normalized Laplacian, which in both cases is a symmetric Laplacian. The elements of the Laplacian are of the same type as the elements of Δ^2. The result is a LowerTriangular matrix.\n\nThe definition of Laplacian given by Lafon (2004)🎓 is implemented:\n\nFirst, a Gaussian radial basis function, known as the Gaussian kernel or heat kernel, is applied to all elements of Δ^2, such that\n\nW_ij = expbigg(fracdisplaystyle-Δ^2_ijdisplaystyle2εbigg),\n\nwhere ε is the bandwidth of the kernel.\n\nIf densityInvariant=true is used, then the density-invariant transformation is applied\n\nW ← E^-1WE^-1,\n\nwhere E is the diagonal matrix holding on the main diagonal the sum of the rows (or columns) of W.\n\nFinally, the normalized Laplacian (density-invariant or not) is defined as\n\nΩ = D^-12WD^-12,\n\nwhere D is the diagonal matrix holding on the main diagonal the sum of the rows (or columns) of W.\n\nIf you do not provide argument epsilon, the bandwidth ε is set to the median of the elements of squared distance matrix Δ^2_ij. Another educated guess is the dimension of the original data, that is, the data that has been used to compute the squared distance matrix. For positive definite matrices this is n(n-1)/2, where n is the dimension of the matrices. Still another is the dimension of the ensuing spectralEmbedding space. Keep in mind that by tuning the epsilon parameter (which must be positive) you can control both the rate of compression of the embedding space and the spread of points in the embedding space. See Coifman et al. (2008)🎓 for a discussion on ε.\n\nnote: Nota Bene\nThe Laplacian as here defined can be requested for any input matrix of squared inter-distances, for example, those obtained on scalars or on vectors using appropriate metrics. In any case, only the lower triangular part of the Laplacian is taken as input. 
See typecasting matrices.\n\nSee also: distanceSqrMat, laplacianEigenMaps, spectralEmbedding.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 4 random 10x10 SPD matrices\nPset=randP(10, 4) # or, using unicode: 𝐏=randP(10, 4)\nΔ²=distanceSqrMat(Fisher, Pset)\nΩ=laplacian(Δ²)\n\n# density-invariant Laplacian\nΩ=laplacian(Δ²; densityInvariant=true)\n\n# increase the bandwidth\nr=size(Δ², 1)\nmyεFactor=0.1\nmed=Statistics.median([Δ²[i, j] for j=1:r-1 for i=j+1:r])\nε=2*myεFactor*med\nΩ=laplacian(Δ², ε; densityInvariant=true)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.laplacianEigenMaps","page":"riemannianGeometry.jl","title":"PosDefManifold.laplacianEigenMaps","text":" laplacianEigenMaps(Ω::𝕃{S}, q::Int;\n <\n tol::Real=0.,\n maxiter::Int=300,\n verbose=false >) where S<:Real\n\nalias: laplacianEM\n\nGiven the lower triangular part of a Laplacian Ω (see laplacian) return the eigen maps in q dimensions, i.e., the q eigenvectors of the Laplacian associated with the largest q eigenvalues, excluding the first (which is always equal to 1.0). The eigenvectors are of the same type as Ω. They are all divided element-wise by the first eigenvector (see Lafon, 2004🎓).\n\nThe eigenvectors of the Laplacian are computed by the power iterations+modified Gram-Schmidt method (see powerIterations), allowing the execution of this function for Laplacian matrices of very large size.\n\nReturn the 4-tuple (Λ, U, iterations, convergence), where:\n\nΛ is a q⋅q diagonal matrix holding on diagonal the eigenvalues corresponding to the q dimensions of the Laplacian eigen maps,\nU holds in columns the q eigenvectors holding the q coordinates of the points in the embedding space,\niterations is the number of iterations executed by the power method,\nconvergence is the convergence attained by the power method.\n\nUsing the notion of Laplacian, spectral embedding seeks a low-dimension representation of the data emphasizing local neighborhood information while neglecting long-distance information. The embedding is non-linear, however the embedding space is Euclidean. The eigenvectors in U hold the coordinates of the points in the embedding space (typically two- or three-dimensional for plotting or more for clustering). Spectral embedding is done for plotting data in low dimension, clustering, imaging, classification, following their trajectories over time or other dimensions, and much more. For examples of applications see Rodrigues et al. (2018) 🎓 and references therein.\n\nArguments:\n\nΩ is a real LowerTriangular normalized Laplacian obtained by the laplacian function,\nq is the dimension of the Laplacian eigen maps;\nThe following are for the power iterations:\ntol is the tolerance for convergence (see below),\nmaxiter is the maximum number of iterations allowed,\nif verbose is true, the convergence at all iterations will be printed.\n\nnote: Nota Bene\nThe maximum value of q that can be requested is n-1, where n is the size of the Laplacian. In general, q=2 or q=3 is requested.tol defaults to the square root of Base.eps of the (real) type of Ω. 
This corresponds to requiring equality for the convergence criterion over two successive power iterations of about half of the significant digits.\n\nSee also: distanceSqrMat, laplacian, spectralEmbedding.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 4 random 10x10 SPD matrices\nPset=randP(10, 4)\nΔ²=distanceSqrMat(Fisher, Pset)\nΩ=laplacian(Δ²)\nevalues, maps, iterations, convergence=laplacianEM(Ω, 2)\nevalues, maps, iterations, convergence=laplacianEM(Ω, 2; verbose=true)\nevalues, maps, iterations, convergence=laplacianEM(Ω, 2; verbose=true, maxiter=500)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.spectralEmbedding","page":"riemannianGeometry.jl","title":"PosDefManifold.spectralEmbedding","text":" (1) spectralEmbedding(metric::Metric, 𝐏::ℍVector, q::Int, epsilon::Real=0;\n <\n tol::Real=0.,\n maxiter::Int=300,\n densityInvariant=false,\n verbose=false,\n ⏩=true >)\n\n (2) spectralEmbedding(type::Type{T}, metric::Metric, 𝐏::ℍVector, q::Int, epsilon::Real=0;\n < same optional keyword arguments as in (1) >) where T<:Real\n\nalias: spEmb\n\nGiven a 1d array 𝐏 of k positive definite matrices P_1P_k (real or complex), compute its eigen maps in q dimensions.\n\nThis function runs one after the other the functions:\n\ndistanceSqrMat (compute the squared inter-distance matrix),\nlaplacian (compute the normalized Laplacian),\nlaplacianEigenMaps (get the eigen maps).\n\nBy default all computations above are done with Float32 precision. Another real type can be requested using method (2), where the type argument is defined.\n\nReturn the 4-tuple (Λ, U, iterations, convergence), where:\n\nΛ is a q⋅q diagonal matrix holding on diagonal the eigenvalues corresponding to the q dimensions of the Laplacian eigen maps,\nU holds in columns the q eigenvectors holding the q coordinates of the points in the embedding space,\niterations is the number of iterations executed by the power method,\nconvergence is the convergence attained by the power method.\n\nArguments:\n\nmetric is the metric of type Metric::Enumerated type used for computing the inter-distances,\n𝐏 is a 1d array of k positive matrices of ℍVector type,\nq is the dimension of the Laplacian eigen maps,\nepsilon is the bandwidth of the Laplacian (see laplacian);\nThe following applies to the computation of the inter-distances:\nif ⏩=true (default) the computation of inter-distances is multi-threaded.\nThe following applies to the computation of the Laplacian by the laplacian function:\nif densityInvariant=true the density-invariant Laplacian is computed (see laplacian).\nThe following are for the power method iterative algorithm invoked by laplacianEigenMaps:\ntol is the tolerance for convergence of the power method (see below),\nmaxiter is the maximum number of iterations allowed for the power method,\nif verbose=true the convergence at all iterations will be printed;\n\nnote: Nota Bene\ntol defaults to the square root of Base.eps of the Float32 type (1) or of the type passed as argument (2). This corresponds to requiring equality for the convergence criterion over two successive power iterations of about half of the significant digits.Multi-threading is automatically disabled if Julia is instructed to use only one thread. 
See Threads.\n\nSee also: distanceSqrMat, laplacian, laplacianEigenMaps.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of k random 10x10 SPD matrices\nk=10\nPset=randP(10, k)\nevalues, maps, iter, conv=spectralEmbedding(Fisher, Pset, 2)\n\n# show convergence information\nevalues, maps, iter, conv=spectralEmbedding(Fisher, Pset, 2; verbose=true)\n\n# use Float64 precision.\nevalues, maps, iter, conv=spectralEmbedding(Float64, Fisher, Pset, 2)\n\nusing Plots\n# check eigenvalues and eigenvectors\nplot(diag(evalues))\nplot(maps[:, 1])\nplot!(maps[:, 2])\nplot!(maps[:, 3])\n\n# plot the data in the embedded space\nplot(maps[:, 1], maps[:, 2], seriestype=:scatter, title=\"Spectral Embedding\", label=\"Pset\")\n\n# try a different value of epsilon\nevalues, maps, iter, conv=spEmb(Fisher, Pset, k-1, 0.01; maxiter=1000)\nplot(maps[:, 1], maps[:, 2], seriestype=:scatter, title=\"Spectral Embedding\", label=\"Pset\")\n# see the example in `Laplacian` function for more on this\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Means-1","page":"riemannianGeometry.jl","title":"Means","text":"","category":"section"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\nmean Weighted Fréchet mean (wFm) of a scalar or matrix set using any metric\nmeans As above for several sets at once\ngeneralizedMean Generalized wFm of a matrix set\ngeometricMean, gMean wFm of a matrix set minimizing the dispersion according to the Fisher metric (iterative)\ngeometricpMean, gpMean robust wFm of a matrix set minimizing the p-dispersion according to the Fisher metric (iterative)\nlogdet0Mean, ld0Mean wFm of a matrix set according to the logdet0 metric (iterative)\nwasMean wFm of a matrix set according to the Wasserstein metric (iterative)\npowerMean Power wFm of a matrix set (iterative)\ninductiveMean, indMean Recursive Fréchet mean of a matrix set (constructive)\nmidrange Geometric midrange of two matrices","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"mean\r\nmeans\r\ngeneralizedMean\r\ngeometricMean\r\ngeometricpMean\r\nlogdet0Mean\r\nwasMean\r\npowerMean\r\ninductiveMean\r\nmidrange","category":"page"},{"location":"riemannianGeometry/#Statistics.mean","page":"riemannianGeometry.jl","title":"Statistics.mean","text":" (1) mean(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\n (2) mean(metric::Metric, D::𝔻{T}, E::𝔻{T}) where T<:Real\n\n (3) mean(metric::Metric, 𝐏::ℍVector;\n <\n w::Vector=[],\n ✓w=true,\n init::Union{ℍ, Nothing}=nothing,\n tol::Real=0.,\n verbose=false,\n ⏩=true >)\n\n (4) mean(metric::Metric, 𝐃::𝔻Vector;\n < same optional keyword arguments as in (3) >)\n\n(1) Mean of two positive definite matrices, passed in arbitrary order as arguments P and Q, using the specified metric of type Metric::Enumerated type. The order is arbitrary as all metrics implemented in PosDefManifold are symmetric. This is the midpoint of the geodesic. For the weighted mean of two positive definite matrices use instead the geodesic function. P and Q must be flagged as Hermitian. 
See typecasting matrices.\n\n(2) Like in (1), but for two real diagonal positive definite matrices D and E.\n\n(3) Fréchet mean of a 1d array 𝐏 of k positive definite matrices 𝐏=P_1P_k of ℍVector type, with optional non-negative real weights w=w_1w_k and using the specified metric as in (1).\n\n(4) Fréchet mean of a 1d array 𝐃 of k positive definite matrices 𝐃=D_1D_k of 𝔻Vector type, with optional non-negative real weights w=w_1w_k and using the specified metric as in (1).\n\nIf you don't pass a weight vector with w, return the unweighted mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nAdopting the Fisher, logdet0 and Wasserstein metric in (3) and the logdet0 metric in (4), the mean is computed by means of an iterative algorithm. A particular initialization for these algorithms can be provided passing an Hermitian matrix as init. The convergence for these algorithms is required with a tolerance given by tol. If verbose=true the convergence attained at each iteration is printed. Other information such as if the algorithm has diverged is also printed. For more options in computing these means call directly functions geometricMean, logdet0Mean and wasMean, which are called hereby. For the meaning of the tol default value see the documentation of these functions. See also the robust mean function geometricpMean, which cannot be called from here. Notice that arguments init and tol have an effect only for the aforementioned metrics in methods (3) and (4).\n\nFor (3) and (4), if ⏩=true (default), the computation of the mean is multi-threaded for all metrics.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.\n\nMath\n\nThe Fréchet mean of a set of k matrices P_1 P_2 P_k weighted by w_1 w_2 w_ksum_i=1^kw_i=1 for the supported metrics are, for those with closed form expression:\n\nMetric weighted Fréchet mean\nEuclidean sum_i=1^kw_i P_i\ninvEuclidean big(sum_i=1^kw_i P_i^-1big)^-1\nChoEuclidean TT^*, where T=sum_i=1^kw_iL_P_i\nlogEuclidean textrmexpbig(sum_i=1^kw_ihspace1pt textrmlogP_i big)\nlogCholesky TT^*, where T=sum_i=1^k(w_iS_P_i)+sum_i=1^k(w_itextrmlogD_P_i)\nJeffrey A^12big(A^-12HA^-12big)^12A^12\n\nand for those that are found by an iterative algorithm and that verify an equation:\n\nMetric equation verified by the weighted Fréchet mean\nFisher sum_i=1^kw_itextrmlogbig(G^-12 P_i G^-12big)=0\nlogdet0 sum_i=1^kw_ibig(frac12P_i+frac12Gbig)^-1=G^-1\nVonNeumann N.A.\nWasserstein G=sum_i=1^kw_ibig( G^12 P_i G^12big)^12\n\nlegend: L_X, S_X and D_X are the Cholesky lower triangle of X, its strictly lower triangular part and diagonal part, respectively (hence, S_X+D_X=L_X, L_XL_X^*=X). 
A and H are the weighted arithmetic and weighted harmonic mean, respectively.\n\nSee: geodesic, mean, Fréchet mean.\n\nExamples\n\nusing LinearAlgebra, Statistics, PosDefManifold\n# Generate 2 random 3x3 SPD matrices\nP=randP(3)\nQ=randP(3)\nM=mean(logdet0, P, Q) # (1)\nM=mean(Euclidean, P, Q) # (1)\n\n# passing several matrices and associated weights listing them\n# weights vector, does not need to be normalized\nR=randP(3)\nmean(Fisher, ℍVector([P, Q, R]); w=[1, 2, 3])\n\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4)\nweights=[1, 2, 3, 1]\n# passing a vector of Hermitian matrices (ℍVector type)\nM=mean(Euclidean, Pset; w=weights) # (3) weighted Euclidean mean\nM=mean(Wasserstein, Pset) # (3) unweighted Wasserstein mean\n# display convergence information when using an iterative algorithm\nM=mean(Fisher, Pset; verbose=true)\n\n# run multi-threaded when the number of matrices is high\nusing BenchmarkTools\nPset=randP(20, 160)\n@benchmark(mean(logEuclidean, Pset; ⏩=false)) # single-threaded\n@benchmark(mean(logEuclidean, Pset)) # multi-threaded\n\n\n\n\n\nmean(metric::Metric, ν::Vector{T}) where T<:RealOrComplex\n\nMean of k real or complex scalars, using the specified metric of type Metric::Enumerated type. Note that using the Fisher, logEuclidean and Jeffrey metric, the resulting mean is the scalar geometric mean. Note also that the code of this method is in unit statistics.jl, while the code for all the others is in unit riemannianGeometry.jl.\n\nExamples\n\nusing PosDefManifold\n# Generate 10 random numbers distributed as a chi-square with 2 df.\nν=[randχ²(2) for i=1:10]\narithmetic_mean=mean(Euclidean, ν)\ngeometric_mean=mean(Fisher, ν)\nharmonic_mean=mean(invEuclidean, ν)\nharmonic_mean<=geometric_mean<=arithmetic_mean # AGH inequality\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.means","page":"riemannianGeometry.jl","title":"PosDefManifold.means","text":" (1) means(metric::Metric, 𝒫::ℍVector₂;\n <⏩=true>)\n\n (2) means(metric::Metric, 𝒟::𝔻Vector₂;\n <⏩=true>)\n\n(1) Given a 2d array 𝒫 of positive definite matrices as an ℍVector₂ type compute the Fréchet mean for as many ℍVector type objects as hold in 𝒫, using the specified metric of type Metric::Enumerated type. Return the means in a vector of Hermitian matrices, that is, as an ℍVector type.\n\n(2) Given a 2d array 𝒟 of real positive definite matrices as an 𝔻Vector₂ type compute the Fréchet mean for as many 𝔻Vector type objects as hold in 𝒟, using the specified metric of type Metric::Enumerated type. Return the means in a vector of Diagonal matrices, that is, as a 𝔻Vector type.\n\nThe weighted Fréchet mean is not supported in this function.\n\nIf ⏩=true (default) the computation of the means is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. 
See Threads.\n\nSee also: mean.\n\nExamples\n\n using PosDefManifold\n # Generate a set of 4 random 3x3 SPD matrices\n Pset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n # Generate a set of 40 random 3x3 SPD matrices\n Qset=randP(3, 40) # or, using unicode: 𝐐=randP(3, 40)\n # listing directly ℍVector objects\n means(logEuclidean, ℍVector₂([Pset, Qset])) # or: means(logEuclidean, ℍVector₂([𝐏, 𝐐]))\n # note that [𝐏, 𝐐] is actually a ℍVector₂ type object\n\n # creating and passing an object of ℍVector₂ type\n sets=ℍVector₂(undef, 2) # or: 𝒫=ℍVector₂(undef, 2)\n sets[1]=Pset # or: 𝒫[1]=𝐏\n sets[2]=Qset # or: 𝒫[2]=𝐐\n means(logEuclidean, sets) # or: means(logEuclidean, 𝒫)\n\n # going multi-threaded\n\n # first, create 20 sets of 200 50x50 SPD matrices\n sets=ℍVector₂([randP(50, 200) for i=1:20])\n\n # How much computing time do we save?\n # (example min time obtained with 4 threads & 4 BLAS threads)\n using BenchmarkTools\n\n # non multi-threaded, mean with closed-form solution\n @benchmark(means(logEuclidean, sets; ⏩=false)) # (6.196 s)\n\n # multi-threaded, mean with closed-form solution\n @benchmark(means(logEuclidean, sets)) # (1.897 s)\n\n sets=ℍVector₂([randP(10, 200) for i=1:10])\n\n # non multi-threaded, mean with iterative solution\n # wait a bit\n @benchmark(means(Fisher, sets; ⏩=false)) # (4.672 s )\n\n # multi-threaded, mean with iterative solution\n @benchmark(means(Fisher, sets)) # (1.510 s)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.generalizedMean","page":"riemannianGeometry.jl","title":"PosDefManifold.generalizedMean","text":" generalizedMean(𝐏::Union{ℍVector, 𝔻Vector}, p::Real;\n <\n w::Vector=[],\n ✓w=true,\n ⏩=true >)\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and optional non-negative real weights vector w=w_1w_k, return the weighted generalized mean G with real parameter p, that is,\n\nG=big(sum_i=1^kw_iP_i^pbig)^1p.\n\nIf you don't pass a weight vector with w, return the unweighted generalized mean\n\nG=big(sum_i=1^kP_i^pbig)^1p.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the weights each time.\n\nIf ⏩=true the computation of the generalized mean is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.\n\nThe following special cases for parameter p are noteworthy:\n\nFor p=frac12 the generalized mean is the modified Bhattacharyya mean.\nFor p=1 the generalized mean is the Euclidean mean.\nFor p=-1 the generalized mean is the inverse Euclidean mean.\nFor (the limit of) p=0 the generalized mean is the log Euclidean mean, which is the Fisher mean when matrices in 𝐏 all pair-wise commute.\n\nNotice that when matrices in 𝐏 all pair-wise commute, for instance if the matrices are diagonal, the generalized means coincide with the power means for any p∈[-1, 1] and for p=0.5 they coincide also with the Wasserstein mean. For this reason the generalized means are used as default initialization of both the powerMean and wasMean algorithm.
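For instance, the special cases listed above can be checked numerically (a minimal sketch, assuming a set generated with randP):\n\nusing LinearAlgebra, Statistics, PosDefManifold\nPset=randP(3, 4)\n# for p=1 the generalized mean is the Euclidean (arithmetic) mean\ngeneralizedMean(Pset, 1) ≈ mean(Euclidean, Pset) ? println(\" ⭐ \") : println(\" ⛔ \")\n# for p=-1 it is the inverse Euclidean (harmonic) mean\ngeneralizedMean(Pset, -1) ≈ mean(invEuclidean, Pset) ? println(\" ⭐ \") : println(\" ⛔ \")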
See: generalized means.\n\nSee also: powerMean, wasMean, mean.\n\nExamples\n\nusing LinearAlgebra, Statistics, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# unweighted mean\nG = generalizedMean(Pset, 0.25) # or: G = generalizedMean(𝐏, 0.25)\n\n# weighted mean\nG = generalizedMean(Pset, 0.5; w=weights)\n\n# with weights previously normalized we can set ✓w=false\nweights=weights./sum(weights)\nG = generalizedMean(Pset, 0.5; w=weights, ✓w=false)\n\n# run multi-threaded when the number of matrices is high\nusing BenchmarkTools\nPset=randP(20, 160)\n@benchmark(generalizedMean(Pset, 0.5; ⏩=false)) # single-threaded\n@benchmark(generalizedMean(Pset, 0.5)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.geometricMean","page":"riemannianGeometry.jl","title":"PosDefManifold.geometricMean","text":" geometricMean(𝐏::Union{ℍVector, 𝔻Vector};\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n adaptStepSize::Bool=true,\n verbose=false,\n ⏩=true >)\n\nalias: gMean\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type or diagonal matrices of 𝔻Vector type and optional non-negative real weights vector w=w_1w_k, return the 3-tuple (G, iter, conv), where G is the mean according to the Fisher metric and iter, conv are the number of iterations and convergence attained by the algorithm. Mean G is the unique positive definite matrix satisfying\n\nsum_i=1^kw_itextrmlogbig(G^-12 P_i G^-12big)=0.\n\nFor estimating it, this function implements the well-known gradient descent algorithm, but with an exponential decaying step size ς, yielding iterations\n\nG ← G^12textrmexpbig(ςsum_i=1^kw_itextrmlog(G^-12 P_i G^-12)big)G^12.\n\nIf you don't pass a weight vector with w, return the unweighted geometric mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are more optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used,\ntol is the tolerance for the convergence (see below),\nmaxiter is the maximum number of iterations allowed,\nif verbose=true, the convergence attained at each iteration and the step size ς is printed. Also, a warning is printed if convergence is not attained,\nif ⏩=true the iterations are multi-threaded (see below),\nif adaptStepSize=false the step size ς is fixed to 1 at all iterations.\n\nIf the input is a 1d array of k real positive definite diagonal matrices the solution is available in closed-form as the log Euclidean mean, hence the init, tol and verbose arguments have no effect and the 3-tuple (G, 1, 0) is returned. See the log Euclidean metric.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.In normal circumstances this algorithm converges monotonically. 
If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened.The exponential decaying step size features a faster convergence rate as compared to the fixed step size ς=1 that is usually adopted. The decaying rate is inversely proportional to maxiter, thus, increase/decrease maxiter in order to set a slower/faster decaying rate. maxiter should not be set too low though.tol defaults to the square root of Base.eps of the nearest real type of data input 𝐏. This corresponds to requiring the norm of the satisfying matrix equation divided by the number of elements to vanish for about half the significant digits.\n\nSee: Fisher metric.\n\nSee also: geometricpMean, powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = geometricMean(Pset) # or G, iter, conv = geometricMean(𝐏)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = geometricMean(Pset, w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = geometricMean(Pset; verbose=true)\n\n# now suppose Pset has changed a bit, initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = geometricMean(Pset; w=weights, ✓w=true, verbose=true, init=G)\n\n# run multi-threaded when the number of matrices is high\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(geometricMean(Pset; ⏩=false)) # single-threaded\n@benchmark(geometricMean(Pset)) # multi-threaded\n\n# show the mean and the input points using spectral embedding\nusing Plots\nk=80\nPset=randP(20, k)\nG, iter, conv = geometricMean(Pset)\npush!(Pset, G)\nΛ, U, iter, conv=spectralEmbedding(Fisher, Pset, 2; verbose=true)\nplot(U[1:k, 1], U[1:k, 2], seriestype=:scatter, title=\"Spectral Embedding\", label=\"Pset\")\nplot!(U[k+1:k+1, 1], U[k+1:k+1, 2], seriestype=:scatter, label=\"mean\")\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.geometricpMean","page":"riemannianGeometry.jl","title":"PosDefManifold.geometricpMean","text":" geometricpMean(𝐏::ℍVector, p::Real=0.5;\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n adaptStepSize=true,\n verbose=false,\n ⏩=true >)\n\nalias: gpMean\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type, a real parameter 0<p≤1 and optional non-negative real weights vector w=w_1w_k, return the 3-tuple (G, iter, conv), where G is the p-mean, i.e., the mean according to the Fisher metric minimizing the p-dispersion (see below) and iter, conv are the number of iterations and convergence attained by the algorithm.\n\nThis function implements the p-dispersion gradient descent algorithm with step-size ς (to be published), yielding iterations\n\nG ← G^12textrmexpbig(ςsum_i=1^kpδ^2(G P_i)^p-1w_itextrmlog(G^-12 P_i G^-12)big)G^12.\n\nif p=1 this yields the geometric mean (implemented specifically in geometricMean),\nif p=0.5 this yields the geometric median (default).\n\nIf you don't pass a weight vector with w, return the unweighted geometric-p mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. 
This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are more optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif adaptStepSize=true (default) the step size ς for the gradient descent is adapted at each iteration (see below).\nif verbose=true, the step-size and convergence attained at each iteration are printed. Also, a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded (see below).\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads. If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened. This algorithm may temporarily diverge and still reach convergence. Overall, while all the other iterative algorithms implemented in PosDefManifold are very stable, this one is not. The smaller the parameter p is, the slower and less likely the convergence is. If the algorithm does not converge, try increasing p, initializing the algorithm with the output of geometricMean and/or eliminating the outliers from the input set 𝐏. If adaptStepSize is true (default) the step size ς is adapted at each iteration, otherwise the fixed step size ς=1 is used. Adapting the step size in general hastens convergence and improves the convergence behavior. tol defaults to the square root of Base.eps of the nearest real type of the data input 𝐏. This corresponds to requiring the norm of the satisfying matrix equation, divided by the number of elements, to vanish for about half the significant digits.\n\nSee: Fisher metric.\n\nSee also: geometricMean, powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold, Plots\n\n# This example shows that this algorithm is more robust to outliers\n# as compared to the standard geometric mean algorithm\n\n# Generate a set of 100 random 10x10 SPD matrices\nPset=randP(10, 100)\n\n# Get the usual geometric mean for comparison\nG, iter1, conv1 = geometricMean(Pset, verbose=true)\n\n# change p to observe how the convergence behavior changes accordingly\n# Get the median (default)\nH, iter2, conv2 = geometricpMean(Pset, verbose=true)\n# Get the p-mean for p=0.25\nH, iter2, conv2 = geometricpMean(Pset, 0.25, verbose=true)\n\nprintln(iter1, \" \", iter2); println(conv1, \" \", conv2)\n\n# move the first matrix in Pset to possibly create an outlier\nPset[1]=geodesic(Fisher, G, Pset[1], 3)\nG1, iter1, conv1 = geometricMean(Pset, verbose=true)\nH1, iter2, conv2 = geometricpMean(Pset, 0.25, verbose=true)\nprintln(iter1, \" \", iter2); println(conv1, \" \", conv2)\n\n# collect the geometric and p-means, before and after the\n# introduction of the outlier, in the vector of Hermitian matrices `S`.\nS=HermitianVector([G, G1, H, H1])\n\n# check the inter-distance matrix Δ² to verify that the geometric mean\n# after the introduction of the outlier (``G1``) is farther away from\n# the geometric mean (``G``) than ``H1`` is from ``H``, i.e., that\n# element (4,3) is much smaller than element (2,1).\nΔ²=distance²Mat(Float64, Fisher, S)\n\n# how far are all these matrices from all the others?\nfullΔ²=Hermitian(Δ², :L)\ndist=[sum(fullΔ²[:, i]) for i=1:size(fullΔ², 1)]\n\n# plot the matrices in `S` using spectral embedding.\nusing Plots\nΛ, U, iter, conv = 
laplacianEM(laplacian(Δ²), 3; verbose=true)\nplot([U[1, 1]], [U[1, 2]], seriestype=:scatter, label=\"g-mean\")\nplot!([U[2, 1]], [U[2, 2]], seriestype=:scatter, label=\"g-mean outlier\")\nplot!([U[3, 1]], [U[3, 2]], seriestype=:scatter, label=\"p-mean\")\nplot!([U[4, 1]], [U[4, 2]], seriestype=:scatter, label=\"p-mean outlier\")\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(geometricpMean(Pset; ⏩=false)) # single-threaded\n@benchmark(geometricpMean(Pset)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.logdet0Mean","page":"riemannianGeometry.jl","title":"PosDefManifold.logdet0Mean","text":" logdet0Mean(𝐏::Union{ℍVector, 𝔻Vector};\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n verbose=false,\n ⏩=true >)\n\nalias: ld0Mean\n\nGiven a 1d array 𝐏 = {P_1, …, P_k} of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and an optional non-negative real weights vector w = {w_1, …, w_k}, return the 3-tuple (G, iter, conv), where G is the mean according to the logdet zero metric and iter, conv are the number of iterations and convergence attained by the algorithm. Mean G is the unique positive definite matrix satisfying\n\n\sum_{i=1}^{k} w_i \big(\frac{1}{2} P_i + \frac{1}{2} G\big)^{-1} - G^{-1} = 0.\n\nFor estimating it, this function implements the fixed-point iteration algorithm suggested by (Moakher, 2012, p. 315)🎓, yielding iterations\n\nG ← \frac{1}{2} \big(\sum_{i=1}^{k} w_i (P_i + G)^{-1}\big)^{-1}.\n\nIf you don't pass a weight vector with w, return the unweighted logdet zero mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are more optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded (see below).\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads. In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened. tol defaults to 100 times the square root of Base.eps of the nearest real type of the data input 𝐏. This corresponds to requiring the square root of the relative convergence criterion over two successive iterations to vanish for about half the significant digits minus 2.
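\n\nA minimal sketch of one step of this fixed-point rule, assuming equal weights, may read as follows (illustration only; the package function adds weighting, tolerance checks and multi-threading):\n\nusing LinearAlgebra, PosDefManifold\nPset=randP(3, 4)\nk=length(Pset)\nG=ℍ(sum(Pset)/k) # initialization\nG=ℍ(0.5*inv(ℍ(sum(inv(ℍ(P+G)) for P in Pset)/k))) # one fixed-point step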
\n\nSee: logdet zero metric, modified Bhattacharyya mean.\n\nSee also: powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = logdet0Mean(Pset) # or G, iter, conv = logdet0Mean(𝐏)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = logdet0Mean(Pset, w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = logdet0Mean(Pset; w=weights, verbose=true)\n\n# suppose Pset has changed a bit; initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = logdet0Mean(Pset; w=weights, ✓w=false, verbose=true, init=G)\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(logdet0Mean(Pset; ⏩=false)) # single-threaded\n@benchmark(logdet0Mean(Pset)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.wasMean","page":"riemannianGeometry.jl","title":"PosDefManifold.wasMean","text":" wasMean(𝐏::Union{ℍVector, 𝔻Vector};\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n verbose=false,\n ⏩=true >)\n\nGiven a 1d array 𝐏 = {P_1, …, P_k} of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and an optional non-negative real weights vector w = {w_1, …, w_k}, return the 3-tuple (G, iter, conv), where G is the mean according to the Wasserstein metric and iter, conv are the number of iterations and convergence attained by the algorithm. Mean G is the unique positive definite matrix satisfying\n\nG = \sum_{i=1}^{k} w_i \big(G^{1/2} P_i G^{1/2}\big)^{1/2}.\n\nFor estimating it, this function implements the fixed-point iterative algorithm proposed by (Álvarez-Esteban et al., 2016)🎓:\n\nG ← G^{-1/2} \big(\sum_{i=1}^{k} w_i (G^{1/2} P_i G^{1/2})^{1/2}\big)^2 G^{-1/2}.\n\nIf you don't pass a weight vector with w, return the unweighted Wasserstein mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are more optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the instance of generalized means with p=0.5 will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded (see below).\n\nIf the input is a 1d array of k real positive definite diagonal matrices the solution is available in closed-form as the modified Bhattacharyya mean, hence the init, tol and verbose arguments have no effect and the 3-tuple (G, 1, 0) is returned. See modified Bhattacharyya mean.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads. In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened. tol defaults to the square root of Base.eps of the nearest real type of the data input 𝐏. This corresponds to requiring the norm of the satisfying matrix equation, divided by the number of elements, to vanish for about half the significant digits.
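\n\nFor illustration only, one step of the fixed-point rule above, assuming equal weights, may be sketched like this (the package function adds weighting, tolerance checks and multi-threading):\n\nusing LinearAlgebra, PosDefManifold\nPset=randP(3, 4)\nk=length(Pset)\nG=ℍ(sum(Pset)/k) # initialization\nW=sqrt(G); Wi=inv(W) # G^{1/2} and G^{-1/2}\nT=sum(sqrt(ℍ(W*P*W)) for P in Pset)/k\nG=ℍ(Wi*T*T*Wi) # one fixed-point step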
\n\nSee: Wasserstein metric.\n\nSee also: powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = wasMean(Pset) # or: G, iter, conv = wasMean(𝐏)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = wasMean(Pset; w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = wasMean(Pset; w=weights, verbose=true)\n\n# suppose 𝐏 has changed a bit; initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = wasMean(Pset; w=weights, verbose=true, init=G)\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(wasMean(Pset; ⏩=false)) # single-threaded\n@benchmark(wasMean(Pset)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.powerMean","page":"riemannianGeometry.jl","title":"PosDefManifold.powerMean","text":" powerMean(𝐏::Union{ℍVector, 𝔻Vector}, p::Real;\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n verbose=false,\n ⏩=true >)\n\nGiven a 1d array 𝐏 = {P_1, …, P_k} of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type, an optional non-negative real weights vector w = {w_1, …, w_k} and a real parameter p ∈ [-1, 1], return the 3-tuple (G, iter, conv), where G is Lim and Palfia (2012)'s power mean of order p and iter, conv are the number of iterations and convergence attained by the algorithm, respectively. Mean G is the unique positive definite matrix satisfying\n\nG = \sum_{i=1}^{k} w_i (G \#_p P_i),\n\nwhere G \#_p P_i denotes the Fisher geodesic equation. In particular:\n\nwith p=-1 this is the harmonic mean (see the inverse Euclidean metric),\nwith p=+1 this is the arithmetic mean (see the Euclidean metric),\nat the limit of p evaluated at zero from both sides this is the geometric mean (see Fisher metric).\n\nFor estimating power means for p ∈ (-1, 1), this function implements the fixed-point iterative algorithm of (Congedo et al., 2017b)🎓. For p=0 (geometric mean) this algorithm is run two times with a small positive and negative value of p and the geometric mean of the two resulting means is returned, as suggested in (Congedo et al., 2017b)🎓. This way of estimating the geometric mean of a set of matrices is faster as compared to the usual gradient descent algorithm.\n\nIf you don't pass a weight vector with w, return the unweighted power mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are more optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. 
If no matrix is provided, the instance of generalized means with parameter p will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded.\n\nIf the input is a 1d array of k real positive definite diagonal matrices the solution is available in closed-form as the generalized mean of order p, hence the init, tol and verbose arguments have no effect and the 3-tuple (G, 1, 0) is returned. See generalized means.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads. In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened. tol defaults to the square root of Base.eps of the nearest real type of the data input 𝐏. This corresponds to requiring the norm of the difference of the matrix solution over two successive iterations, divided by the number of elements in the matrix, to vanish for about half the significant digits.\n\n(2) Like in (1), but for a 1d array 𝐃 = {D_1, …, D_k} of k real positive definite diagonal matrices of 𝔻Vector type. In this case the solution is available in closed-form, hence the init, tol and verbose arguments have no effect and the 3-tuple (G, 1, 0) is returned. See generalized means.\n\nSee: power means, generalized means, modified Bhattacharyya mean.\n\nSee also: generalizedMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = powerMean(Pset, 0.5) # or G, iter, conv = powerMean(𝐏, 0.5)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = powerMean(Pset, 0.5; w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = powerMean(Pset, 0.5; w=weights, verbose=true)\n\n# suppose 𝐏 has changed a bit; initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = powerMean(Pset, 0.5; w=weights, verbose=true, init=G)\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(powerMean(Pset, 0.5; ⏩=false)) # single-threaded\n@benchmark(powerMean(Pset, 0.5)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.inductiveMean","page":"riemannianGeometry.jl","title":"PosDefManifold.inductiveMean","text":"(1) inductiveMean(metric::Metric, 𝐏::ℍVector)\n\n(2) inductiveMean(metric::Metric, 𝐏::ℍVector, q::Int, Q::ℍ)\n\nalias: indMean\n\n(1) Compute the Fréchet mean of the 1d array 𝐏 = {P_1, …, P_k} of k positive definite matrices of ℍVector type with a law of large numbers inductive procedure (Ho et al., 2013; Massart et al., 2018)🎓, such as\n\nG_1 = P_1,\n\nG_i = γ_{i^{-1}}(G_{i-1}, P_i), i = 2, …, k,\n\nwhere γ_{i^{-1}}(G_{i-1}, P_i) is a step on the geodesic joining G_{i-1} to P_i with arclength i^{-1}, using the specified metric, of type Metric::Enumerated type.\n\n(2) Like (1), but for the set of matrices 𝐐 ∪ 𝐏, where it is assumed knowledge only of the set 𝐏, the mean of 𝐐 (Hermitian matrix argument Q) and the number of matrices in 𝐐 (integer argument q). This method can be used, for example, for updating a block on-line algorithm, where 𝐏 is the incoming block, Q the previous mean estimation and q the cumulative number of matrices on which the mean has been computed on-line.
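\n\nThe recursion in (1) is straightforward to write down. A minimal sketch for the Fisher metric, using the package's geodesic function, might read (illustration only):\n\nusing PosDefManifold\nPset=randP(3, 10)\nfunction indmean(𝐏::ℍVector)\n    G=𝐏[1]\n    for i=2:length(𝐏)\n        G=geodesic(Fisher, G, 𝐏[i], 1/i) # move from G towards P_i by arclength 1/i\n    end\n    return G\nend\nindmean(Pset) # compare with inductiveMean(Fisher, Pset)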
\n\nFor Fréchet means that do not have a closed-form expression, this procedure features a computational complexity amounting to less than two iterations of the gradient descent or fixed-point algorithms. This comes at the price of an approximation. In fact, the solution is not invariant to permutations of the matrices in array 𝐏 and convergence to the Fréchet mean with the implemented procedure is not ensured (see Massart et al., 2018)🎓.\n\nSince the inductive mean uses the geodesic function, it is not available for the Von Neumann metric.\n\nExamples\n\n# A set of 100 matrices for which we want to compute the mean\n𝐏=randP(10, 100)\n\n𝐏1=ℍVector(collect(𝐏[i] for i=1:50)) # first 50\n𝐏2=ℍVector(collect(𝐏[i] for i=51:100)) # last 50\n\n# inductive mean of the whole set 𝐏\nG=inductiveMean(Fisher, 𝐏)\n\n# mean using the usual gradient descent algorithm\nH, iter, conv=geometricMean(𝐏)\n\n# inductive mean of 𝐏 given only 𝐏2,\n# the number of matrices in 𝐏1 and the mean of 𝐏1\nG2=inductiveMean(Fisher, 𝐏2, length(𝐏1), mean(Fisher, 𝐏1))\n\n# average error\nnorm(G-H)/(dim(G, 1)^2)\nnorm(G2-H)/(dim(G, 1)^2)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.midrange","page":"riemannianGeometry.jl","title":"PosDefManifold.midrange","text":"midrange(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\nMidrange (average of extremal values) of positive definite matrices P and Q. Only the Fisher metric is supported, allowing the so-called geometric midrange. This has been defined in Mostajeran et al. (2019)🎓 as\n\nP * Q = \frac{1}{\sqrt{λ_{min}} + \sqrt{λ_{max}}} \Big(Q + \sqrt{λ_{min} λ_{max}} P\Big),\n\nwhere λ_{min} and λ_{max} are the extremal generalized eigenvalues of P and Q.\n\nExamples\n\nP=randP(3)\nQ=randP(3)\nM=midrange(Fisher, P, Q)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Tangent-Space-operations-1","page":"riemannianGeometry.jl","title":"Tangent Space operations","text":"","category":"section"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\nlogMap Logarithmic map (from manifold to tangent space)\nexpMap Exponential map (from tangent space to manifold)\nvecP vectorization of matrices in the tangent space\nmatP matrization of matrices in the tangent space (inverse of vecP)\nparallelTransport, pt Parallel transport of tangent vectors and matrices","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"logMap\r\nexpMap\r\nvecP\r\nmatP\r\nparallelTransport","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.logMap","page":"riemannianGeometry.jl","title":"PosDefManifold.logMap","text":"(1) logMap(metric::Metric, P::ℍ{T}, G::ℍ{T})\n\n(2) logMap(metric::Metric, 𝐏::ℍVector, G::ℍ{T})\nfor all the above: where T<:RealOrComplex\n\n(1) Logarithmic Map: map a positive definite matrix P from the SPD or Hermitian manifold into the tangent space at base-point G using the Fisher metric.\n\nP and G must be flagged as Hermitian. 
See typecasting matrices.\n\nThe map is defined as\n\n\textrm{Log}_G(P) = S = G^{1/2} \log\big(G^{-1/2} P G^{-1/2}\big) G^{1/2}.\n\nmetric is a metric of type Metric::Enumerated type.\n\nThe result is an Hermitian matrix.\n\n(2) Logarithmic map (1) at base-point G at once for k positive definite matrices in the 1d array 𝐏 = {P_1, …, P_k} of ℍVector type.\n\nThe result is an ℍVector.\n\nnote: Nota Bene\nCurrently only the Fisher metric is supported for tangent space operations.\n\nThe inverse operation is expMap.\n\nSee also: vecP, parallelTransport.\n\nExamples\n\nusing PosDefManifold\n(1)\nP=randP(3)\nQ=randP(3)\nmetric=Fisher\nG=mean(metric, P, Q)\n# projecting P at the base point given by the geometric mean of P and Q\nS=logMap(metric, P, G)\n\n(2)\nPset=randP(3, 4)\n# projecting all matrices in Pset at the base point given by their geometric mean.\nSset=logMap(Fisher, Pset, mean(Fisher, Pset))\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.expMap","page":"riemannianGeometry.jl","title":"PosDefManifold.expMap","text":"(1) expMap(metric::Metric, S::ℍ{T}, G::ℍ{T})\n\n(2) expMap(metric::Metric, 𝐒::ℍVector, G::ℍ{T})\nfor all the above: where T<:RealOrComplex\n\n(1) Exponential Map: map a tangent vector (a matrix) S from the tangent space at base-point G into the SPD or Hermitian manifold (using the Fisher metric).\n\nS and G must be flagged as Hermitian. See typecasting matrices.\n\nThe map is defined as\n\n\textrm{Exp}_G(S) = P = G^{1/2} \exp\big(G^{-1/2} S G^{-1/2}\big) G^{1/2}.\n\nmetric is a metric of type Metric::Enumerated type.\n\nThe result is an Hermitian matrix.\n\n(2) Exponential map (1) at base-point G at once for k tangent vectors (matrices) in the 1d array 𝐒 = {S_1, …, S_k} of ℍVector type.\n\nThe result is an ℍVector.\n\nnote: Nota Bene\nCurrently only the Fisher metric is supported for tangent space operations.\n\nThe inverse operation is logMap.\n\nExamples\n\n(1)\nusing PosDefManifold, LinearAlgebra\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# projecting P on the tangent space at the Fisher mean base point G\nS=logMap(Fisher, P, G)\n# projecting back onto the manifold\nP2=expMap(Fisher, S, G)\n\n(2)\nPset=randP(3, 4)\n# projecting all matrices in Pset at the base point given by their geometric mean.\nG=mean(Fisher, Pset)\nSset=logMap(Fisher, Pset, G)\n# projecting back onto the manifold\nPset2=expMap(Fisher, Sset, G)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.vecP","page":"riemannianGeometry.jl","title":"PosDefManifold.vecP","text":"vecP(S::Union{ℍ{T}, Symmetric{R}};\n range::UnitRange=1:size(S, 2)) where T<:RealOrComplex where R<:Real\n\nVectorize a tangent vector (which is an Hermitian or Symmetric matrix) S: mat ↦ vec.\n\nIt gives weight 1 to diagonal elements and √2 to off-diagonal elements so as to preserve the norm (Barachant et al., 2012)🎓, such as\n\n\|S\|_F = \|vecP(S)\|_F.\n\nThe result is a vector holding n(n+1)/2 elements, where n is the size of S.\n\nS must be flagged as Hermitian or Symmetric. See typecasting matrices.\n\nThe reverse operation is provided by matP, which always returns an Hermitian matrix.\n\nIf an optional keyword argument range is provided, the vectorization concerns only the rows (or columns, since the input matrix is symmetric or Hermitian) in the range. Note that in this case the operation cannot be reverted by matP, that is, the matrix is 'stuck' in the tangent space.
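\n\nThe norm preservation stated above is easy to check numerically (a quick verification, not part of the API):\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3)\nQ=randP(3)\nS=logMap(Fisher, P, mean(Fisher, P, Q))\nnorm(S) ≈ norm(vecP(S)) # true: the Frobenius norm is preserved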
\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# projecting P at the base point given by the geometric mean of P and Q\nS=logMap(Fisher, P, G)\n# vectorize S\nv=vecP(S)\n# vectorize only the first two columns of S\nv=vecP(S; range=1:2)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.matP","page":"riemannianGeometry.jl","title":"PosDefManifold.matP","text":"matP(ς::Vector{T}) where T<:RealOrComplex\n\nMatrizize a tangent vector (vector) ς : vec → mat.\n\nThis is the function reversing the vecP function, thus the weighting applied therein is reversed as well.\n\nIf ς=vecP(S) and S is an n·n Hermitian or Symmetric matrix, ς is a tangent vector of size n(n+1)/2. The result of calling matP(ς) is then the n·n matrix S. S is always returned flagged as Hermitian.\n\nTo Do: This function may be rewritten more efficiently.\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# projecting P onto the tangent space at the Fisher mean base point\nS=logMap(Fisher, P, G)\n# vectorize S\nv=vecP(S)\n# Rotate the vector by an orthogonal matrix\nn=Int(size(S, 1)*(size(S, 1)+1)/2)\nU=randU(n)\nz=U*v\n# Get the point in the tangent space\nS=matP(z)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.parallelTransport","page":"riemannianGeometry.jl","title":"PosDefManifold.parallelTransport","text":"(1) parallelTransport(S::ℍ{T}, P::ℍ{T}, Q::ℍ{T})\n\n(2) parallelTransport(S::ℍ{T}, P::ℍ{T})\n\n(3) parallelTransport(𝐒::ℍVector, P::ℍ{T}, Q::ℍ{T})\n\n(4) parallelTransport(𝐒::ℍVector, P::ℍ{T})\nfor all the above: where T<:RealOrComplex\n\nalias: pt\n\n(1) Parallel transport of tangent vector S (a matrix) lying on the tangent space at base-point P to the tangent space at base-point Q.\n\nS, P and Q must all be Hermitian matrices. Return an Hermitian matrix. The transport is defined as:\n\n∥_{(P→Q)}(S) = \big(QP^{-1}\big)^{1/2} S \big(QP^{-1}\big)^{H/2}.\n\nIf S is a positive definite matrix in the manifold (and not a tangent vector) it will be 'transported' from P to Q, amounting to (Yair et al., 2019)🎓:\n\nproject S onto the tangent space at base-point P,\nparallel transport it to the tangent space at base-point Q,\nproject it back onto the manifold at base-point Q.\n\n(2) Parallel transport as in (1), but to the tangent space at base-point the identity matrix.\n\nThe transport reduces in this case to:\n\n∥_{(P→I)}(S) = P^{-1/2} S P^{-1/2}.\n\n(3) Parallel transport as in (1) at once for k tangent vectors (matrices) in the 1d array 𝐒 = {S_1, …, S_k} of ℍVector type.\n\n(4) Parallel transport as in (2) at once for k tangent vectors (matrices) in the 1d array 𝐒 = {S_1, …, S_k} of ℍVector type.\n\nnote: Nota Bene\nCurrently only the Fisher metric is supported for parallel transport.\n\nSee also: logMap, expMap, vecP, matP.\n\nExamples\n\nusing PosDefManifold\n\n(1)\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n\n# i. projecting P onto the tangent space at base-point G\nS=logMap(Fisher, P, G)\n# ii. parallel transport S to the tangent space at base-point Q\nS_=parallelTransport(S, G, Q)\n# iii. projecting back into the manifold at base-point Q\nP_=expMap(Fisher, S_, Q)\n\n# i., ii. and iii. can be done simply by\nPP_=parallelTransport(P, G, Q)\n# check\nP_≈PP_ ? 
println(\" ⭐ \") : println(\" ⛔ \")\n\n(2)\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# transport to the tangent space at base-point the identity\nPP_=parallelTransport(P, G)\n\n(3)\nPset=randP(3, 4)\nQ=randP(3)\nG=mean(Fisher, Pset)\n# trasport at once all matrices in Pset\nPset2=parallelTransport(Pset, G, Q)\n\n(4)\nPset=randP(3, 4)\nG=mean(Fisher, Pset)\n# recenter all matrices so to have mean=I\nPset2=parallelTransport(Pset, G)\n# check\nmean(Fisher, Pset2) ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Procrustes-problems-1","page":"riemannianGeometry.jl","title":"Procrustes problems","text":"","category":"section"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\nprocrustes Solution to the Procrustes problem in the manifold of positive definite matrices","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"procrustes","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.procrustes","page":"riemannianGeometry.jl","title":"PosDefManifold.procrustes","text":"procrustes(P::ℍ{T}, Q::ℍ{T}, extremum=\"min\") where T<:RealOrComplex\n\nGiven two positive definite matrices P and Q, return by default the solution of problem\n\ntextrmargmin_Uδ(PU^HQU),\n\nwhere U varies over the set of unitary matrices and δ() is a distance or divergence function.\n\nU^HQU is named in physics the unitary orbit of Q.\n\nIf the argument extremum is passed as \"max\", it returns instead the solution of\n\ntextrmargmax_Uδ(PU^HQU).\n\nP and Q must be flagged as Hermitian. See typecasting matrices.\n\nAs it has been shown in Bhatia and Congedo (2019)🎓, using each of the Fisher, logdet zero, Wasserstein and the Kullback-Leibler divergence (see logdet α), the best approximant to P from the unitary orbit of Q commutes with P and, surprisingly, has the same closed-form expression, namely\n\nU_Q^U_P^H for the argmin and U_Q^U_P^H for the argmax,\n\nwhere U^ denotes the eigenvector matrix of the subscript argument with eigenvectors in columns sorted by decreasing order of corresponding eigenvalues and U^ denotes the eigenvector matrix of the subscript argument with eigenvectors in columns sorted by increasing order of corresponding eigenvalues.\n\nThe same solutions are known since a long time also by solving the extremal problem here above using the Euclidean metric (Umeyama, 1988).\n\nThe generalized Procrustes problem\n\ntextrmargmin_Usum_i=1^kδ(P_iU^HQ_iU)\n\ncan be solved using Julia package Manopt.\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nQ=randP(3)\n# argmin problem\nU=procrustes(P, Q)\n# argmax problem\nV=procrustes(P, Q, \"max\")\n\n\n\n\n\n","category":"function"},{"location":"test/#test.jl-1","page":"test.jl","title":"test.jl","text":"","category":"section"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Most functions in PosDefManifold are tested, both for real and complex data input. This unit declares the function testall() that performs all tests.","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Some functions are fully tested, the others are just executed. 
Once you run it, for each method of each function, a ⭐ sign is printed if the test is successful, while a ⛔ sign is printed if the test is not successful. A ☆ sign is printed if the function has been executed correctly.","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Tests on functions for which a multi-threaded version exists are indicated by the symbol ( ⏩ ).","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"If there are failures, the concerned functions will be listed as warnings.","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Note that the first time you execute the test it will take some time as the code will be compiled.","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Here below is the output of the testall() function (v0.1.3) run on the 20th of May 2019:","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"⭐ PosDefManifold testing utility⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Starting tests...","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Unit 'linearAlgebra.jl'","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"typeofMatrix: ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"dim: ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"det1: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"function tr1: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"normalizeCol!: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"ispos: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"colProd: ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"colNorm: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"sumOfSqr: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"sumOfSqrDiag: ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"sumOfSqrTril: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"tr: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"quadraticForm: ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"fidelity: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"fDiag: ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"DiagOfProd: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"mgs: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"fVec: ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"evd: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"spectralFunctions: ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"pow: ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"invsqrt: ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"sqr: ⭐ ⭐ 
⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"powerIterations: ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"choL: ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Unit 'signalProcessing.jl'","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"randλ: ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"randΛ: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"randU: ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"randP: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"regularize!: ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"gram: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"trade: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Unit 'riemannianGeometry.jl'","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"geodesic: ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceSqr (I): ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceSqr (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceSqr (III): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distance (I): ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distance (II): ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceSqrMat (I): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceSqrMat (I ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceSqrMat (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceSqrMat (II ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceMat (I): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceMat (I ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceMat (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"distanceMat (II ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"laplacian: ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"laplacianEigenMaps: ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"spectralEmbedding: ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"mean (I): ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"mean (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"mean (⏩ ): ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"means: ☆ ☆ 
☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"means (⏩ ): ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"generalizedMean: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"generalizedMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"geometricMean: ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"geometricMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"logdet0Mean: ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"logdet0Mean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"wasMean: ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"wasMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"powerMean: ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"powerMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"logMap: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"expMap: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"vecP: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"matP: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"procrustes: ☆ ☆","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"Unit 'classification.jl'","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"softmax: ⭐","category":"page"},{"location":"test/#","page":"test.jl","title":"test.jl","text":"[ Info: All tests were succesful!","category":"page"},{"location":"MainModule/#MainModule-(PosDefManifold.jl)-1","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the main unit containing the PosDefManifold module.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"It uses the following standard Julia packages:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"using\nLinearAlgebra\nStatistics","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Examples in some units of PosDefManifold also uses the Plots package. 
Take a look at this tutorial for an introduction to data plotting with Julia.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"The main module does not contain functions, but it declares all constants, types and aliases of Julia functions used in all units.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Contents\nconstants\naliases\ntypes\ntips & tricks","category":"page"},{"location":"MainModule/#constants-1","page":"MainModule (PosDefManifold.jl)","title":"constants","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"constant value numeric value\nsqrt2 √2 1.4142135623730951\nsqrt2inv 1/√2 0.7071067811865475\ngolden (√5+1)/2 1.618033988749...\ngoldeninv (√5-1)/2 0.618033988749...\nmaxpos 1e15 1000000000000000","category":"page"},{"location":"MainModule/#aliases-1","page":"MainModule (PosDefManifold.jl)","title":"aliases","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"alias Julia function in Package tab-completion REPL support\n𝚺 sum Base \bfSigma ⛔\n𝛍 mean Statistics \bfmu ⛔\n𝕄 Matrix Base \bbM ⛔\n𝔻 Diagonal LinearAlgebra \bbD ⛔\nℍ Hermitian LinearAlgebra \bbH ✓\n𝕃 LowerTriangular LinearAlgebra \bbL ⛔","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"All packages above are built-in Julia packages.","category":"page"},{"location":"MainModule/#types-1","page":"MainModule (PosDefManifold.jl)","title":"types","text":"","category":"section"},{"location":"MainModule/#Metric::Enumerated-type-1","page":"MainModule (PosDefManifold.jl)","title":"Metric::Enumerated type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"@enum Metric begin\r\n Euclidean =1\r\n invEuclidean =2\r\n ChoEuclidean =3\r\n logEuclidean =4\r\n LogCholesky =5\r\n Fisher =6\r\n logdet0 =7\r\n Jeffrey =8\r\n VonNeumann =9\r\n Wasserstein =10\r\nend","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Riemannian manipulations are defined for a given metric (see metrics). 
An instance of this type is requested as an argument in many functions contained in the riemannianGeometry.jl unit in order to specify the metric.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":" ## Example\r\n # generate a 15x15 symmetric positive definite matrix\r\n P=randP(15)\r\n # distance from P to the identity matrix according to the logdet0 metric\r\n d=distance(logdet0, P)","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"If you want to work consistently with a specific metric, you may want to declare in your script a global variable such as","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"global metric=logdet0 or global metric=Metric(Int(logdet0)),","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"and then pass metric as argument in all your computations, e.g., referring to the above example,","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"d=distance(metric, P).","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"To know what the current metric is, you can get it as a string using:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"s=string(metric)","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"To see the list of metrics in type Metric use:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"instances(Metric)","category":"page"},{"location":"MainModule/#Array-of-Matrices-types-1","page":"MainModule (PosDefManifold.jl)","title":"Array of Matrices types","text":"","category":"section"},{"location":"MainModule/#𝕄Vector-type-1","page":"MainModule (PosDefManifold.jl)","title":"𝕄Vector type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕄Vector=Vector{𝕄}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of general Matrix matrices, alias of MatrixVector. Julia sees it as: Array{Array{T,2} where T,1}. 
See aliases for the 𝕄 symbol and typecasting matrices for the use of matrices in PosDefManifold.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕄Vector₂ type","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕄Vector₂=Vector{𝕄Vector}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of 𝕄Vector type objects, i.e., a vector of vectors of matrices. It is the alias of MatrixVector₂. Julia sees it as: Array{Array{Array{T,2} where T,1},1}.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝕄Vector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#𝔻Vector-type-1","page":"MainModule (PosDefManifold.jl)","title":"𝔻Vector type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝔻Vector=Vector{𝔻}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of Diagonal matrices, alias of DiagonalVector. Julia sees it as: Array{Diagonal,1}. See aliases for the 𝔻 symbol and typecasting matrices for the use of Diagonal matrices in PosDefManifold.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝔻Vector₂ type","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝔻Vector₂=Vector{𝔻Vector}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of 𝔻Vector type objects, i.e., a vector of vectors of Diagonal matrices. It is the alias of DiagonalVector₂. 
Julia sees it as: Array{Array{Diagonal,1},1}.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝔻Vector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#𝕃Vector-type-1","page":"MainModule (PosDefManifold.jl)","title":"𝕃Vector type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕃Vector=Vector{𝕃}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of LowerTriangular matrices, alias of LowerTriangularVector. Julia sees it as: Array{LowerTriangular,1}. See aliases for the 𝕃 symbol and typecasting matrices for the use of LowerTriangular matrices in PosDefManifold.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕃Vector₂ type","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕃Vector₂=Vector{𝕃Vector}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of 𝕃Vector type objects, i.e., a vector of vectors of LowerTriangular matrices. It is the alias of LowerTriangularVector₂. Julia sees it as: Array{Array{LowerTriangular,1},1}.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝕃Vector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#ℍVector-type-1","page":"MainModule (PosDefManifold.jl)","title":"ℍVector type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"ℍVector=Vector{ℍ}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of Hermitian matrices, alias of HermitianVector. 
Julia sees it as: Array{Hermitian,1}. See aliases for the ℍ symbol and typecasting matrices for the use of Hermitian matrices in PosDefManifold.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"ℍVector₂ type","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"ℍVector₂=Vector{ℍVector}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of ℍVector type objects, i.e., a vector of vectors of Hermitian matrices. It is the alias of HermitianVector₂. Julia sees it as: Array{Array{Hermitian,1},1}.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several ℍVector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#RealOrComplex-type-1","page":"MainModule (PosDefManifold.jl)","title":"RealOrComplex type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"RealOrComplex=Union{Real, Complex}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of Real and Complex types.","category":"page"},{"location":"MainModule/#AnyMatrix-type-1","page":"MainModule (PosDefManifold.jl)","title":"AnyMatrix type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrix=Union{𝔻{T}, 𝕃{T}, ℍ{T}, 𝕄{T}} where T<:RealOrComplex","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of real or complex Diagonal, LowerTriangular, Hermitian and Matrix types. 
It is often used in the definition of functions.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See aliases","category":"page"},{"location":"MainModule/#AnyMatrixVector-type-1","page":"MainModule (PosDefManifold.jl)","title":"AnyMatrixVector type","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrixVector=Union{𝕄Vector, 𝔻Vector, 𝕃Vector, ℍVector}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of 𝕄Vector, 𝔻Vector, 𝕃Vector and ℍVector. It is often used in the definition of functions. See Array of Matrices types.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrixVector₂ type","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrixVector₂=Union{𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂, ℍVector₂}","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of 𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂, ℍVector₂. It is often used in the definition of functions. See Array of Matrices types.","category":"page"},{"location":"MainModule/#tips-and-tricks-1","page":"MainModule (PosDefManifold.jl)","title":"tips & tricks","text":"","category":"section"},{"location":"MainModule/#typecasting-matrices-1","page":"MainModule (PosDefManifold.jl)","title":"typecasting matrices","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Several functions in PosDefManifold implement multiple dispatch and can handle several kinds of matrices as input; however, the core functions for manipulating objects on the Riemannian manifold of positive definite matrices act by definition on positive definite matrices only. Those matrices must therefore be either symmetric positive definite (SPD, real) or Hermitian positive definite (HPD, complex). Such matrices are uniformly identified in PosDefManifold as being of the Hermitian type, using the standard LinearAlgebra package. The alias ℍ is used consistently in the code (see aliases). 
If the input is not flagged as Hermitian, the functions restricting the input to positive definite matrices will not be accessible.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Example","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"julia> using LinearAlgebra\r\n\njulia> f(S::Hermitian)=S*S'\r\nf (generic function with 1 method)\r\n\njulia> A=randn(3, 3)\r\n3×3 Array{Float64,2}:\r\n -0.67407 -0.344258 0.203714\r\n -1.06551 -0.0233796 0.975465\r\n -1.04727 -1.19807 -0.0219121\r\n\njulia> H=A*A' # although SPD, H is not automatically flagged as Hermitian\r\n3×3 Array{Float64,2}:\r\n 0.614384 0.924991 1.11391\r\n 0.924991 2.08738 1.12251\r\n 1.11391 1.12251 2.53263\r\n\njulia> f(H)\r\nERROR: MethodError: no method matching f(::Array{Float64,2})\r\nClosest candidates are:\r\n f(::Hermitian) at none:1","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"If you construct a positive definite matrix and it is not flagged, you can flag it simply by typecasting it, that is, passing as argument to the functions Hermitian(P) instead of just P. The ℍ alias can be used for short, i.e., ℍ(P). Continuing the example above:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"julia> f(ℍ(H)) # this way it works, equivalent to f(Hermitian(H))\r\n3×3 Array{Float64,2}:\r\n 2.47388 3.74948 4.54381\r\n 3.74948 6.4728 6.21635\r\n 4.54381 6.21635 8.91504","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Be careful: Hermitian(P) will construct an Hermitian matrix from the argument. If the matrix argument is not symmetric (if real) or Hermitian (if complex) it will be made so by copying the transpose (if real) or complex conjugate and transpose (if complex) of a triangular part into the other. See Hermitian.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"If you want to construct an ℍVector type from, say, two Hermitian matrices P and Q, don't write A=[P, Q], but rather A=ℍVector([P, Q]). In fact, the former is seen by Julia as","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"2-element Array{Hermitian{Float64,Array{Float64,2}},1},","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"while the latter as","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"2-element Array{Hermitian,1},","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"which is the type expected in all functions taking an ℍVector type as argument.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Other functions act on generic matrices (of type Matrix). This is seen by Julia as Array{T,2} where T. Keep in mind that the functions writing on the argument matrix such as normalizeCol! 
will give an error if you pass an Hermitian matrix, since Julia does not allow writing on non-diagonal elements of those matrices. In this case typecast it into another object using the Matrix type; supposing H is Hermitian, you would use for example:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"julia> X=Matrix(H)\r\njulia> normalizeCol!(X, 1)\r\njulia> norm(X[:, 1])\r\n1.0","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Some more examples:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Typecasting Adjoint matrices:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"```Matrix(X')```","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"here is how to get an Hermitian matrix out of the diagonal part of an Hermitian matrix H:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"```Hermitian(Matrix(Diagonal(H)))```","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"here is how to get a LowerTriangular matrix out of an Hermitian matrix H:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"```LowerTriangular(Matrix(H))```","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"For example, you can use this to pass a full inter-distance matrix to the laplacian function to obtain the Laplacian matrix.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"A useful function is typeofMatrix. For example, the following line typecasts matrix M to the type of matrix P and puts the result in A:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"A=typeofMatrix(P)(M)","category":"page"},{"location":"MainModule/#Threads-1","page":"MainModule (PosDefManifold.jl)","title":"Threads","text":"","category":"section"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Some functions in PosDefManifold explicitly call BLAS routines for optimal performance. This is reported in the help section of the concerned functions. Most functions call BLAS routines implicitly via Julia. You can set the number of threads the BLAS library should use by:","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"using LinearAlgebra\r\nBLAS.set_num_threads(n)","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"where n is the number of threads. By default, PosDefManifold reserves to BLAS all CPU threads available on your computer (given by the output of Sys.CPU_THREADS). The number of threads used by Julia for multi-threaded computations is given by the output of function Threads.nthreads(). 
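For example, to inspect both settings (a sketch; the output depends on your machine):\r\n\nusing LinearAlgebra\r\nThreads.nthreads() # number of threads used by Julia\r\nSys.CPU_THREADS # number of logical CPU threads available\r\nBLAS.set_num_threads(Sys.CPU_THREADS) # let BLAS use all of them\r\n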
In Windows the number of Julia threads is set by default to half the available threads. In Linux and OSX it defaults to one and is controlled by an environment variable, i.e.,","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"export JULIA_NUM_THREADS=4.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"In Linux, working with the Atom IDE, you also have to set to global the field found in Atom under Settings (or Preferences)/julia-client/Settings/Julia Options/Number of Threads.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"In Windows, set the desired number of threads in the settings of the julia-client Juno package.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See for example this post, this post and the Julia documentation on threads.","category":"page"},{"location":"MainModule/#","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Notice that PosDefManifold features many multi-threaded functions and these may allow a gain in computation time only if Julia is instructed to use at least two threads.","category":"page"},{"location":"#PosDefManifold-Documentation-1","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"","category":"section"},{"location":"#Requirements-1","page":"PosDefManifold Documentation","title":"Requirements","text":"","category":"section"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Julia version ≥ 1.3","category":"page"},{"location":"#Installation-1","page":"PosDefManifold Documentation","title":"Installation","text":"","category":"section"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Execute the following command in Julia's REPL:","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"]add PosDefManifold","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"To obtain the latest development version execute instead","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"]add PosDefManifold#master","category":"page"},{"location":"#About-the-Author-1","page":"PosDefManifold Documentation","title":"About the Author","text":"","category":"section"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Marco Congedo is a research scientist of CNRS (Centre National de la Recherche Scientifique), working in Grenoble, France.","category":"page"},{"location":"#Overview-1","page":"PosDefManifold Documentation","title":"Overview","text":"","category":"section"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"(Image: Figure 1)","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Riemannian geometry studies smooth manifolds, multi-dimensional curved spaces with peculiar geometries endowed with non-Euclidean metrics. 
In these spaces Riemannian geometry allows the definition of angles, geodesics (shortest path between two points), distances between points, centers of mass of several points, etc.","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"In this package we are concerned with the manifold P of positive definite matrices, either symmetric positive definite or Hermitian positive definite.","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"In several fields of research such as computer vision and brain-computer interface, treating data in the P manifold has allowed the introduction of machine learning approaches with remarkable characteristics, such as simplicity of use, excellent classification accuracy, as demonstrated by the winning score obtained in six international data classification competitions, and the ability to operate transfer learning (Congedo et al., 2017)🎓.","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"For a formal introduction to the P manifold the reader is referred to the monograph written by Bhatia (2007)🎓.","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"For an introduction to Riemannian geometry and an overview of mathematical tools implemented in this package, see Intro to Riemannian Geometry in this documentation.","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"To start using this package, browse the code units listed below and execute the many code examples you will find therein. 
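As a first taste, the following sketch computes the Fisher distance between two random positive definite matrices (it assumes the randP generator and the distance function documented in this package):\r\n\nusing PosDefManifold\r\nP=randP(3) # random 3×3 symmetric positive definite matrix\r\nQ=randP(3)\r\nd=distance(Fisher, P, Q) # their Fisher distance\r\n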
The core functions are contained in unit riemannianGeometry.jl.","category":"page"},{"location":"#Code-units-1","page":"PosDefManifold Documentation","title":"Code units","text":"","category":"section"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"PosDefManifold includes six code units (.jl files):","category":"page"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Unit Description\nMainModule (PosDefManifold.jl) Main module, constants, types, aliases, tips & tricks\nriemannianGeometry.jl The fundamental unit collecting all functions acting on the P manifold\nlinearAlgebra.jl Collection of linear algebra routines\nstatistics.jl Collection of statistics routines\nsignalProcessing.jl Collection of signal processing routines\ntest.jl Unit performing all tests","category":"page"},{"location":"#Contents-1","page":"PosDefManifold Documentation","title":"Contents","text":"","category":"section"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Pages = [ \"index.md\",\r\n \"introToRiemannianGeometry.md\",\r\n \"MainModule.md\",\r\n \"riemannianGeometry.md\",\r\n \"linearAlgebra.md\",\r\n \"statistics.md\",\r\n \"signalProcessing.md\",\r\n \"test.md\"]\r\nDepth = 1","category":"page"},{"location":"#Index-1","page":"PosDefManifold Documentation","title":"Index","text":"","category":"section"},{"location":"#","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"","category":"page"},{"location":"statistics/#statistics.jl-1","page":"statistics.jl","title":"statistics.jl","text":"","category":"section"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"Unit for statistics, probability and related functions.","category":"page"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"Category Output\n1. Probability functions relating to probability\n2. 
Descriptive Statistics functions relating to descriptive statistics","category":"page"},{"location":"statistics/#Probability-1","page":"statistics.jl","title":"Probability","text":"","category":"section"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"Function Description\nsoftmax compute softmax probabilities","category":"page"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"softmax","category":"page"},{"location":"statistics/#PosDefManifold.softmax","page":"statistics.jl","title":"PosDefManifold.softmax","text":"softmax(χ::Vector{T}) where T<:Real\n\nGiven a real vector of k non-negative scores χ=c_1,…,c_k, return the vector π=p_1,…,p_k of their softmax probabilities, as per\n\np_i=\frac{\textrm{e}^{c_i}}{\sum_{i=1}^{k}\textrm{e}^{c_i}}.\n\nExamples\n\nχ=[1.0, 2.3, 0.4, 5.0]\nπ=softmax(χ)\n\n\n\n\n\n","category":"function"},{"location":"statistics/#Descriptive-Statistics-1","page":"statistics.jl","title":"Descriptive Statistics","text":"","category":"section"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"Function Description\nmean scalar mean of real or complex numbers according to the specified metric\nstd scalar standard deviation of real or complex numbers according to the specified metric","category":"page"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"mean(metric::Metric, ν::Vector{T}) where T<:RealOrComplex","category":"page"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"See bottom of documentation of general function mean","category":"page"},{"location":"statistics/#","page":"statistics.jl","title":"statistics.jl","text":"std","category":"page"},{"location":"statistics/#Statistics.std","page":"statistics.jl","title":"Statistics.std","text":"std(metric::Metric, ν::Vector{T};\n corrected::Bool=true,\n mean=nothing) where T<:RealOrComplex\n\nStandard deviation of k real or complex scalars, using the specified metric of type Metric::Enumerated type and the specified mean if provided.\n\nOnly the Euclidean and Fisher metrics are supported by this function. Using the Euclidean metric, it returns the output of the standard Julia std function. 
Using the Fisher metric, it returns the scalar geometric standard deviation, which is defined as\n\n\sigma=\text{exp}\Big(\sqrt{k^{-1}\sum_{i=1}^{k}\text{ln}^2(v_i/\mu)}\Big).\n\nIf corrected is true, then the sum is scaled with k-1, whereas if it is false the sum is scaled with k.\n\nExamples\n\nusing PosDefManifold\n# Generate 10 random numbers distributed as a chi-square with 2 df.\nν=[randχ²(2) for i=1:10]\narithmetic_sd=std(Euclidean, ν) # mean not provided\ngeometric_mean=mean(Fisher, ν)\ngeometric_sd=std(Fisher, ν, mean=geometric_mean) # mean provided\n\n\n\n\n\n","category":"function"},{"location":"introToRiemannianGeometry/#Intro-to-Riemannian-Geometry-1","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The study of appropriate distance measures for positive definite matrices has recently grown very fast, driven by practical problems in radar data processing, image processing, computer vision, shape analysis, medical imaging (especially diffusion MRI and Brain-Computer Interface), sensor networks, elasticity, mechanics, numerical analysis and machine learning (e.g., see references in Congedo et al., 2017a)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In many applications the observed data can be conveniently summarized by positive definite matrices, which are either symmetric positive definite (SPD: real) or Hermitian Positive Definite (HPD: complex). For example, those may be some form of the data covariance matrix in the time, frequency or time-frequency domain, or autocorrelation matrices, kernels, slices of tensors, density matrices, elements of a search space, etc. Positive definite matrices are naturally treated as points on a smooth Riemannian manifold allowing useful operations such as interpolation, smoothing, filtering, approximation, averaging, signal detection and classification. Such operations are the object of the present PosDefManifold library.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"More formally, this Julia library treats operations on the metric space (P, δ^2) of n・n positive definite matrices endowed with a distance or symmetric divergence δ: P × P → [0, ∞). Several matrix distances or matrix divergences δ are considered. Using some of them, the most important one being the Fisher metric, we define a Riemannian manifold. In mathematics, this is the subject of Riemannian geometry and information geometry.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Note that throughout this library the word 'metric' is used loosely for referring to the actual Riemannian metric on the tangent space and to the resulting distance or to general symmetric divergence acting on P, regardless of the fact that we are dealing with a metric in the strict sense and of whether or not it induces a Riemannian geometry in P. 
This is done for convenience of exposition, since in practice those 'metrics' in PosDefManifold may be used interchangeably.","category":"page"},{"location":"introToRiemannianGeometry/#Riemannian-manifolds-1","page":"Intro to Riemannian Geometry","title":"Riemannian manifolds","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Here are some important definitions:","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A smooth manifold in differential geometry is a topological space that is locally similar to the Euclidean space and has a globally defined differential structure.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The tangent space at point G is the vector space containing the tangent vectors to all curves on the manifold passing through G (Fig. 1).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A smooth Riemannian manifold is equipped with an inner product on the tangent space (a Riemannian metric) defined at each point and varying smoothly from point to point. For manifold P the tangent space is the space of symmetric or Hermitian matrices.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Thus, a Riemannian metric turns the metric space (P, δ^2) into a Riemannian manifold. This is the case, for example, of the Fisher metric, which has a fundamental role in the manifolds of positive definite matrices, and of the Wasserstein metric, fundamental in optimal transport theory.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"(Image: Figure 1) Figure 1. Schematic illustration of the Riemannian manifold of positive definite matrices. Left: geodesic joining points P and Q, passing through its mid-point (mean) G (green curve), tangent space at point G with tangent vectors to geodesic from G to P and from G to Q (blue arrowed lines) and distance δ(G, Q). Right: the center of mass (also named mean) G of points P_1,…,P_4 defined as the point minimizing the sum of the four squared distances δ²(G, P_i), for i=1,…,4.","category":"page"},{"location":"introToRiemannianGeometry/#geodesic-1","page":"Intro to Riemannian Geometry","title":"geodesic","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The key object in the P manifold is the geodesic, loosely defined as the shortest path joining two points P and Q on the manifold, analogous to straight lines in the Euclidean space (Fig. 1). The geodesic equation with arclength 0≤a≤1 is the equation of the points along the path, denoted γ(P, Q, a), where with a=0 we stay at P and with a=1 we move all the way to Q. The points along the geodesic in between P and Q (0<a<1) can be understood as weighted means of P and Q. For example, the geodesic equation according to the Euclidean metric is (1-a)P + aQ, which is the traditional way to define weighted means. With the metrics we consider here, geodesics are unique and always exist. 
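As a quick numerical illustration of the Euclidean case (a sketch assuming the geodesic function and the randP generator of this package):\r\n\nusing PosDefManifold\r\nP=randP(3); Q=randP(3)\r\na=0.5\r\nG=geodesic(Euclidean, P, Q, a) # equals (1-a)P + aQ\r\n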
Furthermore, as we will see, using the Fisher metric those geodesics extend indefinitely, i.e., they are defined and always remain positive definite for -∞<a<∞.","category":"page"},{"location":"introToRiemannianGeometry/#distance-1","page":"Intro to Riemannian Geometry","title":"distance","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The length of the geodesic (at constant velocity) between two points gives the distance δ(P, Q). The distance is always real, non-negative and equal to zero if and only if P=Q.","category":"page"},{"location":"introToRiemannianGeometry/#distance-from-the-origin-1","page":"Intro to Riemannian Geometry","title":"distance from the origin","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In contrast to a Euclidean space, the origin of the P manifold endowed with the Fisher metric is not 0_n, but I_n, the identity matrix of dimension n・n. The distance between a point P and the origin, i.e., δ(P, I), is analogous therein to the length of vectors in Euclidean space. This Riemannian manifold is symmetric around I_n, i.e., δ(P, I)=δ(P^{-1}, I) and δ(P, Q)=δ(P^{-1}, Q^{-1}). This will be made more precise when we talk about invariances.","category":"page"},{"location":"introToRiemannianGeometry/#mean-1","page":"Intro to Riemannian Geometry","title":"mean","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The mid-point on the geodesic joining P and Q is named the mean. Using the Euclidean metric this is the arithmetic mean of P and Q and using the inverse Euclidean metric this is their harmonic mean. As we will see, those are straightforward extensions of their scalar counterparts. Using the Fisher metric the mid-point of the geodesic joining P and Q allows the proper generalization to matrices of the scalars' geometric mean. 
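For two matrices these mid-points can be obtained directly (a sketch, assuming the two-matrix mean methods of this package):\r\n\nusing PosDefManifold\r\nP=randP(3); Q=randP(3)\r\nA=mean(Euclidean, P, Q) # arithmetic mean (P+Q)/2\r\nH=mean(invEuclidean, P, Q) # harmonic mean\r\nG=mean(Fisher, P, Q) # geometric mean\r\n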
The other metrics allow other definitions of means (see below).","category":"page"},{"location":"introToRiemannianGeometry/#Fréchet-mean-1","page":"Intro to Riemannian Geometry","title":"Fréchet mean","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Using Fréchet's variational approach we can extend to positive-definite matrices the concept of weighted mean of a set of scalars; as the mid-point G on the geodesic joining P and Q is the minimizer of δ^2(P, G)+δ^2(Q, G), so the mean G of points P_1, P_2,…,P_k is the matrix G verifying","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\textrm{argmin}_G\sum_{i=1}^{k}δ^2(P_i, G)","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Thus, every metric induces a distance (or divergence) function, which, in turn, induces a mean.","category":"page"},{"location":"introToRiemannianGeometry/#invariances-1","page":"Intro to Riemannian Geometry","title":"invariances","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"An important characteristic of metrics is that they may induce invariance properties on the distance, which are in turn inherited by the mean.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Let us denote shortly by {P_i} the set P_1,…,P_k, where i=1,…,k, and by G{P_i} the Fréchet mean of the set (in this section we drop the weights to keep the notation short). The most important invariance properties are:","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"invariance effect on distance δ(P,Q) effect on mean G{P_i}\nrotation δ(P,Q)=δ(U^HPU,U^HQU) G{U^HP_iU}=U^HG{P_i}U\naffinity δ(P,Q)=δ(B^HPB,B^HQB) G{B^HP_iB}=B^HG{P_i}B\ninversion δ(P,Q)=δ(P^{-1},Q^{-1}) G{P_i^{-1}}=G^{-1}{P_i}","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"for any unitary U and non-singular B.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The affine invariance implies the rotation invariance and is also named congruence invariance.","category":"page"},{"location":"introToRiemannianGeometry/#metrics-1","page":"Intro to Riemannian Geometry","title":"metrics","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"We are interested in distance or divergence functions, the difference between the two being that a divergence does not need to be symmetric nor to satisfy the triangle inequality. Note that in PosDefManifold we consider only distances and symmetric divergences. In fact those are of greater interest in practice. One can find several distances and divergences in the literature and they often turn out to be related to each other, see for example (Chebbi and Moakher, 2012; Cichocki et al., 2015; Sra, 2016)🎓. 
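For a whole set of matrices a weighted Fréchet mean is obtained likewise; a sketch, assuming the w keyword of the mean function as per its documentation:\r\n\nusing PosDefManifold\r\n𝐏=randP(3, 5) # a vector of 5 random 3×3 SPD matrices\r\nw=[0.1, 0.2, 0.2, 0.2, 0.3] # non-negative weights summing up to 1\r\nG=mean(Fisher, 𝐏; w=w) # weighted Fisher (geometric) mean\r\n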
Ten of them are implemented in PosDefManifold and two of them are Riemannian metrics (the Fisher and Wasserstein metric as we have said). In this section we give a complete list of the expressions for their induced","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance of a point P from the origin,\ndistance between two points P and Q,\ngeodesic joining P to Q (hence the weighted means of P and Q),\nweighted Fréchet mean G(P, w) of a set of k≥2 points P_1,…,P_k with associated real non-negative weights w_1,…,w_k summing up to 1.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"note: Nota Bene\nIn the following, the weights w_1,…,w_k are always supposed to sum up to 1, superscript H indicates the conjugate transpose (or just the transpose if the matrix is real) and, if a is the arclength of a geodesic, we define for convenience b=1-a.","category":"page"},{"location":"introToRiemannianGeometry/#Euclidean-1","page":"Intro to Riemannian Geometry","title":"Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is the classical Euclidean distance leading to the usual arithmetic mean. In general this metric is not well adapted to the P manifold. It verifies only the rotation invariance; however, the mean also verifies the congruence invariance.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\lVert P-I\rVert^2 \lVert P-Q\rVert^2","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\nbP + aQ \sum_{i=1}^{k}w_i P_i","category":"page"},{"location":"introToRiemannianGeometry/#inverse-Euclidean-1","page":"Intro to Riemannian Geometry","title":"inverse Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is the classical harmonic distance leading to the harmonic mean. It verifies only the rotation invariance; however, the mean also verifies the congruence invariance.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\lVert P^{-1}-I\rVert^2 \lVert P^{-1}-Q^{-1}\rVert^2","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\n\big(bP^{-1} + aQ^{-1}\big)^{-1} \big(\sum_{i=1}^{k}w_i P_i^{-1}\big)^{-1}","category":"page"},{"location":"introToRiemannianGeometry/#Cholesky-Euclidean-1","page":"Intro to Riemannian Geometry","title":"Cholesky Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is a very simple metric that has been proposed in an attempt to improve on the Euclidean one. It is rarely used (see for example Dai et al., 2016)🎓. It does not verify any invariance. 
Let L_P be the lower triangular Cholesky factor of P, then","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\lVert L_P-I\rVert^2 \lVert L_P-L_Q\rVert^2","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\n(bL_P+aL_Q)(bL_P+aL_Q)^H \big(\sum_{i=1}^{k}w_i L_{P_i}\big)\big(\sum_{i=1}^{k}w_i L_{P_i}\big)^H","category":"page"},{"location":"introToRiemannianGeometry/#log-Euclidean-1","page":"Intro to Riemannian Geometry","title":"log Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"If matrices P_1,…,P_k all pair-wise commute, then this metric coincides with the Fisher metric. See (Arsigny et al., 2007; Bhatia et al., 2019a)🎓. It enjoys the rotation and inversion invariance. The log-Euclidean distance to I is the same as per the Fisher metric. This mean has the same determinant as the Fisher mean, and a trace equal to or greater than the trace of the Fisher mean. A minimum trace log Euclidean mean approximating well the Fisher mean has been proposed in Congedo et al. (2015)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\lVert\textrm{log}(P)\rVert^2 \lVert\textrm{log}(P)-\textrm{log}(Q)\rVert^2","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\n\textrm{exp}\big(b\hspace{1pt}\textrm{log}P + a\hspace{1pt}\textrm{log}Q\big) \textrm{exp}\big(\sum_{i=1}^{k}w_i\hspace{1pt}\textrm{log}P_i\big)","category":"page"},{"location":"introToRiemannianGeometry/#log-Cholesky-1","page":"Intro to Riemannian Geometry","title":"log Cholesky","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"It is a recently proposed distance in P. Like the Cholesky Euclidean metric here above, it exploits the diffeomorphism between matrices in P and their Cholesky factor, such that L_PL_P^H=P, thanks to the fact that the Cholesky factor is unique and that the map is smooth (Lin, 2019)🎓. 
The mean has the same determinant as the Fisher and log-Euclidean mean.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Let L_X, S_X and D_X be the lower triangle, the strictly lower triangle and the diagonal part of X, respectively (hence, S_X+D_X=L_X), then","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Distance² to I Distance²\n\lVert S_P-I\rVert^2+\lVert\textrm{log}D_P\rVert^2 \lVert S_P-S_Q\rVert^2+\lVert\textrm{log}D_P-\textrm{log}D_Q\rVert^2","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic: S_P+a(S_Q-S_P)+D_P\hspace{2pt}\textrm{exp}\big(a\textrm{log}D_Q-a\textrm{log}D_P\big)","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: TT^H, where T=\sum_{i=1}^{k}w_iS_{P_i}+\textrm{exp}\big(\sum_{i=1}^{k}w_i\textrm{log}D_{P_i}\big)","category":"page"},{"location":"introToRiemannianGeometry/#Fisher-1","page":"Intro to Riemannian Geometry","title":"Fisher","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The Fisher metric, also known as affine-invariant, natural and Fisher-Rao metric, among other names, has paramount importance for the P manifold, standing out as the natural choice both from the perspective of differential geometry and information geometry. Endowed with the Fisher metric the manifold P is Riemannian, has nonpositive curvature and is symmetric. This metric verifies all three invariances we have considered.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Distance² to I Distance²\n\lVert\textrm{log}(P)\rVert^2 \lVert\textrm{log}(P^{-1/2}QP^{-1/2})\rVert^2","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic\nP^{1/2}\big(P^{-1/2} Q P^{-1/2}\big)^a P^{1/2}","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: it does not have a closed-form solution in general. 
The solution is the unique positive definite matrix G satisfying (Bhatia and Holbrook, 2006; Moakher, 2005)🎓","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\sum_{i=1}^{k}w_i\textrm{log}\big(G^{-1/2} P_i G^{-1/2}\big)=0","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For estimating it, PosDefManifold implements the well-known gradient descent algorithm, resulting in iterations:","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G ← G^{1/2}\textrm{exp}\big(\sum_{i=1}^{k}w_i\textrm{log}(G^{-1/2} P_i G^{-1/2})\big)G^{1/2}","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Alternatively, and more efficiently, one can ask for an approximate solution invoking the MPM algorithm (Congedo et al., 2017b)🎓, which is also implemented (in order to estimate the geometric mean use function powerMean with parameter p=0 or with a very small value of p).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This mean is known under many different names (Fisher, Rao, Fisher-Rao, Pusz-Woronowicz, Cartan, Fréchet, Karcher, geometric…). The ‘centrality’ of this mean among a wide family of divergence-based means can be appreciated in Fig. 4 of Cichocki et al. (2015)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The geometric mean G of two matrices P and Q is denoted γ(P, Q, 1/2). Currently it is an object of intense study because of its interesting mathematical properties. For instance,","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"it is the unique solution to the Riccati equation GQ^{-1}G=P\nit is equal to F^{-H}D_1^{1/2}D_2^{1/2}F^{-1} for whatever joint diagonalizer F of P and Q, i.e., for whatever matrix F satisfying F^HPF=D_1 and F^HQF=D_2, with D_1, D_2 non-singular diagonal matrices (Congedo et al., 2015)🎓.\nit enjoys all 10 properties of means postulated in the seminal work of Ando et al. (2004)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"When P and Q commute, the Fisher mean of two matrices reduces to P^{1/2}Q^{1/2}, which in this case coincides with the log-Euclidean mean \textrm{exp}\big(\frac{1}{2}\textrm{log}P + \frac{1}{2}\textrm{log}Q\big).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"We denote the Fisher geodesic equation as γ(P, Q, a). 
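Returning to the mean, the gradient descent iterations given above can be written down naively in a few lines (a sketch for illustration only; the package's mean and powerMean functions are the optimized implementations):\r\n\nusing LinearAlgebra\r\n# naive Fisher mean of Hermitian PD matrices in vector P, with weights w summing up to 1\r\nfunction naiveFisherMean(P, w; tol=1e-9, maxit=100)\r\n    G = Hermitian(sum(w[i]*P[i] for i=1:length(P))) # initialize with the arithmetic mean\r\n    for _ = 1:maxit\r\n        R = sqrt(G); Ri = inv(R)\r\n        S = Hermitian(sum(w[i]*log(Hermitian(Ri*P[i]*Ri')) for i=1:length(P)))\r\n        norm(S) < tol && break # at the mean the sum of logs vanishes\r\n        G = Hermitian(R*exp(S)*R') # the iteration given above\r\n    end\r\n    return G\r\nend\r\n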
Note that γ(I, P, a)=P^a and γ(P, I, a)=P^b, where b=1-a.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The Fisher geodesic equation verifies γ(P, Q, a)=γ(Q, P, b) and (γ(P, Q, a))^{-1}=γ(P^{-1}, Q^{-1}, a).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"An interesting property of the Fisher metric is that using its geodesic equation we can extrapolate positive matrices, always remaining in P. That is, using any real value of a:","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"with 0<a<1 we move toward Q (attraction),\nwith a>1 we move over and beyond Q (extrapolation) and\nwith a<0 we move back away from Q (repulsion).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Something similar can be done using the log Cholesky metric as well.","category":"page"},{"location":"introToRiemannianGeometry/#power-means-1","page":"Intro to Riemannian Geometry","title":"power means","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The arithmetic, harmonic and geometric mean we have encountered are all members of the 1-parameter family of power means (with parameter p∈[-1, 1]) introduced by Lim and Palfia (2012)🎓 to generalize the concept of power means of scalars (also known as Hölder means or generalized means). The family of power means G with parameter p satisfies the equation","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G=\sum_{i=1}^{k}w_i\gamma(G, P_i, p),","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"where γ(G, P_i, p) is the Fisher geodesic equation we have discussed here above talking about the Fisher metric. 
In particular:","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"with p=-1 this is the harmonic mean (see the inverse Euclidean metric)\nwith p=+1 this is the arithmetic mean (see the Euclidean metric)\nat the limit of p evaluated at zero from both sides this is the geometric mean (see the Fisher metric).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Thus, the family of power means continuously interpolates between the arithmetic and harmonic mean, passing through the geometric mean.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Power means are the unique positive definite solution of (Yamazaki, 2019)🎓","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\sum_{i=1}^{k}w_i\big(G^{-1/2} P_i G^{-1/2}\big)^p=I.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"All power means enjoy the congruence invariance (hence the rotation invariance), but only the geometric mean enjoys also the inversion invariance.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The power mean with p=1/2 is the solution of the Fréchet mean problem using the following divergence (Bhatia, Gaubert and Jain, 2019)🎓","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"δ^2(P,Q)=\textrm{tr}(P+Q)-2\textrm{tr}\hspace{1pt}\gamma(P, Q, \frac{1}{2}) = \textrm{tr}(\textrm{arithm. mean}(P, Q))-\textrm{tr}(\textrm{geom. mean}(P, Q))","category":"page"},{"location":"introToRiemannianGeometry/#generalized-means-1","page":"Intro to Riemannian Geometry","title":"generalized means","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"When the matrices in the set all pairwise commute, it has been proved in Lim and Palfia (2012, see Property 1, p. 1502)🎓 that the power means we have just seen reduce to","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\big(\sum_{i=1}^{k}w_iP_i^p\big)^{1/p},","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"which are the straightforward extension of scalar power means (see generalized means) to matrices. As usual, such straightforward extensions work well in commuting algebra, but not in general. See for example the case of the mean obtained using the log Euclidean metric, which is the straightforward extension to matrices of the scalar geometric mean, but is not the matrix geometric mean, unless the matrices all pairwise commute.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Both the generalized means and the power means have a parameter p∈[-1, 1]. 
For the latter, the solution is implemented via the fixed-point MPM algorithm (Congedo et al., 2017b)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#modified-Bhattacharyya-mean-1","page":"Intro to Riemannian Geometry","title":"modified Bhattacharyya mean","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"If matrices P_1, P_2,…,P_k all pair-wise commute, the special case p=1/2 yields the following instance of power means (and of generalized means):","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\big(\sum_{i=1}^{k}w_iP_i^{1/2}\big)^{1/2}.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This mean has been proposed in a different context by Moakher (2012)🎓 as a modified Bhattacharyya mean, since it is a modification of the Bhattacharyya mean we will encounter next under the name logdet zero. It is worth noting that in commuting algebra Moakher’s mean also corresponds to the mean obtained with the Wasserstein metric.","category":"page"},{"location":"introToRiemannianGeometry/#logdet-zero-1","page":"Intro to Riemannian Geometry","title":"logdet zero","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The logdet zero divergence, also known as the square of the Bhattacharyya divergence (Moakher, 2013)🎓, Stein divergence (Harandi et al., 2016)🎓, symmetrized Jensen divergence, the S-divergence (Sra, 2016)🎓 or the log determinant α-divergence (with α=0, Chebbi and Moakher, 2012🎓) is a Jensen-Bregman symmetric divergence enjoying all three invariances we have listed.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Its square root has been shown to be a distance (Sra, 2016)🎓. It behaves very similarly to the Fisher metric at short distances (Moakher, 2012; Sra, 2016; Cichocki et al., 2015; Harandi et al., 2016)🎓 and the mean of two matrices in P is the same as the Fisher mean (Harandi et al., 2016)🎓. Thus, it has often been used instead of the Fisher metric because it allows more efficient calculations. 
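For example (a sketch, assuming the distance function and the logdet0 metric instance of this package):\r\n\nusing PosDefManifold\r\nP=randP(20); Q=randP(20)\r\nd1=distance(Fisher, P, Q) # requires generalized eigenvalues\r\nd2=distance(logdet0, P, Q) # cheaper, and close to d1 when P and Q are close\r\n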
In fact, the calculation of this distance requires only three Cholesky decompositions, whereas the computation of the Fisher distance involves extracting generalized eigenvalues.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\textrm{logdet}\frac{1}{2}(P+I)-\frac{1}{2}\textrm{logdet}(P) \textrm{logdet}\frac{1}{2}(P+Q)-\frac{1}{2}\textrm{logdet}(PQ)","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic: we use the Fréchet mean with appropriate weights.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: the solution is the unique positive definite matrix G satisfying","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\sum_{i=1}^{k}w_i\big(\frac{1}{2}P_i+\frac{1}{2}G\big)^{-1}=G^{-1}.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For estimating it PosDefManifold implements the fixed-point iterations (Moakher, 2012, p. 315)🎓:","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G ← \frac{1}{2}\big(\sum_{i=1}^{k}w_i(P_i+G)^{-1}\big)^{-1}.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The logdet zero divergence between P and Q can also be written as the log-determinant of their arithmetic mean minus the log-determinant of their geometric mean (Moakher, 2012)🎓, which thus defines a possible extension to matrices of the useful concept of Wiener entropy.","category":"page"},{"location":"introToRiemannianGeometry/#logdet-α-1","page":"Intro to Riemannian Geometry","title":"logdet α","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The log determinant α-divergence family for α∈[-1, 1] (Chebbi and Moakher, 2012)🎓 allows","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"the logdet zero mean for α=0,\nthe left Kullback-Leibler mean for α=-1 (which is the harmonic mean),\nthe right Kullback-Leibler mean for α=1 (which is the arithmetic mean).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"We do not consider the left and right Kullback-Leibler divergences because the related means are trivially the harmonic and arithmetic one (Moakher, 2012). As per the symmetrized Kullback-Leibler divergence, this is known as Jeffrey divergence and will be considered next. 
The log determinant α-divergence family of means is not implemented in PosDefManifold (besides the special cases α=(-1, 0, 1)), since the family of power means is implemented.","category":"page"},{"location":"introToRiemannianGeometry/#Jeffrey-1","page":"Intro to Riemannian Geometry","title":"Jeffrey","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is a Jensen-Bregman symmetric divergence, also known as the symmetrized Kullback-Leibler divergence (see logdet α) (Faraki et al., 2015)🎓. It enjoys all three invariances we have listed.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\frac{1}{2}\textrm{tr}\big(P+P^{-1}\big)-n \frac{1}{2}\textrm{tr}(Q^{-1}P+P^{-1}Q)-n","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic: we use the Fréchet mean with appropriate weights.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: A^{1/2}\big(A^{-1/2}HA^{-1/2}\big)^{1/2}A^{1/2}, where A is the arithmetic mean (see Euclidean metric) and H is the harmonic mean (see inverse Euclidean metric). Thus, the weighted Fréchet mean is the geometric mean (see Fisher metric) of the arithmetic and harmonic mean (Moakher, 2012)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Note that this is the geometric mean only for k=2, that is, for scalars; in general, for matrices, the geometric mean is not the geometric mean of the arithmetic and harmonic mean (the only metric inducing the geometric mean in general is the Fisher metric).","category":"page"},{"location":"introToRiemannianGeometry/#Von-Neumann-1","page":"Intro to Riemannian Geometry","title":"Von Neumann","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The Von Neumann divergence is a Jensen-Bregman symmetric divergence (Sra, 2016; Taghia et al., 2019)🎓. It enjoys only the rotation invariance.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\frac{1}{2}\textrm{tr}(P\textrm{log}P-\textrm{log}P) \frac{1}{2}\textrm{tr}\big(P(\textrm{log}P-\textrm{log}Q)+Q(\textrm{log}Q-\textrm{log}P)\big)","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The geodesic and weighted Fréchet mean for this metric are not available.","category":"page"},{"location":"introToRiemannianGeometry/#Wasserstein-1","page":"Intro to Riemannian Geometry","title":"Wasserstein","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is an extension to matrices of the Hellinger divergence for vectors and is also known as the Bures divergence in quantum physics, where it is applied on density matrices (unit trace positive-definite matrices). It enjoys only the rotation invariance. Endowed with the Wasserstein metric the manifold P has a Riemannian geometry of nonnegative curvature. 
See (Bhatia et al., 2019a; Bhatia et al., 2019b)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\textrm{tr}(P+I)-2\textrm{tr}(P^{1/2}) \textrm{tr}(P+Q)-2\textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2}","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic\nb^2P+a^2Q+ab\big[(PQ)^{1/2}+(QP)^{1/2}\big]","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The quantity \textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2} is known in quantum physics as the fidelity of P and Q when those are density matrices (unit-trace positive definite matrices).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: the solution is the unique positive definite matrix G satisfying (Agueh and Carlier, 2011)🎓","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G=\sum_{i=1}^{k}w_i\big(G^{1/2} P_i G^{1/2}\big)^{1/2}.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For estimating it, PosDefManifold implements the fixed-point algorithm of Álvarez-Esteban et al. (2016)🎓, giving iterations:","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G ← G^{-1/2}\big(\sum_{i=1}^{k}w_i(G^{1/2}P_i G^{1/2})^{1/2}\big)^2 G^{-1/2}","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In the special case when the matrices all pair-wise commute, the Wasserstein mean is equal to the instance of power means and generalized means with p=1/2 (Bhatia, Jain and Lim, 2019b)🎓, that is, to the modified Bhattacharyya mean.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In the special case k=2 and equal weights the mean is W=\frac{1}{4}\big(P+Q+(PQ)^{1/2}+(QP)^{1/2}\big).","category":"page"},{"location":"introToRiemannianGeometry/#-1","page":"Intro to Riemannian Geometry","title":"🎓","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"References","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Agueh, G. Carlier (2011) Barycenters in the Wasserstein space, SIAM J. Mat. Anal. Appl. 43, 904-924.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"P. C. Álvarez-Esteban, E. del Barrio, J.A. Cuesta-Albertos, C. Matrán (2016) A fixed-point approach to barycenters in Wasserstein space, Journal of Mathematical Analysis and Applications, 441(2), 744-762.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"T. Ando, C.-K. Li, R. 
{"location":"introToRiemannianGeometry/#-1","page":"Intro to Riemannian Geometry","title":"🎓","text":"","category":"section"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"References","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Agueh, G. Carlier (2011) Barycenters in the Wasserstein space, SIAM J. Mat. Anal. Appl. 43, 904-924.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"P. C. Álvarez-Esteban, E. del Barrio, J.A. Cuesta-Albertos, C. Matrán (2016) A fixed-point approach to barycenters in Wasserstein space, Journal of Mathematical Analysis and Applications, 441(2), 744-762.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"T. Ando, C.-K. Li, R. Mathias (2004) Geometric means, Linear Algebra and its Applications, 385(1), 305-334.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"V. Arsigny, P. Fillard, X. Pennec, N. Ayache (2007) Geometric means in a novel vector space structure on symmetric positive-definite matrices, SIAM journal on matrix analysis and applications, 29(1), 328-347.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2012) Multi-class Brain Computer Interface Classification by Riemannian Geometry, IEEE Transactions on Biomedical Engineering, 59(4), 920-928.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2013) Classification of covariance matrices using a Riemannian-based kernel for BCI applications, Neurocomputing, 112, 172-178.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia (2007) Positive Definite Matrices. Princeton University press.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, M. Congedo (2019) Procrustes problems in manifolds of positive definite matrices, Linear Algebra and its Applications, 563, 440-445.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, S. Gaubert, T. Jain (2019) Matrix versions of the Hellinger distance, arXiv:1901.01378.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, J. Holbrook (2006) Riemannian geometry and matrix geometric means, Linear Algebra and its Applications, 413 (2-3), 594-618.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, T. Jain (2010) Approximation problems in the Riemannian metric on positive definite matrices, Ann. Funct. Anal., 5(2), 118-126.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, T. Jain, Y. Lim (2019a) Inequalities for the Wasserstein mean of positive definite matrices, Linear Algebra and its Applications, in press.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, T. Jain, Y. Lim (2019b) On the Bures-Wasserstein distance between positive definite matrices, Expositiones Mathematicae, in press.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Z. Chebbi, M. Moakher (2012) Means of Hermitian positive-definite matrices based on the log-determinant α-divergence function, Linear Algebra and its Applications, 436(7), 1872-1889.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A. Cichocki, S.
Cruces, S.-I. Amari (2015) Log-Determinant Divergences Revisited: Alpha-Beta and Gamma Log-Det Divergences, Entropy, 17(5), 2988-3034.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R.R. Coifman, Y. Shkolnisky, F.J. Sigworth, A. Singer (2008) Graph Laplacian Tomography From Unknown Random Projections, IEEE Transactions on Image Processing, 17(10), 1891-1899.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Congedo, B. Afsari, A. Barachant, M. Moakher (2015) Approximate Joint Diagonalization and Geometric Mean of Symmetric Positive Definite Matrices, PLoS ONE 10(4): e0121423.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Congedo, A. Barachant, R. Bhatia (2017a) Riemannian Geometry for EEG-based Brain-Computer Interfaces; a Primer and a Review, Brain-Computer Interfaces, 4(3), 155-174.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Congedo, A. Barachant, E. Kharati Koopaei (2017b) Fixed Point Algorithms for Estimating Power Means of Positive Definite Matrices, IEEE Transactions on Signal Processing, 65(9), 2211-2220.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"X. Dai, S. Khamis, Y. Zhang, L.S. Davis (2016) Parameterizing region covariance: an efficient way to apply sparse codes on second order statistics, arXiv:1602.02822.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Faraki, M. Harandi, F. Porikli (2015) More About VLAD: A Leap from Euclidean to Riemannian Manifolds, IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"W. Förstner, B. Moonen (1999) A metric for covariance matrices, In Krumm K and Schwarze VS eds. Quo vadis geodesia...?, number 1999.6 in tech. report of the Dept. of Geodesy and Geoinformatics, p.113–128, Stuttgart University.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M.T. Harandi, R. Hartley, B. Lovell, C. Sanderson (2016) Sparse coding on symmetric positive definite manifolds using Bregman divergences, IEEE transactions on neural networks and learning systems, 27 (6), 1294-1306.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"N.J. Higham (1988) Computing a Nearest Symmetric Positive Semidefinite Matrix, Linear Algebra and its Applications, 103, 103-118.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"J. Ho, G. Cheng, H. Salehian, B.C. Vemuri (2013) Recursive Karcher Expectation Estimators and Geometric Law of Large Numbers, Proc. of the AISTATS Conf.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"B.
Iannazzo (2016) The geometric mean of two matrices from a computational viewpoint, Numerical Linear Algebra with Applications, 23-2, 208-229.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"S. Lafon (2004) Diffusion maps and geometric harmonics, Ph.D. dissertation, Yale University, New Haven, CT.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Y. Lim, M. Pálfia (2012) Matrix power means and the Karcher mean, Journal of Functional Analysis, 262(4), 1498-1514.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Z. Lin (2019) Riemannian Geometry of Symmetric Positive Definite Matrices via Cholesky Decomposition, in press.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"E. Massart, J.M. Hendrickx, P.-A. Absil (2018) Matrix Geometric Means based on shuffled inductive sequences, Linear Algebra and its Applications, 252, 334-359.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Moakher (2005) A Differential Geometric Approach to the Geometric Mean of Symmetric Positive-Definite Matrices, SIAM Journal on Matrix Analysis and Applications, 26(3), 735-747.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Moakher (2012) Divergence measures and means of symmetric positive-definite matrices, in D.H. Laidlaw and A. Vilanova (Eds) \"New Developments in the Visualization and Processing of Tensor Fields\", Springer, Berlin.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"C. Mostajeran, C. Grussler, R. Sepulchre (2019) Geometric Matrix Midranges, arXiv:1907.04188.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"X. Pennec, P. Fillard, N. Ayache (2006) A Riemannian Framework for Tensor Computing, International Journal of Computer Vision, 66(1), 41-66.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"P.L.C. Rodrigues, M. Congedo, C. Jutten (2018) Multivariate Time-Series Analysis Via Manifold Learning, in Proc. of the IEEE Statistical Signal Processing Workshop (SSP 2018), Freiburg im Breisgau, Germany.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"S. Sra (2016) Positive definite matrices and the S-divergence, Proc. Amer. Math. Soc., 144, 2787-2797.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"J. Taghia, M. Bånkestad, F. Lindsten, T.B. Schön (2019) Constructing the Matrix Multilayer Perceptron and its Application to the VAE, arXiv:1902.01182v1","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"S.
Umeyama (1988) An Eigendecomposition Approach to Weighted Graph Matching Problems, IEEE Trans. Pattern. Anal. Mach. Intell., 10(5), 695-703.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"O. Yair, M. Ben-Chen, R. Talmon (2019) Parallel Transport on the Cone Manifold of SPD Matrices for Domain Adaptation, IEEE Trans. Sig. Process. 67(7), 1797-1811.","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"T. Yamazaki (2019) The Ando-Hiai inequalities for the solution of the generalized Karcher Equation and related results, arXiv:1802.06200v2.","category":"page"}] +[{"location":"signalProcessing/#signalProcessing.jl","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"","category":"section"},{"location":"signalProcessing/","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"This unit contains miscellaneous signal processing functions useful in relation to the Riemannian geometry of the manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. In Julia those are Hermitian matrices, see typecasting matrices.","category":"page"},{"location":"signalProcessing/","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"Function Description\nrandChi², randχ² Generate a random variable distributed as a chi-squared\nrandEigvals, randλ Generate a random vector of real positive eigenvalues\nrandEigvalsMat, randΛ Generate a random diagonal matrix of real positive eigenvalues\nrandUnitaryMat, randU Generate a random orthogonal or unitary matrix\nrandPosDefMat, randP Generate one or an array of random positive definite matrices\nregularize! Regularize an array of positive definite matrices\ngram Gram matrix of a matrix\ntrade Trace and determinant of a matrix as a 2-tuple","category":"page"},{"location":"signalProcessing/","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"⋅","category":"page"},{"location":"signalProcessing/","page":"signalProcessing.jl","title":"signalProcessing.jl","text":"randChi²\r\nrandEigvals\r\nrandEigvalsMat\r\nrandUnitaryMat\r\nrandPosDefMat\r\nregularize!\r\ngram\r\ntrade","category":"page"},{"location":"signalProcessing/#PosDefManifold.randChi²","page":"signalProcessing.jl","title":"PosDefManifold.randChi²","text":"randChi²(df::Int)\n\nalias: randχ²\n\nGenerate a random variable distributed as a chi-squared with df degrees of freedom.\n\nIt uses the Wilson–Hilferty transformation for df>=20 - see chi-squared distribution.\n\nExamples\n\nusing Plots, PosDefManifold\nchi=[randχ²(2) for i=1:10000]\nhistogram(chi) # needs Plots package. Check your plots back-end.\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randEigvals","page":"signalProcessing.jl","title":"PosDefManifold.randEigvals","text":" randEigvals(n::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3 >)\n\nalias: randλ\n\nGenerate an n-vector of random real positive eigenvalues. The eigenvalues are generated as in function randΛ (randEigvalsMat), the syntax of which is used.\n\nSee also: randU (randUnitaryMat), randP (randPosDefMat).\n\nExamples\n\nusing Plots, PosDefManifold\nλ=sort(randλ(10), rev=true)\nσ=sort(randλ(10, eigvalsSNR=10), rev=true)\nplot(λ) # needs Plots package. Check your plots back-end.\nplot!(σ) # needs Plots package.
Check your plots back-end.\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randEigvalsMat","page":"signalProcessing.jl","title":"PosDefManifold.randEigvalsMat","text":" (1) randEigvalsMat(n::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3 >)\n\n (2) randEigvalsMat(n::Int, k::Int;\n < same keyword arguments as in (1) >)\n\nalias: randΛ\n\n(1) Generate an nn diagonal matrix of random real positive eigenvalues.\n\n(2) An array 1d (of 𝔻Vector type) of k matrices of the kind in (1)\n\nThe eigenvalues are generated according to model\n\nλ_i=χ_df^2+ηhspace6pttextrmforhspace2pti=1n\n\nwhere\n\nχ_df^2 (signal term) is randomly distributed as a chi-square with df degrees of freedom,\nη is a white noise term, function of eigvalsSNR, such that\n\ntextrmeigenvalues SNR=mathbbEbig(sum_i=1^nλ_ibig)bignη\n\nThe expected sum mathbbEbig(sum_i=1^nλ_ibig) here above is the expected variance of the signal term, i.e., n(df), since the expectation of a random chi-squared variable is equal to its degrees of freedom.\n\nIf eigvalsSNR=Inf is passed as argument, then η is set to zero, i.e., no white noise is added. In any case eigvalsSNR must be positive.\n\nNote that with the default value of df (df=2) the generating model assumes that the eigenvalues have exponentially decaying variance, which is often observed on real data.\n\nnote: Nota Bene\nThe eigvalsSNR expresses the expected eigenvalues SNR (signal-to-noise ratio), not the real one, and is not expressed in decibels, but as the expected SNR variance ratio.\n\nThis function is used by function randP (randPosDefMat) to generate random positive definite matrices with added white noise in order to emulate eigenvalues observed in real data and to improve the conditioning of the generated matrices with respect to inversion.\n\nSee also: randλ (randEigvals), randU (randUnitaryMat), randP (randPosDefMat), randχ² (randChi²).\n\nExamples\n\nusing PosDefManifold\n# (1)\nn=3;\nU=randU(n);\nΛ=randΛ(n, eigvalsSNR=100)\nP=U*Λ*U' # generate an SPD matrix\nusing LinearAlgebra\nQ=ℍ(U*Λ*U') # generate an SPD matrix and flag it as 'Hermitian'\n\n# (2) generate an array of 10 matrices of simulated eigenvalues\nDvec=randΛ(n, 10)\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randUnitaryMat","page":"signalProcessing.jl","title":"PosDefManifold.randUnitaryMat","text":"(1) randUnitaryMat(n::Int)\n(2) randUnitaryMat(::Type{Complex{T}}, n::Int)\n\naliases: randOrthMat, randU\n\nGenerate a random nn\n\n(1) orthogonal matrix (real)\n(2) unitary matrix (complex)\n\nThe matrices are generated running the modified (stabilized) Gram-Schmidt orthogonalization procedure (mgs) on an nn matrix filled with random Gaussian elements.\n\nSee also: randΛ (randEigvals), randP (randPosDefMat).\n\nExamples\n\nusing PosDefManifold\nn=3;\nX=randU(n)*sqrt(randΛ(n))*randU(n)' # (1) generate a random square real matrix\n\nU=randU(ComplexF64, n);\nV=randU(ComplexF64, n);\nY=U*sqrt(randΛ(n))*V' # (2) generate a random square complex matrix\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.randPosDefMat","page":"signalProcessing.jl","title":"PosDefManifold.randPosDefMat","text":" (1) randPosDefMat(n::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3 >)\n\n (2) randPosDefMat(::Type{Complex{T}}, n:: Int;\n < same keyword arguments as in (1) >)\n\n (3) randPosDefMat(n::Int, k::Int;\n <\n df::Int=2,\n eigvalsSNR::Real=10e3,\n SNR::Real=100,\n commuting=false >)\n\n (4) randPosDefMat(::Type{Complex{T}}, n::Int, k::Int;\n 
< same keyword arguments as in (3) >)\n\nalias: randP\n\nGenerate\n\n(1) one random Hermitian positive definite matrix (real) of size nn\n(2) one random Hermitian positive definite matrix (complex) of size nn\n(3) an array 1d (of ℍVector type) of k matrices of the kind in (1)\n(4) an array 1d (of ℍVector type) of k matrices of the kind in (2).\n\nMethods (3) and (4) are multi-threaded. See Threads.\n\nFor (1) and (2) the matrix is generated according to model\n\nUΛU^H+ηI,\n\nwhere U is a random orthogonal (1) or unitary (2) matrix generated by function randU (randUnitaryMat) and Λ, η are a positive definite diagonal matrix and a non-negative scalar depending on df and eigvalsSNR randomly generated calling function randΛ (randEigvalsMat).\n\nFor (3) and (4), if commuting=true is passed, the k matrices are generated according to model\n\nUΛ_iU^H+ηIhspace8pt, for i=1:k\n\notherwise they are generated according to model\n\n(UΛ_iU^H+ηI)+φ(V_iΔ_iV_i^H+ηI)hspace8pt, for i=1:k Eq. [1]\n\nwhere\n\nU and the V_i are random (3) orthogonal/(4) unitary matrices,\nΛ_i and Δ_i are positive definite diagonal matrices\nη is a non-negative scalar.\n\nAll variables here above are randomly generated as in (1) and (2).\n\nφ is adjusted so as to obtain a desired output SNR (signal-to-noise ratio), such that\n\nSNR=fracdisplaystylesum_i=1^ktextrmtr(UΛ_iU^H+ηI)displaystylesum_i=1^ktextrmtrφ(V_iΔ_iV_i^H+ηI).\n\nnote: Nota Bene\nThe keyword argument SNR is not expressed in decibels, but as the expected SNR variance ratio. It must be a positive number.\n\nA slightly different version of this model for generating positive definite matrices has been proposed in (Congedo et al., 2017b)[🎓]; in the model of Eq. [1]\n\nUΛ_iU^H is the signal term, where the signal is supposed to share the same coordinates for all matrices,\nφ(V_iΔ_iV_i^H) is a structured noise term, which is different for all matrices,\nηI is a white noise term, with same variance for all matrices.\n\nSee also: the aforementioned paper and randΛ (randEigvalsMat).\n\nExamples\n\nusing PosDefManifold\nR=randP(10, df=10, eigvalsSNR=1000) # 1 SPD matrix of size 10x10 # (1)\nH=randP(ComplexF64, 5, eigvalsSNR=10) # 1 Hermitian Matrix of size 5x5 # (2)\nℛ=randP(10, 1000, eigvalsSNR=100) # 1000 SPD Matrices of size 10x10 # (3)\nusing Plots\nheatmap(Matrix(ℛ[1]), yflip=true, c=:bluesreds)\nℋ=randP(ComplexF64, 20, 1000) # 1000 Hermitian Matrices of size 20x20 # (4)\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.regularize!","page":"signalProcessing.jl","title":"PosDefManifold.regularize!","text":"(1) regularize!(P::ℍ; <SNR=10000>)\n(2) regularize!(𝐏::ℍVector; <SNR=10000>)\n\nAdd white noise to either\n\n(1) a positive definite matrix P of size nn, or\n(2) a 1d array 𝐏 of k positive definite matrices of size nn, of ℍVector type.\n\nThe added noise improves the matrix conditioning with respect to inversion. This is used to avoid numerical errors when decomposing these matrices or when evaluating some functions of their eigenvalues such as the log.\n\nA constant value is added to all diagonal elements of (1) P or (2) of all matrices in 𝐏, that is, on output:\n\ntextrm(1)hspace2ptPleftarrow P+ηI\n\ntextrm(2)hspace2pt𝐏_ileftarrow 𝐏_i+ηI hspace2pttextrmforhspace2pt i=1k\n\nThe amount of added noise η is determined by the keyword argument SNR, which by default is 10000.
This is such that\n\ntextrm(1)hspace2ptSNR=fracdisplaystyletextrmtr(P)displaystyletextrmtr(ηI)\n\ntextrm(2)hspace2ptSNR=fracdisplaystylesum_i=1^ktextrmtr(𝐏_i)displaystyle khspace1pttextrmtr(ηI)\n\nP in (1) must be flagged as Hermitian. See typecasting matrices.\n\nnote: Nota Bene\nThe keyword argument SNR expresses a SNR (signal-to-noise ratio), and is not expressed in decibels, but as the SNR variance ratio. It must be a positive number. Differently from functions randΛ (randEigvalsMat), randλ (randEigvals) and randP (randPosDefMat), the SNR here is not the expected SNR, but the actual SNR.\n\nSee also: randP (randPosDefMat).\n\nExamples\n\n# (1)\nusing LinearAlgebra, Plots, PosDefManifold\nn=3\nU=randU(n)\n# in Q we will write two matrices,\n# the unregularized and regularized matrix side by side\nQ=Matrix{Float64}(undef, n, n*2)\nP=ℍ(U*Diagonal(randn(n).^2)*U') # generate a real 3x3 positive matrix\nfor i=1:n, j=1:n Q[i, j]=P[i, j] end\nregularize!(P, SNR=5)\nfor i=1:n, j=1:n Q[i, j+n]=P[i, j] end # the regularized matrix is on the right\nheatmap(Matrix(Q), yflip=true, c=:bluesreds)\n\n# (2)\n𝐏=[ℍ(U*Diagonal(randn(3).^2)*U') for i=1:5] # 5 real 3x3 positive matrices\nregularize!(𝐏, SNR=1000)\n\n## Run a test\nusing LinearAlgebra\n𝐏=randP(10, 100, SNR=1000); # 100 real Hermitian matrices\nsignalVar=sum(tr(P) for P in 𝐏);\nregularize!(𝐏, SNR=1000);\nsignalPlusNoiseVar=sum(tr(P) for P in 𝐏);\noutput_snr=signalVar/(signalPlusNoiseVar-signalVar)\n# output_snr should be approx. equal to 1000\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.gram","page":"signalProcessing.jl","title":"PosDefManifold.gram","text":"gram(X::Matrix{T}) where T<:RealOrComplex\n\nGiven a generic data matrix X, comprised of real or complex elements, return the normalized Gram matrix, that is, the covariance matrix of X corrected by sample size, but without subtracting the mean.\n\nThe result is flagged as Hermitian. See typecasting matrices.\n\nnote: Nota Bene\nIf X is wide or square (r<=c) return XX^H/c. If X is tall (r>c) return X^HX/r.\n\nExamples\n\nusing PosDefManifold\nX=randn(5, 150);\nG=gram(X) # => G=X*X'/150\nX=randn(100, 2);\nF=gram(X); # => F=X'*X/100\n\n\n\n\n\n","category":"function"},{"location":"signalProcessing/#PosDefManifold.trade","page":"signalProcessing.jl","title":"PosDefManifold.trade","text":"trade(P::ℍ{T}) where T<:RealOrComplex\n\nGiven a positive definite matrix P, return as a 2-tuple the trace and the determinant of P. This is used to plot positive matrices in two dimensions (TraDe plots: log(trace/n) vs. log(determinant), see example here below).\n\nP must be flagged by Julia as Hermitian. See typecasting matrices.\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nt, d=trade(P) # equivalent to (t, d)=trade(P)\n\n# TraDe plot\nusing Plots\nk=100\nn=10\n𝐏=randP(n, k, SNR=1000); # 100 real Hermitian matrices\nx=Vector{Float64}(undef, k)\ny=Vector{Float64}(undef, k)\nfor i=1:k\n x[i], y[i] = trade(𝐏[i])\nend\nx=log.(x./n)\ny=log.(y)\nplot(x, y, seriestype=:scatter)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#linearAlgebra.jl","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"This unit contains linear algebra functions useful in relation to the Riemannian geometry of the manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices.
In Julia those are Hermitian matrices, see typecasting matrices.","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"In general they take a matrix as input (some may take other arrays as input) and are divided in nine categories depending on what kind of functions they are and what they give as output:","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Category Output\n1. Utilities - - -\n2. Matrix normalizations and approximations matrix\n3. Boolean functions of matrices matrix\n4. Scalar functions of matrices scalar\n5. Diagonal functions of matrices diagonal matrix\n6. Unitary functions of matrices orthogonal/unitary matrix\n7. Matrix function of matrices matrix\n8. Spectral decompositions of positive matrices spectral function of input\n9. Decompositions involving triangular matrices triangular matrix","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/#Utilities","page":"linearAlgebra.jl","title":"Utilities","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\ntypeofMatrix, typeofMat Return the type of the matrix argument\ntypeofVector, typeofVec Return the type of the matrix vector argument\ndim Length of the dimensions of matrices and vectors of matrices\nremove Remove one or more elements from a vector, or one or more columns or rows from a matrix\nisSquare Return true if the matrix argument is square, false otherwise","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"typeofMatrix\r\ntypeofVector\r\ndim\r\nremove\r\nisSquare","category":"page"},{"location":"linearAlgebra/#PosDefManifold.typeofMatrix","page":"linearAlgebra.jl","title":"PosDefManifold.typeofMatrix","text":"function typeofMatrix(\narray::Union{AnyMatrix, AnyMatrixVector, AnyMatrixVector₂})\n\nalias: typeofMat\n\nReturn the type of a matrix, either Hermitian, Diagonal, LowerTriangular, or Matrix. Argument array may be a matrix of one of these types, but also one of the following:\n\nℍVector, ℍVector₂, 𝔻Vector, 𝔻Vector₂, 𝕃Vector, 𝕃Vector₂, 𝕄Vector, 𝕄Vector₂.\n\nThose are Array of Matrices types. See also aliases for the symbols ℍ, 𝔻, 𝕃 and 𝕄.\n\nNote that this function is different from Julia function typeof, which returns the concrete type (see example below), thus cannot be used for typecasting matrices.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3) # generate a 3x3 Hermitian matrix\ntypeofMatrix(P) # returns `Hermitian`\ntypeof(P) # returns `Hermitian{Float64,Array{Float64,2}}`\n# typecast P as a `Matrix` M\nM=Matrix(P)\n# typecast M as a matrix of the same type as P and write the result in A\nA=typeofMatrix(P)(M)\n\nPset=randP(3, 4) # generate a set of 4 3x3 Hermitian matrices\n# Pset is an ℍVector type\ntypeofMatrix(Pset) # again returns `Hermitian`\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.typeofVector","page":"linearAlgebra.jl","title":"PosDefManifold.typeofVector","text":"function typeofVector(\narray::Union{AnyMatrix, AnyMatrixVector, AnyMatrixVector₂})\n\nalias: typeofVec\n\nReturn the type of a Vector, either HermitianVector, DiagonalVector, LowerTriangularVector, or MatrixVector.
The aliases of those are, respectively, ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector. Argument array may be a vector of one of these types, but also one of the following:\n\nℍ, 𝔻, 𝕃 and 𝕄, ℍVector₂, 𝔻Vector₂, 𝕃Vector₂, 𝕄Vector₂.\n\nSee aliases for the symbols ℍ, 𝔻, 𝕃 and 𝕄. The last four are Array of Matrices types.\n\nNote that this function is different from Julia function typeof only in that it returns the vector type also if array is not of the ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector type.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3, 4) # generate 4 3x3 Hermitian matrices\ntypeofVector(P) # returns `Array{Hermitian,1}`\ntypeof(P) # also returns `Array{Hermitian,1}`\n\ntypeofVector(P[1]) # returns `Array{Hermitian,1}`\ntypeof(P[1]) # returns `Hermitian{Float64,Array{Float64,2}}`\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.dim","page":"linearAlgebra.jl","title":"PosDefManifold.dim","text":"(1) function dim(X::AnyMatrix, [d])\n(2) function dim(vector::AnyMatrixVector, [d])\n(3) function dim(vector₂::AnyMatrixVector₂, [d])\n\n(1) X is a real or complex Matrix, Diagonal, LowerTriangular or Hermitian matrix. Return a 2-tuple containing the dimensions of X, which is two times the same dimension for all possible types of X with the exception of the Matrix type, which can be rectangular. Optionally you can specify a dimension (1 or 2) to get just the length of that dimension.\n\n(2) vector is an 𝕄Vector, 𝔻Vector, 𝕃Vector or ℍVector type (see AnyMatrixVector type). Return a 3-tuple containing the number of matrices it holds (dimension 1) and their dimensions (dimension 2 and 3). Optionally you can specify a dimension (1, 2, or 3) to get just the length of that dimension.\n\n(3) vector₂ is an 𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂ or ℍVector₂ type (see AnyMatrixVector type). Return a 4-tuple containing\n\nthe number of vectors of matrices it holds (dimension 1),\na vector holding the number of matrices in each vector of matrices (dimension 2),\nthe two dimensions of the matrices (dimension 3 and 4).\n\nOptionally you can specify a dimension (1, 2, 3 or 4) to get just the length of that dimension.\n\nvector and vector₂ are Array of Matrices types. See also aliases for the symbols ℍ, 𝔻, 𝕃 and 𝕄.\n\nnote: Nota Bene\nIf you specify a dimension and this is out of the valid range, the function returns zero. Both the vector (2) and the vector₂ (3) objects are meant to hold matrices living in the same manifold, therefore it is assumed that all matrices they hold are of the same dimension. The dimensions of the matrices are retrieved from:\n\nthe first matrix in vector (2),\nthe first matrix in the first vector of vector₂ (3).\n\nThis function replaces Julia size function, which cannot be used to retrieve the dimension of matrix vectors.
It is not possible to overload the size function for matrix vectors since this causes problems to other Julia functions.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# (1)\nM=randn(3, 4) # generate a 3x4 `Matrix`\ndim(M) # returns (3, 4)\ndim(M, 1) # returns 3\ndim(M, 2) # returns 4\ndim(M, 3) # out of range: returns 0\n\n# (2)\nPset=randP(3, 4) # generate an ℍVector holding 4 3x3 Hermitian matrices\ndim(Pset) # returns (4, 3, 3)\ndim(Pset, 1) # returns 4\ndim(Pset, 2) # returns 3\ndim(Pset, 3) # returns 3\n\n# (3)\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4)\n# Generate a set of 40 random 3x3 SPD matrices\nQset=randP(3, 40)\nA=ℍVector₂([Pset, Qset])\ndim(A) # return (2, [4, 40], 3, 3)\ndim(A, 1) # return 2\ndim(A, 2) # return [4, 40]\ndim(A, 2)[1] # return 4\ndim(A, 3) # return 3\ndim(A, 4) # return 3\ndim(A, 5) # out of range: return 0\n\n# note: to create an ℍVector₂ object holding k ℍVector objects use\nsets=ℍVector₂(undef, k) # and then fill them\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.remove","page":"linearAlgebra.jl","title":"PosDefManifold.remove","text":"function remove(X::Union{Vector, Matrix}, what::Union{Int, Vector{Int}};\n\t\t\t\tdims=1)\n\nRemove one or more elements from a vector or one or more columns or rows from a matrix.\n\nIf X is a Matrix, dims=1 (default) remove rows, dims=2 remove columns.\n\nIf X is a Vector, dims has no effect.\n\nThe second argument is either an integer or a vector of integers.\n\nExamples\n\na=randn(5)\nb=remove(a, 2)\nb=remove(a, collect(1:3)) # remove elements 1 to 3\nA=randn(3, 3)\nB=remove(A, 2)\nB=remove(A, 2; dims=2)\nA=randn(5, 5)\nB=remove(A, collect(1:2:5)) # remove rows 1, 3 and 5\nC=remove(A, [1, 4])\nA=randn(10, 10)\nA=remove(A, [collect(2:3); collect(8:10)]; dims=2)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.isSquare","page":"linearAlgebra.jl","title":"PosDefManifold.isSquare","text":"function isSquare(X::Matrix)=size(X, 1)==size(X, 2)\n\nReturn true if matrix X is square, false otherwise.\n\n\n\n\n\n","category":"function"},
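As a quick complement to the remove and isSquare entries above, a minimal usage sketch (the matrix is arbitrary; comments state the expected outcomes):

using PosDefManifold
X=randn(4, 5)             # a rectangular 4x5 matrix
isSquare(X)               # returns false
Y=remove(X, 5; dims=2)    # remove the 5th column
isSquare(Y)               # returns true: Y is 4x4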
{"location":"linearAlgebra/#Matrix-normalizations-and-approximations","page":"linearAlgebra.jl","title":"Matrix normalizations and approximations","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\ndet1 Normalize the determinant\ntr1 Normalize the trace\nnearestPosDef Nearest Symmetric/Hermitian Positive Semi-definite matrix\nnearestOrthogonal, nearestOrth Nearest Orthogonal matrix\nnormalizeCol! Normalize one or more columns","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"det1\r\ntr1\r\nnearestPosDef\r\nnearestOrthogonal\r\nnormalizeCol!","category":"page"},{"location":"linearAlgebra/#PosDefManifold.det1","page":"linearAlgebra.jl","title":"PosDefManifold.det1","text":"function det1(X::AnyMatrix; <tol::Real=0.>)\n\nReturn the argument matrix X normalized so as to have unit determinant. For square positive definite matrices this is the best approximant from the set of matrices in the special linear group - see Bhatia and Jain (2014)🎓.\n\nX can be a real or complex Diagonal, LowerTriangular, Matrix, or Hermitian matrix (see AnyMatrix type).\n\nIf the determinant is not greater than tol (which defaults to zero) a warning is printed and X is returned.\n\nnote: Nota Bene\nThis function is meant for positive definite matrices. Julia may throw an error while computing the determinant if the matrix is defective.\n\nSee Julia det function.\n\nSee also: tr1.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(5) # generate a random real positive definite matrix 5x5\nQ=det1(P)\ndet(Q) # must be 1\n# using a tolerance\nQ=det1(P; tol=1e-12)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.tr1","page":"linearAlgebra.jl","title":"PosDefManifold.tr1","text":"tr1(X::AnyMatrix; tol::Real=0.)\n\nReturn the argument matrix X normalized so as to have unit trace.\n\nX can be a real or complex Diagonal, LowerTriangular, Matrix or Hermitian matrix (see AnyMatrix type). Its trace must be real. If the absolute value of its imaginary part is greater than tol (which defaults to zero) a warning is printed and X is returned. Also, if the trace is not greater than tol a warning is printed and X is returned.\n\nSee: Julia trace function.\n\nSee also: tr, det1.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n\nP=randP(5) # generate a random real positive definite matrix 5x5\nQ=tr1(P)\ntr(Q) # must be 1\n# using a tolerance\nQ=tr1(P; tol=1e-12)\n\nPc=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5\nQc=tr1(Pc)\ntr(Qc) # must be 1\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.nearestPosDef","page":"linearAlgebra.jl","title":"PosDefManifold.nearestPosDef","text":"nearestPosDef(X::Union{𝔻, 𝕄}; tol::Real=0.)\n\nReturn the nearest symmetric/Hermitian positive semi-definite matrix of a diagonal or of an arbitrary square matrix X according to the Frobenius norm. If the eigenvalues of the symmetric part of X are all non-negative, the result is positive definite and will be flagged as Hermitian, otherwise it is positive semi-definite and will not be flagged. The nearest matrix is given by\n\n(Y+H)/2\n\nwhere\n\nY=(X+X^H)/2\n\nis the symmetric part of X, and H is the symmetric polar factor of Y. See Higham (1988)🎓 for details and for the way it is computed.\n\nSee also: det1, procrustes.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nX=randn(5, 5) # generate an arbitrary 5x5 matrix\nS=nearestPosDef(X)\n\nP=randP(5) # generate a random real positive definite 5x5 matrix\nS=nearestPosDef(Matrix(P)) # typecasting an Hermitian matrix as a `Matrix`\n# Since P is a positive definite matrix S must be equal to P\nS ≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.nearestOrthogonal","page":"linearAlgebra.jl","title":"PosDefManifold.nearestOrthogonal","text":"nearestOrthogonal(X::AnyMatrix)\n\nalias: nearestOrth\n\nReturn the nearest orthogonal matrix of a square Hermitian, LowerTriangular, Diagonal or generic Matrix X (see AnyMatrix type).
This is given by\n\nUV^H,\n\nwhere\n\nX=UΛV^H\n\nis a singular value decomposition of X.\n\nIf X is Diagonal, return X.\n\nSee also: nearestPosDef, procrustes.\n\nExamples\n\nusing PosDefManifold\nU=nearestOrth(randn(5, 5))\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.normalizeCol!","page":"linearAlgebra.jl","title":"PosDefManifold.normalizeCol!","text":"(1) normalizeCol!(X::𝕄{T}, j::Int)\n(2) normalizeCol!(X::𝕄{T}, j::Int, by::Number)\n(3) normalizeCol!(X::𝕄{T}, range::UnitRange)\n(4) normalizeCol!(X::𝕄{T}, range::UnitRange, by::Number)\nfor all above: where T<:RealOrComplex\n\nGiven a Matrix type X comprised of real or complex elements,\n\n(1) normalize the j^th column to unit norm\n(2) divide the elements of the j^th column by number by\n(3) normalize the columns in range to unit norm\n(4) divide the elements of columns in range by number by.\n\nby is a number of abstract supertype Number. It should be an integer, real or complex number. For efficiency, it should be of the same type as the elements of X.\n\nrange is a UnitRange type.\n\nMethods (1) and (3) call the BLAS.nrm2 routine for computing the norm of concerned columns. See Threads.\n\nnote: Nota Bene\nJulia does not allow normalizing the columns of Hermitian matrices. If you want to call this function for an Hermitian matrix see typecasting matrices.\n\nSee norm and also randn for the example below.\n\nSee also: colNorm, colProd.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\nnormalizeCol!(X, 2) # (1) normalize column 2\nnormalizeCol!(X, 2, 10.0) # (2) divide column 2 by 10.0\nnormalizeCol!(X, 2:4) # (3) normalize columns 2 to 4\nX=randn(ComplexF64, 10, 20)\nnormalizeCol!(X, 3) # (1) normalize column 3\nnormalizeCol!(X, 3:6, (2.0 + 0.5im)) # (4) divide columns 3 to 6 by (2.0 + 0.5im)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Boolean-functions-of-matrices","page":"linearAlgebra.jl","title":"Boolean functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nispos Check whether a real vector or diagonal matrix is comprised of all positive elements","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"ispos","category":"page"},{"location":"linearAlgebra/#PosDefManifold.ispos","page":"linearAlgebra.jl","title":"PosDefManifold.ispos","text":" (1) ispos(λ::Vector{T};\n\t<\n\ttol::Real=0,\n\trev=true,\n\t🔔=true,\n\tmsg=\"\">)\n\n (2) ispos(Λ::𝔻{T};\n\t< same optional keyword arguments as in (1) > )\n\n\tfor all above: where T<:Real\n\nReturn true if all numbers in (1) real vector λ or in (2) real Diagonal matrix Λ are not inferior to tol, otherwise return false. This is used, for example, in spectral functions to check that all eigenvalues are positive.\n\nnote: Nota Bene\ntol defaults to the square root of Base.eps of the type of λ (1) or Λ (2). This corresponds to requiring positivity beyond about half of the significant digits.\n\nThe following are optional keyword arguments:\n\nIf rev=true the (1) elements in λ or (2) the diagonal elements in Λ will be checked in reverse order. This is done for allowing a very fast check when the elements are sorted and it is known from where it is best to start checking.\n\nIf the result is false:\n\nif 🔔=true a bell character will be printed.
In most systems this will ring a bell on the computer.\nif string msg is provided, a warning will print msg followed by:\n\n\"at position pos\", where pos is the position where the first non-positive element has been found.\n\nExamples\n\nusing PosDefManifold\na=[1, 0, 2, 8]\nispos(a, msg=\"non-positive element found\")\n\n# it will print:\n# ┌ Warning: non-positive element found at position 2\n# └ @ [here Julia will point to the line of code issuing the warning]\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Scalar-functions-of-matrices","page":"linearAlgebra.jl","title":"Scalar functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\ncolProd Sum of products of the elements in two columns\nsumOfSqr, ss Sum of squares of all elements or of specified columns\nsumOfSqrDiag, ssd Sum of squares of the diagonal elements\ncolNorm Euclidean norm of a column\nsumOfSqrTril, sst Sum of squares of the lower triangle elements up to a given underdiagonal\ntr Fast trace of the product of two Hermitian matrices\nquadraticForm, qf Fast quadratic form\nfidelity (Quantum) Fidelity of two positive matrices","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"colProd\r\nsumOfSqr\r\nsumOfSqrDiag\r\ncolNorm\r\nsumOfSqrTril\r\ntr\r\nquadraticForm\r\nfidelity","category":"page"},{"location":"linearAlgebra/#PosDefManifold.colProd","page":"linearAlgebra.jl","title":"PosDefManifold.colProd","text":"(1) colProd(X::Union{𝕄{T}, ℍ{T}}, j::Int, l::Int)\n(2) colProd(X::Union{𝕄{T}, ℍ{T}}, Y::Union{𝕄{T}, ℍ{T}}, j::Int, l::Int)\nfor all above: where T<:RealOrComplex\n\n(1) Given a real or complex Matrix or Hermitian matrix X, return the dot product of the j^th and l^th columns, defined as,\n\nsum_i=1^r big(x_ij^*x_ilbig)\n\nwhere r is the number of rows of X and ^* denotes complex conjugate (nothing if the matrix is real).\n\n(2) Given real or complex Matrix or Hermitian matrices X and Y, return the dot product of the j^th column of X and the l^th column of Y, defined as,\n\nsum_i=1^r big(x_ij^*y_ilbig)\n\nwhere r is the number of rows of X and of Y and ^* is as above.\n\nnote: Nota Bene\nX and Y may have a different number of columns, but must have the same number of rows.\n\nArguments j and l must be positive integers in range\n\n(1) j,l in 1:size(X, 2),\n(2) j in 1:size(X, 2), l in 1:size(Y, 2).\n\nSee also: normalizeCol!, colNorm.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\np=colProd(X, 1, 3)\nY=randn(10, 30)\nq=colProd(X, Y, 2, 25)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sumOfSqr","page":"linearAlgebra.jl","title":"PosDefManifold.sumOfSqr","text":"(1) sumOfSqr(A::Array)\n(2) sumOfSqr(H::ℍ{T})\n(3) sumOfSqr(L::𝕃{T})\n(4) sumOfSqr(D::𝔻{T})\n(5) sumOfSqr(X::Union{𝕄{T}, ℍ{T}}, j::Int)\n(6) sumOfSqr(X::Union{𝕄{T}, ℍ{T}}, range::UnitRange)\nfor (1)-(6) above: where T<:RealOrComplex\n\nalias: ss\n\nReturn\n\n(1) the sum of squares of the elements in an array A of any dimensions.\n(2) as in (1), but for an Hermitian matrix H, using only the lower triangular part.\n(3) as in (1), but for a LowerTriangular matrix L.\n(4) as in (1), but for a Diagonal matrix D (sum of squares of diagonal elements).\n(5) the sum of squares of the j^th column of a Matrix or Hermitian X.\n(6) the sum of squares
of the columns of a Matrix or Hermitian X in a given range.\n\nAll methods support real and complex matrices.\n\nOnly method (1) works for arrays of any dimensions.\n\nMethods (1)-(4) return the square of the Frobenius norm.\n\nFor method (5), j is a positive integer in range 1:size(X, 2).\n\nFor method (6), range is a UnitRange type.\n\nSee also: colNorm, sumOfSqrDiag, sumOfSqrTril.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\nsum2=sumOfSqr(X) # (1) sum of squares of all elements\nsum2=sumOfSqr(X, 1) # (5) sum of squares of elements in column 1\nsum2=sumOfSqr(X, 2:4) # (6) sum of squares of elements in columns 2 to 4\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sumOfSqrDiag","page":"linearAlgebra.jl","title":"PosDefManifold.sumOfSqrDiag","text":"sumOfSqrDiag(X::AnyMatrix)\n\nalias: ssd\n\nSum of squares of the diagonal elements in real or complex Matrix, Diagonal, Hermitian or LowerTriangular matrix X. If X is rectangular (which can be only if it is of the Matrix type), the main diagonal is considered.\n\nSee AnyMatrix type\n\nSee also: sumOfSqr, sumOfSqrTril.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nX=randn(10, 20)\nsumDiag2=sumOfSqrDiag(X) # (1)\nsumDiag2=sumOfSqrDiag(𝔻(X)) # (2)\n# 𝔻=LinearAlgebra.Diagonal is declared in the main module\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.colNorm","page":"linearAlgebra.jl","title":"PosDefManifold.colNorm","text":"colNorm(X::Union{𝕄{T}, ℍ{T}}, j::Int) where T<:RealOrComplex\n\nGiven a real or complex Matrix or Hermitian matrix X, return the Euclidean norm of its j^th column.\n\nThis function calls the BLAS.nrm2 routine. See Threads.\n\nSee also: normalizeCol!, colProd, sumOfSqr.\n\nExamples\n\nusing PosDefManifold\nX=randn(10, 20)\nnormOfSecondColumn=colNorm(X, 2)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sumOfSqrTril","page":"linearAlgebra.jl","title":"PosDefManifold.sumOfSqrTril","text":"sumOfSqrTril(X::AnyMatrix, k::Int=0)\n\nalias: sst\n\nGiven a real or complex Matrix, Diagonal, Hermitian or LowerTriangular matrix X (see AnyMatrix type), return the sum of squares of the elements in its lower triangle up to the k^th underdiagonal.\n\nMatrix X may be rectangular.\n\nk must be in range\n\n1-size(X, 1):c-1 for X Matrix, Diagonal or Hermitian,\n1-size(X, 1):0 for X LowerTriangular.\n\nFor X Diagonal the result is\n\n0 if k<0,\nthe sum of the squares of the diagonal elements otherwise.\n\nSee Julia tril(M, k::Integer) function for numbering of diagonals.\n\nSee also: sumOfSqr, sumOfSqrDiag.\n\nExamples\n\nusing PosDefManifold\nA=[4. 3.; 2. 5.; 1. 2.]\n#3×2 Array{Float64,2}:\n# 4.0 3.0\n# 2.0 5.0\n# 1.0 2.0\n\ns=sumOfSqrTril(A, -1)\n# 9.0 = 1²+2²+2²\n\ns=sumOfSqrTril(A, 0)\n# 50.0 = 1²+2²+2²+4²+5²\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#LinearAlgebra.tr","page":"linearAlgebra.jl","title":"LinearAlgebra.tr","text":"(1) tr(P::ℍ{T}, Q::ℍ{T})\n(2) tr(P::ℍ{T}, M::𝕄{T})\n(3) tr(D::𝔻{T}, H::Union{ℍ{T}, 𝕄{T}})\n(4) tr(H::Union{ℍ{T}, 𝕄{T}}, D::𝔻{T})\nfor all above: where T<:RealOrComplex\n\nGiven (1) two Hermitian positive definite matrices P and Q, return the trace of the product PQ. This is real even if P and Q are complex.\n\nP must always be flagged as Hermitian.
See typecasting matrices.\n\nIn (2) M is a Matrix object, in which case return\n\na real trace if the product PM is real or if it has all positive real eigenvalues.\na complex trace if the product PM is not real and has complex eigenvalues.\n\nMethods (3) and (4) return the trace of the product DH or HD, where D is a Diagonal matrix and H an Hermitian or Matrix object. The result is of the same type as the input matrices.\n\nFor all methods all arguments must be of the same type.\n\nMath\n\nLet P and Q be Hermitian matrices, using the properties of the trace (e.g., the cyclic property and the similarity invariance) you can use this function to fast compute the trace of several expressions. For example:\n\ntextrmtr(PQ)=textrmtr(P^12QP^12)\n\nand\n\ntextrmtr(PQP)=textrmtr(P^2Q) (see example below).\n\nSee: trace.\n\nSee also: DiagOfProd, tr1.\n\nExamples\n\nusing PosDefManifold\nP=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5\nQ=randP(ComplexF64, 5) # generate a random complex positive definite matrix 5x5\ntr(P, Q) ≈ tr(P*Q) ? println(\" ⭐ \") : println(\" ⛔ \")\ntr(P, Q) ≈ tr(sqrt(P)*Q*sqrt(P)) ? println(\" ⭐ \") : println(\" ⛔ \")\ntr(sqr(P), Q) ≈ tr(P*Q*P) ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.quadraticForm","page":"linearAlgebra.jl","title":"PosDefManifold.quadraticForm","text":"(1) quadraticForm(v::Vector{T}, P::ℍ{T}) where T<:Real\n(2) quadraticForm(v::Vector{T}, L::𝕃{T}) where T<:Real\n(3) quadraticForm(v::Vector{T}, X::𝕄{T}, forceLower::Bool=false) where T<:Real\n(4) quadraticForm(v::Vector{S}, X::Union{𝕄{S}, ℍ{S}, 𝕃{S}}) where S<:Complex\n\nalias: qf\n\n(1) Given a real vector v and a real Hermitian matrix P, compute the quadratic form\n\nv^TPv,\n\nwhere the superscript T denotes transpose. It uses only the lower triangular part of P.\n\n(2) As in (1), given a real vector v and a LowerTriangular matrix L.\n\n(3) As in (1), given a real vector v and a real generic Matrix X, if forceLower=true. If forceLower=false, the product v^TXv is evaluated instead using the whole matrix X.\n\n(4) Quadratic form v^HPv, where superscript H denotes complex conjugate and transpose, for a complex vector v and a complex Matrix, LowerTrianglar or Hermitian matrix. The whole matrix is used.\n\nMath\n\nFor v and X real and X symmetric, the quadratic form is\n\nsum_i(v_i^2x_ii)+sum_i<j(2v_iv_jx_ij).\n\nFor L lower triangular it is\n\nsum_i(v_i^2x_ii)+sum_i>j(v_iv_jx_ij).\n\nThese formulas are used in methods (1), (2) and (3).\n\nExamples\n\nusing PosDefManifold\nP=randP(5) # generate a random real positive definite matrix 5x5\nv=randn(5)\nq1=quadraticForm(v, P) # or q1=qf(v, P)\nq2=v'*P*v\nq1 ≈ q2 ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.fidelity","page":"linearAlgebra.jl","title":"PosDefManifold.fidelity","text":"fidelity(P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\nGiven two positive definite Hermitian matrices P and Q, return their fidelity:\n\ntrbig(P^12QP^12big)^12\n\nThis is used in quantum physics and is related to the Wasserstein metric. See for example Bhatia, Jain and Lim (2019b)🎓.\n\nExamples\n\nusing PosDefManifold\nP=randP(5);\nQ=randP(5);\nf=fidelity(P, Q)\n\n\n\n\n\n","category":"function"},
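The relation with the Wasserstein metric mentioned in the fidelity entry can be made concrete: the squared Wasserstein distance given in the intro, textrmtr(P+Q)-2textrmtrbig(P^12QP^12big)^12, is tr(P+Q) minus twice the fidelity. A minimal sketch checking this, assuming distanceSqr(Wasserstein, P, Q) is the squared Wasserstein distance exported by PosDefManifold:

using LinearAlgebra, PosDefManifold
P, Q=randP(5), randP(5)
d²=distanceSqr(Wasserstein, P, Q)
d² ≈ tr(P+Q)-2*fidelity(P, Q) ? println(" ⭐ ") : println(" ⛔ ")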
{"location":"linearAlgebra/#Diagonal-functions-of-matrices","page":"linearAlgebra.jl","title":"Diagonal functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nfDiag, 𝑓𝔻 Element-wise functions of matrix diagonals\nDiagOfProd, dop Diagonal of the product of two matrices","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"fDiag\r\nDiagOfProd","category":"page"},{"location":"linearAlgebra/#PosDefManifold.fDiag","page":"linearAlgebra.jl","title":"PosDefManifold.fDiag","text":"fDiag(func::Function, X::AnyMatrix, k::Int=0)\n\nalias: 𝑓𝔻\n\nApply function func element-wise to the elements of the k^th diagonal of real or complex Diagonal, LowerTriangular, Matrix or Hermitian matrix X and return a diagonal matrix with these elements. X must be square in all cases, but for the 𝕄=Matrix type argument, in which case it may be of dimension r⋅c, with r ≠ c.\n\nSee Julia tril(M, k::Integer) function for numbering of diagonals.\n\nBy default the main diagonal is considered.\n\nIf X is Diagonal, k is set automatically to zero (main diagonal).\nIf X is LowerTriangular, k cannot be positive.\n\nNote that if X is rectangular the dimension of the result depends on the size of X and on the chosen diagonal. For example,\n\nr ≠ c and k=0 (main diagonal), the result will be of dimension min(r,c)⋅min(r,c),\nX 3⋅4 and k=-1, the result will be 2⋅2,\nX 3⋅4 and k=1, the result will be 3⋅3, etc.\n\nnote: Nota Bene\nThe function func must support the func. syntax and therefore must be able to apply element-wise to the elements of the chosen diagonal (this includes anonymous functions). If the input matrix is complex, the function func must be able to support complex arguments.\n\nSee also: DiagOfProd, tr.\n\nExamples\n\nusing PosDefManifold\nP=randP(5) # use P=randP(ComplexF64, 5) for generating an Hermitian matrix\n\n# diagonal matrix with the inverse of the first sub-diagonal of P\nD=fDiag(inv, P, -1)\n\n(Λ, U) = evd(P) # Λ holds the eigenvalues of P, see evd\n\n# diagonal matrix with the log of the eigenvalues\nΔ=fDiag(log, Λ)\n\n# using an anonymous function for the square of the eigenvalues\nΔ=fDiag(x->x^2, Λ)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.DiagOfProd","page":"linearAlgebra.jl","title":"PosDefManifold.DiagOfProd","text":"DiagOfProd(P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\nalias: dop\n\nReturn the Diagonal matrix holding the diagonal of the product PQ of two Hermitian matrices P and Q. Only the diagonal part of the product is computed.\n\nSee also: tr, fDiag.\n\nExamples\n\nusing PosDefManifold, LinearAlgebra\nP, Q=randP(5), randP(5)\nDiagOfProd(P, Q)≈Diagonal(P*Q) ?
println(\"⭐ \") : println(\"⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Unitary-functions-of-matrices","page":"linearAlgebra.jl","title":"Unitary functions of matrices","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nmgs Modified Gram-Schmidt orthogonalization","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"mgs","category":"page"},{"location":"linearAlgebra/#PosDefManifold.mgs","page":"linearAlgebra.jl","title":"PosDefManifold.mgs","text":"mgs(X::𝕄{T}, numCol::Int=0) where T<:RealOrComplex\n\nModified (stabilized) Gram-Schmidt orthogonalization of the columns of square or tall matrix X, which can be comprised of real or complex elements. The orthogonalized X is returned by the function. X is not changed.\n\nAll columns are orthogonalized by default. If instead argument numCol is provided, then only the first numCol columns of X are orthogonalized. In this case only the first numCol columns will be returned.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nX=randn(10, 10);\nU=mgs(X) # result is 10⋅10\nU=mgs(X, 3) # result is 10⋅3\nU'*U ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n# Julia understands also:\nU'U ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Matrix-function-of-matrices","page":"linearAlgebra.jl","title":"Matrix function of matrices","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nfVec General function for multi-threaded computation of means and sums of matrix vectors\ncongruence, cong Compute congruent transformations","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"fVec\r\ncongruence","category":"page"},{"location":"linearAlgebra/#PosDefManifold.fVec","page":"linearAlgebra.jl","title":"PosDefManifold.fVec","text":"\t(1) fVec(f::Function, 𝐏::AnyMatrixVector;\n\t<\n\tw::Vector=[],\n\t✓w=false,\n\tallocs=[]\n\t>)\n\n\t(2) fVec(f::Function, g::Function, 𝐏::AnyMatrixVector;\n\t< same optional keyword arguments in (1) >)\n\nGiven a 1d array 𝐏=P_1P_k of k matrices of the 𝕄Vector type, 𝔻Vector type, 𝕃Vector type or ℍVector type and an optional non-negative real weights vector w=w_1w_k, return expression\n\n(1)hspace6ptf_i=1^k(w_iP_i),\n\nor\n\n(2)hspace6ptf_i=1^k(w_ig(P_i)),\n\nwhere f is either the mean or the sum standard Julia function and g is whatever matrix function applying to each matrix P_k, such as exp, log, sqrt, etc., including anonymous functions.\n\nThis function is multi-threaded. It works by partitioning the k operations required by the f function in several groups, passing each group to a separate thread and combining the result of the intermediate operations. This function allows a gain in computational time only when the number of matrices (1) and/or their size (2) is high. Use mean and sum otherwise. The maximal gain is obtained when the number of matrices in 𝐏 is an exact multiple of the number of threads Julia is instructed to use. For this latter, see Threads.\n\n!!!
note \"Nota Bene\"\n\n Contrarily to Julia `mean` and `sum` function (v 1.1.0) the `fVec` function\n returns a matrix of the same type of the matrices in ``𝐏``.\n\n allocs allows to pass pre-allocated memory for holding the intermediate result of each thread. Argument allocs must be a vector of as many matrices as threads and where the matrices have the same dimension as the the matrices in 𝐏 (see the example here below). Using this option is worthwhile only if the size of the matrices is very high and/or when fVec is to be called repeatedly on many vector of matrices, where the matrices have always the same size, so that one allocation works for all calls.\n\nIf ✓w=true is passed, the weights are normalized so as to sum up to 1, otherwise they are used as they are passed. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time. By default ✓w is false.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nPset=randP(4, 1000); # generate 1000 positive definite 4x4 matrices\nmean(Pset) # arithmetic mean calling Julia function\nThreads.nthreads() # check how many threads are available\nfVec(mean, Pset) # multi-threaded arithmetic mean\n\ninv(mean(inv, Pset)) # Harmonic mean calling Julia function\ninv(fVec(mean, inv, Pset)) # multi-threaded Harmonic mean\n\nexp(mean(log, Pset)) # log Euclidean mean calling Julia function\nexp(fVec(mean, log, Pset)) # multi-threaded log Euclidean mean\n\n# notice that Julia `exp` function has changed the type of the result\n# to `Symmetric`. To obtain an `Hermitian` output use\nℍ(exp(fVec(mean, log, Pset)))\n\nw=(randn(1000)).^2\nw=w./sum(w) \t\t# generate normalized random weights\n\n# weighted arithmetic mean calling Julia function\nsum(Pset[i]*w[i] for i=1:length(w))\n# multi-threaded weighted arithmetic mean\nfVec(sum, Pset, w=w)\n\n# weighted harmonic mean calling Julia function\ninv(sum(inv(Pset[i])*w[i] for i=1:length(w)))\n# multi-threaded weighted harmonic mean\ninv(fVec(sum, inv, Pset, w=w))\n\n# pre-allocating memory\nPset=randP(100, 1000); # generate 1000 positive definite 100x100 matrices\nQset=MatrixVector(repeat([similar(Pset[1])], Threads.nthreads()))\nfVec(mean, log, Pset, allocs=Qset)\n\n# How much computing time we save ?\n# (example min time obtained with 4 threads & 4 BLAS threads)\nusing BenchmarkTools\n# standard Julia function\n@benchmark(mean(log, Pset)) \t\t\t\t\t# (5.271 s)\n# fVec\n@benchmark(fVec(mean, log, Pset))\t\t\t\t# (1.540 s)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.congruence","page":"linearAlgebra.jl","title":"PosDefManifold.congruence","text":"(1) congruence(B::AnyMatrix, P::AnyMatrix, matrixType)\n(2) congruence(B::AnyMatrix, 𝐏::AnyMatrixVector, matrixVectorType)\n(3) congruence(B::AnyMatrix, 𝑷::AnyMatrixVector₂, matrixVector₂Type)\n(4) congruence(𝐁::AnyMatrixVector, 𝑷::AnyMatrixVector₂, matrixVector₂Type)\n\nalias: cong\n\n(1) Return the congruent transformation\n\nBPB^H,\n\nfor B and P any combination of Hermitian, LowerTriangular, Diagonal or general Matrix type.\n\nThe result is of the matrixType argument, which must be provided and must be one of these four abstract type (not an instance of them). 
See aliases for shortening these types using symbols ℍ, 𝔻, 𝕃 and 𝕄.\n\n(2) Return a vector of matrices holding the congruent transformations\n\nBP_kB^H,\n\nfor all k matrices in 𝐏=P_1P_k, for B and 𝐏 any combination of matrix type Hermitian, LowerTriangular, Diagonal or Matrix (B) and vector of matrices type ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector (𝐏). See Array of Matrices types.\n\nThe result is a vector of matrices of the matrixVectorType argument, which must be provided and must be one of the following abstract types: ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector (and not an instance of these types).\n\n(3) Return a vector of vectors of matrices holding the congruent transformations\n\nBP_mkB^H,\n\nfor all m vectors of k_m matrices in 𝑷, for B and 𝑷 any combination of matrix type Hermitian, LowerTriangular, Diagonal or Matrix (B) and vector of matrices type ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ and 𝕄Vector₂ (𝑷). See Array of Matrices types.\n\nThe result is a vector of vectors of matrices of the matrixVector₂Type argument, which must be provided and must be one of the following abstract types: ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ or 𝕄Vector₂ (and not an instance of these types).\n\n(4) Return a vector of vectors of matrices holding the congruent transformations\n\nB_iP_ijB_j^H, for i,j=1m,\n\nfor 𝐁 holding m matrices and 𝑷 holding m vectors holding m matrices each. Note that, differently from method (3), here the vectors of 𝑷 are all of the same length and this is exactly the length of 𝐁. 𝐁 and 𝑷 may be any combination of matrix vector type ℍVector, 𝔻Vector, 𝕃Vector and 𝕄Vector (𝐁) and vector of matrices type ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ and 𝕄Vector₂ (𝑷). See Array of Matrices types.\n\nNote that this function computes the following algebraic expression:\n\ndiag(B_1, , B_m) [C_11  C_1m;  ⋱ ; C_m1  C_mm] diag(B_1^T, , B_m^T),\n\nwhere diag(B_1, , B_m) denotes the block-diagonal matrix holding the matrices B_1B_m in its diagonal blocks and C_ij denotes the matrices P_ij of 𝑷.\n\nThe result is a vector of vectors of matrices of the matrixVector₂Type argument, which must be provided and must be one of the following abstract types: ℍVector₂, 𝔻Vector₂, 𝕃Vector₂ or 𝕄Vector₂ (and not an instance of these types).\n\nWhen you pass 𝐁 to this function, make sure to typecast it as an ℍVector, 𝔻Vector, 𝕃Vector or 𝕄Vector type if it is not already created as one of these types. See the example here below and typecasting matrices.\n\nMethods (2), (3) and (4) are multi-threaded. See Threads.\n\nnote: Nota Bene\nTypes ℍ, 𝔻, 𝕃 or 𝕄 are actually constructors, thus they may modify the result of the congruence(s). This greatly expands the possibilities of this function, but it is your responsibility to pick the right argument matrixType in (1), matrixVectorType in (2) and matrixVector₂Type in (3)-(4). For example, in (1) if B and P are Hermitian, calling cong(B, P, 𝔻) will actually return the diagonal part of B*P*B' and calling cong(B, P, 𝕃) will actually return its lower triangular part. 
The full congruence can be obtained as an Hermitian matrix by cong(B, P, ℍ) and as a generic matrix object by cong(B, P, 𝕄).\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n\n# (1)\nP=randP(3) # generate a 3x3 positive definite matrix\nM=randn(3, 3)\nC=cong(M, P, ℍ) # equivalent to C=ℍ(M*P*M')\n\n# (2)\nPset=randP(4, 100); # generate 100 positive definite 4x4 matrices\nM=randn(4, 4)\nQset=cong(M, Pset, ℍVector) # = [M*Pset_1*M',...,M*Pset_k*M'] as an ℍVector type\n\n# recenter the matrices in Pset to their Fisher mean:\nQset=cong(invsqrt(mean(Fisher, Pset)), Pset, ℍVector)\n\n# as a check, the Fisher mean of Qset is now the identity\nmean(Fisher, Qset)≈I ? println(\"⭐\") : println(\"⛔\")\n\n# (3)\nPset1=randP(4, 10); # generate 10 positive definite 4x4 matrices\nPset2=randP(4, 8);\nPset=ℍVector₂([Pset1, Pset2]);\nM=randn(4, 4)\nQset=cong(M, Pset, MatrixVector₂)\nQset[1][1]≈M*Pset[1][1]*M' ? println(\"⭐\") : println(\"⛔\")\nQset[1][5]≈M*Pset[1][5]*M' ? println(\"⭐\") : println(\"⛔\")\nQset[2][1]≈M*Pset[2][1]*M' ? println(\"⭐\") : println(\"⛔\")\nQset[2][4]≈M*Pset[2][4]*M' ? println(\"⭐\") : println(\"⛔\")\n\n# (4)\nPset1=randP(4, 2); # generate 2 positive definite 4x4 matrices\nPset2=randP(4, 2);\nPset=ℍVector₂([Pset1, Pset2]);\nU=𝕄Vector([randU(4), randU(4)])\nQset=cong(U, Pset, MatrixVector₂)\nQset[1][1]≈U[1]*Pset[1][1]*U[1]' ? println(\"⭐\") : println(\"⛔\")\nQset[1][2]≈U[1]*Pset[1][2]*U[2]' ? println(\"⭐\") : println(\"⛔\")\nQset[2][1]≈U[2]*Pset[2][1]*U[1]' ? println(\"⭐\") : println(\"⛔\")\nQset[2][2]≈U[2]*Pset[2][2]*U[2]' ? println(\"⭐\") : println(\"⛔\")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Spectral-decompositions-of-positive-matrices","page":"linearAlgebra.jl","title":"Spectral decompositions of positive matrices","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nevd Eigenvalue-Eigenvector decomposition of a matrix in UΛU'=P form\nfrf Full-rank factorization of an Hermitian matrix\ninvfrf Inverse of the full-rank factorization of an Hermitian matrix (whitening)\nspectralFunctions Mother function for creating spectral functions of eigenvalues\npow Power of a positive matrix for any number of exponents in one pass\ninvsqrt Principal square root inverse (whitening) of a positive matrix\nsqr Square of a positive matrix\npowerIterations, powIter Power method for estimating any number of eigenvectors and associated eigenvalues","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"evd\r\nfrf\r\ninvfrf\r\nspectralFunctions\r\npow\r\ninvsqrt\r\nsqr\r\npowerIterations","category":"page"},{"location":"linearAlgebra/#PosDefManifold.evd","page":"linearAlgebra.jl","title":"PosDefManifold.evd","text":"evd(S::Union{𝕄{T}, ℍ{T}}) where T<:RealOrComplex\n\nGiven a positive semi-definite matrix S, returns a 2-tuple (Λ, U), where U is the matrix holding in columns the eigenvectors and Λ is the matrix holding the eigenvalues on the diagonal. 
This is the output of Julia eigen function in UΛU'=S form.\n\nAs for the eigen function, the eigenvalues and associated eigenvectors are sorted by increasing values of eigenvalues.\n\nS may be real or complex and may be flagged by Julia as Hermitian (in this case PosDefManifold assumes it is positive definite).\n\nSee typecasting matrices.\n\nSee also: spectralFunctions.\n\nExamples\n\nusing PosDefManifold\nA=randn(3, 3);\nS=A+A';\nΛ, U=evd(S); # which is equivalent to (Λ, U)=evd(S)\n(U*Λ*U') ≈ S ? println(\" ⭐ \") : println(\" ⛔ \")\n# => UΛU'=S, UΛ=SU, ΛU'=U'S\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.frf","page":"linearAlgebra.jl","title":"PosDefManifold.frf","text":"frf(P::ℍ{T}) where T<:RealOrComplex\n\nFull-rank factorization of Hermitian matrix P. It is given by\n\nF=UD^12,\n\nwhere\n\nEVD(P)=UDU^H\n\nis the eigenvalue-eigenvector decomposition of P. It verifies\n\nFF^H=P,\n\nthus F^-1 is a whitening matrix.\n\nSee also: invfrf.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3)\nF = frf(P)\nF*F'≈P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.invfrf","page":"linearAlgebra.jl","title":"PosDefManifold.invfrf","text":"invfrf(P::ℍ{T}) where T<:RealOrComplex\n\nInverse of the full-rank factorization of Hermitian matrix P. It is given by\n\nF=D^-12U^H,\n\nwhere\n\nEVD(P)=UDU^H\n\nis the eigenvalue-eigenvector decomposition of P. It verifies\n\nFPF^H=I,\n\nthus F is a whitening matrix.\n\nSee also: frf.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3)\nF = invfrf(P)\nF*P*F'≈I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.spectralFunctions","page":"linearAlgebra.jl","title":"PosDefManifold.spectralFunctions","text":"(1) spectralFunctions(P::ℍ{T}, func) where T<:RealOrComplex\n(2) spectralFunctions(D::𝔻{S}, func) where S<:Real\n\n(1) This is the mother function for all spectral functions of eigenvalues implemented in this library, which are:\n\npow (power),\ninvsqrt (inverse square root).\n\nThe function sqr (square) does not use it, as it can be obtained more efficiently by simple multiplication.\n\nYou can use this function if you need another spectral function of eigenvalues besides those listed above and those already implemented in the standard package LinearAlgebra. In general, you won't call it directly.\n\nfunc is the function that will be applied on the eigenvalues.\n\nP must be flagged as Hermitian. See typecasting matrices. It must be a positive definite or positive semi-definite matrix, depending on func.\n\nA special method is provided for real Diagonal matrices (2).\n\nnote: Nota Bene\nThe function func must support the func. syntax and therefore must be able to apply element-wise to the eigenvalues (this includes anonymous functions).\n\nMaths\n\nThe definition of spectral functions for a positive definite matrix P is as follows:\n\nf(P)=Uf(Λ)U^H,\n\nwhere U is the matrix holding in columns the eigenvectors of P, Λ is the matrix holding on diagonal its eigenvalues and f is a function applying element-wise to the eigenvalues.\n\nSee also: evd.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nn=5\nP=randP(n) # P=randP(ComplexF64, 5) to generate an Hermitian complex matrix\nnoise=0.1;\nQ=spectralFunctions(P, x->x+noise) # add white noise to the eigenvalues\ntr(Q)-tr(P) ≈ noise*n ? 
println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.pow","page":"linearAlgebra.jl","title":"PosDefManifold.pow","text":"(1) pow(P::ℍ{T}, args...) where T<:RealOrComplex\n(2) pow(D::𝔻{S}, args...) where S<:Real\n\n(1) Given a positive semi-definite Hermitian matrix P, return the power P^r_1 P^r_2 for any number of exponents r_1 r_2. It returns a tuple comprising as many elements as arguments passed after P.\n\nP must be flagged as Hermitian. See typecasting matrices.\n\narg1 arg2 are real numbers.\n\nA special method is provided for real Diagonal matrices (2).\n\nSee also: invsqrt.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(5); # use P=randP(ComplexF64, 5) for generating an Hermitian matrix\nQ=pow(P, 0.5); # => QQ=P\nQ, W=pow(P, 0.5, -0.5);\nW*P*W ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\nQ*Q ≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\nR, S=pow(P, 0.3, 0.7);\nR*S ≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.invsqrt","page":"linearAlgebra.jl","title":"PosDefManifold.invsqrt","text":"(1) invsqrt(P{T}::ℍ) where T<:RealOrComplex\n(2) invsqrt(D{S}::𝔻) where S<:Real\n\nGiven a positive definite Hermitian matrix P, compute the inverse of the principal square root P^-12.\n\nP must be flagged as Hermitian. See typecasting matrices.\n\nA special method is provided for real Diagonal matrices (2).\n\nMaths\n\nThe principal square root of a positive definite matrix P is the only symmetric (if P is real) or Hermitian (if P is complex) square root. Its inverse P^-12 is also named the whitening or sphering matrix sinceP^-12PP^-12=I.\n\nSee: typecasting matrices.\n\nSee also: pow.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nP=randP(ComplexF64, 5);\nQ=invsqrt(P);\nQ*P*Q ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.sqr","page":"linearAlgebra.jl","title":"PosDefManifold.sqr","text":"(1) sqr(P::ℍ{T}) where T<:RealOrComplex\n(2) sqr(X::Union{𝕄{T}, 𝕃{T}, 𝔻{S}}) where T<:RealOrComplex where S<:Real\n\n(1) Given a positive semi-definite Hermitian matrix P, compute its square P^2.\n\nP must be flagged as Hermitian. See typecasting matrices.\n\nA method is provided also for generic matrices of the Matrix type, LowerTriangular matrices and real Diagonal matrices (2). The output is of the same type as the input.\n\nSee also: pow.\n\nExamples\n\nusing PosDefManifold\nP=randP(5);\nP²=sqr(P); # => P²=PP\nsqrt(P²)≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.powerIterations","page":"linearAlgebra.jl","title":"PosDefManifold.powerIterations","text":"powerIterations(H::Union{ℍ{T}, 𝕄{T}}, q::Int;\n<\nevalues=false,\ntol::Real=0,\nmaxiter::Int=300,\nverbose=false>) where T<:RealOrComplex\n\npowerIterations(L::𝕃{S}, q::Int;\n< same optional keyword arguments in (1)>) where S<:Real\n\nalias: powIter\n\n(1) Compute the q eigenvectors associated to the q largest (real) eigenvalues of real or complex Hermitian or Matrix H using the power iterations + Gram-Schmidt orthogonalization as suggested by Strang. The eigenvectors are returned with the same type as the elements of H.\n\nH must have real eigenvalues, that is, it must be a symmetric matrix if it is real or an Hermitian matrix if it is complex.\n\n(2) as in (1), but using only the LowerTriangular view L of a matrix. 
This option is available only for real matrices (see below).\n\nThe following are optional keyword arguments:\n\ntol is the tolerance for the convergence of the power method (see below),\nmaxiter is the maximum number of iterations allowed for the power method,\nif verbose=true, the convergence of all iterations will be printed,\nif evalues=true, return the 4-tuple (Λ, U, iterations, convergence),\nif evalues=false, return the 3-tuple (U, iterations, convergence).\n\nnote: Nota Bene\nDifferently from the evd function, the eigenvectors and eigenvalues are sorted by decreasing order of eigenvalues. If H is Hermitian and real, only its lower triangular part is used for computing the power iterations, like in (2). In this case the BLAS.symm routine is used. Otherwise the BLAS.gemm routine is used. See Threads. tol defaults to 100 times the square root of Base.eps of the type of H. This corresponds to requiring the relative convergence criterion over two successive iterations to vanish for about half the significant digits minus 2.\n\nSee also: mgs.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate an Hermitian (complex) matrix\nH=randP(ComplexF64, 10);\n# 3 eigenvectors and eigenvalues\nU, iterations, convergence=powIter(H, 3, verbose=true)\n# all eigenvectors\nΛ, U, iterations, convergence=powIter(H, size(H, 2), evalues=true, verbose=true);\nU'*U ≈ I && U*Λ*U'≈H ? println(\" ⭐ \") : println(\" ⛔ \")\n\n# passing a `Matrix` object\nΛ, U, iterations, convergence=powIter(Matrix(H), 3, evalues=true)\n\n# passing a `LowerTriangular` object (must be a real matrix in this case)\nL=𝕃(randP(10))\nΛ, U, iterations, convergence=powIter(L, 3, evalues=true)\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#Decompositions-involving-triangular-matrices","page":"linearAlgebra.jl","title":"Decompositions involving triangular matrices","text":"","category":"section"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"Function Description\nchoL Lower triangular factor of Cholesky decomposition\nchoInv Lower triangular factor of Cholesky decomposition and its inverse in one pass\nchoInv! as choInv, but destroying the input matrix","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"⋅","category":"page"},{"location":"linearAlgebra/","page":"linearAlgebra.jl","title":"linearAlgebra.jl","text":"choL\r\nchoInv\r\nchoInv!","category":"page"},{"location":"linearAlgebra/#PosDefManifold.choL","page":"linearAlgebra.jl","title":"PosDefManifold.choL","text":"(1) choL(P::ℍ{T}) where T<:RealOrComplex\n(2) choL(D::𝔻{S}) where S<:Real\n\n(1) Given a real or complex positive definite Hermitian matrix P, return the Cholesky lower triangular factor L such that LL^H=P. To obtain L^H or both L and L^H, use instead Julia's cholesky function.\n\nOn output, L is of type LowerTriangular.\n\n(2) For a real Diagonal matrix D, return D^12.\n\nSee also: choInv.\n\nExamples\n\nusing PosDefManifold\nP=randP(5);\nL=choL(P);\nL*L'≈ P ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.choInv","page":"linearAlgebra.jl","title":"PosDefManifold.choInv","text":"choInv(P::AbstractArray{T};\n\tkind::Symbol = :LLt, tol::Real = √eps(T)) where T<:RealOrComplex\n\nFor a real or complex positive definite matrix P, let P=LL^H be its Cholesky decomposition and P=L_1DL_1^H the related LDLt decomposition. 
In the above, L is a lower triangular matrix, D a positive-definite diagonal matrix and L_1 a unit lower triangular matrix. Return:\n\nif kind is :LLt (default), the 2-tuple L, L^-H\nif kind is :LDLt, the 3-tuple L_1, D, L_1^-H.\n\nThese are obtained in one pass and for small matrices this is faster than calling Julia's cholesky function and inverting the lower factor, unless you set\n\n BLAS.set_num_threads(1).\n\nInput matrix P may be of type Matrix or Hermitian. Since only the lower triangle is used, P may also be a LowerTriangular view of a positive definite matrix. If P is real, it can also be of the Symmetric type.\n\nThe algorithm is a multiplicative Gaussian elimination. If run completely, the input matrix P is reduced to the identity at the end.\n\nNotes: Output L^-H is an inverse square root (whitening matrix) of P, since L^-1PL^-H=I. It therefore yields the inversion of P as P^-1=L^-HL^-1. It is the fastest whitening matrix to be computed, however it yields poor numerical precision, especially for large matrices.\n\nThe following relations hold:\n\nL=PL^-H\nL^H=L^-1P\nL^-H=P^-1L\nL^-1=L^HP^-1.\n\nWe also have\n\nL^HL=L^-1P^2L^-H=UPU^H, with U orthogonal (see below) and\nL^-1L^-H=L^HP^-2L=UP^-1U^H.\n\nLL^H and L^HL are unitarily similar, that is,\n\nULL^HU^H=L^HL,\n\nwhere U=L^-1P^12, with P^12=H the principal (unique symmetric) square root of P. This is seen writing PP^-1=HHL^-HL^-1; multiplying both sides on the left by L^-1 and on the right by L we obtain\n\nL^-1PP^-1L=L^-1HHL^-H=I=(L^-1H)(L^-1H)^H\n\nand since L^-1H is square it must be unitary.\n\nFrom these expressions we have\n\nH=LU=U^HL^H\nL=HU^H\nH^-1=U^HL^-1\nL^-1=UH^-1.\n\nU is the polar factor of L^H, i.e., L^H=UH, since LL^H=HU^HUH^H=H^2=P.\n\nFrom L^HL=UPU^H we have L^HLU=UP=ULL^H and from U=L^-1H we have L=HU^H.\n\nSee also: choInv!, choL.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\nn, t = 800, 6000\netol = 1e-9\nZ=randn(t, n)\nY=Z'*Z\nYi=inv(Y)\n\nA, B=choInv!(copy(Y))\nnorm(A*A'-Y)/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\nnorm(B*B'-Yi)/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\n\nA, D, B=choInv!(copy(Y); kind=:LDLt)\nnorm(Y-A*D*A')/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\nnorm(Yi-B*inv(D)*B')/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\n\n# repeat the test for complex matrices\nZ=randn(ComplexF64, t, n)\nY=Z'*Z\nYi=inv(Y)\n\nA, B=choInv!(copy(Y))\nnorm(A*A'-Y)/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\nnorm(B*B'-Yi)/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\n\nA, D, B=choInv!(copy(Y); kind=:LDLt)\nnorm(Y-A*D*A')/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\nnorm(Yi-B*inv(D)*B')/√n < etol ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"linearAlgebra/#PosDefManifold.choInv!","page":"linearAlgebra.jl","title":"PosDefManifold.choInv!","text":"choInv!(P::AbstractArray{T};\n\tkind::Symbol = :LLt, tol::Real = √eps(T)) where T<:RealOrComplex\n\nThe same as choInv, but destroys the input matrix. This function does not require copying the input matrix, thus it is slightly faster.\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#riemannianGeometry.jl","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"","category":"section"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"This is the fundamental unit of PosDefManifold. 
It contains functions for manipulating points in the Riemannian manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. In Julia those are Hermitian matrices, see typecasting matrices.","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"The functions are divided into six categories:","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Category Output\n1. Geodesic equations interpolation, extrapolation, weighted mean of two matrices, ...\n2. Distances length of geodesics\n3. Graphs and Laplacians inter-distance matrices, spectral embedding, eigenmaps, ...\n4. Means mid-points of geodesics, Fréchet means of several points, midrange,...\n5. Tangent Space operations maps from the manifold to the tangent space and vice versa, parallel transport,...\n6. Procrustes problems data matching, transfer learning (domain adaptation), ...","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/#Geodesic-equations","page":"riemannianGeometry.jl","title":"Geodesic equations","text":"","category":"section"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\ngeodesic Geodesic equations (weighted mean of two positive definite matrices) for any metric","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"geodesic","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.geodesic","page":"riemannianGeometry.jl","title":"PosDefManifold.geodesic","text":"(1) geodesic(metric::Metric, P::ℍ{T}, Q::ℍ{T}, a::Real) where T<:RealOrComplex\n(2) geodesic(metric::Metric, D::𝔻{S}, E::𝔻{S}, a::Real) where S<:Real\n\n(1) Move along the geodesic from point P to point Q (two positive definite matrices) with arclength 0 ≤ a ≤ 1, using the specified metric, of type Metric::Enumerated type.\n\nFor all metrics,\n\nwith a=0 we stay at P,\nwith a=1 we move up to Q,\nwith a=1/2 we move to the mid-point of P and Q (mean).\n\nUsing the Fisher metric, argument a can be any real number, for instance:\n\nwith 0 < a < 1 we move toward Q (attraction),\nwith a > 1 we move over and beyond Q (extrapolation),\nwith a < 0 we move back away from Q (repulsion).\n\nP and Q must be flagged by Julia as Hermitian. See typecasting matrices.\n\nThe Fisher geodesic move is computed by the Cholesky-Schur algorithm given in Eq. 4.2 by Iannazzo (2016)🎓. If Q=I, the Fisher geodesic move is simply P^a (no need to call this function).\n\nnote: Nota Bene\nFor the logdet zero and Jeffrey metrics no closed form expression for the geodesic is available to the best of the authors' knowledge, so in this case the geodesic is found as the weighted mean using the mean function. 
For the Von Neumann metric not even an expression for the mean is available, so in this case the geodesic is not provided and a warning is printed.\n\n(2) Like in (1), but for two real positive definite diagonal matrices D and E.\n\nMaths\n\nFor points P, Q and arclength a, letting b=1-a, the geodesic equations for the supported metrics are:\n\nMetric geodesic equation\nEuclidean bP + aQ\ninvEuclidean (bP^-1 + aQ^-1)^-1\nChoEuclidean TT^*, where T=bL_P + aL_Q\nlogEuclidean exp(b log(P) + a log(Q))\nlogCholesky TT^*, where T=S_P+a(S_Q-S_P)+D_P exp(a(log(D_Q)-log(D_P)))\nFisher P^12 (P^-12 Q P^-12)^a P^12\nlogdet0 uses weighted mean algorithm logdet0Mean\nJeffrey uses weighted mean mean\nVonNeumann N.A.\nWasserstein b^2P+a^2Q+ab((PQ)^12+(QP)^12)\n\nlegend: L_X, S_X and D_X are the Cholesky lower triangle of X, its strictly lower triangular part and diagonal part, respectively (hence, S_X+D_X=L_X, L_XL_X^*=X).\n\nSee also: mean.\n\nExamples\n\nusing PosDefManifold\nP=randP(10)\nQ=randP(10)\n# Wasserstein mean\nM=geodesic(Wasserstein, P, Q, 0.5)\n# extrapolate using the Fisher metric\nE=geodesic(Fisher, P, Q, 2)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Distances","page":"riemannianGeometry.jl","title":"Distances","text":"","category":"section"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\ndistanceSqr, distance² Squared distance between positive definite matrices\ndistance Distance between positive definite matrices","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"distanceSqr\r\ndistance","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.distanceSqr","page":"riemannianGeometry.jl","title":"PosDefManifold.distanceSqr","text":"(1) distanceSqr(metric::Metric, P::ℍ{T}) where T<:RealOrComplex\n(2) distanceSqr(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n(3) distanceSqr(metric::Metric, D::𝔻{S}) where S<:Real\n(4) distanceSqr(metric::Metric, D::𝔻{S}, E::𝔻{S}) where S<:Real\n\nalias: distance²\n\n(1) Return δ^2(P, I), the square of the distance (or divergence) of positive definite matrix P from the identity matrix. See distance from the origin.\n\n(2) Return δ^2(P, Q), the square of the distance (or divergence) between two positive definite matrices P and Q. See distance.\n\nIn both cases the distance function δ is induced by the argument metric of type Metric::Enumerated type.\n\nP in (1) and P, Q in (2) must be flagged by Julia as Hermitian. See typecasting matrices.\n\n(3) and (4) are specialized methods of (1) and (2), respectively, for real positive definite Diagonal matrices. 
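For example, here is a minimal sketch of method (4), assuming nothing beyond this package's documented randP generator and the 𝔻 (Diagonal) constructor alias:\n\nusing PosDefManifold\nD=𝔻(randP(3)) # Diagonal part of a random 3x3 SPD matrix\nE=𝔻(randP(3))\nd=distanceSqr(Fisher, D, E)\n\n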
See ℍVector type and 𝔻Vector type.\n\nMaths\n\nFor point P the squared distances from the identity for the supported metrics are:\n\nMetric Squared Distance from the identity\nEuclidean ‖P-I‖^2\ninvEuclidean ‖P^-1-I‖^2\nChoEuclidean ‖L_P-I‖^2\nlogEuclidean ‖log(P)‖^2\nlogCholesky ‖S_P‖^2+‖log(D_P)‖^2\nFisher ‖log(P)‖^2\nlogdet0 logdet((P+I)/2) - (1/2)logdet(P)\nJeffrey (1/2)tr(P+P^-1) - n\nVonNeumann (1/2)tr(P log(P) - log(P))\nWasserstein tr(P+I) - 2tr(P^12)\n\nFor points P and Q their squared distances for the supported metrics are:\n\nMetric Squared Distance\nEuclidean ‖P-Q‖^2\ninvEuclidean ‖P^-1-Q^-1‖^2\nChoEuclidean ‖L_P - L_Q‖^2\nlogEuclidean ‖log(P)-log(Q)‖^2\nlogCholesky ‖S_P-S_Q‖^2+‖log(D_P)-log(D_Q)‖^2\nFisher ‖log(P^-12QP^-12)‖^2\nlogdet0 logdet((P+Q)/2) - (1/2)logdet(PQ)\nJeffrey (1/2)tr(Q^-1P+P^-1Q) - n\nVonNeumann (1/2)tr(P log(P) - P log(Q) + Q log(Q) - Q log(P))\nWasserstein tr(P+Q) - 2tr((P^12QP^12)^12)\n\nlegend: L_X, S_X and D_X are the Cholesky lower triangle of X, its strictly lower triangular part and diagonal part, respectively (hence, S_X+D_X=L_X, L_XL_X^*=X).\n\nSee also: distanceSqrMat.\n\nExamples (1)\n\nusing PosDefManifold\nP=randP(10)\nd=distanceSqr(Wasserstein, P)\ne=distanceSqr(Fisher, P)\nmetric=Metric(Int(logdet0)) # or metric=logdet0\ns=string(metric) # check what is the current metric\nf=distance²(metric, P) # using the alias distance²\n\nExamples (2)\n\nusing PosDefManifold\nP=randP(10)\nQ=randP(10)\nd=distanceSqr(logEuclidean, P, Q)\ne=distance²(Jeffrey, P, Q)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.distance","page":"riemannianGeometry.jl","title":"PosDefManifold.distance","text":"(1) distance(metric::Metric, P::ℍ{T}) where T<:RealOrComplex\n(2) distance(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n(3) distance(metric::Metric, D::𝔻{S}) where S<:Real\n(4) distance(metric::Metric, D::𝔻{S}, E::𝔻{S}) where S<:Real\n\n(1) Return δ(P, I), the distance between positive definite matrix P and the identity matrix.\n\n(2) Return δ(P, Q), the distance between positive definite matrices P and Q.\n\n(3) and (4) are specialized methods of (1) and (2), respectively, for real positive definite Diagonal matrices.\n\nThis is the square root of distanceSqr and is invoked with the same syntax therein.\n\nSee also: distanceMat.\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Graphs-and-Laplacians","page":"riemannianGeometry.jl","title":"Graphs and Laplacians","text":"","category":"section"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\ndistanceSqrMat, distance²Mat Lower triangular matrix of all squared inter-distances\ndistanceMat Lower triangular matrix of all inter-distances\nlaplacian Laplacian of a squared inter-distances matrix\nlaplacianEigenMaps, laplacianEM Eigen maps (eigenvectors) of a Laplacian\nspectralEmbedding, spEmb Spectral Embedding (the above functions run in 
series)","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"distanceSqrMat\r\ndistanceMat\r\nlaplacian\r\nlaplacianEigenMaps\r\nspectralEmbedding","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.distanceSqrMat","page":"riemannianGeometry.jl","title":"PosDefManifold.distanceSqrMat","text":" (1) distanceSqrMat(metric::Metric, 𝐏::ℍVector;\n <⏩=true>)\n\n (2) distanceSqrMat(type::Type{T}, metric::Metric, 𝐏::ℍVector;\n <⏩=true>) where T<:AbstractFloat\n\nalias: distance²Mat\n\nGiven a 1d array 𝐏 of k positive definite matrices P_1P_k of ℍVector type, create the kk real LowerTriangular matrix comprising elements δ^2(P_i P_j)textrm for all i=j.\n\nThis is the lower triangular matrix holding all squared inter-distances (zero on diagonal), using the specified metric, of type Metric::Enumerated type, giving rise to distance function δ. See distanceSqr.\n\nOnly the lower triangular part is computed in order to optimize memory use.\n\nBy default, the result matrix is of type Float32. The type can be changed to another real type using method (2).\n\n:\n\nif ⏩=true (default) the computation of inter-distances is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.\n\nSee: distance.\n\nSee also: laplacian, laplacianEigenMaps, spectralEmbedding.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 8 random 10x10 SPD matrices\nPset=randP(10, 8) # or, using unicode: 𝐏=randP(10, 8)\n# Compute the squared inter-distance matrix according to the log Euclidean metric.\n# This is much faster as compared to the Fisher metric and in general\n# it is a good approximation.\nΔ²=distanceSqrMat(logEuclidean, Pset)\n\n# return a matrix of type Float64\nΔ²64=distanceSqrMat(Float64, logEuclidean, Pset)\n\n# Get the full matrix of inter-distances\nfullΔ²=Hermitian(Δ², :L)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.distanceMat","page":"riemannianGeometry.jl","title":"PosDefManifold.distanceMat","text":" (1) distanceMat(metric::Metric, 𝐏::ℍVector;\n <⏩=true>)\n\n (2) distanceMat(type::Type{T}, metric::Metric, 𝐏::ℍVector;\n <⏩=true>) where T<:AbstractFloat\n\nGiven a 1d array 𝐏 of k positive definite matrices P_1P_k of ℍVector type, create the kk real LowerTriangular matrix comprising elements δ(P_i P_j)textrm for all i=j.\n\nThis is the lower triangular matrix holding all inter-distances (zero on diagonal), using the specified metric, of type Metric::Enumerated type, giving rise to distance δ. See distance.\n\nOnly the lower triangular part is computed in order to optimize memory use.\n\nBy default, the result matrix is of type Float32. The type can be changed to another real type using method (2).\n\nThe elements of this matrix are the square root of distanceSqrMat.\n\n:\n\nif ⏩=true the computation of inter-distances is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. 
See Threads.\n\nSee: distance.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 4 random 10x10 SPD matrices\nPset=randP(10, 4) # or, using unicode: 𝐏=randP(10, 4)\nΔ=distanceMat(Fisher, Pset)\n\n# return a matrix of type Float64\nΔ64=distanceMat(Float64, Fisher, Pset)\n\n# Get the full matrix of inter-distances\nfullΔ=Hermitian(Δ, :L)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.laplacian","page":"riemannianGeometry.jl","title":"PosDefManifold.laplacian","text":"laplacian(Δ²::𝕃{S}, epsilon::Real=0;\n <densityInvariant=false>) where S<:Real\n\nGiven a LowerTriangular matrix of squared inter-distances Δ^2, return the lower triangular part of the so-called normalized Laplacian or density-invariant normalized Laplacian, which in both cases is a symmetric Laplacian. The elements of the Laplacian are of the same type as the elements of Δ^2. The result is a LowerTriangular matrix.\n\nThe definition of Laplacian given by Lafon (2004)🎓 is implemented:\n\nFirst, a Gaussian radial basis function, known as Gaussian kernel or heat kernel, is applied to all elements of Δ^2, such that\n\nW_ij = exp(-Δ^2_ij / 2ε),\n\nwhere ε is the bandwidth of the kernel.\n\nIf densityInvariant=true is used, then the density-invariant transformation is applied\n\nW ← E^-1WE^-1,\n\nwhere E is the diagonal matrix holding on the main diagonal the sum of the rows (or columns) of W.\n\nFinally, the normalized Laplacian (density-invariant or not) is defined as\n\nΩ = D^-12WD^-12,\n\nwhere D is the diagonal matrix holding on the main diagonal the sum of the rows (or columns) of W.\n\nIf you do not provide argument epsilon, the bandwidth ε is set to the median of the elements of squared distance matrix Δ^2_ij. Another educated guess is the dimension of the original data, that is, the data that has been used to compute the squared distance matrix. For positive definite matrices this is n(n-1)/2, where n is the dimension of the matrices. Still another is the dimension of the ensuing spectralEmbedding space. Keep in mind that by tuning the epsilon parameter (which must be positive) you can control both the rate of compression of the embedding space and the spread of points in the embedding space. See Coifman et al. (2008)🎓 for a discussion on ε.\n\nnote: Nota Bene\nThe Laplacian as here defined can be requested for any input matrix of squared inter-distances, for example, those obtained on scalars or on vectors using appropriate metrics. In any case, only the lower triangular part of the Laplacian is taken as input. 
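For example, here is a minimal sketch on scalars (the names v and Δ² are illustrative, not part of the API; 𝕃 is the LowerTriangular constructor alias):\n\nusing PosDefManifold\nv=randn(8) # 8 random scalars\n# lower triangle of the squared Euclidean inter-distances (zero on diagonal)\nΔ²=𝕃([i>j ? abs2(v[i]-v[j]) : 0.0 for i=1:8, j=1:8])\nΩ=laplacian(Δ²)\n\n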
See typecasting matrices.\n\nSee also: distanceSqrMat, laplacianEigenMaps, spectralEmbedding.\n\nExamples\n\nusing Statistics, PosDefManifold\n# Generate a set of 4 random 10x10 SPD matrices\nPset=randP(10, 4) # or, using unicode: 𝐏=randP(10, 4)\nΔ²=distanceSqrMat(Fisher, Pset)\nΩ=laplacian(Δ²)\n\n# density-invariant Laplacian\nΩ=laplacian(Δ²; densityInvariant=true)\n\n# increase the bandwidth\nr=size(Δ², 1)\nmyεFactor=0.1\nmed=Statistics.median([Δ²[i, j] for j=1:r-1 for i=j+1:r])\nε=2*myεFactor*med\nΩ=laplacian(Δ², ε; densityInvariant=true)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.laplacianEigenMaps","page":"riemannianGeometry.jl","title":"PosDefManifold.laplacianEigenMaps","text":" laplacianEigenMaps(Ω::𝕃{S}, q::Int;\n <\n tol::Real=0.,\n maxiter::Int=300,\n verbose=false >) where S<:Real\n\nalias: laplacianEM\n\nGiven the lower triangular part of a Laplacian Ω (see laplacian) return the eigen maps in q dimensions, i.e., the q eigenvectors of the Laplacian associated with the largest q eigenvalues, excluding the first (which is always equal to 1.0). The eigenvectors are of the same type as Ω. They are all divided element-wise by the first eigenvector (see Lafon, 2004🎓).\n\nThe eigenvectors of the Laplacian are computed by the power iterations + modified Gram-Schmidt method (see powerIterations), allowing the execution of this function for Laplacian matrices of very large size.\n\nReturn the 4-tuple (Λ, U, iterations, convergence), where:\n\nΛ is a q⋅q diagonal matrix holding on diagonal the eigenvalues corresponding to the q dimensions of the Laplacian eigen maps,\nU holds in columns the q eigenvectors holding the q coordinates of the points in the embedding space,\niterations is the number of iterations executed by the power method,\nconvergence is the convergence attained by the power method.\n\nUsing the notion of Laplacian, spectral embedding seeks a low-dimensional representation of the data emphasizing local neighborhood information while neglecting long-distance information. The embedding is non-linear, however the embedding space is Euclidean. The columns of U hold the coordinates of the points in the embedding space (typically two- or three-dimensional for plotting or more for clustering). Spectral embedding is used for plotting data in low dimension, clustering, imaging, classification, following trajectories over time or other dimensions, and much more. For examples of applications see Rodrigues et al. (2018)🎓 and references therein.\n\nArguments:\n\nΩ is a real LowerTriangular normalized Laplacian obtained by the laplacian function,\nq is the dimension of the Laplacian eigen maps;\nThe following are for the power iterations:\ntol is the tolerance for convergence (see below),\nmaxiter is the maximum number of iterations allowed,\nif verbose is true, the convergence at all iterations will be printed.\n\nnote: Nota Bene\nThe maximum value of q that can be requested is n-1, where n is the size of the Laplacian. In general, q=2 or q=3 is requested. tol defaults to the square root of Base.eps of the (real) type of Ω. 
This corresponds to requiring equality for the convergence criterion over two successive power iterations of about half of the significant digits.\n\nSee also: distanceSqrMat, laplacian, spectralEmbedding.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 4 random 10x10 SPD matrices\nPset=randP(10, 4)\nΔ²=distanceSqrMat(Fisher, Pset)\nΩ=laplacian(Δ²)\nevalues, maps, iterations, convergence=laplacianEM(Ω, 2)\nevalues, maps, iterations, convergence=laplacianEM(Ω, 2; verbose=true)\nevalues, maps, iterations, convergence=laplacianEM(Ω, 2; verbose=true, maxiter=500)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.spectralEmbedding","page":"riemannianGeometry.jl","title":"PosDefManifold.spectralEmbedding","text":" (1) spectralEmbedding(metric::Metric, 𝐏::ℍVector, q::Int, epsilon::Real=0;\n <\n tol::Real=0.,\n maxiter::Int=300,\n densityInvariant=false,\n verbose=false,\n ⏩=true >)\n\n (2) spectralEmbedding(type::Type{T}, metric::Metric, 𝐏::ℍVector, q::Int, epsilon::Real=0;\n < same optional keyword arguments as in (1) >) where T<:Real\n\nalias: spEmb\n\nGiven a 1d array 𝐏 of k positive definite matrices P_1P_k (real or complex), compute its eigen maps in q dimensions.\n\nThis function runs one after the other the functions:\n\ndistanceSqrMat (compute the squared inter-distance matrix),\nlaplacian (compute the normalized Laplacian),\nlaplacianEigenMaps (get the eigen maps).\n\nBy default all computations above are done with Float32 precision. Another real type can be requested using method (2), where the type argument is defined.\n\nReturn the 4-tuple (Λ, U, iterations, convergence), where:\n\nΛ is a q⋅q diagonal matrix holding on diagonal the eigenvalues corresponding to the q dimensions of the Laplacian eigen maps,\nU holds in columns the q eigenvectors holding the q coordinates of the points in the embedding space,\niterations is the number of iterations executed by the power method,\nconvergence is the convergence attained by the power method.\n\nArguments:\n\nmetric is the metric of type Metric::Enumerated type used for computing the inter-distances,\n𝐏 is a 1d array of k positive matrices of ℍVector type,\nq is the dimension of the Laplacian eigen maps,\nepsilon is the bandwidth of the Laplacian (see laplacian);\nThe following applies to the computation of the inter-distances:\nif ⏩=true (default) the computation of inter-distances is multi-threaded.\nThe following applies to the computation of the Laplacian by the laplacian function:\nif densityInvariant=true the density-invariant Laplacian is computed (see laplacian).\nThe following are for the power method iterative algorithm invoked by laplacianEigenMaps:\ntol is the tolerance for convergence of the power method (see below),\nmaxiter is the maximum number of iterations allowed for the power method,\nif verbose=true the convergence at all iterations will be printed;\n\nnote: Nota Bene\ntol defaults to the square root of Base.eps of the Float32 type (1) or of the type passed as argument (2). This corresponds to requiring equality for the convergence criterion over two successive power iterations of about half of the significant digits. Multi-threading is automatically disabled if Julia is instructed to use only one thread. 
See Threads.\n\nSee also: distanceSqrMat, laplacian, laplacianEigenMaps.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of k random 10x10 SPD matrices\nk=10\nPset=randP(10, k)\nevalues, maps, iter, conv=spectralEmbedding(Fisher, Pset, 2)\n\n# show convergence information\nevalues, maps, iter, conv=spectralEmbedding(Fisher, Pset, 2; verbose=true)\n\n# use Float64 precision.\nevalues, maps, iter, conv=spectralEmbedding(Float64, Fisher, Pset, 2)\n\nusing Plots\n# check eigenvalues and eigenvectors\nplot(diag(evalues))\nplot(maps[:, 1])\nplot!(maps[:, 2])\nplot!(maps[:, 3])\n\n# plot the data in the embedded space\nplot(maps[:, 1], maps[:, 2], seriestype=:scatter, title=\"Spectral Embedding\", label=\"Pset\")\n\n# try a different value of epsilon\nevalues, maps, iter, conv=spEmb(Fisher, Pset, k-1, 0.01; maxiter=1000)\nplot(maps[:, 1], maps[:, 2], seriestype=:scatter, title=\"Spectral Embedding\", label=\"Pset\")\n# see the example in the `laplacian` function for more on this\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Means","page":"riemannianGeometry.jl","title":"Means","text":"","category":"section"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\nmean Weighted Fréchet mean (wFm) of a scalar or matrix set using any metric\nmeans As above for several sets at once\ngeneralizedMean Generalized wFm of a matrix set\ngeometricMean, gMean wFm of a matrix set minimizing the dispersion according to the Fisher metric (iterative)\ngeometricpMean, gpMean robust wFm of a matrix set minimizing the p-dispersion according to the Fisher metric (iterative)\nlogdet0Mean, ld0Mean wFm of a matrix set according to the logdet0 metric (iterative)\nwasMean wFm of a matrix set according to the Wasserstein metric (iterative)\npowerMean Power wFm of a matrix set (iterative)\ninductiveMean, indMean Recursive Fréchet mean of a matrix set (constructive)\nmidrange Geometric midrange of two matrices","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"mean\r\nmeans\r\ngeneralizedMean\r\ngeometricMean\r\ngeometricpMean\r\nlogdet0Mean\r\nwasMean\r\npowerMean\r\ninductiveMean\r\nmidrange","category":"page"},{"location":"riemannianGeometry/#Statistics.mean","page":"riemannianGeometry.jl","title":"Statistics.mean","text":" (1) mean(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\n (2) mean(metric::Metric, D::𝔻{T}, E::𝔻{T}) where T<:Real\n\n (3) mean(metric::Metric, 𝐏::ℍVector;\n <\n w::Vector=[],\n ✓w=true,\n init::Union{ℍ, Nothing}=nothing,\n tol::Real=0.,\n verbose=false,\n ⏩=true >)\n\n (4) mean(metric::Metric, 𝐃::𝔻Vector;\n < same optional keyword arguments as in (3) >)\n\n(1) Mean of two positive definite matrices, passed in arbitrary order as arguments P and Q, using the specified metric of type Metric::Enumerated type. The order is arbitrary as all metrics implemented in PosDefManifold are symmetric. This is the midpoint of the geodesic. For the weighted mean of two positive definite matrices use instead the geodesic function. P and Q must be flagged as Hermitian. 
See typecasting matrices.\n\n(2) Like in (1), but for two real diagonal positive definite matrices D and E.\n\n(3) Fréchet mean of a 1d array 𝐏 of k positive definite matrices 𝐏=P_1P_k of ℍVector type, with optional non-negative real weights w=w_1w_k and using the specified metric as in (1).\n\n(4) Fréchet mean of a 1d array 𝐃 of k positive definite matrices 𝐃=D_1D_k of 𝔻Vector type, with optional non-negative real weights w=w_1w_k and using the specified metric as in (1).\n\nIf you don't pass a weight vector with w, return the unweighted mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nAdopting the Fisher, logdet0 and Wasserstein metric in (3) and the logdet0 metric in (4), the mean is computed by means of an iterative algorithm. A particular initialization for these algorithms can be provided passing an Hermitian matrix as init. The convergence for these algorithms is required with a tolerance given by tol. If verbose=true the convergence attained at each iteration is printed. Other information, such as whether the algorithm has diverged, is also printed. For more options in computing these means call directly functions geometricMean, logdet0Mean and wasMean, which are called hereby. For the meaning of the tol default value see the documentation of these functions. See also the robust mean function geometricpMean, which cannot be called from here. Notice that arguments init and tol have an effect only for the aforementioned metrics in methods (3) and (4).\n\nFor (3) and (4), if ⏩=true (default), the computation of the mean is multi-threaded for all metrics.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.\n\nMath\n\nThe Fréchet mean of a set of k matrices P_1, P_2, , P_k weighted by w_1, w_2, , w_k (with sum_i=1^k w_i=1) is given, for the supported metrics with closed form expression, by:\n\nMetric weighted Fréchet mean\nEuclidean sum_i=1^k w_i P_i\ninvEuclidean (sum_i=1^k w_i P_i^-1)^-1\nChoEuclidean TT^*, where T=sum_i=1^k w_i L_i\nlogEuclidean exp(sum_i=1^k w_i log(P_i))\nlogCholesky TT^*, where T=sum_i=1^k(w_i S_i)+sum_i=1^k(w_i log(D_i))\nJeffrey A^12(A^-12HA^-12)^12A^12\n\nand for those that are found by an iterative algorithm and that verify an equation:\n\nMetric equation verified by the weighted Fréchet mean\nFisher sum_i=1^k w_i log(G^-12 P_i G^-12)=0\nlogdet0 sum_i=1^k w_i ((P_i+G)/2)^-1=G^-1\nVonNeumann N.A.\nWasserstein G=sum_i=1^k w_i (G^12 P_i G^12)^12\n\nlegend: L_i, S_i and D_i are the Cholesky lower triangle of P_i, its strictly lower triangular part and diagonal part, respectively (hence, S_i+D_i=L_i, L_iL_i^*=P_i). 
A and H are the weighted arithmetic and weighted harmonic mean, respectively.\n\nSee: geodesic, mean, Fréchet mean.\n\nExamples\n\nusing LinearAlgebra, Statistics, PosDefManifold\n# Generate 2 random 3x3 SPD matrices\nP=randP(3)\nQ=randP(3)\nM=mean(logdet0, P, Q) # (1)\nM=mean(Euclidean, P, Q) # (1)\n\n# passing several matrices and associated weights listing them\n# weights vector, does not need to be normalized\nR=randP(3)\nmean(Fisher, ℍVector([P, Q, R]); w=[1, 2, 3])\n\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4)\nweights=[1, 2, 3, 1]\n# passing a vector of Hermitian matrices (ℍVector type)\nM=mean(Euclidean, Pset; w=weights) # (3) weighted Euclidean mean\nM=mean(Wasserstein, Pset) # (3) unweighted Wasserstein mean\n# display convergence information when using an iterative algorithm\nM=mean(Fisher, Pset; verbose=true)\n\n# run multi-threaded when the number of matrices is high\nusing BenchmarkTools\nPset=randP(20, 160)\n@benchmark(mean(logEuclidean, Pset; ⏩=false)) # single-threaded\n@benchmark(mean(logEuclidean, Pset)) # multi-threaded\n\n\n\n\n\nmean(metric::Metric, ν::Vector{T}) where T<:RealOrComplex\n\nMean of k real or complex scalars, using the specified metric of type Metric::Enumerated type. Note that using the Fisher, logEuclidean and Jeffrey metrics, the resulting mean is the scalar geometric mean. Note also that the code of this method is in unit statistics.jl, while the code for all the others is in unit riemannianGeometry.jl.\n\nExamples\n\nusing PosDefManifold\n# Generate 10 random numbers distributed as a chi-square with 2 df.\nν=[randχ²(2) for i=1:10]\narithmetic_mean=mean(Euclidean, ν)\ngeometric_mean=mean(Fisher, ν)\nharmonic_mean=mean(invEuclidean, ν)\nharmonic_mean<=geometric_mean<=arithmetic_mean # AGH inequality\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.means","page":"riemannianGeometry.jl","title":"PosDefManifold.means","text":" (1) means(metric::Metric, 𝒫::ℍVector₂;\n <⏩=true>)\n\n (2) means(metric::Metric, 𝒟::𝔻Vector₂;\n <⏩=true>)\n\n(1) Given a 2d array 𝒫 of positive definite matrices as an ℍVector₂ type, compute the Fréchet mean for as many ℍVector type objects as hold in 𝒫, using the specified metric of type Metric::Enumerated type. Return the means in a vector of Hermitian matrices, that is, as an ℍVector type.\n\n(2) Given a 2d array 𝒟 of real positive definite matrices as an 𝔻Vector₂ type, compute the Fréchet mean for as many 𝔻Vector type objects as hold in 𝒟, using the specified metric of type Metric::Enumerated type. Return the means in a vector of Diagonal matrices, that is, as a 𝔻Vector type.\n\nThe weighted Fréchet mean is not supported in this function.\n\nIf ⏩=true (default) the computation of the means is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. 
See Threads.\n\nSee also: mean.\n\nExamples\n\nusing PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n# Generate a set of 40 random 3x3 SPD matrices\nQset=randP(3, 40) # or, using unicode: 𝐐=randP(3, 40)\n# listing directly ℍVector objects\nmeans(logEuclidean, ℍVector₂([Pset, Qset])) # or: means(logEuclidean, ℍVector₂([𝐏, 𝐐]))\n# note that [𝐏, 𝐐] is actually a ℍVector₂ type object\n\n# creating and passing an object of ℍVector₂ type\nsets=ℍVector₂(undef, 2) # or: 𝒫=ℍVector₂(undef, 2)\nsets[1]=Pset # or: 𝒫[1]=𝐏\nsets[2]=Qset # or: 𝒫[2]=𝐐\nmeans(logEuclidean, sets) # or: means(logEuclidean, 𝒫)\n\n# going multi-threaded\n\n# first, create 20 sets of 200 50x50 SPD matrices\nsets=ℍVector₂([randP(50, 200) for i=1:20])\n\n# How much computing time do we save?\n# (example min time obtained with 4 threads & 4 BLAS threads)\nusing BenchmarkTools\n\n# non multi-threaded, mean with closed-form solution\n@benchmark(means(logEuclidean, sets; ⏩=false)) # (6.196 s)\n\n# multi-threaded, mean with closed-form solution\n@benchmark(means(logEuclidean, sets)) # (1.897 s)\n\nsets=ℍVector₂([randP(10, 200) for i=1:10])\n\n# non multi-threaded, mean with iterative solution\n# wait a bit\n@benchmark(means(Fisher, sets; ⏩=false)) # (4.672 s)\n\n# multi-threaded, mean with iterative solution\n@benchmark(means(Fisher, sets)) # (1.510 s)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.generalizedMean","page":"riemannianGeometry.jl","title":"PosDefManifold.generalizedMean","text":" generalizedMean(𝐏::Union{ℍVector, 𝔻Vector}, p::Real;\n <\n w::Vector=[],\n ✓w=true,\n ⏩=true >)\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and optional non-negative real weights vector w=w_1w_k, return the weighted generalized mean G with real parameter p, that is,\n\nG=(sum_i=1^k w_i P_i^p)^1/p.\n\nIf you don't pass a weight vector with w, return the unweighted generalized mean\n\nG=(sum_i=1^k P_i^p)^1/p.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the weights each time.\n\nIf ⏩=true the computation of the generalized mean is multi-threaded.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.\n\nThe following special cases for parameter p are noteworthy:\n\nFor p=1/2 the generalized mean is the modified Bhattacharyya mean.\nFor p=1 the generalized mean is the Euclidean mean.\nFor p=-1 the generalized mean is the inverse Euclidean mean.\nFor (the limit of) p=0 the generalized mean is the log Euclidean mean, which is the Fisher mean when matrices in 𝐏 all pair-wise commute.\n\nNotice that when matrices in 𝐏 all pair-wise commute, for instance if the matrices are diagonal, the generalized means coincide with the power means for any p ∈ [-1, 1] and for p=0.5 they coincide also with the Wasserstein mean. 
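A minimal usage sketch of these special cases (only documented calls are used; the variable names are illustrative):\n\nusing PosDefManifold\nPset=randP(3, 4)\nGb=generalizedMean(Pset, 0.5) # modified Bhattacharyya mean\nGe=generalizedMean(Pset, 1) # Euclidean mean\nGh=generalizedMean(Pset, -1) # inverse Euclidean mean\n\n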
For this reason the generalized means are used as default initialization of both the powerMean and wasMean algorithms.\n\nSee: generalized means.\n\nSee also: powerMean, wasMean, mean.\n\nExamples\n\nusing LinearAlgebra, Statistics, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# unweighted mean\nG = generalizedMean(Pset, 0.25) # or: G = generalizedMean(𝐏, 0.25)\n\n# weighted mean\nG = generalizedMean(Pset, 0.5; w=weights)\n\n# with weights previously normalized we can set ✓w=false\nweights=weights./sum(weights)\nG = generalizedMean(Pset, 0.5; w=weights, ✓w=false)\n\n# run multi-threaded when the number of matrices is high\nusing BenchmarkTools\nPset=randP(20, 160)\n@benchmark(generalizedMean(Pset, 0.5; ⏩=false)) # single-threaded\n@benchmark(generalizedMean(Pset, 0.5)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.geometricMean","page":"riemannianGeometry.jl","title":"PosDefManifold.geometricMean","text":" geometricMean(𝐏::Union{ℍVector, 𝔻Vector};\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n adaptStepSize::Bool=true,\n verbose=false,\n ⏩=true >)\n\nalias: gMean\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type or diagonal matrices of 𝔻Vector type and optional non-negative real weights vector w=w_1w_k, return the 3-tuple (G, iter, conv), where G is the mean according to the Fisher metric and iter, conv are the number of iterations and convergence attained by the algorithm. Mean G is the unique positive definite matrix satisfying\n\nsum_i=1^k w_i log(G^-12 P_i G^-12)=0.\n\nFor estimating it, this function implements the well-known gradient descent algorithm, but with an exponentially decaying step size ς, yielding iterations\n\nG ← G^12 exp(ς sum_i=1^k w_i log(G^-12 P_i G^-12)) G^12.\n\nIf you don't pass a weight vector with w, return the unweighted geometric mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are more optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used,\ntol is the tolerance for the convergence (see below),\nmaxiter is the maximum number of iterations allowed,\nif verbose=true, the convergence attained at each iteration and the step size ς are printed. Also, a warning is printed if convergence is not attained,\nif ⏩=true the iterations are multi-threaded (see below),\nif adaptStepSize=false the step size ς is fixed to 1 at all iterations.\n\nIf the input is a 1d array of k real positive definite diagonal matrices the solution is available in closed-form as the log Euclidean mean, hence init, tol and verbose have no effect and the 3-tuple (G, 1, 0) is returned. See the log Euclidean metric.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads. In normal circumstances this algorithm converges monotonically. 
If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened.The exponentially decaying step size features a faster convergence rate compared to the fixed step size ς=1 that is usually adopted. The decaying rate is inversely proportional to maxiter, thus increase/decrease maxiter in order to set a slower/faster decaying rate. maxiter should not be set too low though.tol defaults to the square root of Base.eps of the nearest real type of data input 𝐏. This corresponds to requiring the norm of the matrix equation to be satisfied, divided by the number of elements, to vanish for about half the significant digits.\n\nSee: Fisher metric.\n\nSee also: geometricpMean, powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = geometricMean(Pset) # or G, iter, conv = geometricMean(𝐏)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = geometricMean(Pset, w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = geometricMean(Pset; verbose=true)\n\n# now suppose Pset has changed a bit; initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = geometricMean(Pset; w=weights, ✓w=true, verbose=true, init=G)\n\n# run multi-threaded when the number of matrices is high\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(geometricMean(Pset; ⏩=false)) # single-threaded\n@benchmark(geometricMean(Pset)) # multi-threaded\n\n# show the mean and the input points using spectral embedding\nusing Plots\nk=80\nPset=randP(20, k)\nG, iter, conv = geometricMean(Pset)\npush!(Pset, G)\nΛ, U, iter, conv=spectralEmbedding(Fisher, Pset, 2; verbose=true)\nplot(U[1:k, 1], U[1:k, 2], seriestype=:scatter, title=\"Spectral Embedding\", label=\"Pset\")\nplot!(U[k+1:k+1, 1], U[k+1:k+1, 2], seriestype=:scatter, label=\"mean\")\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.geometricpMean","page":"riemannianGeometry.jl","title":"PosDefManifold.geometricpMean","text":" geometricpMean(𝐏::ℍVector, p::Real=0.5;\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n adaptStepSize=true,\n verbose=false,\n ⏩=true >)\n\nalias: gpmean\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type, a real parameter 0<p≤1 and optional non-negative real weights vector w=w_1w_k, return the 3-tuple (G, iter, conv), where G is the p-mean, i.e., the mean according to the Fisher metric minimizing the p-dispersion (see below) and iter, conv are the number of iterations and convergence attained by the algorithm.\n\nThis function implements the p-dispersion gradient descent algorithm with step-size ς (to be published), yielding iterations\n\nG G^12textrmexpbig(ςsum_i=1^kpδ^2(G P_i)^p-1w_itextrmlog(G^-12 P_i G^-12)big)G^12\n\nif p=1 this yields the geometric mean (implemented specifically in geometricMean).\nif p=0.5 this yields the geometric median (default).\n\nIf you don't pass a weight vector with w, return the unweighted geometric-p mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized.
This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are further optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif adaptStepSize=true (default) the step size ς for the gradient descent is adapted at each iteration (see below).\nif verbose=true, the step-size and convergence attained at each iteration are printed. Also, a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded (see below).\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened. This algorithm may temporarily diverge and still reach convergence eventually. Overall, while all other iterative algorithms implemented in PosDefManifold are very stable, this one is not.The smaller the parameter p is, the slower and less likely the convergence is. If the algorithm does not converge, try increasing p, initializing the algorithm with the output of geometricMean and/or eliminating the outliers from the input set 𝐏.If adaptStepSize is true (default) the step-size ς is adapted at each iteration, otherwise a fixed step size ς=1 is used. Adapting the step size in general hastens convergence and improves the convergence behavior.tol defaults to the square root of Base.eps of the nearest real type of data input 𝐏. This corresponds to requiring the norm of the matrix equation to be satisfied, divided by the number of elements, to vanish for about half the significant digits.\n\nSee: Fisher metric.\n\nSee also: geometricMean, powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold, Plots\n\n# This example shows that this algorithm is more robust to outliers\n# compared to the standard geometric mean algorithm\n\n# Generate a set of 100 random 10x10 SPD matrices\nPset=randP(10, 100)\n\n# Get the usual geometric mean for comparison\nG, iter1, conv1 = geometricMean(Pset, verbose=true)\n\n# change p to observe how the convergence behavior changes accordingly\n# Get the median (default)\nH, iter2, conv2 = geometricpMean(Pset, verbose=true)\n# Get the p-mean for p=0.25\nH, iter2, conv2 = geometricpMean(Pset, 0.25, verbose=true)\n\nprintln(iter1, \" \", iter2); println(conv1, \" \", conv2)\n\n# move the first matrix in Pset to possibly create an outlier\nPset[1]=geodesic(Fisher, G, Pset[1], 3)\nG1, iter1, conv1 = geometricMean(Pset, verbose=true)\nH1, iter2, conv2 = geometricpMean(Pset, 0.25, verbose=true)\nprintln(iter1, \" \", iter2); println(conv1, \" \", conv2)\n\n# collect the geometric and p-means, before and after the\n# introduction of the outlier, in the vector of Hermitian matrices `S`.\nS=HermitianVector([G, G1, H, H1])\n\n# check the interdistance matrix Δ² to verify that the geometric mean\n# after the introduction of the outlier (``G1``) is farther away from\n# the original geometric mean (``G``) than ``H1`` is from\n# ``H``, i.e., that element (4,3) is much smaller than element (2,1).\nΔ²=distance²Mat(Float64, Fisher, S)\n\n# how far are all these matrices from all the others?\nfullΔ²=Hermitian(Δ², :L)\ndist=[sum(fullΔ²[:, i]) for i=1:size(fullΔ², 1)]\n\n# plot the matrices in `S` using spectral embedding.\nusing Plots\nΛ, U, iter, conv = 
laplacianEM(laplacian(Δ²), 3; verbose=true)\nplot([U[1, 1]], [U[1, 2]], seriestype=:scatter, label=\"g-mean\")\nplot!([U[2, 1]], [U[2, 2]], seriestype=:scatter, label=\"g-mean outlier\")\nplot!([U[3, 1]], [U[3, 2]], seriestype=:scatter, label=\"p-mean\")\nplot!([U[4, 1]], [U[4, 2]], seriestype=:scatter, label=\"p-mean outlier\")\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(geometricpMean(Pset; ⏩=false)) # single-threaded\n@benchmark(geometricpMean(Pset)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.logdet0Mean","page":"riemannianGeometry.jl","title":"PosDefManifold.logdet0Mean","text":" logdet0Mean(𝐏::Union{ℍVector, 𝔻Vector};\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n verbose=false,\n ⏩=true >)\n\nalias: ld0Mean\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and optional non-negative real weights vector w=w_1w_k, return the 3-tuple (G, iter, conv), where G is the mean according to the logdet zero metric and iter, conv are the number of iterations and convergence attained by the algorithm. Mean G is the unique positive definite matrix satisfying\n\nsum_i=1^kw_ibig(frac12P_i+frac12Gbig)^-1-G^-1=0.\n\nFor estimating it, this function implements the fixed-point iteration algorithm suggested by (Moakher, 2012, p. 315)🎓, yielding iterations\n\nG frac12big(sum_i=1^kw_i(P_i+G)^-1big)^-1.\n\nIf you don't pass a weight vector with w, return the unweighted logdet zero mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are further optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the log Euclidean mean will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded (see below).\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened.tol defaults to 100 times the square root of Base.eps of the nearest real type of data input 𝐏.
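For reference, the default tolerance just stated can be computed explicitly; a minimal illustrative sketch, assuming real input of type Float64 (the variable name is hypothetical):\n\n# default tolerance of logdet0Mean for Float64 data\ntol_default = 100 * sqrt(eps(Float64)) # ≈ 1.49e-6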
This corresponds to requiring the square root of the relative convergence criterion over two successive iterations to vanish for about half the significant digits minus 2.\n\nSee: logdet zero metric, modified Bhattacharyya mean.\n\nSee also: powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = logdet0Mean(Pset) # or G, iter, conv = logdet0Mean(𝐏)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = logdet0Mean(Pset, w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = logdet0Mean(Pset; w=weights, verbose=true)\n\n# suppose Pset has changed a bit; initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = logdet0Mean(Pset; w=weights, ✓w=false, verbose=true, init=G)\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(logdet0Mean(Pset; ⏩=false)) # single-threaded\n@benchmark(logdet0Mean(Pset)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.wasMean","page":"riemannianGeometry.jl","title":"PosDefManifold.wasMean","text":" wasMean(𝐏::Union{ℍVector, 𝔻Vector};\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n verbose=false,\n ⏩=true >)\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type and optional non-negative real weights vector w=w_1w_k, return the 3-tuple (G, iter, conv), where G is the mean according to the Wasserstein metric and iter, conv are the number of iterations and convergence attained by the algorithm. Mean G is the unique positive definite matrix satisfying\n\nG=sum_i=1^kw_ibig( G^12 P_i G^12big)^12.\n\nFor estimating it, this function implements the fixed-point iterative algorithm proposed by (Álvarez-Esteban et al., 2016)🎓:\n\nG G^-12big(sum_i=1^k w_i(G^12P_i G^12)^12big)^2 G^-12.\n\nIf you don't pass a weight vector with w, return the unweighted Wasserstein mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are further optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean. If no matrix is provided, the instance of generalized means with p=0.5 will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded (see below).\n\nIf the input is a 1d array of k real positive definite diagonal matrices the solution is available in closed form as the modified Bhattacharyya mean, hence the init, tol and verbose arguments have no effect and the 3-tuple (G, 1, 0) is returned. See modified Bhattacharyya mean.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.In normal circumstances this algorithm converges monotonically.
If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened.tol defaults to the square root of Base.eps of the nearest real type of data input 𝐏. This corresponds to requiring the norm of the matrix equation to be satisfied, divided by the number of elements, to vanish for about half the significant digits.\n\nSee: Wasserstein metric.\n\nSee also: powerMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = wasMean(Pset) # or: G, iter, conv = wasMean(𝐏)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = wasMean(Pset; w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = wasMean(Pset; w=weights, verbose=true)\n\n# suppose 𝐏 has changed a bit; initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = wasMean(Pset; w=weights, verbose=true, init=G)\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(wasMean(Pset; ⏩=false)) # single-threaded\n@benchmark(wasMean(Pset)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.powerMean","page":"riemannianGeometry.jl","title":"PosDefManifold.powerMean","text":" powerMean(𝐏::Union{ℍVector, 𝔻Vector}, p::Real;\n <\n w::Vector=[],\n ✓w=true,\n init=nothing,\n tol::Real=0.,\n maxiter::Int=500,\n verbose=false,\n ⏩=true >)\n\nGiven a 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type or real positive definite diagonal matrices of 𝔻Vector type, an optional non-negative real weights vector w=w_1w_k and a real parameter p∈[-1, 1], return the 3-tuple (G, iter, conv), where G is Lim and Palfia (2012)'s power mean of order p and iter, conv are the number of iterations and convergence attained by the algorithm, respectively. Mean G is the unique positive definite matrix satisfying\n\nG=sum_i=1^k(w_iG♯_pP_i),\n\nwhere G♯_pP_i denotes the Fisher geodesic equation. In particular:\n\nwith p=-1 this is the harmonic mean (see the inverse Euclidean metric),\nwith p=+1 this is the arithmetic mean (see the Euclidean metric),\nat the limit of p evaluated at zero from both sides this is the geometric mean (see Fisher metric).\n\nFor estimating power means for p∈(-1, 1), this function implements the fixed-point iterative algorithm of (Congedo et al., 2017b)🎓. For p=0 (geometric mean) this algorithm is run twice, with a small positive and a small negative value of p, and the geometric mean of the two resulting means is returned, as suggested in (Congedo et al., 2017b)🎓. This way of estimating the geometric mean of a set of matrices is faster than the usual gradient descent algorithm.\n\nIf you don't pass a weight vector with w, return the unweighted power mean.\n\nIf ✓w=true (default), the weights are normalized so as to sum up to 1, otherwise they are used as they are passed and should be already normalized. This option is provided to allow calling this function repeatedly without normalizing the same weights vector each time.\n\nThe following are further optional keyword arguments:\n\ninit is a matrix to be used as initialization for the mean.
If no matrix is provided, the instance of generalized means with parameter p will be used.\ntol is the tolerance for the convergence (see below).\nmaxiter is the maximum number of iterations allowed.\nif verbose=true, the convergence attained at each iteration is printed and a warning is printed if convergence is not attained.\nif ⏩=true the iterations are multi-threaded.\n\nIf the input is a 1d array of k real positive definite diagonal matrices the solution is available in closed form as the generalized mean of order p, hence the init, tol and verbose arguments have no effect and the 3-tuple (G, 1, 0) is returned. See generalized means.\n\nnote: Nota Bene\nMulti-threading is automatically disabled if Julia is instructed to use only one thread. See Threads.In normal circumstances this algorithm converges monotonically. If the algorithm diverges and verbose is true a warning is printed indicating the iteration when this happened.tol defaults to the square root of Base.eps of the nearest real type of data input 𝐏. This corresponds to requiring the norm of the difference of the matrix solution over two successive iterations divided by the number of elements in the matrix to vanish for about half the significant digits.\n\n(2) Like in (1), but for a 1d array 𝐃=D_1D_k of k real positive definite diagonal matrices of 𝔻Vector type. In this case the solution is available in closed form, hence the init, tol and verbose arguments have no effect and the 3-tuple (G, 1, 0) is returned. See generalized means.\n\nSee: power means, generalized means, modified Bhattacharyya mean.\n\nSee also: generalizedMean, wasMean, logdet0Mean, mean.\n\nExamples\n\nusing LinearAlgebra, PosDefManifold\n# Generate a set of 4 random 3x3 SPD matrices\nPset=randP(3, 4) # or, using unicode: 𝐏=randP(3, 4)\n\n# unweighted mean\nG, iter, conv = powerMean(Pset, 0.5) # or G, iter, conv = powerMean(𝐏, 0.5)\n\n# weights vector, does not need to be normalized\nweights=[1, 2, 3, 1]\n\n# weighted mean\nG, iter, conv = powerMean(Pset, 0.5; w=weights)\n\n# print the convergence at all iterations\nG, iter, conv = powerMean(Pset, 0.5; w=weights, verbose=true)\n\n# suppose 𝐏 has changed a bit; initialize with G to hasten convergence\nPset[1]=ℍ(Pset[1]+(randP(3)/100))\nG, iter, conv = powerMean(Pset, 0.5; w=weights, verbose=true, init=G)\n\n# estimate how much you gain running the algorithm in multi-threaded mode\nusing BenchmarkTools\nPset=randP(20, 120)\n@benchmark(powerMean(Pset, 0.5; ⏩=false)) # single-threaded\n@benchmark(powerMean(Pset, 0.5)) # multi-threaded\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.inductiveMean","page":"riemannianGeometry.jl","title":"PosDefManifold.inductiveMean","text":"(1) inductiveMean(metric::Metric, 𝐏::ℍVector)\n\n(2) inductiveMean(metric::Metric, 𝐏::ℍVector, q::Int, Q::ℍ)\n\nalias: indMean\n\n(1) Compute the Fréchet mean of 1d array 𝐏=P_1P_k of k positive definite matrices of ℍVector type with a law of large numbers inductive procedure (Ho et al., 2013; Lim and Palfia, 2019; Massart et al., 2018)🎓, such as\n\nG_1=P_1\n\nG_i=γ(i^-1, G_(i-1), P_i), i=2…k\n\nwhere γ(i^-1, G_(i-1), P_i) is a step on the geodesic joining G_(i-1) to P_i with arclength i^-1, using the specified metric of type Metric::Enumerated type.\n\n(2) Like (1), but for the set of matrices 𝐐 ∪ 𝐏, where it is assumed knowledge only of the set 𝐏, the mean of 𝐐 (Hermitian matrix argument Q) and the number of matrices in 𝐐 (integer argument q).
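The recursion above can be written directly in terms of the geodesic function; the following minimal sketch (using only functions documented in this unit) reproduces method (1):\n\nusing PosDefManifold\n𝐏=randP(5, 20)\nG=𝐏[1]\nfor i=2:length(𝐏)\n    G=geodesic(Fisher, G, 𝐏[i], 1/i) # step toward P_i with arclength 1/i\nend\nG ≈ inductiveMean(Fisher, 𝐏) # -> true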
This method can be used, for example, for updating a block on-line algorithm, where 𝐏 is the incoming block, Q the previous mean estimation and q the cumulative number of matrices on which the mean has been computed on-line.\n\nFor Fréchet means that do not have a closed-form expression, this procedure features a computational complexity amounting to less than two iterations of gradient descent or fixed-point algorithms. This comes at the price of an approximation. In fact, the solution is not invariant to permutations of the matrices in array 𝐏 and convergence to the Fréchet mean for finite samples is not ensured (see Lim and Palfia, 2019; Massart et al., 2018)🎓.\n\nSince the inductive mean uses the geodesic function, it is not available for the Von Neumann metric.\n\nExamples\n\n# A set of 100 matrices for which we want to compute the mean\n𝐏=randP(10, 100)\n\n𝐏1=ℍVector(collect(𝐏[i] for i=1:50)) # first 50\n𝐏2=ℍVector(collect(𝐏[i] for i=51:100)) # last 50\n\n# inductive mean of the whole set 𝐏\nG=inductiveMean(Fisher, 𝐏)\n\n# mean using the usual gradient descent algorithm\nH, iter, conv=geometricMean(𝐏)\n\n# inductive mean of 𝐏 given only 𝐏2,\n# the number of matrices in 𝐏1 and the mean of 𝐏1\nG2=inductiveMean(Fisher, 𝐏2, length(𝐏1), mean(Fisher, 𝐏1))\n\n# average error\nnorm(G-H)/(dim(G, 1)^2)\nnorm(G2-H)/(dim(G, 1)^2)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.midrange","page":"riemannianGeometry.jl","title":"PosDefManifold.midrange","text":"midrange(metric::Metric, P::ℍ{T}, Q::ℍ{T}) where T<:RealOrComplex\n\nMidrange (average of extremal values) of positive definite matrices P and Q. Only the Fisher metric is supported, allowing the so-called geometric midrange. This has been defined in Mostajeran et al. (2019) 🎓 as\n\nP * Q = frac1sqrtlambda_(min)+sqrtlambda_(max)Big(Q+sqrtlambda_(min)*lambda_(max)PBig),\n\nwhere lambda_(min) and lambda_(max) are the extremal generalized eigenvalues of P and Q.\n\nExamples\n\nP=randP(3)\nQ=randP(3)\nM=midrange(Fisher, P, Q)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Tangent-Space-operations","page":"riemannianGeometry.jl","title":"Tangent Space operations","text":"","category":"section"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\nlogMap Logarithmic map (from manifold to tangent space)\nexpMap Exponential map (from tangent space to manifold)\nvecP vectorization of matrices in the tangent space\nmatP matrization of matrices in the tangent space (inverse of vecP)\nparallelTransport, pt Parallel transport of tangent vectors and matrices","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"logMap\r\nexpMap\r\nvecP\r\nmatP\r\nparallelTransport","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.logMap","page":"riemannianGeometry.jl","title":"PosDefManifold.logMap","text":"(1) logMap(metric::Metric, P::ℍ{T}, G::ℍ{T})\n\n(2) logMap(metric::Metric, 𝐏::ℍVector, G::ℍ{T})\nfor all the above: where T<:RealOrComplex\n\n(1) Logarithmic Map: map a positive definite matrix P from the SPD or Hermitian manifold into the tangent space at base-point G using the Fisher metric.\n\nP and G must be flagged as Hermitian.
See typecasting matrices.\n\nThe map is defined as\n\nLog_G(P)=S=G^12textrmlogbig(G^-12PG^-12big)G^12.\n\nmetric is a metric of type Metric::Enumerated type.\n\nThe result is an Hermitian matrix.\n\n(2) Logarithmic map (1) at base-point G at once for k positive definite matrices in 1d array 𝐏=P_1P_k of ℍVector type.\n\nThe result is an ℍVector.\n\nnote: Nota Bene\nCurrently only the Fisher metric is supported for tangent space operations.\n\nThe inverse operation is expMap.\n\nSee also: vecP, parallelTransport.\n\nExamples\n\nusing PosDefManifold\n(1)\nP=randP(3)\nQ=randP(3)\nmetric=Fisher\nG=mean(metric, P, Q)\n# projecting P at the base point given by the geometric mean of P and Q\nS=logMap(metric, P, G)\n\n(2)\nPset=randP(3, 4)\n# projecting all matrices in Pset at the base point given by their geometric mean.\nSset=logMap(Fisher, Pset, mean(Fisher, Pset))\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.expMap","page":"riemannianGeometry.jl","title":"PosDefManifold.expMap","text":"(1) expMap(metric::Metric, S::ℍ{T}, G::ℍ{T})\n\n(2) expMap(metric::Metric, 𝐒::ℍVector, G::ℍ{T})\nfor all the above: where T<:RealOrComplex\n\n(1) Exponential Map: map a tangent vector (a matrix) S from the tangent space at base-point G into the SPD or Hermitian manifold (using the Fisher metric).\n\nS and G must be flagged as Hermitian. See typecasting matrices.\n\nThe map is defined as\n\nExp_G(S)=P=G^12textrmexpbig(G^-12SG^-12big)G^12.\n\nmetric is a metric of type Metric::Enumerated type.\n\nThe result is an Hermitian matrix.\n\n(2) Exponential map (1) at base-point G at once for k tangent vectors (matrices) in 1d array 𝐒=S_1S_k of ℍVector type.\n\nThe result is an ℍVector.\n\nnote: Nota Bene\nCurrently only the Fisher metric is supported for tangent space operations.\n\nThe inverse operation is logMap.\n\nExamples\n\n(1)\nusing PosDefManifold, LinearAlgebra\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# projecting P on the tangent space at the Fisher mean base point G\nS=logMap(Fisher, P, G)\n# projecting back onto the manifold\nP2=expMap(Fisher, S, G)\n\n(2)\nPset=randP(3, 4)\n# projecting all matrices in Pset at the base point given by their geometric mean.\nG=mean(Fisher, Pset)\nSset=logMap(Fisher, Pset, G)\n# projecting back onto the manifold\nPset2=expMap(Fisher, Sset, G)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.vecP","page":"riemannianGeometry.jl","title":"PosDefManifold.vecP","text":"vecP(S::Union{ℍ{T}, Symmetric{R}};\n range::UnitRange=1:size(S, 2)) where T<:RealOrComplex where R<:Real =\n\nVectorize a tangent vector (which is an Hermitian or Symmetric matrix) S: mat ↦ vec.\n\nIt gives weight 1 to diagonal elements and √2 to off-diagonal elements so as to preserve the norm (Barachant et al., 2012)🎓, such as\n\n‖S‖_F=‖vecP(S)‖_F.\n\nThe result is a vector holding n(n+1)/2 elements, where n is the size of S.\n\nS must be flagged as Hermitian or Symmetric. See typecasting matrices.\n\nThe reverse operation is provided by matP, which always returns an Hermitian matrix.\n\nIf an optional keyword argument range is provided, the vectorization concerns only the rows (or columns, since the input matrix is symmetric or Hermitian) in the range.
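As a quick check of the norm-preserving weighting stated above, consider this minimal sketch (randomly generated matrices; only functions documented in this unit are used):\n\nusing LinearAlgebra, PosDefManifold\nP=randP(3)\nQ=randP(3)\nS=logMap(Fisher, P, mean(Fisher, P, Q))\nnorm(S) ≈ norm(vecP(S)) # Frobenius norms coincide -> true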
Note that when a range is specified the operation cannot be reverted by matP, that is, in this case the matrix is 'stuck' in the tangent space.\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# projecting P at the base point given by the geometric mean of P and Q\nS=logMap(Fisher, P, G)\n# vectorize S\nv=vecP(S)\n# vectorize only the first two columns of S\nv=vecP(S; range=1:2)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.matP","page":"riemannianGeometry.jl","title":"PosDefManifold.matP","text":"matP(ς::Vector{T}) where T<:RealOrComplex\n\nMatricize a tangent vector (vector) ς: vec ↦ mat.\n\nThis is the function reversing the vecP function, thus the weighting applied therein is reversed as well.\n\nIf ς=vecP(S) and S is an n⋅n Hermitian or Symmetric matrix, ς is a tangent vector of size n(n+1)/2. The result of calling matP(ς) is then the n⋅n matrix S. S is always returned flagged as Hermitian.\n\nTo Do: This function may be rewritten more efficiently.\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# projecting P onto the tangent space at the Fisher mean base point\nS=logMap(Fisher, P, G)\n# vectorize S\nv=vecP(S)\n# Rotate the vector by an orthogonal matrix\nn=Int(size(S, 1)*(size(S, 1)+1)/2)\nU=randU(n)\nz=U*v\n# Get the point in the tangent space\nS=matP(z)\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#PosDefManifold.parallelTransport","page":"riemannianGeometry.jl","title":"PosDefManifold.parallelTransport","text":"(1) parallelTransport(S::ℍ{T}, P::ℍ{T}, Q::ℍ{T})\n\n(2) parallelTransport(S::ℍ{T}, P::ℍ{T})\n\n(3) parallelTransport(𝐒::ℍVector, P::ℍ{T}, Q::ℍ{T})\n\n(4) parallelTransport(𝐒::ℍVector, P::ℍ{T})\nfor all the above: where T<:RealOrComplex\n\nalias: pt\n\n(1) Parallel transport of tangent vector S (a matrix) lying on the tangent space at base-point P to the tangent space at base-point Q.\n\nS, P and Q must all be Hermitian matrices. Return an Hermitian matrix. The transport is defined as:\n\n_(PQ)(S)=big(QP^-1big)^12Sbig(QP^-1big)^H2.\n\nIf S is a positive definite matrix in the manifold (and not a tangent vector) it will be 'transported' from P to Q, amounting to (Yair et al., 2019🎓)\n\nproject S onto the tangent space at base-point P,\nparallel transport it to the tangent space at base-point Q,\nproject it back onto the manifold at base-point Q.\n\n(2) Parallel transport as in (1), but to the tangent space at base-point the identity matrix.\n\nThe transport reduces in this case to:\n\n_(PI)(S)=P^-12SP^-12.\n\n(3) Parallel transport as in (1) at once for k tangent vectors (matrices) in 1d array 𝐒=S_1S_k of ℍVector type.\n\n(4) Parallel transport as in (2) at once for k tangent vectors (matrices) in 1d array 𝐒=S_1S_k of ℍVector type.\n\nnote: Nota Bene\nCurrently only the Fisher metric is supported for parallel transport.\n\nSee also: logMap, expMap, vecP, matP.\n\nExamples\n\nusing PosDefManifold\n\n(1)\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n\n# i. projecting P onto the tangent space at base-point G\nS=logMap(Fisher, P, G)\n# ii. parallel transport S to the tangent space at base-point Q\nS_=parallelTransport(S, G, Q)\n# iii. projecting back into the manifold at base-point Q\nP_=expMap(Fisher, S_, Q)\n\n# i., ii. and iii. can be done simply by\nPP_=parallelTransport(P, G, Q)\n# check\nP_≈PP_ ? 
println(\" ⭐ \") : println(\" ⛔ \")\n\n(2)\nP=randP(3)\nQ=randP(3)\nG=mean(Fisher, P, Q)\n# transport to the tangent space at base-point the identity\nPP_=parallelTransport(P, G)\n\n(3)\nPset=randP(3, 4)\nQ=randP(3)\nG=mean(Fisher, Pset)\n# transport at once all matrices in Pset\nPset2=parallelTransport(Pset, G, Q)\n\n(4)\nPset=randP(3, 4)\nG=mean(Fisher, Pset)\n# recenter all matrices so as to have mean=I\nPset2=parallelTransport(Pset, G)\n# check\nmean(Fisher, Pset2) ≈ I ? println(\" ⭐ \") : println(\" ⛔ \")\n\n\n\n\n\n","category":"function"},{"location":"riemannianGeometry/#Procrustes-problems","page":"riemannianGeometry.jl","title":"Procrustes problems","text":"","category":"section"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"Function Description\nprocrustes Solution to the Procrustes problem in the manifold of positive definite matrices","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"⋅","category":"page"},{"location":"riemannianGeometry/","page":"riemannianGeometry.jl","title":"riemannianGeometry.jl","text":"procrustes","category":"page"},{"location":"riemannianGeometry/#PosDefManifold.procrustes","page":"riemannianGeometry.jl","title":"PosDefManifold.procrustes","text":"procrustes(P::ℍ{T}, Q::ℍ{T}, extremum=\"min\") where T<:RealOrComplex\n\nGiven two positive definite matrices P and Q, return by default the solution of problem\n\ntextrmargmin_Uδ(PU^HQU),\n\nwhere U varies over the set of unitary matrices and δ() is a distance or divergence function.\n\nU^HQU is named in physics the unitary orbit of Q.\n\nIf the argument extremum is passed as \"max\", it returns instead the solution of\n\ntextrmargmax_Uδ(PU^HQU).\n\nP and Q must be flagged as Hermitian. See typecasting matrices.\n\nAs shown in Bhatia and Congedo (2019)🎓, using each of the Fisher, logdet zero, Wasserstein and the Kullback-Leibler divergence (see logdet α), the best approximant to P from the unitary orbit of Q commutes with P and, surprisingly, has the same closed-form expression, namely\n\nU_Q^↓U_P^↓H for the argmin and U_Q^↑U_P^↓H for the argmax,\n\nwhere U^↓ denotes the eigenvector matrix of the subscript argument with eigenvectors in columns sorted by decreasing order of corresponding eigenvalues and U^↑ denotes the eigenvector matrix of the subscript argument with eigenvectors in columns sorted by increasing order of corresponding eigenvalues.\n\nThe same solutions have long been known from solving the extremal problem above using the Euclidean metric (Umeyama, 1988).\n\nThe generalized Procrustes problem\n\ntextrmargmin_Usum_i=1^kδ(P_iU^HQ_iU)\n\ncan be solved using Julia package Manopt.\n\nExamples\n\nusing PosDefManifold\nP=randP(3)\nQ=randP(3)\n# argmin problem\nU=procrustes(P, Q)\n# argmax problem\nV=procrustes(P, Q, \"max\")\n\n\n\n\n\n","category":"function"},{"location":"test/#test.jl","page":"test.jl","title":"test.jl","text":"","category":"section"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Most functions in PosDefManifold are tested, both for real and complex data input. This unit declares the function testall() that performs all tests.","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Some functions are fully tested, while the others are just executed.
Once you run it, for each method of each function a ⭐ sign is printed if the test is successful, while a ⛔ sign is printed if the test is not successful. A ☆ sign is printed if the function has been executed correctly.","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Tests on functions for which a multi-threaded version exists are indicated by the symbol ( ⏩ ).","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"If there are failures, the concerned functions will be listed as warnings.","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Note that the first time you execute the test it will take some time as the code will be compiled.","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Here below is the output of the testall() function (v0.1.3) run on the 20th of May 2019:","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"⭐ PosDefManifold testing utility⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Starting tests...","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Unit 'linearAlgebra.jl'","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"typeofMatrix: ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"dim: ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"det1: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"function tr1: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"normalizeCol!: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"ispos: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"colProd: ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"colNorm: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"sumOfSqr: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"sumOfSqrDiag: ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"sumOfSqrTril: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"tr: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"quadraticForm: ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"fidelity: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"fDiag: ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"DiagOfProd: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"mgs: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"fVec: ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"evd: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"spectralFunctions: ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"pow: ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"invsqrt: ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"sqr: ⭐ ⭐ 
⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"powerIterations: ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"choL: ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Unit 'signalProcessing.jl'","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"randλ: ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"randΛ: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"randU: ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"randP: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"regularize!: ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"gram: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"trade: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Unit 'riemannianGeometry.jl'","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"geodesic: ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceSqr (I): ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceSqr (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceSqr (III): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distance (I): ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distance (II): ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceSqrMat (I): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceSqrMat (I ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceSqrMat (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceSqrMat (II ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceMat (I): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceMat (I ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceMat (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"distanceMat (II ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"laplacian: ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"laplacianEigenMaps: ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"spectralEmbedding: ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"mean (I): ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"mean (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"mean (⏩ ): ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"means: ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"means (⏩ ): ☆ ☆ 
☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"generalizedMean: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"generalizedMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"geometricMean: ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"geometricMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"logdet0Mean: ⭐ ⭐ ⭐ ⭐ ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"logdet0Mean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"wasMean: ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"wasMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"powerMean: ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"powerMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"logMap: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"expMap: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"vecP: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"matP: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"procrustes: ☆ ☆","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"Unit 'classification.jl'","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"softmax: ⭐","category":"page"},{"location":"test/","page":"test.jl","title":"test.jl","text":"[ Info: All tests were succesful!","category":"page"},{"location":"MainModule/#MainModule-(PosDefManifold.jl)","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the main unit containing the PosDefManifold module.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"It uses the following standard Julia packages:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"using\nLinearAlgebra\nStatistics","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Examples in some units of PosDefManifold also uses the Plots package. 
Take a look at this tutorial for an introduction to data plotting with Julia.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"The main module does not contain functions, but it declares all constants, types and aliases of Julia functions used in all units.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Contents\nconstants\naliases\ntypes\ntips & tricks","category":"page"},{"location":"MainModule/#constants","page":"MainModule (PosDefManifold.jl)","title":"constants","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"constant value numeric value\nsqrt2 √2 1.4142135623730951\nsqrt2inv 1/√2 0.7071067811865475\ngolden (√5+1)/2 1.618033988749...\ngoldeninv (√5-1)/2 0.618033988749...\nmaxpos 1e15 1000000000000000","category":"page"},{"location":"MainModule/#aliases","page":"MainModule (PosDefManifold.jl)","title":"aliases","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"alias Julia function in Package tab-completion REPL support\n𝚺 sum Base \bfSigma ⛔\n𝛍 mean Statistics \bfmu ⛔\n𝕄 Matrix Base \bbM ⛔\n𝔻 Diagonal LinearAlgebra \bbD ⛔\nℍ Hermitian LinearAlgebra \bbH ✓\n𝕃 LowerTriangular LinearAlgebra \bbL ⛔","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"All packages above are built-in Julia packages.","category":"page"},{"location":"MainModule/#types","page":"MainModule (PosDefManifold.jl)","title":"types","text":"","category":"section"},{"location":"MainModule/#Metric::Enumerated-type","page":"MainModule (PosDefManifold.jl)","title":"Metric::Enumerated type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"@enum Metric begin\r\n Euclidean =1\r\n invEuclidean =2\r\n ChoEuclidean =3\r\n logEuclidean =4\r\n LogCholesky =5\r\n Fisher =6\r\n logdet0 =7\r\n Jeffrey =8\r\n VonNeumann =9\r\n Wasserstein =10\r\nend","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Riemannian manipulations are defined for a given metric (see metrics).
An instance of this type is requested as an argument in many functions contained in the riemannianGeometry.jl unit in order to specify the metric, for example:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":" # generate a 15x15 symmetric positive definite matrix\r\n P=randP(15)\r\n # distance from P to the identity matrix according to the logdet0 metric\r\n d=distance(logdet0, P)","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"If you want to work consistently with a specific metric, you may want to declare in your script a global variable such as","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"global metric=logdet0 or global metric=Metric(Int(logdet0)),","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"and then pass metric as argument in all your computations, e.g., referring to the above example,","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"d=distance(metric, P).","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"To know what the current metric is, you can get it as a string using:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"s=string(metric)","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"To see the list of metrics in type Metric use:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"instances(Metric)","category":"page"},{"location":"MainModule/#Array-of-Matrices-types","page":"MainModule (PosDefManifold.jl)","title":"Array of Matrices types","text":"","category":"section"},{"location":"MainModule/#𝕄Vector-type","page":"MainModule (PosDefManifold.jl)","title":"𝕄Vector type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕄Vector=Vector{𝕄}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of general Matrix matrices, alias of MatrixVector. Julia sees it as: Array{Array{T,2} where T,1}.
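For instance, a minimal sketch constructing such an object (the variable names are illustrative):\n\nusing PosDefManifold\nA=randn(3, 3)\nB=randn(3, 3)\n𝐌=𝕄Vector([A, B]) # a vector holding two generic matrices of the same dimension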
See aliases for the 𝕄 symbol and typecasting matrices for the use of matrices in PosDefManifold.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕄Vector₂ type","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕄Vector₂=Vector{𝕄Vector}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of 𝕄Vector type objects, i.e., a vector of vectors of matrices. It is the alias of MatrixVector₂. Julia sees it as: Array{Array{Array{T,2} where T,1},1}.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝕄Vector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#𝔻Vector-type","page":"MainModule (PosDefManifold.jl)","title":"𝔻Vector type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝔻Vector=Vector{𝔻}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of Diagonal matrices, alias of DiagonalVector. Julia sees it as: Array{Diagonal,1}. See aliases for the 𝔻 symbol and typecasting matrices for the use of Diagonal matrices in PosDefManifold.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝔻Vector₂ type","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝔻Vector₂=Vector{𝔻Vector}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of 𝔻Vector type objects, i.e., a vector of vectors of Diagonal matrices. It is the alias of DiagonalVector₂. 
Julia sees it as: Array{Array{Diagonal,1},1}.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝔻Vector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#𝕃Vector-type","page":"MainModule (PosDefManifold.jl)","title":"𝕃Vector type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕃Vector=Vector{𝕃}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of LowerTriangular matrices, alias of LowerTriangularVector. Julia sees it as: Array{LowerTriangular,1}. See aliases for the 𝕃 symbol and typecasting matrices for the use of LowerTriangular matrices in PosDefManifold.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕃Vector₂ type","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"𝕃Vector₂=Vector{𝕃Vector}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of 𝕃Vector type objects, i.e., a vector of vectors of LowerTriangular matrices. It is the alias of LowerTriangularVector₂. Julia sees it as: Array{Array{LowerTriangular,1},1}.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several 𝕃Vector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#ℍVector-type","page":"MainModule (PosDefManifold.jl)","title":"ℍVector type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"ℍVector=Vector{ℍ}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of Hermitian matrices, alias of HermitianVector. 
Julia sees it as: Array{Hermitian,1}. See aliases for the ℍ symbol and typecasting matrices for the use of Hermitian matrices in PosDefManifold.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"ℍVector₂ type","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"ℍVector₂=Vector{ℍVector}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is a vector of ℍVector type objects, i.e., a vector of vectors of Hermitian matrices. It is the alias of HermitianVector₂. Julia sees it as: Array{Array{Hermitian,1},1}.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"warning: Nota bene\nThis object is meant to hold matrices living in the same manifold, therefore it is assumed by all methods that all matrices it holds are of the same dimension. However the several ℍVector objects it holds do not need to have the same length.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See dim, typeofMatrix","category":"page"},{"location":"MainModule/#RealOrComplex-type","page":"MainModule (PosDefManifold.jl)","title":"RealOrComplex type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"RealOrComplex=Union{Real, Complex}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of Real and Complex types.","category":"page"},{"location":"MainModule/#AnyMatrix-type","page":"MainModule (PosDefManifold.jl)","title":"AnyMatrix type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrix=Union{𝔻{T}, 𝕃{T}, ℍ{T}, 𝕄{T}} where T<:RealOrComplex","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of real or complex Diagonal, LowerTriangular, Hermitian and Matrix types.
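It is often used in the definition of functions; for example, a single method can cover all four matrix types, as in this minimal sketch (myTrace is an illustrative name, not part of the package):\n\nusing LinearAlgebra, PosDefManifold\nmyTrace(X::AnyMatrix)=tr(X) # one method accepting 𝔻, 𝕃, ℍ and 𝕄 matrices\nmyTrace(randP(3)) # works on an Hermitian positive definite matrix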
It is often used in the definition of functions.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See aliases","category":"page"},{"location":"MainModule/#AnyMatrixVector-type","page":"MainModule (PosDefManifold.jl)","title":"AnyMatrixVector type","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrixVector=Union{𝕄Vector, 𝔻Vector, 𝕃Vector, ℍVector}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of 𝕄Vector, 𝔻Vector, 𝕃Vector and ℍVector. It is often used in the definition of functions. See Array of Matrices types.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrixVector₂ type","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"AnyMatrixVector₂=Union{𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂, ℍVector₂}","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"This is the Union of 𝕄Vector₂, 𝔻Vector₂, 𝕃Vector₂ and ℍVector₂. It is often used in the definition of functions. See Array of Matrices types.","category":"page"},
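{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"For example, here is a minimal sketch (the function sumOfSq is hypothetical, named here only for illustration) of how these Union types allow one method to cover several matrix types at once:\n\nusing LinearAlgebra, PosDefManifold\r\nsumOfSq(X::AnyMatrix) = sum(abs2, X) # one method for 𝔻, 𝕃, ℍ and 𝕄 arguments\r\nsumOfSq(Diagonal([1.0, 2.0])) # 5.0\r\nsumOfSq(ℍ([2.0 1.0; 1.0 2.0])) # 10.0","category":"page"},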
{"location":"MainModule/#tips-and-tricks","page":"MainModule (PosDefManifold.jl)","title":"tips & tricks","text":"","category":"section"},{"location":"MainModule/#typecasting-matrices","page":"MainModule (PosDefManifold.jl)","title":"typecasting matrices","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Several functions in PosDefManifold implement multiple dispatch and can handle several kinds of matrices as input; however, the core functions for manipulating objects on the Riemannian manifold of positive definite matrices act by definition on positive definite matrices only. Those matrices must therefore be either symmetric positive definite (SPD, real) or Hermitian positive definite (HPD, complex). Such matrices are uniformly identified in PosDefManifold as being of the Hermitian type, using the standard LinearAlgebra package. The alias ℍ is used consistently in the code (see aliases). If the input is not flagged as Hermitian, the functions restricting the input to positive definite matrices will not be accessible.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Example","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"julia> using LinearAlgebra\r\n\njulia> f(S::Hermitian)=S*S'\r\nf (generic function with 1 method)\r\n\njulia> A=randn(3, 3)\r\n3×3 Array{Float64,2}:\r\n -0.67407 -0.344258 0.203714\r\n -1.06551 -0.0233796 0.975465\r\n -1.04727 -1.19807 -0.0219121\r\n\njulia> H=A*A' # although SPD, H is not automatically flagged as Hermitian\r\n3×3 Array{Float64,2}:\r\n 0.614384 0.924991 1.11391\r\n 0.924991 2.08738 1.12251\r\n 1.11391 1.12251 2.53263\r\n\njulia> f(H)\r\nERROR: MethodError: no method matching f(::Array{Float64,2})\r\nClosest candidates are:\r\n f(::Hermitian) at none:1","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"If you have constructed a positive definite matrix and it is not flagged as Hermitian, you can flag it simply by typecasting it, that is, passing Hermitian(P) instead of just P as argument to the functions. The ℍ alias can be used for short, i.e., ℍ(P). Continuing the example above:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"julia> f(ℍ(H)) # this way it works, equivalent to f(Hermitian(H))\r\n3×3 Array{Float64,2}:\r\n 2.47388 3.74948 4.54381\r\n 3.74948 6.4728 6.21635\r\n 4.54381 6.21635 8.91504","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Be careful: Hermitian(P) will construct an Hermitian matrix from the argument. If the matrix argument is not symmetric (if real) or Hermitian (if complex), it will be made so by copying the transpose (if real) or complex conjugate and transpose (if complex) of a triangular part into the other. See Hermitian.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"If you want to construct an ℍVector type from, say, two Hermitian matrices P and Q, don't write A=[P, Q], but rather A=ℍVector([P, Q]). In fact, the first is seen by Julia as","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"2-element Array{Hermitian{Float64,Array{Float64,2}},1},","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"while the latter as","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"2-element Array{Hermitian,1},","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"which is the type expected in all functions taking an ℍVector type as argument.","category":"page"},
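{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Putting it together, a minimal sketch (assuming only the ℍ and ℍVector aliases documented above):\n\nusing LinearAlgebra, PosDefManifold\r\nA, B = randn(3, 3), randn(3, 3)\r\nP, Q = ℍ(A*A'), ℍ(B*B') # flag the two SPD matrices as Hermitian\r\n𝐏 = ℍVector([P, Q]) # seen by Julia as Array{Hermitian,1}","category":"page"},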
{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Other functions act on generic matrices (of type Matrix). This is seen by Julia as Array{T,2} where T. Keep in mind that the functions writing on the argument matrix, such as normalizeCol!, will give an error if you pass an Hermitian matrix, since Julia does not allow writing on non-diagonal elements of those matrices. In this case typecast it into another object using the Matrix type; supposing H is Hermitian, you would use, for example:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"julia> X=Matrix(H)\r\njulia> normalizeCol!(X, 1)\r\njulia> norm(X[:, 1])\r\n1.0","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Some more examples:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Typecasting Adjoint matrices:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Matrix(X')","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"here is how to get an Hermitian matrix out of the diagonal part of an Hermitian matrix H:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Hermitian(Matrix(Diagonal(H)))","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"here is how to get a LowerTriangular matrix out of an Hermitian matrix H:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"LowerTriangular(Matrix(H))","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"For example, you can use this to pass a full inter-distance matrix to the laplacian function to obtain the Laplacian matrix.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"A useful function is typeofMatrix. For example, the following line typecasts matrix M to the type of matrix P and puts the result in A:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"A=typeofMatrix(P)(M)","category":"page"},{"location":"MainModule/#Threads","page":"MainModule (PosDefManifold.jl)","title":"Threads","text":"","category":"section"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Some functions in PosDefManifold explicitly call BLAS routines for optimal performance. This is reported in the help section of the concerned functions. Most functions call BLAS routines implicitly via Julia. You can set the number of threads the BLAS library should use by:","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"using LinearAlgebra\r\nBLAS.set_num_threads(n)","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"where n is the number of threads. By default, PosDefManifold reserves to BLAS all CPU threads available on your computer (given by the output of Sys.CPU_THREADS). The number of threads used by Julia for multi-threaded computations is given by the output of function Threads.nthreads(). In Windows, this latter number of threads is set to half the available threads. In Linux and OSX, it defaults to one and is controlled by an environment variable, i.e.,","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"export JULIA_NUM_THREADS=4.","category":"page"},
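{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"For example, a quick check in a running session (a sketch using only standard Julia calls):\n\nusing LinearAlgebra\r\nSys.CPU_THREADS # logical CPU threads available on this computer\r\nBLAS.set_num_threads(Sys.CPU_THREADS) # give BLAS all of them, as PosDefManifold does by default\r\nThreads.nthreads() # number of threads Julia itself has been started with","category":"page"},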
{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"In Linux, working with the Atom IDE, you also have to set the field found in Atom under Settings (or Preferences)/julia-client/Settings/Julia Options/Number of Threads to global.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"In Windows, set the desired number of threads in the settings of the julia-client Juno package.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"See, for example, this post, this post and the Julia documentation on threads.","category":"page"},{"location":"MainModule/","page":"MainModule (PosDefManifold.jl)","title":"MainModule (PosDefManifold.jl)","text":"Notice that PosDefManifold features many multi-threaded functions and these may allow a gain in computation time only if Julia is instructed to use at least two threads.","category":"page"},{"location":"#PosDefManifold-Documentation","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"","category":"section"},{"location":"#Requirements","page":"PosDefManifold Documentation","title":"Requirements","text":"","category":"section"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Julia version ≥ 1.3","category":"page"},{"location":"#Installation","page":"PosDefManifold Documentation","title":"Installation","text":"","category":"section"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Execute the following command in Julia's REPL:","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"]add PosDefManifold","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"To obtain the latest development version, execute instead","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"]add PosDefManifold#master","category":"page"},{"location":"#About-the-Author","page":"PosDefManifold Documentation","title":"About the Author","text":"","category":"section"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Marco Congedo is a Research Director of CNRS (Centre National de la Recherche Scientifique), working in Grenoble, France.","category":"page"},{"location":"#Overview","page":"PosDefManifold Documentation","title":"Overview","text":"","category":"section"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"(Image: Figure 1)","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Riemannian geometry studies smooth manifolds, multi-dimensional curved spaces with peculiar geometries endowed with non-Euclidean metrics.
In these spaces Riemannian geometry allows the definition of angles, geodesics (the shortest path between two points), distances between points, centers of mass of several points, etc.","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"In this package we are concerned with the manifold P of positive definite matrices, either symmetric positive definite or Hermitian positive definite.","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"In several fields of research such as computer vision and brain-computer interface, treating data in the P manifold has allowed the introduction of machine learning approaches with remarkable characteristics, such as simplicity of use, excellent classification accuracy (as demonstrated by the winning score obtained in six international data classification competitions) and the ability to operate transfer learning (Congedo et al., 2017)🎓.","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"For a formal introduction to the P manifold the reader is referred to the monograph written by Bhatia (2007)🎓.","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"For an introduction to Riemannian geometry and an overview of mathematical tools implemented in this package, see Intro to Riemannian Geometry in this documentation.","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"To start using this package, browse the code units listed below and execute the many code examples you will find therein.
The core functions are contained in unit riemannianGeometry.jl.","category":"page"},{"location":"#Code-units","page":"PosDefManifold Documentation","title":"Code units","text":"","category":"section"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"PosDefManifold includes six code units (.jl files):","category":"page"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Unit Description\nMainModule (PosDefManifold.jl) Main module, constants, types, aliases, tips & tricks\nriemannianGeometry.jl The fundamental unit collecting all functions acting on the P manifold\nlinearAlgebra.jl Collection of linear algebra routines\nstatistics.jl Collection of statistics routines\nsignalProcessing.jl Collection of signal processing routines\ntest.jl Unit performing all tests","category":"page"},{"location":"#Contents","page":"PosDefManifold Documentation","title":"Contents","text":"","category":"section"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"Pages = [ \"index.md\",\r\n \"introToRiemannianGeometry.md\",\r\n \"MainModule.md\",\r\n \"riemannianGeometry.md\",\r\n \"linearAlgebra.md\",\r\n \"statistics.md\",\r\n \"signalProcessing.md\",\r\n \"test.md\"]\r\nDepth = 1","category":"page"},{"location":"#Index","page":"PosDefManifold Documentation","title":"Index","text":"","category":"section"},{"location":"","page":"PosDefManifold Documentation","title":"PosDefManifold Documentation","text":"","category":"page"},{"location":"statistics/#statistics.jl","page":"statistics.jl","title":"statistics.jl","text":"","category":"section"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"Unit for statistics, probability and related functions.","category":"page"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"Category Output\n1. Probability functions relating to probability\n2. 
Descriptive Statistics functions relating to descriptive statistics","category":"page"},{"location":"statistics/#Probability","page":"statistics.jl","title":"Probability","text":"","category":"section"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"Function Description\nsoftmax compute softmax probabilities","category":"page"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"softmax","category":"page"},{"location":"statistics/#PosDefManifold.softmax","page":"statistics.jl","title":"PosDefManifold.softmax","text":"softmax(χ::Vector{T}) where T<:Real\n\nGiven a real vector of k non-negative scores χ=(c_1,…,c_k), return the vector π=(p_1,…,p_k) of their softmax probabilities, as per\n\np_i=\frac{e^{c_i}}{\sum_{i=1}^{k}e^{c_i}}.\n\nExamples\n\nχ=[1.0, 2.3, 0.4, 5.0]\nπ=softmax(χ)\n\n\n\n\n\n","category":"function"},{"location":"statistics/#Descriptive-Statistics","page":"statistics.jl","title":"Descriptive Statistics","text":"","category":"section"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"Function Description\nmean scalar mean of real or complex numbers according to the specified metric\nstd scalar standard deviation of real or complex numbers according to the specified metric","category":"page"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"mean(metric::Metric, ν::Vector{T}) where T<:RealOrComplex","category":"page"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"See bottom of documentation of general function mean","category":"page"},{"location":"statistics/","page":"statistics.jl","title":"statistics.jl","text":"std","category":"page"},{"location":"statistics/#Statistics.std","page":"statistics.jl","title":"Statistics.std","text":"std(metric::Metric, ν::Vector{T};\n corrected::Bool=true,\n mean=nothing) where T<:RealOrComplex\n\nStandard deviation of k real or complex scalars, using the specified metric of type Metric::Enumerated type and the specified mean if provided.\n\nOnly the Euclidean and Fisher metrics are supported by this function. Using the Euclidean metric returns the output of the standard Julia std function.
Using the Fisher metric returns the scalar geometric standard deviation, which is defined as\n\nσ=\textrm{exp}\Big(\sqrt{k^{-1}\sum_{i=1}^{k}\textrm{ln}^2(v_i/μ)}\Big).\n\nIf corrected is true, then the sum is scaled with k-1, whereas if it is false the sum is scaled with k.\n\nExamples\n\nusing PosDefManifold\n# Generate 10 random numbers distributed as a chi-square with 2 df.\nν=[randχ²(2) for i=1:10]\narithmetic_sd=std(Euclidean, ν) # mean not provided\ngeometric_mean=mean(Fisher, ν)\ngeometric_sd=std(Fisher, ν, mean=geometric_mean) # mean provided\n\n\n\n\n\n","category":"function"},{"location":"introToRiemannianGeometry/#Intro-to-Riemannian-Geometry","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The study of appropriate distance measures for positive definite matrices has recently grown very fast, driven by practical problems in radar data processing, image processing, computer vision, shape analysis, medical imaging (especially diffusion MRI and Brain-Computer Interface), sensor networks, elasticity, mechanics, numerical analysis and machine learning (e.g., see references in Congedo et al., 2017a)🎓.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In many applications the observed data can be conveniently summarized by positive definite matrices, which are either symmetric positive definite (SPD: real) or Hermitian Positive Definite (HPD: complex). For example, those may be some form of the data covariance matrix in the time, frequency or time-frequency domain, or autocorrelation matrices, kernels, slices of tensors, density matrices, elements of a search space, etc. Positive definite matrices are naturally treated as points on a smooth Riemannian manifold allowing useful operations such as interpolation, smoothing, filtering, approximation, averaging, signal detection and classification. Such operations are the object of the present PosDefManifold library.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"More formally, this Julia library treats operations on the metric space (P, δ^2) of n⋅n positive definite matrices endowed with a distance or symmetric divergence δ(P × P)→[0, ∞]. Several matrix distances or matrix divergences δ are considered. Using some of them, the most important one being the Fisher metric, we define a Riemannian manifold. In mathematics, this is the subject of Riemannian geometry and information geometry.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Note that throughout this library the word 'metric' is used loosely for referring to the actual Riemannian metric on the tangent space and to the resulting distance or to general symmetric divergence acting on P, regardless of the fact that we are dealing with a metric in the strict sense and of whether or not it induces a Riemannian geometry in P.
This is done for convenience of exposition, since in practice those 'metrics' in PosDefManifold may be used interchangeably.","category":"page"},{"location":"introToRiemannianGeometry/#Riemannian-manifolds","page":"Intro to Riemannian Geometry","title":"Riemannian manifolds","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Here are some important definitions:","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A smooth manifold in differential geometry is a topological space that is locally similar to the Euclidean space and has a globally defined differential structure.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The tangent space at point G is the vector space containing the tangent vectors to all curves on the manifold passing through G (Fig. 1).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A smooth Riemannian manifold is equipped with an inner product on the tangent space (a Riemannian metric) defined at each point and varying smoothly from point to point. For manifold P the tangent space is the space of symmetric or Hermitian matrices.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Thus, a Riemannian metric turns the metric space (P, δ^2) into a Riemannian manifold. This is the case, for example, of the Fisher metric, which has a fundamental role in the manifolds of positive definite matrices, and of the Wasserstein metric, fundamental in optimal transport theory.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"(Image: Figure 1) Figure 1. Schematic illustration of the Riemannian manifold of positive definite matrices. Left: geodesic joining points P and Q passing through its mid-point (mean) G (green curve), tangent space at point G with tangent vectors to geodesic from G to P and from G to Q (blue arrowed lines) and distance δ(G, Q). Right: the center of mass (also named mean) G of points P_1,…,P_4 defined as the point minimizing the sum of the four squared distances δ²(G, P_i), for i=1,…,4.","category":"page"},{"location":"introToRiemannianGeometry/#geodesic","page":"Intro to Riemannian Geometry","title":"geodesic","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The key object in the P manifold is the geodesic, loosely defined as the shortest path joining two points P and Q on the manifold, analogous to straight lines in the Euclidean space (Fig. 1). The geodesic equation with arclength 0≤a≤1 is the equation of the points along the path, denoted γ(P, Q, a), where with a=0 we stay at P and with a=1 we move all the way to Q. The points along the geodesic in between P and Q (0<a<1) can be understood as weighted means of P and Q. For example, the geodesic equation according to the Euclidean metric is (1-a)P + aQ, which is the traditional way to define weighted means. With the metrics we consider here, geodesics are unique and always exist.
Furthermore, as we will see, using the Fisher metric those geodesics extend indefinitely, i.e., they are defined and always remain positive definite for -∞<a<∞.","category":"page"},{"location":"introToRiemannianGeometry/#distance","page":"Intro to Riemannian Geometry","title":"distance","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The length of the geodesic (at constant velocity) between two points gives the distance δ(P, Q). The distance is always real, non-negative and equal to zero if and only if P=Q.","category":"page"},{"location":"introToRiemannianGeometry/#distance-from-the-origin","page":"Intro to Riemannian Geometry","title":"distance from the origin","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In contrast to an Euclidean space, the origin of the P manifold endowed with the Fisher metric is not 0_n, but I_n, the identity matrix of dimension n⋅n. The distance between a point P and the origin, i.e., δ(P, I), is analogous therein to the length of vectors in Euclidean space. This Riemannian manifold is symmetric around I_n, i.e., δ(P, I)=δ(P^{-1}, I) and δ(P, Q)=δ(P^{-1}, Q^{-1}). This will be made more precise when we talk about invariances.","category":"page"},{"location":"introToRiemannianGeometry/#mean","page":"Intro to Riemannian Geometry","title":"mean","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The mid-point on the geodesic joining P and Q is named the mean. Using the Euclidean metric this is the arithmetic mean of P and Q and using the inverse Euclidean metric this is their harmonic mean. As we will see, those are straightforward extensions of their scalar counterparts. Using the Fisher metric the mid-point of the geodesic joining P and Q allows the proper generalization to matrices of the scalars' geometric mean. The other metrics allow other definitions of means (see below).","category":"page"},
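{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For example, a minimal sketch (assuming the randP, geodesic and mean functions exported by PosDefManifold):\n\nusing PosDefManifold\r\nP, Q = randP(3), randP(3) # two random 3x3 SPD matrices\r\nG = geodesic(Fisher, P, Q, 0.5) # mid-point of the Fisher geodesic\r\nG ≈ mean(Fisher, P, Q) # i.e., the geometric mean of P and Q","category":"page"},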
{"location":"introToRiemannianGeometry/#Fréchet-mean","page":"Intro to Riemannian Geometry","title":"Fréchet mean","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Using Fréchet's variational approach we can extend to positive-definite matrices the concept of weighted mean of a set of scalars; as the midpoint G on the geodesic joining P and Q is the minimizer of δ^2(P, G)+δ^2(Q, G), so the mean G of points P_1, P_2,…,P_k is the matrix G verifying","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\textrm{argmin}_G\sum_{i=1}^{k}δ^2(P_i, G).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Thus, every metric induces a distance (or divergence) function, which, in turn, induces a mean.","category":"page"},{"location":"introToRiemannianGeometry/#invariances","page":"Intro to Riemannian Geometry","title":"invariances","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"An important characteristic of metrics is that they may induce invariance properties on the distance, which are in turn inherited by the mean.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Let us denote shortly by {P_i} the set {P_1,…,P_k}, where i=1,…,k, and by G{P_i} the Fréchet mean of the set (in this section we drop the weights for keeping the notation short). The most important invariance properties are:","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"invariance effect on distance δ(P, Q) effect on mean G{P_i}\nrotation δ(P, Q)=δ(U^HPU, U^HQU) G{U^HP_iU}=U^HG{P_i}U\naffinity δ(P, Q)=δ(B^HPB, B^HQB) G{B^HP_iB}=B^HG{P_i}B\ninversion δ(P, Q)=δ(P^{-1}, Q^{-1}) G{P_i^{-1}}=G^{-1}{P_i}","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"for any unitary U and non-singular B.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The affine invariance implies the rotation invariance and is also named congruence invariance.","category":"page"},
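{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For example, a sketch checking numerically the affine invariance of the Fisher distance (assuming the randP and distance functions exported by PosDefManifold; B is any non-singular matrix):\n\nusing LinearAlgebra, PosDefManifold\r\nP, Q = randP(4), randP(4)\r\nB = randn(4, 4)\r\nd1 = distance(Fisher, P, Q)\r\nd2 = distance(Fisher, ℍ(B'*P*B), ℍ(B'*Q*B))\r\nd1 ≈ d2 # true up to numerical precision","category":"page"},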
{"location":"introToRiemannianGeometry/#metrics","page":"Intro to Riemannian Geometry","title":"metrics","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"We are interested in distance or divergence functions, the difference between the two being that a divergence does not need to be symmetric nor to satisfy the triangle inequality. Note that in PosDefManifold we consider only distances and symmetric divergences. In fact those are of greater interest in practice. One can find several distances and divergences in the literature and they often turn out to be related to each other, see for example (Chebbi and Moakher, 2012; Cichocki et al., 2015; Sra, 2016)🎓. Ten of them are implemented in PosDefManifold and two of them are Riemannian metrics (the Fisher and Wasserstein metric, as we have said). In this section we give a complete list of the expressions for their induced","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance of a point P from the origin,\ndistance between two points P and Q,\ngeodesic joining P to Q (hence the weighted means of P and Q),\nweighted Fréchet mean G(P, w) of a set of k≥2 points P_1,…,P_k with associated real non-negative weights w_1,…,w_k summing up to 1.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"note: Nota Bene\nIn the following, the weights w_1,…,w_k are always supposed to sum up to 1, superscript H indicates conjugate transpose (or just transpose if the matrix is real) and if a is the arclength of a geodesic, we define for convenience b=1-a.","category":"page"},{"location":"introToRiemannianGeometry/#Euclidean","page":"Intro to Riemannian Geometry","title":"Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is the classical Euclidean distance leading to the usual arithmetic mean. In general this metric is not well adapted to the P manifold. It verifies only the rotation invariance; however, the mean also verifies the congruence invariance.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n‖P-I‖^2 ‖P-Q‖^2","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\nbP + aQ \sum_{i=1}^{k}w_i P_i","category":"page"},{"location":"introToRiemannianGeometry/#inverse-Euclidean","page":"Intro to Riemannian Geometry","title":"inverse Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is the classical harmonic distance leading to the harmonic mean. It verifies only the rotation invariance; however, the mean also verifies the congruence invariance.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n‖P^{-1}-I‖^2 ‖P^{-1}-Q^{-1}‖^2","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\n\big(bP^{-1} + aQ^{-1}\big)^{-1} \big(\sum_{i=1}^{k}w_i P_i^{-1}\big)^{-1}","category":"page"},
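{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For example, a sketch comparing a few of these distances on the same pair of points (assuming the Metric instances are named Euclidean, invEuclidean, logEuclidean and Fisher, as in PosDefManifold):\n\nusing PosDefManifold\r\nP, Q = randP(3), randP(3)\r\nfor m in (Euclidean, invEuclidean, logEuclidean, Fisher)\r\n    @show m distance(m, P, Q)\r\nend","category":"page"},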
{"location":"introToRiemannianGeometry/#Cholesky-Euclidean","page":"Intro to Riemannian Geometry","title":"Cholesky Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is a very simple metric that has been tried as an improvement over the Euclidean one. It is rarely used (see for example Dai et al., 2016)🎓. It does not verify any invariance. Let L_P be the lower triangular Cholesky factor of P, then","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n‖L_P-I‖^2 ‖L_P-L_Q‖^2","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\n(bL_P+aL_Q)(bL_P+aL_Q)^H \big(\sum_{i=1}^{k}w_i L_{P_i}\big)\big(\sum_{i=1}^{k}w_i L_{P_i}\big)^H","category":"page"},{"location":"introToRiemannianGeometry/#log-Euclidean","page":"Intro to Riemannian Geometry","title":"log Euclidean","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"If matrices P_1,…,P_k all pair-wise commute, then this metric coincides with the Fisher metric. See (Arsigny et al., 2007; Bhatia et al., 2019a)🎓. It enjoys the rotation and inversion invariance. The log-Euclidean distance to I is the same as per the Fisher metric. This mean has the same determinant as the Fisher mean, and trace greater than or equal to the trace of the Fisher mean. A minimum trace log Euclidean mean approximating well the Fisher mean has been proposed in Congedo et al. (2015)🎓.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n‖\textrm{log}(P)‖^2 ‖\textrm{log}(P)-\textrm{log}(Q)‖^2","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic Fréchet mean\n\textrm{exp}\big(b\hspace{1pt}\textrm{log}P + a\hspace{1pt}\textrm{log}Q\big) \textrm{exp}\big(\sum_{i=1}^{k}w_i\hspace{1pt}\textrm{log}P_i\big)","category":"page"},
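{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For example, a sketch verifying the closed form of the log-Euclidean mean (assuming randP, ℍVector and the mean function of PosDefManifold; Julia's log and exp of an Hermitian matrix act on its eigenvalues):\n\nusing LinearAlgebra, PosDefManifold\r\n𝐏 = ℍVector([randP(3) for i = 1:4])\r\nG = mean(logEuclidean, 𝐏) # with uniform weights w_i = 1/4\r\nG ≈ ℍ(exp(sum(log(P) for P in 𝐏) / 4)) # true","category":"page"},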
{"location":"introToRiemannianGeometry/#log-Cholesky","page":"Intro to Riemannian Geometry","title":"log Cholesky","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"It is a recently proposed distance in P. Like the Cholesky Euclidean metric here above, it exploits the diffeomorphism between matrices in P and their Cholesky factor, such that L_PL_P^H=P, thanks to the fact that the Cholesky factor is unique and that the map is smooth (Lin, 2019)🎓. The mean has the same determinant as the Fisher and log-Euclidean mean.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Let L_X, S_X and D_X be the lower triangular part, the strictly lower triangular part and the diagonal part of X, respectively (hence, S_X+D_X=L_X), then","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Distance² to I Distance²\n‖S_P-I‖^2+‖\textrm{log}D_P‖^2 ‖S_P-S_Q‖^2+‖\textrm{log}D_P-\textrm{log}D_Q‖^2","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic: S_P+a(S_Q-S_P)+D_P\hspace{1pt}\textrm{exp}\big(a\hspace{1pt}\textrm{log}D_Q-a\hspace{1pt}\textrm{log}D_P\big)","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: TT^H, where T=\sum_{i=1}^{k}w_iS_{P_i}+\textrm{exp}\big(\sum_{i=1}^{k}w_i\hspace{1pt}\textrm{log}D_{P_i}\big)","category":"page"},{"location":"introToRiemannianGeometry/#Fisher","page":"Intro to Riemannian Geometry","title":"Fisher","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The Fisher metric, also known as affine-invariant, natural and Fisher-Rao metric, among other names, has a paramount importance for the P manifold, standing out as the natural choice both from the perspective of differential geometry and information geometry. Endowed with the Fisher metric the manifold P is Riemannian, has nonpositive curvature and is symmetric. This metric verifies all three invariances we have considered.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Distance² to I Distance²\n‖\textrm{log}(P)‖^2 ‖\textrm{log}(P^{-1/2}QP^{-1/2})‖^2","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic\nP^{1/2}\big(P^{-1/2} Q P^{-1/2}\big)^a P^{1/2}","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: it does not have a closed-form solution in general.
The solution is the unique positive definite matrix G satisfying (Bhatia and Holbrook, 2006; Moakher, 2005)🎓","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\sum_{i=1}^{k}w_i\textrm{log}\big(G^{-1/2} P_i G^{-1/2}\big)=0.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For estimating it, PosDefManifold implements the well-known gradient descent algorithm, resulting in iterations:","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G ← G^{1/2}\textrm{exp}\big(\sum_{i=1}^{k}w_i\textrm{log}(G^{-1/2} P_i G^{-1/2})\big)G^{1/2}","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Alternatively, and more efficiently, one can ask for an approximate solution invoking the MPM algorithm (Congedo et al., 2017b)🎓, which is also implemented (in order to estimate the geometric mean use function powerMean with parameter p=0 or with a very small value of p).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This mean is known under many different names (Fisher, Rao, Fisher-Rao, Pusz-Woronowicz, Cartan, Fréchet, Karcher, geometric…). The 'centrality' of this mean among a wide family of divergence-based means can be appreciated in Fig. 4 of Cichocki et al. (2015)🎓.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The geometric mean G of two matrices P and Q is denoted γ(P, Q, 1/2). Currently it is an object of intense study because of its interesting mathematical properties. For instance,","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"it is the unique solution to the Riccati equation GQ^{-1}G=P,\nit is equal to F^{-H}D_1^{1/2}D_2^{1/2}F^{-1} for whatever joint diagonalizer F of P and Q, i.e., for whatever matrix F satisfying F^HPF=D_1 and F^HQF=D_2, with D_1, D_2 non-singular diagonal matrices (Congedo et al., 2015)🎓,\nit enjoys all 10 properties of means postulated in the seminal work of Ando et al. (2004)🎓.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"When P and Q commute, the Fisher mean of two matrices reduces to P^{1/2}Q^{1/2}, which indeed in this case is the log-Euclidean mean \textrm{exp}\big(\frac{1}{2}\textrm{log}P + \frac{1}{2}\textrm{log}Q\big).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"We denote the Fisher geodesic equation as γ(P, Q, a). Note that γ(I, P, a)=P^a and γ(P, I, a)=P^b, where b=1-a.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The Fisher geodesic equation verifies γ(P, Q, a)=γ(Q, P, b) and (γ(P, Q, a))^{-1}=γ(P^{-1}, Q^{-1}, a).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"An interesting property of the Fisher metric is that using its geodesic equation we can extrapolate positive matrices, always remaining in P. That is, using any real value of a:","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"with 0<a<1 we move toward Q (attraction),\nwith a>1 we move over and beyond Q (extrapolation) and\nwith a<0 we move back away from Q (repulsion).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Something similar can be done using the log Cholesky metric as well.","category":"page"},
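{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For example, a sketch (assuming the randP and geodesic functions exported by PosDefManifold):\n\nusing LinearAlgebra, PosDefManifold\r\nP, Q = randP(3), randP(3)\r\nE = geodesic(Fisher, P, Q, 1.5) # extrapolation beyond Q\r\nR = geodesic(Fisher, P, Q, -0.5) # repulsion away from Q\r\nisposdef(E) && isposdef(R) # true: we never leave the manifold","category":"page"},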
{"location":"introToRiemannianGeometry/#power-means","page":"Intro to Riemannian Geometry","title":"power means","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The arithmetic, harmonic and geometric mean we have encountered are all members of the 1-parameter family of power means (with parameter p∈[-1, 1]) introduced by Lim and Palfia (2012)🎓 to generalize the concept of power means of scalars (also known as Hölder means or generalized means). The family of power means G with parameter p satisfies equation","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G=\sum_{i=1}^{k}w_i\hspace{1pt}γ(G, P_i, p),","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"where γ(G, P_i, p) is the Fisher geodesic equation we have discussed here above talking about the Fisher metric. In particular:","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"with p=-1 this is the harmonic mean (see the inverse Euclidean metric),\nwith p=+1 this is the arithmetic mean (see the Euclidean metric),\nat the limit of p evaluated at zero from both sides this is the geometric mean (see the Fisher metric).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Thus, the family of power means continuously interpolates between the arithmetic and harmonic mean, passing through the geometric mean.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Power means are the unique positive definite solution of (Yamazaki, 2019)🎓","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\sum_{i=1}^{k}w_i\big(G^{-1/2} P_i G^{-1/2}\big)^p=I.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"All power means enjoy the congruence invariance (hence the rotation invariance), but only the geometric mean enjoys also the inversion invariance.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The power mean with p=1/2 is the solution of the Fréchet mean problem using the following divergence (Bhatia, Gaubert and Jain, 2019)🎓","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"δ^2(P, Q)=\textrm{tr}(P+Q)-2\hspace{1pt}\textrm{tr}\hspace{1pt}γ(P, Q, 1/2) = 2\big(\textrm{tr}(\textrm{arithm. mean}(P, Q))-\textrm{tr}(\textrm{geom. mean}(P, Q))\big).","category":"page"},
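{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For example, a sketch (assuming powerMean returns the 3-tuple (G, iter, conv), as the package's iterative solvers do):\n\nusing PosDefManifold\r\n𝐏 = ℍVector([randP(3) for i = 1:5])\r\nH, iter1, conv1 = powerMean(𝐏, -1.0) # harmonic mean\r\nA, iter2, conv2 = powerMean(𝐏, 1.0) # arithmetic mean\r\nG, iter3, conv3 = powerMean(𝐏, 0.01) # close to the geometric mean as p approaches 0","category":"page"},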
{"location":"introToRiemannianGeometry/#generalized-means","page":"Intro to Riemannian Geometry","title":"generalized means","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"When the matrices in the set all pairwise commute, it has been proved in Lim and Palfia (2012, see Property 1, p. 1502)🎓 that the power means we have just seen reduce to","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\big(\sum_{i=1}^{k}w_iP_i^p\big)^{1/p},","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"which are the straightforward extension of scalar power means (see generalized means) to matrices. As usual, such straightforward extensions work well in commuting algebra, but not in general. See for example the case of the mean obtained using the log Euclidean metric, which is the straightforward extension to matrices of the scalar geometric mean, but is not the matrix geometric mean, unless the matrices all pairwise commute.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Both the generalized means and the power means have a parameter p∈[-1, 1]. For the latter, the solution is implemented via the fixed-point MPM algorithm (Congedo et al., 2017b)🎓.","category":"page"},{"location":"introToRiemannianGeometry/#modified-Bhattacharyya-mean","page":"Intro to Riemannian Geometry","title":"modified Bhattacharyya mean","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"If matrices P_1, P_2,…,P_k all pair-wise commute, the special case p=1/2 yields the following instance of power means (and of generalized means):","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\big(\sum_{i=1}^{k}w_iP_i^{1/2}\big)^{1/2}.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This mean has been proposed in a different context by Moakher (2012)🎓 as a modified Bhattacharyya mean, since it is a modification of the Bhattacharyya mean we will encounter next under the name logdet zero. It is worth noting that in commuting algebra Moakher's mean also corresponds to the mean obtained with the Wasserstein metric.","category":"page"},{"location":"introToRiemannianGeometry/#logdet-zero","page":"Intro to Riemannian Geometry","title":"logdet zero","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The logdet zero divergence, also known as the square of the Bhattacharyya divergence (Moakher, 2012)🎓, Stein divergence (Harandi et al., 2016)🎓, symmetrized Jensen divergence, the S-divergence (Sra, 2016)🎓 or the log determinant α-divergence (with α=0, Chebbi and Moakher, 2012)🎓 is a Jensen-Bregman symmetric divergence enjoying all three invariances we have listed.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Its square root has been shown to be a distance (Sra, 2016)🎓.
It behaves very similarly to the Fisher metric at short distances (Moakher, 2012; Sra, 2016; Cichocki et al., 2015; Harandi et al., 2016)🎓 and the mean of two matrices in P is the same as the Fisher mean (Harandi et al., 2016)🎓. Thus, it has often been used instead of the Fisher metric because it allows more efficient calculations. In fact, the calculation of this distance requires only three Cholesky decompositions, whereas the computation of the Fisher distance involves extracting generalized eigenvalues.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\textrm{logdet}\frac{1}{2}(P+I)-\frac{1}{2}\textrm{logdet}(P) \textrm{logdet}\frac{1}{2}(P+Q)-\frac{1}{2}\textrm{logdet}(PQ)","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic: we use the Fréchet mean with appropriate weights.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: the solution is the unique positive definite matrix G satisfying","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"\sum_{i=1}^{k}w_i\big(\frac{1}{2}P_i+\frac{1}{2}G\big)^{-1}=G^{-1}.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For estimating it, PosDefManifold implements the fixed-point iterations (Moakher, 2012, p. 315)🎓:","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G ← \frac{1}{2}\big(\sum_{i=1}^{k}w_i(P_i+G)^{-1}\big)^{-1}.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The logdet zero divergence between P and Q can also be written as the log-determinant of their arithmetic mean minus the log-determinant of their geometric mean (Moakher, 2012)🎓, which thus defines a possible extension to matrices of the useful concept of Wiener entropy.","category":"page"},{"location":"introToRiemannianGeometry/#logdet-α","page":"Intro to Riemannian Geometry","title":"logdet α","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The log determinant α-divergence family for α∈[-1, 1] (Chebbi and Moakher, 2012)🎓 allows","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"the logdet zero mean for α=0,\nthe left Kullback-Leibler mean for α=-1 (which is the harmonic mean),\nthe right Kullback-Leibler mean for α=1 (which is the arithmetic mean).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"We do not consider the left and right Kullback-Leibler divergences because the related means are trivially the arithmetic and harmonic ones (Moakher, 2012). As for the symmetrized Kullback-Leibler divergence, it is known as the Jeffrey divergence and will be considered next.
The log determinant α-divergence family of means is not implemented in PosDefManifold (besides the special cases α=-1, 0, 1), since the family of power means is implemented.","category":"page"},{"location":"introToRiemannianGeometry/#Jeffrey","page":"Intro to Riemannian Geometry","title":"Jeffrey","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is a Jensen-Bregman symmetric divergence, also known as the symmetrized Kullback-Leibler divergence (see logdet α) (Faraki et al., 2015)🎓. It enjoys all three invariances we have listed.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\frac{1}{2}\textrm{tr}\big(P+P^{-1}\big)-n \frac{1}{2}\textrm{tr}(Q^{-1}P+P^{-1}Q)-n","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic: we use the Fréchet mean with appropriate weights.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: A^{1/2}\big(A^{-1/2}HA^{-1/2}\big)^{1/2}A^{1/2}, where A is the arithmetic mean (see Euclidean metric) and H is the harmonic mean (see inverse Euclidean metric). Thus, the weighted Fréchet mean is the geometric mean (see Fisher metric) of the arithmetic and harmonic mean (Moakher, 2012)🎓.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Note that this is the geometric mean only for k=2. That is, for scalars, but not in general for matrices, the geometric mean is the geometric mean of the arithmetic mean and the harmonic mean (the only metric inducing the geometric mean in general is the Fisher metric).","category":"page"},{"location":"introToRiemannianGeometry/#Von-Neumann","page":"Intro to Riemannian Geometry","title":"Von Neumann","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The Von Neumann divergence is a Jensen-Bregman symmetric divergence (Sra, 2016; Taghia et al., 2019)🎓. It enjoys only the rotation invariance.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\frac{1}{2}\textrm{tr}(P\textrm{log}P-\textrm{log}P) \frac{1}{2}\textrm{tr}\big(P(\textrm{log}P-\textrm{log}Q)+Q(\textrm{log}Q-\textrm{log}P)\big)","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The geodesic and weighted Fréchet mean for this metric are not available.","category":"page"},{"location":"introToRiemannianGeometry/#Wasserstein","page":"Intro to Riemannian Geometry","title":"Wasserstein","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"This is an extension to matrices of the Hellinger divergence for vectors and is also known as the Bures divergence in quantum physics, where it is applied to density matrices (unit trace positive-definite matrices). It enjoys only the rotation invariance. Endowed with the Wasserstein metric the manifold P has a Riemannian geometry of nonnegative curvature.
See (Bhatia et al., 2019a; Bhatia et al., 2019b)🎓.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"distance² to I distance²\n\textrm{tr}(P+I)-2\hspace{1pt}\textrm{tr}(P^{1/2}) \textrm{tr}(P+Q)-2\hspace{1pt}\textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2}","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"geodesic\nb^2P+a^2Q+ab\big[(PQ)^{1/2}+(QP)^{1/2}\big]","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"The quantity \textrm{tr}\big(P^{1/2}QP^{1/2}\big)^{1/2} is known in quantum physics as the fidelity of P and Q when those are density matrices (unit-trace positive definite matrices).","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Fréchet mean: the solution is the unique positive definite matrix G satisfying (Agueh and Carlier, 2011)🎓","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G=\sum_{i=1}^{k}w_i\big(G^{1/2} P_i G^{1/2}\big)^{1/2}.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"For estimating it, PosDefManifold implements the fixed-point algorithm of Álvarez-Esteban et al. (2016)🎓, giving iterations:","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"G ← G^{-1/2}\big(\sum_{i=1}^{k}w_i(G^{1/2}P_i G^{1/2})^{1/2}\big)^{2}G^{-1/2}","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In the special case when the matrices all pair-wise commute, the Wasserstein mean is equal to the instance of power means and generalized means with p=1/2 (Bhatia, Jain and Lim, 2019b)🎓, that is, to the modified Bhattacharyya mean.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"In the special case k=2 and equal weights the mean is W=\frac{1}{4}\big(P+Q+(PQ)^{1/2}+(QP)^{1/2}\big).","category":"page"},{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"🎓","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"References","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Agueh, G. Carlier (2011) Barycenters in the Wasserstein space, SIAM J. Math. Anal., 43(2), 904-924.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"P. C. Álvarez-Esteban, E. del Barrio, J.A. Cuesta-Albertos, C. Matrán (2016) A fixed-point approach to barycenters in Wasserstein space, Journal of Mathematical Analysis and Applications, 441(2), 744-762.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"T. Ando, C.-K. Li, R.
,{"location":"introToRiemannianGeometry/#","page":"Intro to Riemannian Geometry","title":"🎓","text":"","category":"section"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"References","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Agueh, G. Carlier (2011) Barycenters in the Wasserstein space, SIAM J. Mat. Anal. Appl., 43, 904-924.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"P.C. Álvarez-Esteban, E. del Barrio, J.A. Cuesta-Albertos, C. Matrán (2016) A fixed-point approach to barycenters in Wasserstein space, Journal of Mathematical Analysis and Applications, 441(2), 744-762.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"T. Ando, C.-K. Li, R. Mathias (2004) Geometric means, Linear Algebra and its Applications, 385(1), 305-334.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"V. Arsigny, P. Fillard, X. Pennec, N. Ayache (2007) Geometric means in a novel vector space structure on symmetric positive-definite matrices, SIAM Journal on Matrix Analysis and Applications, 29(1), 328-347.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2012) Multi-class Brain Computer Interface Classification by Riemannian Geometry, IEEE Transactions on Biomedical Engineering, 59(4), 920-928.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A. Barachant, S. Bonnet, M. Congedo, C. Jutten (2013) Classification of covariance matrices using a Riemannian-based kernel for BCI applications, Neurocomputing, 112, 172-178.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia (2007) Positive Definite Matrices, Princeton University Press.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, M. Congedo (2019) Procrustes problems in manifolds of positive definite matrices, Linear Algebra and its Applications, 563, 440-445.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, S. Gaubert, T. Jain (2019) Matrix versions of the Hellinger distance, arXiv:1901.01378.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, J. Holbrook (2006) Riemannian geometry and matrix geometric means, Linear Algebra and its Applications, 413(2-3), 594-618.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, T. Jain (2010) Approximation problems in the Riemannian metric on positive definite matrices, Ann. Funct. Anal., 5(2), 118-126.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, T. Jain, Y. Lim (2019a) Inequalities for the Wasserstein mean of positive definite matrices, Linear Algebra and its Applications, in press.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R. Bhatia, T. Jain, Y. Lim (2019b) On the Bures-Wasserstein distance between positive definite matrices, Expositiones Mathematicae, in press.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Z. Chebbi, M. Moakher (2012) Means of Hermitian positive-definite matrices based on the log-determinant α-divergence function, Linear Algebra and its Applications, 436(7), 1872-1889.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"A. Cichocki, S.
Cruces, S.-I. Amari (2015) Log-Determinant Divergences Revisited: Alpha-Beta and Gamma Log-Det Divergences, Entropy, 17(5), 2988-3034.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"R.R. Coifman, Y. Shkolnisky, F.J. Sigworth, A. Singer (2008) Graph Laplacian Tomography From Unknown Random Projections, IEEE Transactions on Image Processing, 17(10), 1891-1899.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Congedo, B. Afsari, A. Barachant, M. Moakher (2015) Approximate Joint Diagonalization and Geometric Mean of Symmetric Positive Definite Matrices, PLoS ONE, 10(4): e0121423.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Congedo, A. Barachant, R. Bhatia (2017a) Riemannian Geometry for EEG-based Brain-Computer Interfaces; a Primer and a Review, Brain-Computer Interfaces, 4(3), 155-174.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Congedo, A. Barachant, E. Kharati Koopaei (2017b) Fixed Point Algorithms for Estimating Power Means of Positive Definite Matrices, IEEE Transactions on Signal Processing, 65(9), 2211-2220.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"X. Dai, S. Khamis, Y. Zhang, L.S. Davis (2016) Parameterizing region covariance: an efficient way to apply sparse codes on second order statistics, arXiv:1602.02822.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Faraki, M. Harandi, F. Porikli (2015) More About VLAD: A Leap from Euclidean to Riemannian Manifolds, IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"W. Förstner, B. Moonen (1999) A metric for covariance matrices, in Krumm K. and Schwarze V.S. (Eds.) Quo vadis geodesia...?, number 1999.6 in tech. reports of the Dept. of Geodesy and Geoinformatics, p. 113-128, Stuttgart University.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M.T. Harandi, R. Hartley, B. Lovell, C. Sanderson (2016) Sparse coding on symmetric positive definite manifolds using Bregman divergences, IEEE Transactions on Neural Networks and Learning Systems, 27(6), 1294-1306.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"N.J. Higham (1988) Computing a Nearest Symmetric Positive Semidefinite Matrix, Linear Algebra and its Applications, 103, 103-118.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"J. Ho, G. Cheng, H. Salehian, B.C. Vemuri (2013) Recursive Karcher Expectation Estimators and Geometric Law of Large Numbers, Proc. of the AISTATS Conf.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"B.
Iannazzo (2016) The geometric mean of two matrices from a computational viewpoint, Numerical Linear Algebra with Applications, 23(2), 208-229.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"S. Lafon (2004) Diffusion maps and geometric harmonics, Ph.D. dissertation, Yale University, New Haven, CT.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Y. Lim, M. Pálfia (2012) Matrix power means and the Karcher mean, Journal of Functional Analysis, 262(4), 1498-1514.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Y. Lim, M. Pálfia (2019) Strong law of large numbers for the L1-Karcher mean, arXiv:1912.09295.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"Z. Lin (2019) Riemannian Geometry of Symmetric Positive Definite Matrices via Cholesky Decomposition, in press.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"E. Massart, J.M. Hendrickx, P.-A. Absil (2018) Matrix Geometric Means based on shuffled inductive sequences, Linear Algebra and its Applications, 252, 334-359.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Moakher (2005) A Differential Geometric Approach to the Geometric Mean of Symmetric Positive-Definite Matrices, SIAM Journal on Matrix Analysis and Applications, 26(3), 735-747.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"M. Moakher (2012) Divergence measures and means of symmetric positive-definite matrices, in D.H. Laidlaw and A. Vilanova (Eds.) \"New Developments in the Visualization and Processing of Tensor Fields\", Springer, Berlin.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"C. Mostajeran, C. Grussler, R. Sepulchre (2019) Geometric Matrix Midranges, arXiv:1907.04188.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"X. Pennec, P. Fillard, N. Ayache (2006) A Riemannian Framework for Tensor Computing, International Journal of Computer Vision, 66(1), 41-66.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"P.L.C. Rodrigues, M. Congedo, C. Jutten (2018) Multivariate Time-Series Analysis Via Manifold Learning, in Proc. of the IEEE Statistical Signal Processing Workshop (SSP 2018), Fribourg-en-Brisgau, Germany.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"S. Sra (2016) Positive definite matrices and the S-divergence, Proc. Amer. Math. Soc., 144, 2787-2797.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"J. Taghia, M. Bånkestad, F. Lindsten, T.B.
Schön (2019) Constructing the Matrix Multilayer Perceptron and its Application to the VAE, arXiv:1902.01182v1.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"S. Umeyama (1988) An Eigendecomposition Approach to Weighted Graph Matching Problems, IEEE Trans. Pattern. Anal. Mach. Intell., 10(5), 695-703.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"O. Yair, M. Ben-Chen, R. Talmon (2019) Parallel Transport on the Cone Manifold of SPD Matrices for Domain Adaptation, IEEE Trans. Sig. Process., 67(7), 1797-1811.","category":"page"},{"location":"introToRiemannianGeometry/","page":"Intro to Riemannian Geometry","title":"Intro to Riemannian Geometry","text":"T. Yamazaki (2019) The Ando-Hiai inequalities for the solution of the generalized Karcher Equation and related results, arXiv:1802.06200v2.","category":"page"}] } diff --git a/docs/build/signalProcessing/index.html index 6befe79..47c00f5 100644 --- a/docs/build/signalProcessing/index.html +++ b/docs/build/signalProcessing/index.html @@ -1,20 +1,20 @@ -signalProcessing.jl · PosDefManifold

        signalProcessing.jl

        This unit contains miscellaneous signal processing functions useful in relation to the Riemannian geometry of the manifold of Symmetric Positive Definite (SPD) or Hermitian Positive Definite (HPD) matrices. In Julia those are Hermitian matrices, see typecasting matrices.

        Function                 Description
        randChi², randχ²         Generate a random variable distributed as a chi-squared
        randEigvals, randλ       Generate a random vector of real positive eigenvalues
        randEigvalsMat, randΛ    Generate a random diagonal matrix of real positive eigenvalues
        randUnitaryMat, randU    Generate a random orthogonal or unitary matrix
        randPosDefMat, randP     Generate one or an array of random positive definite matrices
        regularize!              Regularize an array of positive definite matrices
        gram                     Gram matrix of a matrix
        trade                    trace and determinant of a matrix as a 2-tuple

        PosDefManifold.randChi² (Function)
        randChi²(df::Int)

        alias: randχ²

        Generate a random variable distributed as a chi-squared with df degrees of freedom.

        It uses the Wilson–Hilferty transformation for df>=20 - see chi-squared distribution.
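        For df>=20 the transformation amounts to cubing a shifted and scaled standard normal variable. A minimal sketch of this approximation (wilsonHilferty is a hypothetical name, not the package function):

        # If Z ~ N(0,1), then df*(1 - 2/(9df) + Z*√(2/(9df)))^3 is approximately
        # distributed as a chi-squared with df degrees of freedom
        wilsonHilferty(df::Int) = df * (1 - 2/(9df) + randn() * √(2/(9df)))^3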

        Examples

        using Plots, PosDefManifold
         chi=[randχ²(2) for i=1:10000]
        -histogram(chi) # needs Plots package. Check your plots back-end.
        source
        PosDefManifold.randEigvals (Function)
            randEigvals(n::Int;
             <
             df::Int=2,
        -    eigvalsSNR::Real=10e3 >)

        alias: randλ

        Generate an $n$-vector of random real positive eigenvalues. The eigenvalues are generated as in function randΛ(randEigvalsMat), the syntax of which is used.

        See also: randU (randUnitaryMat), randP (randPosDefMat).

        Examples

        using Plots, PosDefManifold
         λ=sort(randλ(10), rev=true)
         σ=sort(randλ(10, eigvalsSNR=10), rev=true)
         plot(λ) # needs Plots package. Check your plots back-end.
        -plot!(σ) # needs Plots package. Check your plots back-end.
        source
        PosDefManifold.randEigvalsMat (Function)
            (1) randEigvalsMat(n::Int;
             <
             df::Int=2,
             eigvalsSNR::Real=10e3 >)
         
             (2) randEigvalsMat(n::Int, k::Int;
        -    < same keyword arguments as in (1) >)

        alias: randΛ

        (1) Generate an $n⋅n$ diagonal matrix of random real positive eigenvalues.

        (2) Generate a 1d array (of 𝔻Vector type) of $k$ matrices of the kind in (1).

        The eigenvalues are generated according to model

        $λ_i=χ_{df}^2+η,\hspace{6pt}\textrm{for}\hspace{2pt}i=1:n,$

        where

        • $χ_{df}^2$ (signal term) is randomly distributed as a chi-square with df degrees of freedom,
        • $η$ is a white noise term, function of <keyword argument> eigvalsSNR, such that

        $\textrm{eigenvalues SNR}=\mathbb{E}\big(\sum_{i=1}^{n}λ_i\big)\big/nη.$

        The expected sum $\mathbb{E}\big(\sum_{i=1}^{n}λ_i\big)$ here above is the expected variance of the signal term, i.e., $n(df)$, since the expectation of a random chi-squared variable is equal to its degrees of freedom.

        If eigvalsSNR=Inf is passed as argument, then $η$ is set to zero, i.e., no white noise is added. In any case eigvalsSNR must be positive.

        Note that with the default value of <keyword argument> df (df=2) the generating model assumes that the eigenvalues have exponentially decaying variance, which is often observed on real data.
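        Putting the model together, a minimal sketch (simEigvals is a hypothetical name, not the package function; η=df/eigvalsSNR follows from the SNR definition above, since the expected sum of the eigenvalues is $n⋅df$):

        # λᵢ = χ²(df) + η, with the white-noise level η set from the expected SNR
        function simEigvals(n::Int; df::Int=2, eigvalsSNR::Real=10e3)
            η = isinf(eigvalsSNR) ? 0.0 : df / eigvalsSNR
            return [sum(abs2, randn(df)) + η for _ = 1:n]
        end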

        Nota Bene

        The <keyword argument> eigvalsSNR expresses the expected eigenvalues SNR (signal-to-noise ratio), not the real one, and is not expressed in decibels, but as the expected SNR variance ratio.

        This function is used by function randP (randPosDefMat) to generate random positive definite matrices with added white noise in order to emulate eigenvalues observed in real data and to improve the conditioning of the generated matrices with respect to inversion.

        See also: randλ (randEigvals), randU (randUnitaryMat), randP (randPosDefMat), randχ² (randChi²).

        Examples

        using PosDefManifold
         # (1)
         n=3;
         U=randU(n);
        @@ -24,14 +24,14 @@
         Q=ℍ(U*Λ*U') # generate an SPD matrix and flag it as 'Hermitian'
         
         # (2) generate an array of 10 matrices of simulated eigenvalues
        -Dvec=randΛ(n, 10)
        source
        PosDefManifold.randUnitaryMat (Function)
        (1) randUnitaryMat(n::Int)
        +(2) randUnitaryMat(::Type{Complex{T}}, n::Int)

        aliases: randOrthMat, randU

        Generate a random $n⋅n$ (1) orthogonal matrix (real) or (2) unitary matrix (complex).

        The matrices are generated by running the modified (stabilized) Gram-Schmidt orthogonalization procedure (mgs) on an $n⋅n$ matrix filled with random Gaussian elements.
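        The same idea can be sketched with the standard library's QR factorization (an illustration only, with hypothetical names; the package uses its own mgs function):

        using LinearAlgebra
        randOrtho(n)   = Matrix(qr(randn(n, n)).Q)              # real orthogonal matrix
        randUnitary(n) = Matrix(qr(randn(ComplexF64, n, n)).Q)  # complex unitary matrix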

        See also: randΛ (randEigvals), randP (randPosDefMat).

        Examples

        using PosDefManifold
         n=3;
         X=randU(n)*sqrt(randΛ(n))*randU(n)'  # (1) generate a random square real matrix
         
         U=randU(ComplexF64, n);
         V=randU(ComplexF64, n);
        -Y=U*sqrt(randΛ(n))*V' # (2) generate a random square complex matrix
        source
        PosDefManifold.randPosDefMat (Function)
            (1) randPosDefMat(n::Int;
             <
             df::Int=2,
             eigvalsSNR::Real=10e3 >)
        @@ -47,14 +47,14 @@
             commuting=false >)
         
             (4) randPosDefMat(::Type{Complex{T}}, n::Int, k::Int;
        -    < same keyword arguments as in (3) >)

        alias: randP

        Generate

        • (1) one random Hermitian positive definite matrix (real) of size $n⋅n$
        • (2) one random Hermitian positive definite matrix (complex) of size $n⋅n$
        • (3) a 1d array (of ℍVector type) of $k$ matrices of the kind in (1)
        • (4) a 1d array (of ℍVector type) of $k$ matrices of the kind in (2).

        Methods (3) and (4) are multi-threaded. See Threads.

        For (1) and (2) the matrix is generated according to model

        $UΛU^H+ηI$,

        where $U$ is a random orthogonal (1) or unitary (2) matrix generated by function randU (randUnitaryMat) and $Λ$, $η$ are a positive definite diagonal matrix and a non-negative scalar, depending on the <optional keyword arguments> df and eigvalsSNR, randomly generated by calling function randΛ (randEigvalsMat).
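        A minimal sketch of this model using standard-library tools only (the package itself draws $U$ and $Λ$ with randU and randΛ):

        using LinearAlgebra
        n, η = 4, 1e-3
        U = Matrix(qr(randn(n, n)).Q)                    # random orthogonal matrix
        Λ = Diagonal([sum(abs2, randn(2)) for _ = 1:n])  # random positive eigenvalues (χ², df=2)
        P = Hermitian(U * Λ * U' + η * I)                # positive definite by construction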

        For (3) and (4), if the <optional keyword argument> commuting=true is passed, the $k$ matrices are generated according to model

        $UΛ_iU^H+ηI$, for $i=1:k$

        otherwise they are generated according to model

        $(UΛ_iU^H+ηI)+φ(V_iΔ_iV_i^H+ηI)$, for $i=1:k$   Eq. [1]

        where

        • $U$ and the $V_i$ are random (3) orthogonal/(4) unitary matrices,
        • $Λ_i$ and $Δ_i$ are positive definite diagonal matrices
        • $η$ is a non-negative scalar.

        All variables above are randomly generated as in (1) and (2), using the same <optional keyword arguments>.

        The scalar $φ$ is fixed by the <optional keyword argument> SNR, such that

        $SNR=\frac{\displaystyle\sum_{i=1}^{k}\textrm{tr}(UΛ_iU^H+ηI)}{\displaystyle\sum_{i=1}^{k}\textrm{tr}\big(φ(V_iΔ_iV_i^H+ηI)\big)}$.

        Nota Bene

        The keyword argument SNR is not expressed in decibels, but as the expected SNR variance ratio. It must be a positive number.

        A slightly different version of this model for generating positive definite matrices has been proposed in (Congedo et al., 2017b)[🎓]; in the model of Eq. [1]

        • $UΛ_iU^H$ is the signal term, where the signal is supposed to share the same coordinates across all matrices,
        • $φ(V_iΔ_iV_i^H)$ is a structured noise term, which differs for each matrix,
        • $ηI$ is a white noise term, with the same variance for all matrices.
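        A sketch of Eq. [1] with hypothetical helpers (randOrth, randD): a common signal subspace $U$ shared by all $k$ matrices, plus structured noise on per-matrix subspaces $V_i$, scaled by $φ$:

        using LinearAlgebra
        randOrth(n) = Matrix(qr(randn(n, n)).Q)
        randD(n, df) = Diagonal([sum(abs2, randn(df)) for _ = 1:n])
        n, k, η, φ = 4, 5, 1e-3, 0.1
        U = randOrth(n)
        𝐏 = [begin
                 V = randOrth(n)   # structured-noise subspace, different for each i
                 Hermitian((U*randD(n, 2)*U' + η*I) + φ*(V*randD(n, 2)*V' + η*I))
             end for i = 1:k]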

        See also: the aforementioned paper and randΛ (randEigvalsMat).

        Examples

        using PosDefManifold
         R=randP(10, df=10, eigvalsSNR=1000) # 1 SPD matrix of size 10x10 # (1)
         H=randP(ComplexF64, 5, eigvalsSNR=10) # 1 Hermitian Matrix of size 5x5 # (2)
         ℛ=randP(10, 1000, eigvalsSNR=100) # 1000 SPD Matrices of size 10x10 # (3)
         using Plots
         heatmap(Matrix(ℛ[1]), yflip=true, c=:bluesreds)
        -ℋ=randP(ComplexF64, 20, 1000) # 1000 Hermitian Matrices of size 20x20 # (4)
        source
        PosDefManifold.regularize! (Function)
        (1) regularize!(P::ℍ; <SNR=10e3>)
        -(2) regularize!(𝐏::ℍVector; <SNR=10e3>)

        Add white noise to either

        • (1) a positive definite matrix $P$ of size $n⋅n$, or
        • (2) a 1d array $𝐏$ of $k$ positive definite matrices of size $n⋅n$, of ℍVector type.

        The added noise improves the matrix conditioning with respect to inversion. This is used to avoid numerical errors when decomposing these matrices or when evaluating some functions of their eigenvalues, such as the log.

        A constant value is added to all diagonal elements of (1) $P$ or (2) of all matrices in $𝐏$, that is, on output:

        $\textrm{(1)}\hspace{2pt}P\leftarrow P+ηI$

        $\textrm{(2)}\hspace{2pt}𝐏_i\leftarrow 𝐏_i+ηI, \hspace{2pt}\textrm{for}\hspace{2pt} i=1:k.$

        The amount of added noise $η$ is determined by the SNR <keyword argument>, which by default is 10000. This is such that

        $\textrm{(1)}\hspace{2pt}SNR=\frac{\displaystyle\textrm{tr}(P)}{\displaystyle\textrm{tr}(ηI)}.$

        $\textrm{(2)}\hspace{2pt}SNR=\frac{\displaystyle\sum_{i=1}^{k}\textrm{tr}(𝐏_i)}{\displaystyle k\hspace{1pt}\textrm{tr}(ηI)}.$

        $P$ in (1) must be flagged as Hermitian. See typecasting matrices.
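        A minimal sketch of case (1), solving the SNR relation above for $η$ (regularizeSketch is a hypothetical name; the package function works in place):

        using LinearAlgebra
        function regularizeSketch(P::Hermitian; SNR::Real=10e3)
            n = size(P, 1)
            η = tr(P) / (n * SNR)      # from SNR = tr(P)/tr(ηI) = tr(P)/(n*η)
            return Hermitian(P + η * I)
        end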

        Nota Bene

        The keyword argument SNR expresses an SNR (signal-to-noise ratio); it is not expressed in decibels, but as the SNR variance ratio. It must be a positive number. Differently from functions randΛ (randEigvalsMat), randλ (randEigvals) and randP (randPosDefMat), the SNR here is not the expected SNR, but the actual SNR.

        See also: randP (randPosDefMat).

        Examples

        # (1)
         using LinearAlgebra, Plots, PosDefManifold
         n=3
         U=randU(n)
        @@ -69,17 +69,20 @@
         
         # (2)
         𝐏=[ℍ(U*Diagonal(randn(3).^2)*U') for i=1:5] # 5 real 3x3 positive matrices
        -regularize!(𝐏, SNR=1000)

        Run a test

        using LinearAlgebra
        +regularize!(𝐏, SNR=1000)
        +
        +## Run a test
        +using LinearAlgebra
         𝐏=randP(10, 100, SNR=1000); # 100 real Hermitian matrices
         signalVar=sum(tr(P) for P in 𝐏);
         regularize!(𝐏, SNR=1000);
         signalPlusNoiseVar=sum(tr(P) for P in 𝐏);
         output_snr=signalVar/(signalPlusNoiseVar-signalVar)
        -# output_snr should be approx. equal to 1000
        source
        PosDefManifold.gram (Function)
        gram(X::Matrix{T}) where T<:RealOrComplex

        Given a generic data matrix $X$, comprised of real or complex elements, return the normalized Gram matrix, that is, the covariance matrix of $X$ corrected by sample size, but without subtracting the mean.

        The result is flagged as Hermitian. See typecasting matrices.

        Nota Bene

        If $X$ is wide or square (r<=c) return $XX^H/c$. If $X$ is tall (r>c) return $X^HX/r$.
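        A sketch of this rule (gramSketch is a hypothetical name, not the package function):

        using LinearAlgebra
        function gramSketch(X::Matrix)
            r, c = size(X)
            return r <= c ? Hermitian((X * X') / c) : Hermitian((X' * X) / r)
        end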

        Examples

        using PosDefManifold
         X=randn(5, 150);
         G=gram(X) # => G=X*X'/150
         X=randn(100, 2);
        -F=gram(X); # => G=X'*X/100
        source
        PosDefManifold.trade (Function)

        trade(P::ℍ{T}) where T<:RealOrComplex

        Given a positive definite matrix P, return as a 2-tuple the trace and the determinant of P. This is used to plot positive matrices in two dimensions (TraDe plots: log(trace/n) vs. log(determinant); see the example below).

        P must be flagged by julia as Hermitian. See typecasting matrices.
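        A sketch of the returned 2-tuple and of the TraDe coordinates used for plotting (hypothetical helper names):

        using LinearAlgebra
        tradeSketch(P::Hermitian) = (tr(P), det(P))
        tradeCoords(P::Hermitian) = (log(tr(P) / size(P, 1)), log(det(P)))  # x and y of a TraDe plot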

        Examples

        using PosDefManifold
         P=randP(3)
         t, d=trade(P)  # equivalent to (t, d)=trade(P)
         
        @@ -95,4 +98,4 @@
         end
         x=log.(x./n)
         y=log.(y)
        -plot(x, y, seriestype=:scatter)
        source
        diff --git a/docs/build/statistics/index.html b/docs/build/statistics/index.html index 2d79f2c..928a2d0 100644 --- a/docs/build/statistics/index.html +++ b/docs/build/statistics/index.html @@ -1,10 +1,10 @@ -statistics.jl · PosDefManifold

        statistics.jl

        Unit for statistics, probability and related functions.

        Category                   Output
        1. Probability             functions relating to probability
        2. Descriptive Statistics  functions relating to descriptive statistics

        Probability

        Function    Description
        softmax     compute softmax probabilities
        PosDefManifold.softmax (Function)
        softmax(χ::Vector{T}) where T<:Real

        Given a real vector of $k$ non-negative scores $χ=c_1,...,c_k$, return the vector $π=p_1,...,p_k$ of their softmax probabilities, as per

        $p_i=\frac{\textrm{e}^{c_i}}{\sum_{j=1}^{k}\textrm{e}^{c_j}}$.
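        A numerically stable sketch of this definition (softmaxSketch is a hypothetical name; subtracting the maximum score leaves the probabilities unchanged but prevents overflow in exp):

        softmaxSketch(χ::Vector{<:Real}) = (e = exp.(χ .- maximum(χ)); e ./ sum(e))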

        Examples

        χ=[1.0, 2.3, 0.4, 5.0]
        -π=softmax(χ)
        source

        Descriptive Statistics

        Function    Description
        mean        scalar mean of real or complex numbers according to the specified metric
        std         scalar standard deviation of real or complex numbers according to the specified metric
        mean(metric::Metric, ν::Vector{T}) where T<:RealOrComplex

        See bottom of documentation of general function mean

        Statistics.std (Function)
        std(metric::Metric, ν::Vector{T};
             corrected::Bool=true,
        -    mean=nothing) where T<:RealOrComplex

        Standard deviation of $k$ real or complex scalars, using the specified metric of type Metric::Enumerated type and the specified mean if provided.

        Only the Euclidean and Fisher metrics are supported by this function. With the Euclidean metric it returns the output of the standard Julia std function; with the Fisher metric it returns the scalar geometric standard deviation, defined as

        $\sigma=\text{exp}\Big(\sqrt{k^{-1}\sum_{i=1}^{k}\text{ln}^2(v_i/\mu)}\Big)$.

        If corrected is true, then the sum is scaled with $k-1$, whereas if it is false the sum is scaled with $k$.
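        A sketch of this definition (geostd is a hypothetical name; the package obtains $μ$ via mean(Fisher, ν) when it is not provided):

        using Statistics
        function geostd(ν::Vector{<:Real}; corrected::Bool=true)
            μ = exp(mean(log, ν))                    # scalar geometric mean
            s = sum(log(v / μ)^2 for v in ν)
            return exp(√(s / (length(ν) - Int(corrected))))
        end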

        Examples

        using PosDefManifold
         # Generate 10 random numbers distributed as a chi-square with 2 df.
         ν=[randχ²(2) for i=1:10]
         arithmetic_sd=std(Euclidean, ν) # mean not provided
         geometric_mean=mean(Fisher, ν)
        -geometric_sd=std(Fisher, ν, mean=geometric_mean) # mean provided
        source
        diff --git a/docs/build/test/index.html b/docs/build/test/index.html index ccd9ace..b745998 100644 --- a/docs/build/test/index.html +++ b/docs/build/test/index.html @@ -1,2 +1,2 @@ -test.jl · PosDefManifold

        test.jl

        Most functions in PosDefManifold are tested, both for real and complex data input. This unit declares the function testall() that performs all tests.

        Some functions are fully tested, the others are just executed. Once you run it, for each method of each function, a ⭐ sign is printed if the test is successful, while a ⛔ sign is printed if the test is not successful. A ☆ sign is printed if the function has been executed correctly.

        Tests on functions for which a multi-threaded version exists are indicated by the symbol ( ⏩ ).

        If there are fails, the concerned functions will be listed as warnings.

        Note that the first time you execute the test it will take some time as the code will be compiled.

        Here below is the output of the testall() function (v0.1.3) run on the 20th of May 2019:

        ⭐ PosDefManifold testing utility⭐

        Starting tests...

        • Unit 'linearAlgebra.jl'

        typeofMatrix: ☆

        dim: ☆

        det1: ⭐ ⭐

        function tr1: ⭐ ⭐

        normalizeCol!: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        ispos: ⭐ ⭐

        colProd: ⭐ ⭐ ⭐ ⭐

        colNorm: ⭐ ⭐

        sumOfSqr: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        sumOfSqrDiag: ⭐ ⭐ ⭐

        sumOfSqrTril: ⭐ ⭐

        tr: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        quadraticForm: ⭐ ⭐ ⭐ ⭐ ⭐

        fidelity: ☆ ☆

        fDiag: ⭐

        DiagOfProd: ⭐ ⭐

        mgs: ⭐ ⭐

        fVec: ⭐ ⭐ ⭐ ⭐

        evd: ⭐ ⭐

        spectralFunctions: ☆ ☆ ☆

        pow: ⭐ ⭐ ⭐

        invsqrt: ⭐ ⭐ ⭐

        sqr: ⭐ ⭐ ⭐

        powerIterations: ⭐ ⭐ ⭐ ⭐ ⭐

        choL: ⭐ ⭐ ⭐

        • Unit 'signalProcessing.jl'

        randλ: ☆

        randΛ: ☆ ☆

        randU: ⭐ ⭐

        randP: ☆ ☆

        regularize!: ⭐ ⭐ ⭐ ⭐

        gram: ☆ ☆

        trade: ☆ ☆

        • Unit 'riemannianGeometry.jl'

        geodesic: ☆ ☆ ☆

        distanceSqr (I): ☆ ☆ ☆ ☆

        distanceSqr (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceSqr (III): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distance (I): ☆ ☆ ☆ ☆

        distance (II): ☆ ☆

        distanceSqrMat (I): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceSqrMat (I ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceSqrMat (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceSqrMat (II ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceMat (I): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceMat (I ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceMat (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        distanceMat (II ⏩ ): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        laplacian: ☆

        laplacianEigenMaps: ☆

        spectralEmbedding: ☆

        mean (I): ☆ ☆ ☆ ☆ ☆

        mean (II): ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        mean (⏩ ): ☆ ☆

        means: ☆ ☆ ☆

        means (⏩ ): ☆ ☆ ☆

        generalizedMean: ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐ ⭐

        generalizedMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆

        geometricMean: ☆ ☆ ☆ ☆ ☆ ☆

        geometricMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆

        logdet0Mean: ⭐ ⭐ ⭐ ⭐ ⭐

        logdet0Mean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆

        wasMean: ☆ ☆ ☆ ☆ ☆ ☆

        wasMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆

        powerMean: ☆ ☆ ☆ ☆ ☆ ☆

        powerMean(⏩ ): ☆ ☆ ☆ ☆ ☆ ☆

        logMap: ☆ ☆

        expMap: ☆ ☆

        vecP: ☆ ☆

        matP: ☆ ☆

        procrustes: ☆ ☆

        • Unit 'classification.jl'

        softmax: ⭐

        [ Info: All tests were succesful!

        diff --git a/src/PosDefManifold.jl b/src/PosDefManifold.jl index ebbaa86..03b33f3 100644 --- a/src/PosDefManifold.jl +++ b/src/PosDefManifold.jl @@ -1,7 +1,7 @@ # Main Module of the PosDefManifold Package for julia language # MIT License -# Copyright (c) 2019-21, Marco Congedo, CNRS, Grenobe, France: +# Copyright (c) 2019-22, Marco Congedo, CNRS, Grenobe, France: # https://sites.google.com/site/marcocongedo/home # __precompile__() diff --git a/src/linearAlgebra.jl b/src/linearAlgebra.jl index 07445e0..743c03c 100644 --- a/src/linearAlgebra.jl +++ b/src/linearAlgebra.jl @@ -1,7 +1,7 @@ # Unit linearAlgebra.jl, part of PosDefManifold Package for julia language # # MIT License -# Copyright (c) 2019-21, Marco Congedo, CNRS, Grenobe, France: +# Copyright (c) 2019-22, Marco Congedo, CNRS, Grenobe, France: # https://sites.google.com/site/marcocongedo/home # # DESCRIPTION @@ -27,7 +27,7 @@ # By convention their name begin with underscore char # ----------------------------------------------------------- -# return a vector of ranges partitioning lineraly and +# return a vector of ranges partitioning linearly and # as much as possible evenly `n` elements in `threads` ranges. # `threads` is the number of threads to which the ranges are to be # dispatched. If `threads` is not provided, it is set to the number diff --git a/src/riemannianGeometry.jl b/src/riemannianGeometry.jl index db017f7..bb7bf07 100644 --- a/src/riemannianGeometry.jl +++ b/src/riemannianGeometry.jl @@ -1,7 +1,7 @@ # Unit riemannianGeometry.jl, part of PosDefManifold Package for julia language # # MIT License -# Copyright (c) 2019-21, Marco Congedo, CNRS, Grenobe, France: +# Copyright (c) 2019-22, Marco Congedo, CNRS, Grenobe, France: # https://sites.google.com/site/marcocongedo/home # # DESCRIPTION @@ -1657,7 +1657,7 @@ function geometricMean( 𝐏::ℍVector; (k, n, type, thr, n², iter, conv, oldconv, converged, ς, threaded, tolerance, v) = _setVar_IterAlg(𝐏, w, ✓w, tol, ⏩) _giveStartInfo_IterAlg(threaded, verbose, "geometricMean Fixed-Point") - init == nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) + init === nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) 💡 = similar(M, type) # new iteration solution if threaded 𝐐 = 𝕄Vector(repeat([𝐏[1]], thr)) end # memory pre-allocation for fVec function c1(M⁻½::ℍ, 𝐏::ℍVector) = cong(M⁻½, 𝐏, ℍVector) # utility function @@ -1850,7 +1850,7 @@ function geometricpMean(𝐏::ℍVector, p::Real=goldeninv; (k, n, type, thr, n², iter, conv, oldconv, converged, ς, threaded, tolerance, v) = _setVar_IterAlg(𝐏, w, ✓w, tol, ⏩) _giveStartInfo_IterAlg(threaded, verbose, "geometricpMean Fixed-Point") 𝑓, d², q, ςHasNotChanged, ςold = Fisher, distance², p-1, 0, 0 - init == nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) + init === nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) 💡 = similar(M, type) 𝐑 = similar(𝐏) if threaded 𝐐 = similar(𝐏) end @@ -2018,7 +2018,7 @@ function logdet0Mean(𝐏::Union{ℍVector, 𝔻Vector}; (k, n, type, thr, n², iter, conv, oldconv, converged, ς, threaded, tolerance, v) = _setVar_IterAlg(𝐏, w, ✓w, tol, ⏩) _giveStartInfo_IterAlg(threaded, verbose, "logDet0Mean Fixed-Point") 𝕋, l = typeofMatrix(𝐏), k/2 - init == nothing ? M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = 𝕋(init) + init === nothing ? 
M = mean(logEuclidean, 𝐏; w=v, ✓w=false, ⏩=⏩) : M = 𝕋(init) 💡 = similar(M, type) if threaded 𝐐 = similar(𝐏) end @@ -2166,7 +2166,7 @@ function wasMean(𝐏::ℍVector; (k, n, type, thr, n², iter, conv, oldconv, converged, ς, threaded, tolerance, v) = _setVar_IterAlg(𝐏, w, ✓w, tol, ⏩) _giveStartInfo_IterAlg(threaded, verbose, "wasMean Fixed-Point") - init == nothing ? M = generalizedMean(𝐏, 0.5; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) + init === nothing ? M = generalizedMean(𝐏, 0.5; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) 💡 = similar(M, type) if threaded 𝐐 = similar(𝐏) end @@ -2355,7 +2355,7 @@ function powerMean(𝐏::ℍVector, p::Real; _giveStartInfo_IterAlg(threaded, verbose, "powerMean Fixed-Point") absp, sqrtn = abs(p), √n r = -0.375/absp - init == nothing ? M = generalizedMean(𝐏, p; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) + init === nothing ? M = generalizedMean(𝐏, p; w=v, ✓w=false, ⏩=⏩) : M = ℍ(init) p<0 ? X=ℍ(M^(0.5)) : X=ℍ(M^(-0.5)) 💡, H, 𝒫 = similar(X, type), similar(X, type), similar(𝐏) p<0 ? 𝒫=[inv(P) for P in 𝐏] : 𝒫=𝐏 diff --git a/src/signalProcessing.jl b/src/signalProcessing.jl index f46c1f8..56ab2f7 100644 --- a/src/signalProcessing.jl +++ b/src/signalProcessing.jl @@ -1,7 +1,7 @@ # Unit signalProcessing.jl, part of PosDefManifold Package for julia language # # MIT License -# Copyright (c) 2019-21, Marco Congedo, CNRS, Grenobe, France: +# Copyright (c) 2019-22, Marco Congedo, CNRS, Grenobe, France: # https://sites.google.com/site/marcocongedo/home # # DESCRIPTION diff --git a/src/statistics.jl b/src/statistics.jl index 73d0bfd..9368c48 100644 --- a/src/statistics.jl +++ b/src/statistics.jl @@ -1,7 +1,7 @@ # Unit statistics.jl, part of PosDefManifold Package for julia language # # MIT License -# Copyright (c) 2019-21, Marco Congedo, CNRS, Grenobe, France: +# Copyright (c) 2019-22, Marco Congedo, CNRS, Grenobe, France: # https://sites.google.com/site/marcocongedo/home # # DESCRIPTION @@ -10,7 +10,7 @@ # CONTENT # 1. Utilities # 2. Probability -# 3. Descriptive Statistocs +# 3. Descriptive Statistics # __________________________________________________________________ @@ -126,7 +126,7 @@ function std(metric::Metric, ν::Vector{T}; if metric == Euclidean return std(ν; corrected=corrected, mean=mean) elseif metric == Fisher - mean==nothing ? μ=mean(Fisher, ν) : μ=mean + mean===nothing ? μ=mean(Fisher, ν) : μ=mean if corrected return exp(√(𝚺(log(w/μ)^2 for w in ν)/(length(ν)-1))) else return exp(√(𝛍(log(w/μ)^2 for w in ν))) end diff --git a/src/test.jl b/src/test.jl index ce3404f..5a6ae21 100644 --- a/src/test.jl +++ b/src/test.jl @@ -1,16 +1,16 @@ # Unit test.jl, part of PosDefManifold Package for julia language # # MIT License -# Copyright (c) 2019-21, Marco Congedo, CNRS, Grenobe, France: +# Copyright (c) 2019-22, Marco Congedo, CNRS, Grenobe, France: # https://sites.google.com/site/marcocongedo/home # # DESCRIPTION # This Unit tests all functions in PosDefManifold. # Some functions are fully tested, the others are just executed. -# Unce you ran it, for each method of each function, +# Unce you run it, for each method of each function, # a ⭐ sign is printed if the test is succesful, while # a ⛔ sign is printed if the test is not succesful. -# a ☆ sign is printed if the function has been executed correctly. +# A ☆ sign is printed if the function has been executed correctly. # If there are fails, the concerned functions will be listed as Warnings # and returned by the testall() function as an array of strings