diff --git a/Project.toml b/Project.toml
new file mode 100644
index 0000000..d2a5219
--- /dev/null
+++ b/Project.toml
@@ -0,0 +1,23 @@
+name = "HALeqO"
+uuid = "8c1b84d6-365e-44ef-b36c-195f7cd27d2c"
+authors = ["Alberto De Marchi "]
+version = "0.2.1"
+
+[deps]
+CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
+CUTEst = "1b53aba6-35b6-5f92-a507-53c67d53f819"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+KNITRO = "67920dd8-b58e-52a8-8622-53c4cffbe346"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+NCL = "af6b844e-5feb-47ff-8350-2ae20a94a0cf"
+NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
+NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71"
+NLPModelsKnitro = "bec4dd0d-7755-52d5-9a02-22f0ffc7efcb"
+Percival = "01435c0c-c90d-11e9-3788-63660f8fbccc"
+Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+PositiveFactorizations = "85a6dd25-e78a-55b7-8502-1745935b8125"
+Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
+QDLDL = "bfc457fd-c171-5ab7-bd9e-d5dbfc242d63"
+SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"
+SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
diff --git a/README.md b/README.md
index 676594c..eec480d 100644
--- a/README.md
+++ b/README.md
@@ -22,15 +22,34 @@ HALeqO.jl uses the free [QDLDL.jl](https://github.com/osqp/QDLDL.jl) routines as
 
 ### Citing
 
-If you are using HALeqO for your work, we encourage you to
+If you find this code useful, you can [cite](CITATION.bib) the related [paper](https://doi.org/10.1109/CDC45484.2021.9683199) as
+
+    @inproceedings{demarchi2021augmented,
+        author = {De~Marchi, Alberto},
+        title = {Augmented {L}agrangian methods as dynamical systems for constrained optimization},
+        year = {2021},
+        month = {12},
+        pages = {6533--6538},
+        booktitle = {2021 60th {IEEE} {C}onference on {D}ecision and {C}ontrol ({CDC})},
+        doi = {10.1109/CDC45484.2021.9683199},
+    }
 
-* [Cite](CITATION.bib) the related [paper](https://doi.org/10.1109/CDC45484.2021.9683199),
-* Put a star on this repository.
+### Benchmarks
 
-### Bug reports and support
+We compared HALeqO against [Ipopt](https://coin-or.github.io/Ipopt/), via the wrapper provided by [NLPModelsIpopt](https://github.com/JuliaSmoothOptimizers/NLPModelsIpopt.jl), and [NCL.jl](https://github.com/JuliaSmoothOptimizers/NCL.jl) invoking Ipopt.
+There is also [Percival](https://github.com/JuliaSmoothOptimizers/Percival.jl) now.
+See `run_benchmarks.jl` in the `tests` folder.
 
-Please report any issues via the [issue tracker](https://github.com/aldma/HALeqO.jl/issues). All types of issues are welcome including bug reports, typos, feature requests and so on.
+To use the provided test codes (originally compatible with Julia version 1.8.5, tested on Linux x86_64, August 2021):
 
-### Benchmarks
+* start Julia from this directory with `julia --project=.` (or with the relative path to the `HALeqO` directory from somewhere else);
+
+* do `]instantiate` to download all dependencies (only required the first time) and go back to standard Julia prompt with backspace afterwards;
+
+* load the package with `using HALeqO`
+
+To run the CUTEst benchmarks:
+
+* do `include("tests/run_benchmarks.jl")` to invoke the solvers, generate the results, print statistics and save figures.
 
-We compared HALeqO against [IPOPT](https://coin-or.github.io/Ipopt/), via the wrapper provided by [NLPModelsIpopt](https://github.com/JuliaSmoothOptimizers/NLPModelsIpopt.jl), and [NCL.jl](https://github.com/JuliaSmoothOptimizers/NCL.jl) invoking IPOPT. See `run_benchmarks.jl` in the `tests` folder.
+Options for benchmarking (e.g., max time and tolerance) can be modified at the beginning of the `tests/run_benchmarks.jl` file.
\ No newline at end of file
diff --git a/tests/run_benchmarks.jl b/tests/run_benchmarks.jl
index 9a95342..575290e 100644
--- a/tests/run_benchmarks.jl
+++ b/tests/run_benchmarks.jl
@@ -1,7 +1,7 @@
 # load solvers
 using HALeqO
 using NLPModelsIpopt
-using NCL # from https://github.com/JuliaSmoothOptimizers/NCL.jl, using IPOPT
+#using NCL # from https://github.com/JuliaSmoothOptimizers/NCL.jl, using Ipopt
 using Percival
 
 # load problems
@@ -29,7 +29,7 @@ MAXITER = 3000 # default 3000
 problems = (CUTEstModel(probname) for probname in probnames)
 solvers = Dict{Symbol,Function}(
     :HALeqO => prob -> haleqo(prob; tol = TOL, max_iter = MAXITER),
-    :NCL => prob -> NCLSolve(prob, opt_tol = TOL, feas_tol = TOL, max_iter = MAXITER),
+    #:NCL => prob -> NCLSolve(prob, opt_tol = TOL, feas_tol = TOL, max_iter = MAXITER),
     :IPOPT => prob -> ipopt(prob; tol = TOL, print_level = 0, max_iter = MAXITER),
     :Percival => prob -> percival(prob; atol = TOL, rtol = 0.0, ctol = TOL, max_iter = MAXITER),
 )
@@ -68,3 +68,6 @@ tprof = time_profile(stats, cost)
 for solver ∈ keys(stats)
     CSV.write("data/" * filename * "_" * String(solver) * ".csv", stats[solver], header = true)
 end
+
+savefig(pprof,"data/" * filename * "_" * "perfprof" * ".pdf")
+savefig(tprof,"data/" * filename * "_" * "timeprof" * ".pdf")
\ No newline at end of file
diff --git a/tests/run_tests.jl b/tests/run_tests.jl
index bf12666..554885b 100644
--- a/tests/run_tests.jl
+++ b/tests/run_tests.jl
@@ -15,12 +15,16 @@ probnames = CUTEst.select(
     only_equ_con = true,
 )
 
+#nlp = CUTEstModel("BOXBOD")
+#out = haleqo(nlp, tol=1e-6, max_iter=1000)
+#finalize(nlp)
+
 # setup testing
 problems = (CUTEstModel(probname) for probname in probnames)
 solver = prob -> haleqo(prob)
 
 # run solver!
-stats = solve_problems(solver, problems)
+stats = solve_problems(solver, "haleqo", problems)
 
 # get statistics
 @info "HALeqO statuses" count_unique(stats.status)
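
For a quick end-to-end check before launching the full CUTEst benchmark, the commented-out lines in tests/run_tests.jl suggest a minimal sketch like the one below. The problem name BOXBOD, the haleqo keyword arguments, and the finalize call are taken from the scripts in this patch; printing out.status assumes haleqo returns a SolverCore-style execution-stats object, which may differ in practice.

    # Minimal single-problem run (sketch; mirrors the commented-out block in tests/run_tests.jl)
    using HALeqO, CUTEst

    nlp = CUTEstModel("BOXBOD")                     # decode one CUTEst test problem
    out = haleqo(nlp; tol = 1e-6, max_iter = 1000)  # solve it with HALeqO
    println("status: ", out.status)                 # assumes a SolverCore-style stats object
    finalize(nlp)                                   # release the CUTEst problem before decoding another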