-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathbetts_10_88_opty.py
135 lines (102 loc) · 2.92 KB
/
betts_10_88_opty.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
# %%
"""
Nonconvex Delay
===============
This is example 10.88 from Betts' book "Practical Methods for Optimal Control
Using Nonlinear Programming", 3rd edition, Chapter 10: Test Problems.
(actually it is 10.90, as I use :math:`\\sigma = 5`)
This simulation shows the use of instance constraints of the form:
:math:`x_i(t_0) = x_j(t_f)`.
**States**
- :math:`x_1,....., x_N` : state variables
**Controls**
- :math:`u_1,....., u_N` : control variables
Note: I simply copied the equations of motion, the bounds and the constants
from the book. I do not know their meaning.
"""
import numpy as np
import sympy as sm
import sympy.physics.mechanics as me
from opty.direct_collocation import Problem
from opty.utils import create_objective_function
# %%
# Equations of Motion
# -------------------
t = me.dynamicsymbols._t

N = 20
tf = 0.1
sigma = 5

x = me.dynamicsymbols(f'x:{N}')
u = me.dynamicsymbols(f'u:{N}')

# For the first ``sigma`` states the delayed term x[i - sigma] is not
# available, so the constant 1 (squared) stands in for it; afterwards the
# squared delayed state itself appears.
eom = sm.Matrix([
    -x[i].diff(t) + (1 if i < sigma else x[i - sigma])**2 - u[i]
    for i in range(N)
])
sm.pprint(eom)
# %%
# Define and Solve the Optimization Problem
# -----------------------------------------
num_nodes = 501
t0 = 0.0
interval_value = (tf - t0) / (num_nodes - 1)

state_symbols = x
# Fixed the misspelling ``unkonwn_input_trajectories`` from the original.
unknown_input_trajectories = u

# %%
# Specify the objective function and form the gradient.
# Minimize the integral of sum_i (x_i^2 + u_i^2) over the time horizon.
objective = sm.Integral(sum([x[i]**2 + u[i]**2 for i in range(N)]), t)
obj, obj_grad = create_objective_function(
    objective,
    state_symbols,
    unknown_input_trajectories,
    tuple(),
    num_nodes,
    interval_value,
)
# %%
# x_0 starts at 1.0; every later state must start where its predecessor
# ends: x_i(t0) = x_{i-1}(tf).
instance_constraints = tuple(
    [x[0].func(t0) - 1.0]
    + [x[k].func(t0) - x[k - 1].func(tf) for k in range(1, N)]
)

# Every state except x_0 is bounded below by 0.7 (no upper bound).
limit_value = np.inf
bounds = {x[k]: (0.7, limit_value) for k in range(1, N)}
# %%
# Iterate
# -------
# Assemble the collocation problem; instance constraints and bounds are
# handed to opty directly.
prob = Problem(
    obj,
    obj_grad,
    eom,
    state_symbols,
    num_nodes,
    interval_value,
    instance_constraints=instance_constraints,
    bounds=bounds,
)
prob.add_option('max_iter', 1000)

# A small constant value for every free variable serves as the first guess.
initial_guess = 0.1 * np.ones(prob.num_free)
# Find the optimal solution.
# Solve twice, warm-starting the second solve from the first solution.
for i in range(2):
    solution, info = prob.solve(initial_guess)
    initial_guess = solution
    print(info['status_msg'])
    # Hoist the values into locals and use double-quoted f-strings: the
    # original nested single quotes inside single-quoted f-strings, which
    # is a SyntaxError on Python < 3.12 (PEP 701 only relaxed this in 3.12).
    obj_val = info['obj_val']
    book_value = 2.79685770
    print(f"Objective value achieved: {obj_val:.4f}, as per the book " +
          f"it is {book_value}, so the error is: "
          f"{(obj_val - book_value)/book_value*100:.3f} % ")
    print('\n')
# %%
# Plot the optimal state and input trajectories (from the final iteration).
prob.plot_trajectories(solution)
# %%
# Plot the constraint violations.
prob.plot_constraint_violations(solution)
# %%
# Plot the objective function.
prob.plot_objective_value()
# %%
# Are the inequality constraints satisfied?
# The bounds apply only to x_1 ... x_{N-1}; their trajectories occupy
# solution[num_nodes : N*num_nodes] (the free vector stacks the states
# state-major, x_0 first). The original slice [1 : N*num_nodes-1] also
# covered the unbounded x_0 trajectory and dropped x_{N-1}'s last node.
# NOTE(review): assumes opty's state-major free-variable layout — confirm
# against the opty documentation.
min_x = np.min(solution[num_nodes: N*num_nodes])
if min_x >= 0.7:
    print(f"Minimal value of the x\u1D62 is: {min_x:.12f} >= 0.7, so satisfied")
else:
    print(f"Minimal value of the x\u1D62 is: {min_x:.12f} < 0.7, so not satisfied")