MixedIntegerConstraintsReturn contains bounds for variables. (#277)
hongkai-dai authored Feb 4, 2021
1 parent 146a6e3 commit 0757db2
Showing 2 changed files with 121 additions and 18 deletions.
59 changes: 41 additions & 18 deletions neural_network_lyapunov/gurobi_torch_mip.py
@@ -36,6 +36,14 @@ def __init__(self):
         self.Aeq_slack = None
         self.Aeq_binary = None
         self.rhs_eq = None
+        # Lower and upper bounds on the variables. We will use these bounds
+        # to modify the variable bounds v.lb and v.ub, where v is a gurobi
+        # variable object. Note that the inequality (and equality)
+        # constraints should still include the constraints lb <= var <= ub.
+        self.input_lo = None
+        self.input_up = None
+        self.slack_lo = None
+        self.slack_up = None
         self.binary_up = None
         self.binary_lo = None
 
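As a usage sketch (not part of this commit): a caller populates the new bound fields on the constraint-return object before passing it to add_mixed_integer_linear_constraints. The tensor values below are illustrative only.

import torch

from neural_network_lyapunov.gurobi_torch_mip import (
    MixedIntegerConstraintsReturn)

# Hypothetical bounds: restrict two input variables to [-1, 1] x [0, 2].
mip_cnstr_return = MixedIntegerConstraintsReturn()
mip_cnstr_return.input_lo = torch.tensor([-1., 0.], dtype=torch.float64)
mip_cnstr_return.input_up = torch.tensor([1., 2.], dtype=torch.float64)
# slack_lo/slack_up and binary_lo/binary_up stay None when unused.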
@@ -356,18 +364,45 @@ def add_mixed_integer_linear_constraints(self, mip_cnstr_return,
         """
         # Do some check
         assert (isinstance(mip_cnstr_return, MixedIntegerConstraintsReturn))
+
+        def set_var_bound(variables, var_lo, var_up):
+            if var_lo is not None:
+                assert (isinstance(var_lo, torch.Tensor))
+                assert (var_lo.shape == (len(variables), ))
+                for i in range(len(variables)):
+                    if variables[i].lb < var_lo[i].item():
+                        variables[i].lb = var_lo[i].item()
+            if var_up is not None:
+                assert (isinstance(var_up, torch.Tensor))
+                assert (var_up.shape == (len(variables), ))
+                for i in range(len(variables)):
+                    if variables[i].ub > var_up[i].item():
+                        variables[i].ub = var_up[i].item()
+            if var_lo is not None or var_up is not None:
+                self.gurobi_model.update()
+
+        # Enforce the lower and upper bounds on the input variables, if any.
+        set_var_bound(input_vars, mip_cnstr_return.input_lo,
+                      mip_cnstr_return.input_up)
+
         # First add the slack variables
         slack_size = 0
         if mip_cnstr_return.Ain_slack is not None:
             slack_size = mip_cnstr_return.Ain_slack.shape[1]
         elif mip_cnstr_return.Aeq_slack is not None:
             slack_size = mip_cnstr_return.Aeq_slack.shape[1]
+        elif mip_cnstr_return.slack_lo is not None:
+            slack_size = mip_cnstr_return.slack_lo.numel()
+        elif mip_cnstr_return.slack_up is not None:
+            slack_size = mip_cnstr_return.slack_up.numel()
         if slack_size != 0:
             assert (isinstance(slack_var_name, str))
             slack = self.addVars(slack_size,
                                  lb=-gurobipy.GRB.INFINITY,
                                  vtype=gurobipy.GRB.CONTINUOUS,
                                  name=slack_var_name)
+            set_var_bound(slack, mip_cnstr_return.slack_lo,
+                          mip_cnstr_return.slack_up)
         else:
             slack = []
         # Now add the binary variables
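One behavior worth spelling out: set_var_bound only ever tightens bounds. A lower bound is raised only when it exceeds the variable's current lb, and an upper bound is lowered only when it is below the current ub, so a looser requested bound is a no-op. A minimal Gurobi-free sketch of that rule (the helper name tightened is hypothetical):

def tightened(lb, ub, var_lo=None, var_up=None):
    # Keep the tighter of the existing and the requested bounds.
    if var_lo is not None:
        lb = [max(a, b) for a, b in zip(lb, var_lo)]
    if var_up is not None:
        ub = [min(a, b) for a, b in zip(ub, var_up)]
    return lb, ub


# Mirrors the input-bound test below: variables start with lb=-2, ub=3.
lb, ub = tightened([-2., -2.], [3., 3.], var_lo=[-4., 1.])
assert lb == [-2., 1.] and ub == [3., 3.]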
@@ -382,24 +417,12 @@ def add_mixed_integer_linear_constraints(self, mip_cnstr_return,
             binary_size = mip_cnstr_return.binary_up.numel()
         if binary_size != 0:
             assert (isinstance(binary_var_name, str))
-            if mip_cnstr_return.binary_lo is None and\
-                    mip_cnstr_return.binary_up is None:
-                binary = self.addVars(binary_size,
-                                      lb=-gurobipy.GRB.INFINITY,
-                                      vtype=gurobipy.GRB.BINARY,
-                                      name=binary_var_name)
-            else:
-                binary_lo = mip_cnstr_return.binary_lo if\
-                    mip_cnstr_return.binary_lo is not None else\
-                    -gurobipy.GRB.INFINITY
-                binary_up = mip_cnstr_return.binary_up if\
-                    mip_cnstr_return.binary_up is not None else\
-                    gurobipy.GRB.INFINITY
-                binary = self.addVars(binary_size,
-                                      lb=binary_lo,
-                                      ub=binary_up,
-                                      vtype=gurobipy.GRB.BINARY,
-                                      name=binary_var_name)
+            binary = self.addVars(binary_size,
+                                  lb=-gurobipy.GRB.INFINITY,
+                                  vtype=gurobipy.GRB.BINARY,
+                                  name=binary_var_name)
+            set_var_bound(binary, mip_cnstr_return.binary_lo,
+                          mip_cnstr_return.binary_up)
         else:
             binary = []
 
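The refactored binary path now mirrors the slack path: the variables are created first, then set_var_bound tightens lb and ub in place. One consequence is that a binary can be fixed purely through its bounds. A hedged sketch, assuming a working gurobipy installation (the argument layout copies the tests below):

import torch

from neural_network_lyapunov.gurobi_torch_mip import (
    GurobiTorchMIP, MixedIntegerConstraintsReturn)

dtype = torch.float64
mip = GurobiTorchMIP(dtype)
mip_cnstr_return = MixedIntegerConstraintsReturn()
# Pin the single binary variable to 1 through its bounds.
mip_cnstr_return.binary_lo = torch.tensor([1], dtype=dtype)
mip_cnstr_return.binary_up = torch.tensor([1], dtype=dtype)
slack, binary = mip.add_mixed_integer_linear_constraints(
    mip_cnstr_return, [], None, None, "binary", "ineq", "eq", "out")
assert binary[0].lb == 1 and binary[0].ub == 1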
80 changes: 80 additions & 0 deletions neural_network_lyapunov/test/test_gurobi_torch_mip.py
@@ -791,6 +791,86 @@ def check_binary_bounds(binary_lo, binary_up, lo_expected,
         check_binary_bounds(torch.tensor([0, 1], dtype=dtype),
                             torch.tensor([0, 1], dtype=dtype), [0, 1], [0, 1])
 
+    def test_add_mixed_integer_linear_constraints5(self):
+        # Test adding bounds on the input variables.
+        dtype = torch.float64
+
+        def check_input_bounds(input_lo, input_up, lo_expected, up_expected):
+            mip_cnstr_return = gurobi_torch_mip.MixedIntegerConstraintsReturn()
+            mip_cnstr_return.input_lo = input_lo
+            mip_cnstr_return.input_up = input_up
+            mip = gurobi_torch_mip.GurobiTorchMIP(dtype)
+            x = mip.addVars(len(lo_expected), lb=-2, ub=3)
+            self.assertEqual(len(mip.Ain_r_row), 4)
+            self.assertEqual(len(mip.Ain_r_col), 4)
+            self.assertEqual(len(mip.Ain_r_val), 4)
+            self.assertEqual(len(mip.rhs_in), 4)
+            slack, binary = mip.add_mixed_integer_linear_constraints(
+                mip_cnstr_return, x, None, None, "binary", "ineq", "eq", "out")
+            self.assertEqual(len(slack), 0)
+            self.assertEqual(len(binary), 0)
+            for i in range(len(x)):
+                self.assertEqual(x[i].lb, lo_expected[i])
+                self.assertEqual(x[i].ub, up_expected[i])
+            self.assertEqual(len(mip.Ain_r_row), 4)
+            self.assertEqual(len(mip.Ain_r_col), 4)
+            self.assertEqual(len(mip.Ain_r_val), 4)
+            self.assertEqual(len(mip.rhs_in), 4)
+            self.assertEqual(len(mip.Aeq_r_row), 0)
+            self.assertEqual(len(mip.Aeq_r_col), 0)
+            self.assertEqual(len(mip.Aeq_r_val), 0)
+            self.assertEqual(len(mip.Ain_zeta_row), 0)
+            self.assertEqual(len(mip.Ain_zeta_col), 0)
+            self.assertEqual(len(mip.Ain_zeta_val), 0)
+            self.assertEqual(len(mip.Aeq_zeta_row), 0)
+            self.assertEqual(len(mip.Aeq_zeta_col), 0)
+            self.assertEqual(len(mip.Aeq_zeta_val), 0)
+
+        check_input_bounds(None, torch.tensor([0, 5], dtype=dtype), [-2, -2],
+                           [0, 3])
+        check_input_bounds(torch.tensor([-4, 1], dtype=dtype), None, [-2, 1],
+                           [3, 3])
+        check_input_bounds(torch.tensor([-4, -1], dtype=dtype),
+                           torch.tensor([1, 6], dtype=dtype), [-2, -1], [1, 3])
+
+    def test_add_mixed_integer_linear_constraints6(self):
+        # Test adding bounds on the slack variables.
+        dtype = torch.float64
+
+        def check_slack_bounds(slack_lo, slack_up, lo_expected, up_expected):
+            mip_cnstr_return = gurobi_torch_mip.MixedIntegerConstraintsReturn()
+            mip_cnstr_return.slack_lo = slack_lo
+            mip_cnstr_return.slack_up = slack_up
+            mip = gurobi_torch_mip.GurobiTorchMIP(dtype)
+            slack, binary = mip.add_mixed_integer_linear_constraints(
+                mip_cnstr_return, [], None, "slack", "binary", "ineq", "eq",
+                "out")
+            self.assertEqual(len(slack), len(lo_expected))
+            self.assertEqual(len(binary), 0)
+            for i in range(len(slack)):
+                self.assertEqual(slack[i].lb, lo_expected[i])
+                self.assertEqual(slack[i].ub, up_expected[i])
+            self.assertEqual(len(mip.Ain_r_row), 0)
+            self.assertEqual(len(mip.Ain_r_col), 0)
+            self.assertEqual(len(mip.Ain_r_val), 0)
+            self.assertEqual(len(mip.rhs_in), 0)
+            self.assertEqual(len(mip.Aeq_r_row), 0)
+            self.assertEqual(len(mip.Aeq_r_col), 0)
+            self.assertEqual(len(mip.Aeq_r_val), 0)
+            self.assertEqual(len(mip.Ain_zeta_row), 0)
+            self.assertEqual(len(mip.Ain_zeta_col), 0)
+            self.assertEqual(len(mip.Ain_zeta_val), 0)
+            self.assertEqual(len(mip.Aeq_zeta_row), 0)
+            self.assertEqual(len(mip.Aeq_zeta_col), 0)
+            self.assertEqual(len(mip.Aeq_zeta_val), 0)
+
+        check_slack_bounds(None, torch.tensor([0, 5], dtype=dtype),
+                           [-np.inf, -np.inf], [0, 5])
+        check_slack_bounds(torch.tensor([-4, 1], dtype=dtype), None, [-4, 1],
+                           [np.inf, np.inf])
+        check_slack_bounds(torch.tensor([-4, -1], dtype=dtype),
+                           torch.tensor([1, 6], dtype=dtype), [-4, -1], [1, 6])
+
 
 class TestGurobiTorchMILP(unittest.TestCase):
     def test_setObjective(self):
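The repeated length-4 assertions in test_add_mixed_integer_linear_constraints5 encode the main point of the commit: addVars(2, lb=-2, ub=3) records one inequality row per finite bound (four rows in total), while applying input_lo/input_up afterwards only updates v.lb and v.ub and adds no rows. A condensed, illustrative sketch of that invariant:

import torch

from neural_network_lyapunov import gurobi_torch_mip

mip = gurobi_torch_mip.GurobiTorchMIP(torch.float64)
x = mip.addVars(2, lb=-2, ub=3)
assert len(mip.rhs_in) == 4  # two finite lower + two finite upper bounds

mip_cnstr_return = gurobi_torch_mip.MixedIntegerConstraintsReturn()
mip_cnstr_return.input_lo = torch.tensor([-4., 1.], dtype=torch.float64)
mip.add_mixed_integer_linear_constraints(
    mip_cnstr_return, x, None, None, "binary", "ineq", "eq", "out")
assert len(mip.rhs_in) == 4  # bounds went into x[i].lb/x[i].ub, not new rows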
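The slack test works the same way: a bound side left as None keeps the infinite default from addVars, which check_slack_bounds compares against plus or minus np.inf. A hedged standalone sketch of its first case:

import numpy as np
import torch

from neural_network_lyapunov import gurobi_torch_mip

mip = gurobi_torch_mip.GurobiTorchMIP(torch.float64)
mip_cnstr_return = gurobi_torch_mip.MixedIntegerConstraintsReturn()
mip_cnstr_return.slack_up = torch.tensor([0., 5.], dtype=torch.float64)
slack, _ = mip.add_mixed_integer_linear_constraints(
    mip_cnstr_return, [], None, "slack", "binary", "ineq", "eq", "out")
assert slack[0].lb == -np.inf and slack[0].ub == 0.0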
