addVars takes vector bounds. MIPResult takes binary bounds. (#276)
hongkai-dai authored Feb 4, 2021
1 parent bde7615 commit 146a6e3
Showing 3 changed files with 181 additions and 30 deletions.
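In brief: GurobiTorchMIP.addVars now accepts per-variable bounds as torch tensors (scalars still work), and MixedIntegerConstraintsReturn gains binary_lo/binary_up fields that bound the binary variables. A minimal sketch of the vector-bounds call, based on the tests added in this commit (the import path is assumed from the repository layout):

import numpy as np
import torch
import gurobipy
from neural_network_lyapunov import gurobi_torch_mip

mip = gurobi_torch_mip.GurobiTorchMIP(torch.float64)
# One bound entry per variable; np.inf marks an unbounded side.
lb = torch.tensor([-2., -np.inf, 4.], dtype=torch.float64)
ub = torch.tensor([4., 3., np.inf], dtype=torch.float64)
x = mip.addVars(3, lb=lb, ub=ub, vtype=gurobipy.GRB.CONTINUOUS)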
@@ -162,6 +162,11 @@ def compute_u(model, x):
parser.add_argument("--pretrain_num_epochs", type=int, default=20)
parser.add_argument("--enable_wandb", action="store_true")
parser.add_argument("--train_adversarial", action="store_true")
parser.add_argument(
"--training_set",
type=str,
default=None,
help="path to the training set for adversarial training.")
args = parser.parse_args()
dt = 0.01
dtype = torch.float64
@@ -332,9 +337,16 @@ def compute_u(model, x):
dut.lyapunov_derivative_mip_pool_solutions = 1000
dut.add_positivity_adversarial_state = True
dut.add_derivative_adversarial_state = True
positivity_state_samples_init = utils.uniform_sample_in_box(
x_lo, x_up, 1000)
derivative_state_samples_init = positivity_state_samples_init
if args.training_set:
training_set = torch.load(args.training_set)
positivity_state_samples_init = training_set[
"positivity_state_samples_all"]
derivative_state_samples_init = training_set[
"derivative_state_samples_all"]
else:
positivity_state_samples_init = utils.uniform_sample_in_box(
x_lo, x_up, 1000)
derivative_state_samples_init = positivity_state_samples_init
result = dut.train_adversarial(positivity_state_samples_init,
derivative_state_samples_init, options)
else:
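The new --training_set flag warm-starts adversarial training from saved samples instead of uniform samples in the box [x_lo, x_up]. A sketch of writing a compatible file (the dictionary keys come from the diff above; the sample shapes and file name are assumptions):

import torch

samples = torch.rand((1000, 2), dtype=torch.float64)  # hypothetical state samples
torch.save(
    {
        "positivity_state_samples_all": samples,
        "derivative_state_samples_all": samples,
    }, "training_set.pt")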
81 changes: 57 additions & 24 deletions neural_network_lyapunov/gurobi_torch_mip.py
@@ -36,6 +36,8 @@ def __init__(self):
self.Aeq_slack = None
self.Aeq_binary = None
self.rhs_eq = None
self.binary_up = None
self.binary_lo = None


class GurobiTorchMIP:
@@ -87,8 +89,17 @@ def addVars(self,
"""
@return new_vars_list A list of new variables.
"""
if isinstance(lb, float) or isinstance(lb, int):
lb = torch.full((num_vars, ), lb, dtype=self.dtype)
if isinstance(ub, float) or isinstance(ub, int):
ub = torch.full((num_vars, ), ub, dtype=self.dtype)
assert (isinstance(lb, torch.Tensor))
assert (isinstance(ub, torch.Tensor))
assert (lb.shape == (num_vars, ))
assert (ub.shape == (num_vars, ))
new_vars = self.gurobi_model.addVars(num_vars,
lb=lb,
ub=ub,
vtype=vtype,
name=name)
self.gurobi_model.update()
@@ -99,26 +110,26 @@ def addVars(self,
self.r_indices[new_vars[i]] = num_existing_r + i
# If lower bound is not -inf, then add the inequality constraint
# x>lb
if lb != -gurobipy.GRB.INFINITY:
self.Ain_r_row.extend(
range(len(self.rhs_in),
len(self.rhs_in) + num_vars))
self.Ain_r_col.extend(
range(num_existing_r, num_existing_r + num_vars))
self.Ain_r_val.extend([torch.tensor(-1, dtype=self.dtype)] *
num_vars)
self.rhs_in.extend([torch.tensor(-lb, dtype=self.dtype)] *
num_vars)
if ub != gurobipy.GRB.INFINITY:
self.Ain_r_row.extend(
range(len(self.rhs_in),
len(self.rhs_in) + num_vars))
self.Ain_r_col.extend(
range(num_existing_r, num_existing_r + num_vars))
self.Ain_r_val.extend([torch.tensor(1, dtype=self.dtype)] *
num_vars)
self.rhs_in.extend([torch.tensor(ub, dtype=self.dtype)] *
num_vars)
for i in range(num_vars):
if lb[i].item() > -gurobipy.GRB.INFINITY and lb[i].item(
) < ub[i].item():
self.Ain_r_row.append(len(self.rhs_in))
self.Ain_r_col.append(num_existing_r + i)
self.Ain_r_val.append(torch.tensor(-1, dtype=self.dtype))
self.rhs_in.append(-lb[i])
for i in range(num_vars):
if ub[i] < gurobipy.GRB.INFINITY and lb[i].item() < ub[i].item(
):
self.Ain_r_row.append(len(self.rhs_in))
self.Ain_r_col.append(num_existing_r + i)
self.Ain_r_val.append(torch.tensor(1, dtype=self.dtype))
self.rhs_in.append(ub[i])
for i in range(num_vars):
if lb[i].item() == ub[i].item():
self.Aeq_r_row.append(len(self.rhs_eq))
self.Aeq_r_col.append(num_existing_r + i)
self.Aeq_r_val.append(torch.tensor(1, dtype=self.dtype))
self.rhs_eq.append(lb[i])
elif vtype == gurobipy.GRB.BINARY:
num_existing_zeta = len(self.zeta_indices)
self.zeta.extend([new_vars[i] for i in range(num_vars)])
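Note the three per-element cases in the new loop above: a finite lower bound (with lb[i] < ub[i]) adds one row to (Ain_r, rhs_in), a finite upper bound adds another, and lb[i] == ub[i] pins the variable with a row in (Aeq_r, rhs_eq) instead. A sketch of the pinned case, assuming the same imports as the sketch further up:

import torch
import gurobipy
from neural_network_lyapunov import gurobi_torch_mip

mip = gurobi_torch_mip.GurobiTorchMIP(torch.float64)
# lb == ub, so addVars records the equality x = 5 rather than two
# inequality rows.
x = mip.addVars(1,
                lb=torch.tensor([5.], dtype=torch.float64),
                ub=torch.tensor([5.], dtype=torch.float64),
                vtype=gurobipy.GRB.CONTINUOUS)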
@@ -357,18 +368,40 @@ def add_mixed_integer_linear_constraints(self, mip_cnstr_return,
lb=-gurobipy.GRB.INFINITY,
vtype=gurobipy.GRB.CONTINUOUS,
name=slack_var_name)
else:
slack = []
# Now add the binary variables
binary_size = 0
if mip_cnstr_return.Ain_binary is not None:
binary_size = mip_cnstr_return.Ain_binary.shape[1]
elif mip_cnstr_return.Aeq_binary is not None:
binary_size = mip_cnstr_return.Aeq_binary.shape[1]
elif mip_cnstr_return.binary_lo is not None:
binary_size = mip_cnstr_return.binary_lo.numel()
elif mip_cnstr_return.binary_up is not None:
binary_size = mip_cnstr_return.binary_up.numel()
if binary_size != 0:
assert (isinstance(binary_var_name, str))
binary = self.addVars(binary_size,
lb=-gurobipy.GRB.INFINITY,
vtype=gurobipy.GRB.BINARY,
name=binary_var_name)
if mip_cnstr_return.binary_lo is None and\
mip_cnstr_return.binary_up is None:
binary = self.addVars(binary_size,
lb=-gurobipy.GRB.INFINITY,
vtype=gurobipy.GRB.BINARY,
name=binary_var_name)
else:
binary_lo = mip_cnstr_return.binary_lo if\
mip_cnstr_return.binary_lo is not None else\
-gurobipy.GRB.INFINITY
binary_up = mip_cnstr_return.binary_up if\
mip_cnstr_return.binary_up is not None else\
gurobipy.GRB.INFINITY
binary = self.addVars(binary_size,
lb=binary_lo,
ub=binary_up,
vtype=gurobipy.GRB.BINARY,
name=binary_var_name)
else:
binary = []

def add_var_if_not_none(coeff_matrix, var, coeff_matrices, var_list):
# if coeff_matrix is not None, then append coeff_matrix to
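The binary_lo/binary_up fields feed straight into the addVars call above when either is set. A sketch mirroring test_add_mixed_integer_linear_constraints4 below (the positional arguments are copied from that test):

import torch
from neural_network_lyapunov import gurobi_torch_mip

mip_cnstr_return = gurobi_torch_mip.MixedIntegerConstraintsReturn()
mip_cnstr_return.binary_lo = torch.tensor([0, 1], dtype=torch.float64)
# binary_up stays None, so that side defaults to infinity before Gurobi
# clamps binary bounds back into [0, 1].
mip = gurobi_torch_mip.GurobiTorchMIP(torch.float64)
slack, binary = mip.add_mixed_integer_linear_constraints(
    mip_cnstr_return, [], None, None, "binary", "ineq", "eq", "out")
# Per the test, binary[1] ends up with lb == ub == 1, i.e. fixed at 1.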
112 changes: 109 additions & 3 deletions neural_network_lyapunov/test/test_gurobi_torch_mip.py
@@ -38,7 +38,7 @@ def setup_mip1(dut):


class TestGurobiTorchMIP(unittest.TestCase):
def test_add_vars(self):
def test_add_vars1(self):
dut = gurobi_torch_mip.GurobiTorchMIP(torch.float64)
# Add continuous variables with no bounds
x = dut.addVars(2,
@@ -57,6 +57,9 @@ def test_add_vars(self):

# Add continuous variables with bounds
y = dut.addVars(3, lb=1, ub=2, vtype=gurobipy.GRB.CONTINUOUS)
for i in range(3):
self.assertEqual(y[i].lb, 1)
self.assertEqual(y[i].ub, 2)
self.assertEqual(dut.gurobi_model.getAttr(gurobipy.GRB.Attr.NumVars),
5)
self.assertEqual(dut.r, [x[0], x[1], y[0], y[1], y[2]])
@@ -106,6 +109,69 @@ def test_add_vars(self):
})
self.assertEqual(dut.zeta_indices, {alpha[0]: 0, alpha[1]: 1})

def test_addVars2(self):
# addVars for continuous variables with tensor-valued lb and/or ub.
dtype = torch.float64
dut = gurobi_torch_mip.GurobiTorchMIP(dtype)
lb = torch.tensor([-2, -np.inf, 4., -np.inf, 5], dtype=dtype)
ub = torch.tensor([4., 3, np.inf, np.inf, 5], dtype=dtype)
x = dut.addVars(5, lb=lb, ub=ub, vtype=gurobipy.GRB.CONTINUOUS)
self.assertEqual(len(x), 5)
for i in range(5):
self.assertEqual(x[i].lb, lb[i].item())
self.assertEqual(x[i].ub, ub[i].item())
self.assertEqual(x[i].vtype, gurobipy.GRB.CONTINUOUS)
self.assertListEqual(dut.Ain_r_row, [0, 1, 2, 3])
self.assertListEqual(dut.Ain_r_col, [0, 2, 0, 1])
self.assertEqual(dut.Ain_r_val, [
torch.tensor(-1, dtype=dtype),
torch.tensor(-1, dtype=dtype),
torch.tensor(1, dtype=dtype),
torch.tensor(1, dtype=dtype)
])
self.assertEqual(dut.rhs_in, [
torch.tensor(2, dtype=dtype),
torch.tensor(-4, dtype=dtype),
torch.tensor(4, dtype=dtype),
torch.tensor(3, dtype=dtype)
])
self.assertEqual(len(dut.Ain_zeta_row), 0)
self.assertEqual(len(dut.Ain_zeta_col), 0)
self.assertEqual(len(dut.Ain_zeta_val), 0)

self.assertEqual(dut.Aeq_r_row, [0])
self.assertEqual(dut.Aeq_r_col, [4])
self.assertEqual(dut.Aeq_r_val, [torch.tensor(1, dtype=dtype)])
self.assertEqual(dut.rhs_eq, [torch.tensor(5, dtype=dtype)])

self.assertEqual(len(dut.Aeq_zeta_row), 0)
self.assertEqual(len(dut.Aeq_zeta_col), 0)
self.assertEqual(len(dut.Aeq_zeta_val), 0)

def test_addVars3(self):
# addVars for binary variables with tensor-valued lb and/or ub.
dtype = torch.float64
dut = gurobi_torch_mip.GurobiTorchMIP(dtype)
lb = torch.tensor([0., -1., 1., -np.inf, 0], dtype=dtype)
ub = torch.tensor([1., 0., 2., np.inf, 0], dtype=dtype)
b = dut.addVars(5, lb=lb, ub=ub, vtype=gurobipy.GRB.BINARY)
for i in range(5):
self.assertEqual(b[i].lb, torch.clamp(lb[i], 0, 1).item())
self.assertEqual(b[i].ub, torch.clamp(ub[i], 0, 1).item())
self.assertEqual(b[i].vtype, gurobipy.GRB.BINARY)
self.assertEqual(len(dut.Ain_r_row), 0)
self.assertEqual(len(dut.Ain_r_col), 0)
self.assertEqual(len(dut.Ain_r_val), 0)
self.assertEqual(len(dut.Aeq_r_row), 0)
self.assertEqual(len(dut.Aeq_r_col), 0)
self.assertEqual(len(dut.Aeq_r_val), 0)
self.assertEqual(len(dut.Ain_zeta_row), 0)
self.assertEqual(len(dut.Ain_zeta_col), 0)
self.assertEqual(len(dut.Ain_zeta_val), 0)
self.assertEqual(len(dut.Aeq_zeta_row), 0)
self.assertEqual(len(dut.Aeq_zeta_col), 0)
self.assertEqual(len(dut.Aeq_zeta_val), 0)

def test_addLConstr(self):
dut = gurobi_torch_mip.GurobiTorchMIP(torch.float64)
x = dut.addVars(2, lb=0, vtype=gurobipy.GRB.CONTINUOUS)
@@ -562,7 +628,8 @@ def setup_mixed_integer_constraints_return(self):
def test_add_mixed_integer_linear_constraints1(self):
"""
Test with MixedIntegerConstraintsReturn that doesn't contain any None
items, it also adds the output constraint.
items (except binary_lo and binary_up); it also adds the output
constraint.
"""
mip_constr_return, dtype = \
self.setup_mixed_integer_constraints_return()
@@ -685,6 +752,45 @@ def test_add_mixed_integer_linear_constraints3(self):
dut, Ain_r_expected, Ain_zeta_expected, rhs_in_expected,
Aeq_r_expected, Aeq_zeta_expected, rhs_eq_expected)

def test_add_mixed_integer_linear_constraints4(self):
# Test adding bounds on the binary variables.
dtype = torch.float64

def check_binary_bounds(binary_lo, binary_up, lo_expected,
up_expected):
mip_cnstr_return = gurobi_torch_mip.MixedIntegerConstraintsReturn()
mip_cnstr_return.binary_lo = binary_lo
mip_cnstr_return.binary_up = binary_up
mip = gurobi_torch_mip.GurobiTorchMIP(dtype)
slack, binary = mip.add_mixed_integer_linear_constraints(
mip_cnstr_return, [], None, None, "binary", "ineq", "eq",
"out")
self.assertEqual(len(binary), 2)
self.assertEqual(binary[0].vtype, gurobipy.GRB.BINARY)
self.assertEqual(binary[1].vtype, gurobipy.GRB.BINARY)
for i in range(2):
self.assertEqual(binary[i].lb, lo_expected[i])
self.assertEqual(binary[i].ub, up_expected[i])
self.assertEqual(len(mip.Ain_r_row), 0)
self.assertEqual(len(mip.Ain_r_col), 0)
self.assertEqual(len(mip.Ain_r_val), 0)
self.assertEqual(len(mip.Aeq_r_row), 0)
self.assertEqual(len(mip.Aeq_r_col), 0)
self.assertEqual(len(mip.Aeq_r_val), 0)
self.assertEqual(len(mip.Ain_zeta_row), 0)
self.assertEqual(len(mip.Ain_zeta_col), 0)
self.assertEqual(len(mip.Ain_zeta_val), 0)
self.assertEqual(len(mip.Aeq_zeta_row), 0)
self.assertEqual(len(mip.Aeq_zeta_col), 0)
self.assertEqual(len(mip.Aeq_zeta_val), 0)

check_binary_bounds(None, torch.tensor([0, 1], dtype=dtype), [0, 0],
[0, 1])
check_binary_bounds(torch.tensor([0, 1], dtype=dtype), None, [0, 1],
[1, 1])
check_binary_bounds(torch.tensor([0, 1], dtype=dtype),
torch.tensor([0, 1], dtype=dtype), [0, 1], [0, 1])


class TestGurobiTorchMILP(unittest.TestCase):
def test_setObjective(self):
@@ -785,7 +891,7 @@ def compute_milp_example_cost(a_numpy, autograd_flag):
rhs=a[0] + 2 * a[2] * a[1])
dut.addLConstr([
torch.stack((torch.tensor(2., dtype=dtype), a[1]**2,
torch.tensor(0.5, dtype=dtype))),
torch.tensor(0.5, dtype=dtype))),
torch.tensor([1., 1.], dtype=dtype)
], [x, alpha],
sense=gurobipy.GRB.EQUAL,
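A note on test_addVars3 above: it checks that Gurobi clamps binary-variable bounds into [0, 1], so the expected lb/ub in that test reduce to a torch.clamp:

import numpy as np
import torch

lb = torch.tensor([0., -1., 1., -np.inf, 0.])
print(torch.clamp(lb, 0, 1))  # tensor([0., 0., 1., 0., 0.])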
