Example #1
 def _init_input_costs(self, cost: Cost, matrix, loc_map: typing.Dict[Node, int], partitiontype,
                       edge_type: EdgeType):
     edge_map = defaultdict(list)
     edge_srcs = {}
     for edge in (edge for edge in self.edges if edge.tp == edge_type):
         edge_map[edge.out_id].append(edge.dst)
         edge_srcs[edge.out_id] = edge.src
     movement = []
     for out_id in edge_srcs:
         src_node = self.id_to_node_lookup[edge_srcs[out_id]]
         src_row = self._get_row_or_zeroes(matrix, loc_map, src_node)
         dst_nodes = [self.id_to_node_lookup[dst] for dst in edge_map[out_id] if
                      self.id_to_node_lookup[dst] in loc_map]
         if not dst_nodes:
             continue
         # push_set = self._project_to_bool(sum(self._get_row_or_zeroes(matrix, loc_map, dest) for dest in dst_nodes),
         #                                  cost.value)
         push_set = sum(self._get_row_or_zeroes(matrix, loc_map, dest) for dest in dst_nodes)
         projected = self._project_to_bool(push_set, name="push_set_projected")
         diff = self._convert_to_var(projected - src_row, name="push-src")
         movement.append(self._convert_to_var([(gpy.max_(d, 0)) for d in diff], name="input_costs_pb"))
     if not movement:
         return
     total_movement = self._convert_to_var(sum(movement, 0))
     for i, move in enumerate(total_movement):
         self.model.addConstr(move <= cost.value,
                              name="cost_{}_{}_{}_input_cost".format(partitiontype.typename, i, edge_type))
Example #2
def gurobi_test_low_silo_cost():
    """Attempt to get low silo cost term to work with Gurobi.

    It seems `MVar` and `MLinExpr` doesn't play nice with `max_` and `sum`
    """
    n_bin = 5
    price = 10 * np.ones(n_bin)

    model = gurobipy.Model()

    milling = model.addVars(n_bin, vtype=GRB.BINARY)
    cost_milling = sum(milling[i] * price[i] for i in range(n_bin))

    silo = model.addVars(n_bin, vtype=GRB.CONTINUOUS, lb=2, ub=10)
    silo_min = 7 * np.ones(n_bin)
    cost_silo = sum((silo_min[i] - silo[i]) * price[i] for i in range(n_bin))
    model.update()

    model.setObjective(cost_milling + gurobipy.max_(cost_silo, 2) - 99,
                       sense=GRB.MINIMIZE)
    model.optimize()

    import IPython
    IPython.embed()

    print(f"milling: {[_.x for _ in milling.values()]}")
    print(f"cost_milling: {cost_milling.getValue()}")
    print(f"silo: {[_.x for _ in silo.values()]}")
    print(f"cost_silo: {cost_silo.getValue()}")
    print(f'Obj: {model.objVal}')
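One possible workaround for the issue noted in the docstring above, sketched under the assumption that the data stays the same as in gurobi_test_low_silo_cost (the function name below is hypothetical): gurobipy's max_ only accepts variables and constants, so the linear silo-cost expression is first bound to an auxiliary variable and the max is then taken over that variable.

import numpy as np
import gurobipy
from gurobipy import GRB


def gurobi_test_low_silo_cost_workaround():
    """Hedged sketch: route the silo-cost expression through auxiliary variables."""
    n_bin = 5
    price = 10 * np.ones(n_bin)

    model = gurobipy.Model()

    milling = model.addVars(n_bin, vtype=GRB.BINARY)
    cost_milling = gurobipy.quicksum(milling[i] * price[i] for i in range(n_bin))

    silo = model.addVars(n_bin, vtype=GRB.CONTINUOUS, lb=2, ub=10)
    silo_min = 7 * np.ones(n_bin)
    cost_silo = gurobipy.quicksum((silo_min[i] - silo[i]) * price[i] for i in range(n_bin))

    # max_ needs Var (or constant) arguments, so bind the expression to a variable first.
    aux_silo = model.addVar(lb=-GRB.INFINITY, name="aux_silo")
    model.addConstr(aux_silo == cost_silo)
    max_silo = model.addVar(name="max_silo")
    model.addConstr(max_silo == gurobipy.max_(aux_silo, 2))

    model.setObjective(cost_milling + max_silo - 99, sense=GRB.MINIMIZE)
    model.optimize()

    print(f"cost_milling: {cost_milling.getValue()}")
    print(f"max(cost_silo, 2): {max_silo.X}")
    print(f"Obj: {model.objVal}")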
Example #3
 def _init_output_costs(self, cost: Cost, matrix, loc_map: typing.Dict[Node, int], partitiontype,
                        edge_type: EdgeType):
     # For each node, count the number of distinct out_ids that aren't in the same partition.
     edge_map = defaultdict(list)
     edge_srcs = {}
     for edge in (edge for edge in self.edges if edge.tp == edge_type):
         edge_map[edge.out_id].append(edge.dst)
         edge_srcs[edge.out_id] = edge.src
     movement = []
     for out_id in edge_srcs:
         src_node = self.id_to_node_lookup[edge_srcs[out_id]]
         if src_node not in loc_map:
             continue
         src_row = self._get_row_or_zeroes(matrix, loc_map, src_node)
         dst_nodes = [self.id_to_node_lookup[dst] for dst in edge_map[out_id]]
         if any(node not in loc_map for node in dst_nodes):
             # if a node can't be in this partition type, then we have to export.
             movement.append(src_row)
         else:
             num_outputs = len(dst_nodes)
             matrix_sum = sum(self._get_row_or_zeroes(matrix, loc_map, dst) for dst in dst_nodes)
             num_out_of_partition_type_nodes = num_outputs - gpy.quicksum(matrix_sum)
             out_of_partition_type_movement = self._project_to_bool(num_out_of_partition_type_nodes)
             in_partition_type_movement = gpy.quicksum(list(self._convert_to_var(
                 [gpy.max_(x, 0) for x in self._convert_to_var(self._project_to_bool(matrix_sum) - src_row)])))
             has_movement = self._project_to_bool(out_of_partition_type_movement + in_partition_type_movement)
             movement.append(np.array([self.and_(has_movement, s) for s in src_row]))
     if movement:
         total_movement = sum(movement)
         for i, move in enumerate(total_movement):
             self.model.addConstr(move <= cost.value,
                                  name="cost_{}_{}_{}_output_cost".format(partitiontype.typename, i, edge_type))
Example #4
# Method 2: use Gurobi's built-in method (a sketch of a manual alternative follows this example)
import gurobipy as grb

m = grb.Model()
x = m.addVar(name='x')
y = m.addVar(name='y')
z = m.addVar(name='z')
m.addConstr(x == 4, name='c4')
m.addConstr(y == 5, name='c5')
m.addConstr(z == grb.max_(x, y, 3))
m.setObjective(z)
m.optimize()
print("最大值是:z=", z.X)
# 输出 z= 5.0
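For contrast with the built-in "Method 2" above, here is a sketch of what a manual formulation (presumably the unshown "Method 1"; this is an assumption) could look like: because z is being minimized, z == max_(x, y, 3) can be replaced by simple lower bounds on z, which are tight at the optimum.

# Manual alternative (sketch, not the original Method 1): lower-bound z and rely on minimization.
import gurobipy as grb

m = grb.Model()
x = m.addVar(name='x')
y = m.addVar(name='y')
z = m.addVar(name='z')
m.addConstr(x == 4, name='c4')
m.addConstr(y == 5, name='c5')
m.addConstr(z >= x, name='zx')
m.addConstr(z >= y, name='zy')
m.addConstr(z >= 3, name='z3')
m.setObjective(z)  # default sense is minimization
m.optimize()
print("the maximum is: z =", z.X)  # z = 5.0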
Example #5
    def _create_model(self, job_ids, r_times, p_intervals, m_availabe):
        ## prepare the index for decision variables
        # start time of process
        jobs = tuple(job_ids)
        machines = tuple(range(len(m_availabe)))  # assumed: one machine per entry of m_availabe
        # order of executing jobs: tuple list
        jobPairs = [(i, j) for i in jobs for j in jobs if i < j]
        # assignment of jobs on machines
        job_machinePairs = [(i, k) for i in jobs for k in machines]

        ## parameters model (dictionary)
        # 1. release time
        release_time = dict(zip(jobs, tuple(r_times)))
        # 2. process time
        process_time = dict(zip(jobs, tuple(p_intervals)))
        # 3. machine available time
        machine_time = dict(zip(machines, tuple(m_availabe)))
        # 4. define BigM
        BigM = np.sum(r_times) + np.sum(p_intervals) + np.sum(m_availabe)

        ## create model
        m = Model('PMSP')
        ## create decision variables
        # 1. assignments of jobs on machines
        z = m.addVars(job_machinePairs, vtype=GRB.BINARY, name='assign')
        # 2. order of executing jobs
        y = m.addVars(jobPairs, vtype=GRB.BINARY, name='order')
        # 3. start time of executing each job
        startTime = m.addVars(jobs, name='startTime')
        ## create objective
        # m.setObjective(quicksum(startTime), GRB.MINIMIZE) # TOTRY

        m._max_complete = m.addVar(1, name='max_complete_time')
        m.setObjective(m._max_complete, GRB.MINIMIZE)  # TOTRY
        m.addConstr((m._max_complete == max_(startTime)), 'minimax')
        ## create constraints
        # 1. job release constraint
        m.addConstrs((startTime[i] >= release_time[i] for i in jobs),
                     'job release constraint')
        # 2. machine available constraint
        m.addConstrs((startTime[i] >= machine_time[k] - BigM * (1 - z[i, k])
                      for (i, k) in job_machinePairs),
                     'machine available constraint')
        # 3. disjunctive constraint
        m.addConstrs((startTime[j] >= startTime[i] + process_time[i] - BigM *
                      ((1 - y[i, j]) + (1 - z[j, k]) + (1 - z[i, k]))
                      for k in machines
                      for (i, j) in jobPairs), 'temporal disjunctive order1')
        m.addConstrs((startTime[i] >= startTime[j] + process_time[j] - BigM *
                      (y[i, j] + (1 - z[j, k]) + (1 - z[i, k]))
                      for k in machines
                      for (i, j) in jobPairs), 'temporal disjunctive order2')
        # 4. one job is assigned to one and only one machine
        m.addConstrs((quicksum([z[i, k] for k in machines]) == 1
                      for i in jobs), 'job non-splitting')

        # set initial solution
        for (i, k) in job_machinePairs:
            if (i, k) in assign_list:
                z[(i, k)].start = 1
            else:
                z[(i, k)].start = 0

        for (i, j) in jobPairs:
            if (i, j) in order_list:
                y[(i, j)].start = 1
            else:
                y[(i, j)].start = 0

        for i in job_ids:
            startTime[i].start = start_times[i]

        return m, z, y, startTime
Example #6
	def fit(self, x_or, y, w=None):
		""" Fits upper and lower bounds on p(y|x) """

		if self.standardize:
			xselector = VarianceThreshold(threshold=.1).fit(x_or)
			temp_x = xselector.transform(x_or)
			xscaler = StandardScaler().fit(temp_x)
			self.xscaler = lambda x: xscaler.transform(xselector.transform(x))
			x = self.xscaler(x_or)
		else:
			x = x_or.copy()

		if self.kernel == 'linear':
			self.kernel_fit = lambda x: x
			x = self.kernel_fit(x)

		elif self.kernel == 'poly':
			if self.p is None:
				raise ValueError('Need polynomial value')

			self.kernel_fit = lambda x: np.hstack([x**i for i in range(1, self.p + 1)])
			x = self.kernel_fit(x)

		elif self.kernel == 'rbf':
			if self.sig is None:
				raise ValueError('Need Length scale value')
			self.x_tr = x.copy()
			self.kernel_fit = lambda x_ts: RBF(length_scale=self.sig).__call__(x_ts,
				self.x_tr)
			x = self.kernel_fit(x)

		elif self.kernel == 'rbf_approx':
			if self.sig is None:
				raise ValueError('Need Length scale value')

			rbf_fit = RBFSampler(gamma=1 / self.sig, n_components=50).fit(x.copy())
			self.kernel_fit = lambda x_ts: rbf_fit.transform(x_ts)
			x = self.kernel_fit(x)

		n, d = x.shape[0], x.shape[1]
		mdl = grb.Model("qp")
		mdl.ModelSense = 1
		mdl.setParam('OutputFlag', False)
		mdl.reset()

		L = 1e5
		us = [mdl.addVar(name="u%d" % i, lb=-L, ub=L) for i in range(n)]
		ls = [mdl.addVar(name="l%d" % i, lb=-L, ub=L) for i in range(n)]
		bsU = [mdl.addVar(name="bu%d" % i, lb=-L, ub=L) for i in range(d + 1)]
		bsL = [mdl.addVar(name="bl%d" % i, lb=-L, ub=L) for i in range(d + 1)]
		rUs = [mdl.addVar(name="ru%d" % i, lb=0, ub=L) for i in range(n)]
		rLs = [mdl.addVar(name="rl%d" % i, lb=0, ub=L) for i in range(n)]

		slackU = 0
		slackL = 0

		if w is None:
			w = np.ones(n) / n

		obj_terms = []
		for i in range(n):
			mdl.addConstr(us[i] >= ls[i])

			mdl.addConstr(us[i] == np.dot(x[i, ], bsU[:d]) + bsU[-1])
			mdl.addConstr(ls[i] == np.dot(x[i, ], bsL[:d]) + bsL[-1])

			mdl.addConstr(rUs[i] >= y[i] - us[i])
			mdl.addConstr(rLs[i] >= ls[i] - y[i])

			slackU += w[i] * rUs[i]
			slackL += w[i] * rLs[i]

			if self.loss == 'square':
				obj_terms.append(w[i] * (us[i] - ls[i]) * (us[i] - ls[i]))
			elif self.loss == 'linear':
				if self.agg == 'max':
					obj_terms.append((us[i] - ls[i]))
				else:
					obj_terms.append(w[i] * (us[i] - ls[i]))

			else:
				raise Exception('Unrecognized loss: %s' % self.loss)

		if self.agg == 'max':
			o = mdl.addVar(name="o", lb=-L, ub=L)
			os = []
			for i in range(n):
				oi = mdl.addVar(name="o%d" % i, lb=-L, ub=L)
				mdl.addConstr(oi == obj_terms[i])
				os += [oi]
			mdl.addConstr(o == grb.max_(os))
			obj = o
		else:
			obj = grb.quicksum(obj_terms)

		# ----add the values of the objectives
		obj_reg_u, obj_reg_l = 0, 0
		for k in range(d):
			obj_reg_u += bsU[k] * bsU[k]
			obj_reg_l += bsL[k] * bsL[k]

		obj_reg = self.alphau * obj_reg_u + self.alphal * obj_reg_l

		mdl.addConstr(slackU <= self.lamdau)
		mdl.addConstr(slackL <= self.lamdal)
		obj_f = obj + obj_reg

		mdl.setObjective(obj_f)
		mdl.optimize()

		self.bu = np.array([bsU[j].x for j in range(d + 1)])
		self.bl = np.array([bsL[j].x for j in range(d + 1)])

		# print(obj.getValue(), obj_slack.getValue())

		return self
Example #7
	def fit(self, x, y, w=None):
		""" Fits upper and lower bounds on p(y|x)
		Args:
			x, y are lists with control groups first
		"""

		# -----preprocessing
		if self.standardize:

			x0selector = VarianceThreshold(threshold=.1).fit(x[0])
			temp_x0 = x0selector.transform(x[0])
			x0scaler = StandardScaler().fit(temp_x0)
			self.x0scaler = lambda x: x0scaler.transform(x0selector.transform(x))

			x1selector = VarianceThreshold(threshold=.1).fit(x[1])
			temp_x1 = x1selector.transform(x[1])
			x1scaler = StandardScaler().fit(temp_x1)
			self.x1scaler = lambda x: x1scaler.transform(x1selector.transform(x))

			x00 = self.x0scaler(x[0])
			x01 = self.x1scaler(x[0])
			x11 = self.x1scaler(x[1])
			x10 = self.x0scaler(x[1])

		else:
			x00, x01 = x[0], x[0]
			x11, x10 = x[1], x[1]

		if self.kernel == 'linear':
			self.kernel_fit = lambda x: x
			x00 = self.kernel_fit(x00)
			x01 = self.kernel_fit(x01)
			x11 = self.kernel_fit(x11)
			x10 = self.kernel_fit(x10)

		elif self.kernel == 'poly':
			if self.p is None:
				raise ValueError('Need polynomial value')

			self.kernel_fit = lambda x: np.hstack([x**i for i in range(1, self.p + 1)])
			x00 = self.kernel_fit(x00)
			x01 = self.kernel_fit(x01)
			x11 = self.kernel_fit(x11)
			x10 = self.kernel_fit(x10)

		elif self.kernel == 'rbf':
			if self.sig is None:
				raise ValueError('Need Length scale value')
			self.x0_tr = x00.copy()
			self.x1_tr = x11.copy()

			self.kernel_fit = lambda x, tg: RBF(length_scale=self.sig).__call__(x,
				self.x1_tr) if tg == 1 else \
				RBF(length_scale=self.sig).__call__(x, self.x0_tr)

			x00 = self.kernel_fit(x00, tg=0)
			x01 = self.kernel_fit(x01, tg=1)
			x11 = self.kernel_fit(x11, tg=1)
			x10 = self.kernel_fit(x10, tg=0)

		elif self.kernel == 'rbf_approx':
			if self.sig is None:
				raise ValueError('Need Length scale value')

			self.x0_tr = x00.copy()
			self.x1_tr = x11.copy()

			self.rbf_approx1 = RBFSampler(gamma=1 / self.sig, n_components=100,
				random_state=0).fit(self.x1_tr)
			self.rbf_approx0 = RBFSampler(gamma=1 / self.sig, n_components=100,
				random_state=0).fit(self.x0_tr)

			self.kernel_fit = lambda x, tg: self.rbf_approx1.transform(x) if tg == 1 else \
				self.rbf_approx0.transform(x)

			x00 = self.kernel_fit(x00, tg=0)
			x01 = self.kernel_fit(x01, tg=1)
			x11 = self.kernel_fit(x11, tg=1)
			x10 = self.kernel_fit(x10, tg=0)

		n1, d1 = x11.shape[0], x11.shape[1]
		n0, d0 = x00.shape[0], x00.shape[1]
		y0 = y[0]
		y1 = y[1]

		n = n1 + n0

		mdl = grb.Model("cqp")
		mdl.ModelSense = 1
		mdl.setParam('OutputFlag', False)
		mdl.reset()
		L = 1e5

		u0 = [mdl.addVar(name="u0_%d" % i, lb=-L, ub=L) for i in range(n)]
		l0 = [mdl.addVar(name="l0_%d" % i, lb=-L, ub=L) for i in range(n)]

		u1 = [mdl.addVar(name="u1_%d" % i, lb=-L, ub=L) for i in range(n)]
		l1 = [mdl.addVar(name="l1_%d" % i, lb=-L, ub=L) for i in range(n)]

		bU0 = [mdl.addVar(name="bu0_%d" % i, lb=-L, ub=L) for i in range(d0 + 1)]
		bL0 = [mdl.addVar(name="bl0_%d" % i, lb=-L, ub=L) for i in range(d0 + 1)]

		bU1 = [mdl.addVar(name="bu1_%d" % i, lb=-L, ub=L) for i in range(d1 + 1)]
		bL1 = [mdl.addVar(name="bl1_%d" % i, lb=-L, ub=L) for i in range(d1 + 1)]

		rUs = [mdl.addVar(name="ru%d" % i, lb=0, ub=L) for i in range(n)]
		rLs = [mdl.addVar(name="rl%d" % i, lb=0, ub=L) for i in range(n)]

		slackU1 = 0
		slackL1 = 0

		slackU0 = 0
		slackL0 = 0

		if w is None:
			w0, w1= np.ones(n0) / n0, np.ones(n1) / n1
		else:
			w0 = w[0]
			w1 = w[1]

		obj_terms = []
		for i in range(n):
			mdl.addConstr(u1[i] >= l1[i])
			mdl.addConstr(u0[i] >= l0[i])

		for i in range(n0):

			mdl.addConstr(u1[i] == np.dot(x01[i, ], bU1[:d1]) + bU1[-1])
			mdl.addConstr(l1[i] == np.dot(x01[i, ], bL1[:d1]) + bL1[-1])

			mdl.addConstr(u0[i] == np.dot(x00[i, ], bU0[:d0]) + bU0[-1])
			mdl.addConstr(l0[i] == np.dot(x00[i, ], bL0[:d0]) + bL0[-1])

			mdl.addConstr(rUs[i] >= y0[i] - u0[i])
			mdl.addConstr(rLs[i] >= l0[i] - y0[i])

			slackU0 += w0[i] * rUs[i]
			slackL0 += w0[i] * rLs[i]

			if self.loss == 'square':
				obj_terms.append(w0[i] * ((u0[i] - l0[i]) * (u0[i] - l0[i]) + (u1[i] - l1[i]) * (u1[i] - l1[i])))
			elif self.loss == 'linear':
				if self.agg == "max":
					obj_terms.append(((u0[i] - l0[i]) + (u1[i] - l1[i])))
				else:
					obj_terms.append(w0[i]*((u0[i] - l0[i])+ (u1[i] - l1[i])))
			else:
				raise Exception('Unrecognized loss: %s' % self.loss)

		for i in range(n0, n1+n0):

			mdl.addConstr(u1[i] == np.dot(x11[i - n0, ], bU1[:d1]) + bU1[-1])
			mdl.addConstr(l1[i] == np.dot(x11[i - n0, ], bL1[:d1]) + bL1[-1])

			mdl.addConstr(u0[i] == np.dot(x10[i - n0, ], bU0[:d0]) + bU0[-1])
			mdl.addConstr(l0[i] == np.dot(x10[i - n0, ], bL0[:d0]) + bL0[-1])

			mdl.addConstr(rUs[i] >= y1[i - n0] - u1[i])
			mdl.addConstr(rLs[i] >= l1[i] - y1[i - n0])

			slackU1 += w1[i - n0] * rUs[i]
			slackL1 += w1[i - n0] * rLs[i]

			if self.loss == 'square':
				obj_terms.append(w1[i - n0] * ((u1[i] - l1[i]) * (u1[i] - l1[i])))

			elif self.loss == 'linear':
				if self.agg == "max":
					obj_terms.append(((u1[i] - l1[i]) + (u0[i] - l0[i])))
				else:
					obj_terms.append(w1[i - n0] * ((u1[i] - l1[i]) + (u0[i] - l0[i])))

			else:
				raise Exception('Unrecognized loss: %s' % self.loss)

		if self.agg == 'max':
			o = mdl.addVar(name="o", lb=-L, ub=L)
			os = []
			for i in range(n):
				oi = mdl.addVar(name="o%d" % i, lb=-L, ub=L)
				mdl.addConstr(oi == obj_terms[i])
				os += [oi]
			mdl.addConstr(o == grb.max_(os))
			obj = o# + .01*grb.quicksum(obj_terms)
		else:
			obj = grb.quicksum(obj_terms)

		obj_reg_u0, obj_reg_l0, obj_reg_u1, obj_reg_l1 = 0, 0, 0, 0

		for k in range(d1):
			obj_reg_u1 += bU1[k] * bU1[k]
			obj_reg_l1 += bL1[k] * bL1[k]

		for k in range(d0):
			obj_reg_u0 += bU0[k] * bU0[k]
			obj_reg_l0 += bL0[k] * bL0[k]

		obj_reg = self.alphau1 * obj_reg_u1 + self.alphal1 * obj_reg_l1 + \
			self.alphau0 * obj_reg_u0 + self.alphal0 * obj_reg_l0

		obj = obj + obj_reg

		mdl.addConstr((slackU0 <= self.lamdau0))
		mdl.addConstr((slackL0 <= self.lamdal0))

		mdl.addConstr((slackU1 <= self.lamdau1))
		mdl.addConstr((slackL1 <= self.lamdal1))

		mdl.setObjective(obj)
		mdl.optimize()

		self.bu0 = np.array([bU0[j].x for j in range(d0 + 1)])
		self.bl0 = np.array([bL0[j].x for j in range(d0 + 1)])

		self.bu1 = np.array([bU1[j].x for j in range(d1 + 1)])
		self.bl1 = np.array([bL1[j].x for j in range(d1 + 1)])

		return self
Example #8
print(cross_nodes)

import gurobipy as gp
from gurobipy import GRB

m = gp.Model("cuMcM_2011B")
x = m.addVars(Plf_nodes, cross_nodes, vtype=GRB.BINARY, name="allocate")
d = m.addVars(Plf_nodes, vtype=GRB.CONTINUOUS, name='d')
D = m.addVar(vtype=GRB.CONTINUOUS, name="max_Distance")

m.addConstrs((x.sum(i, '*') <= 1 for i in Plf_nodes), name='plf')
m.addConstrs((x.sum('*', k) == 1 for k in cross_nodes), name='crs')
m.addConstrs((d[i] == gp.quicksum(x[i, k] * dis[i, k] for k in cross_nodes)
              for i in Plf_nodes),
             name='midis')
m.addConstr((D == gp.max_(d)), name='d')

m.setObjective(D, GRB.MINIMIZE)
m.optimize()

for i in Plf_nodes:
    for k in cross_nodes:
        print(int(x[i, k].x), end=" ")
    print("in line ", i)

for i in Plf_nodes:
    for k in cross_nodes:
        if x[i, k].x >= 1e-6:
            print("Service platform", i, "manages intersection", k)

print("最短时间:", D.x)
Example #9
    def optimize(self,
                 t_decision,
                 t_end,
                 predictions_PV,
                 predictions_load,
                 forecasting=True,
                 objective_1='cost',
                 objective_2='soc',
                 method='deterministic',
                 parameters=None,
                 lambda_soc=0.5,
                 verbose=True):
        ''' Main optimizing function
            Input:
                t_decision: pd.Timestamp, time of decision
                t_end: pd.Timestamp, time window end
                predictions_PV: df, output from PV forecast
                predictions_load: df, output from Load forecast
                forecasting: bool, False if perfect foresight
                objective_1: str, main objective
                objective_2: str, usually soc
                method: str, method of resolution (deterministic, expected value, CVaR)
                parameters: dict, optimization parameters
                lambda_soc: float, weight of SOC in the optimization
                verbose: bool
            output:
                decisions: df, results with decision variables and parameters
                '''
        # determine whether it is stochastic or deterministic
        if predictions_PV.shape[1] == 1:
            self.stochastic = False

        else:
            self.stochastic = True

        # instantiate model
        self.m = gp.Model()

        # detailed verbose
        self.m.Params.OutputFlag = 0

        # time limit of optimization execution
        self.m.Params.TimeLimit = 45

        # PF or MPC
        self.forecasting = forecasting

        # call functions
        self.preprocess_data(t_decision, t_end, predictions_PV,
                             predictions_load)
        self.set_parameters()

        # first_stage
        self.add_first_stage_variables()
        self.add_first_stage_constraints()

        # second stage
        self.add_second_stage_variables()
        self.add_second_stage_constraints()

        # SOC objective
        SOC_difference = gp.quicksum([
            self.SOC_max - self.SOC[self.t_end, i] for i in self.range_samples
        ])

        # objective function
        objective = {}

        # pv objective
        if 'pv' in [objective_1, objective_2]:

            # Power bought 1st stage
            PV_grid_1 = self.P_PV_2G_1

            # sum of Power bought 2nd stage
            PV_grid = gp.quicksum(self.P_PV_2G)

            # soc penalty
            penalty = self.BC_EV

            # MIP gap
            self.m.Params.MIPGap = 5e-1

            # first and second stage objective function
            decision = PV_grid_1
            future = PV_grid

        # APR
        elif 'peak' in [objective_1, objective_2]:

            # auxiliary variable
            self.z_apr = self.m.addVars(self.n_samples, name='z_apr')

            self.P_max_var = self.m.addVars(self.n_samples, name='P_max_var')

            # add the constraint
            self.P_max = self.m.addConstrs(self.P_max_var[i] == gp.max_(
                self.P_grid_bought[t, i] for t in self.time_horizon)
                                           for i in self.range_samples)
            self.peak = self.m.addConstrs(
                (gp.quicksum(self.P_grid_bought[t, i]
                             for t in self.time_horizon)) /
                len(self.time_horizon) + self.z_apr[i] == self.P_max_var[i]
                for i in self.range_samples)

            # parameters
            penalty = self.BC_EV
            self.m.Params.MIPGap = 1e-2

            # first and second stage
            decision = 0
            future = gp.quicksum(self.z_apr)

        # cost objective
        elif 'cost' in [objective_1, objective_2]:

            # first and second stage of cost
            cost_decision = self.P_grid_bought_1 * self.buy_spot_price[
                self.t_decision] - self.P_grid_sold_1 * self.sell_spot_price[
                    self.t_decision]
            cost_forecast = gp.quicksum([
                (self.P_grid_bought[t, i] * self.buy_spot_price[t] -
                 self.P_grid_sold[t, i] * self.sell_spot_price[t])
                for t in self.time_horizon for i in self.range_samples
            ])

            # soc penalty
            penalty = self.buy_spot_price[self.t_departure] * self.BC_EV

            decision = cost_decision
            future = cost_forecast

        # adjust SOC obj function with penalty
        Power_difference = penalty * SOC_difference
        objective['soc'] = Power_difference / self.n_samples

        lambda_main = 1 - lambda_soc

        if method == 'deterministic' or method == 'day_ahead' or method == 'expected value':
            if verbose:
                print('Method: ' + method)
                print('Objective: ' + objective_1)
                print('Number of scenarios: ' + str(predictions_PV.shape[1]))

            objective[objective_1] = decision + future / self.n_samples

        elif method == 'CVaR':
            if verbose:
                print('Method: ' + method)
                print('Objective: ' + objective_1)
                print('Number of scenarios: ' + str(predictions_PV.shape[1]))

            alpha_obj1 = parameters['alpha_obj1']
            objective[objective_1] = decision + future / (
                (1 - alpha_obj1) * self.n_samples)

        # set objective function
        self.m.setObjective(lambda_main * objective[objective_1] +
                            lambda_soc * objective['soc'])

        self.m.optimize()

        # update decision at time of decision
        self.update_decisions()
Example #10
 def reduce(cls, args: typing.List):
     return gpy.max_(args)
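A hypothetical call site for the reduce helper above (the enclosing class is not shown, so the names below are assumed): the GenExpr returned by gpy.max_ only takes effect once it is bound to a variable through an equality constraint, as in the other examples on this page.

import gurobipy as gpy
from gurobipy import GRB

m = gpy.Model()
xs = [m.addVar(ub=10.0, name="x{}".format(i)) for i in range(3)]
z = m.addVar(name="z")
m.addConstr(z == gpy.max_(xs), name="z_is_max")  # what reduce(xs) would return, bound to z
m.addConstrs((xs[i] == i + 1 for i in range(3)), name="fix")
m.setObjective(z, GRB.MINIMIZE)
m.optimize()
print(z.X)  # 3.0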
Example #11
    def calculate_trajectory(self, x0, t_elapsed):
        """
        Uses Gurobi to calculate optimal trajectory points (self.xstar) and 
        control inputs (self.ustar)

        """

        # Options
        Nin = 6  # number of sides in inner-ellipse polygon approx
        Nout = 15  # number of sides in outer-ellipse polygon approx

        initial_state = x0.reshape(6)
        goal_state = np.zeros(6)
        n = self.mean_motion
        mc = self.mass_chaser

        # Shorten the number of initial time steps (self.tau0) based on the amount of time elapsed
        tau = int(max(10, np.round(self.tau0 - t_elapsed / self.dt_plan)))
        print("time elapsed = ", t_elapsed)

        # Set Ranges
        smax = 15000  # arbitrary (included bounds to speed up solver)
        vmax = 10  # [m/s] max velocity
        Fmax = 2  # [N] max force

        # Initialize states
        sx = []
        sy = []
        sz = []
        vx = []
        vy = []
        vz = []
        Fx = []
        Fy = []
        Fz = []
        snorm = []
        sxabs = []
        syabs = []
        vnorm = []
        vxabs = []
        vyabs = []
        zeta = []

        m = gp.Model("QPTraj")

        # Define variables at each of the tau timesteps
        for t in range(tau):
            sx.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-smax,
                         ub=smax,
                         name="sx" + str(t)))
            vx.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-vmax,
                         ub=vmax,
                         name="vx" + str(t)))
            Fx.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-Fmax,
                         ub=Fmax,
                         name="Fx" + str(t)))
            sy.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-smax,
                         ub=smax,
                         name="sy" + str(t)))
            vy.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-vmax,
                         ub=vmax,
                         name="vy" + str(t)))
            Fy.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-Fmax,
                         ub=Fmax,
                         name="Fy" + str(t)))
            sz.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-smax,
                         ub=smax,
                         name="sz" + str(t)))
            vz.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-vmax,
                         ub=vmax,
                         name="vz" + str(t)))
            Fz.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=-Fmax,
                         ub=Fmax,
                         name="Fz" + str(t)))
            snorm.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=0,
                         ub=smax,
                         name="snorm" + str(t)))
            sxabs.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=0,
                         ub=smax,
                         name="sxabs" + str(t)))
            syabs.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=0,
                         ub=smax,
                         name="syabs" + str(t)))
            vnorm.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=0,
                         ub=vmax,
                         name="vnorm" + str(t)))
            vxabs.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=0,
                         ub=vmax,
                         name="vxabs" + str(t)))
            vyabs.append(
                m.addVar(vtype=GRB.CONTINUOUS,
                         lb=0,
                         ub=vmax,
                         name="vyabs" + str(t)))

        for p in range(Nin):
            zeta.append(m.addVar(vtype=GRB.BINARY, name="zeta" + str(p)))

        m.update()

        # Set Initial Conditions
        m.addConstr(sx[0] == initial_state[0], "sx0")
        m.addConstr(sy[0] == initial_state[1], "sy0")
        m.addConstr(sz[0] == initial_state[2], "sz0")
        m.addConstr(vx[0] == initial_state[3], "vx0")
        m.addConstr(vy[0] == initial_state[4], "vy0")
        m.addConstr(vz[0] == initial_state[5], "vz0")

        # Specify Terminal Set
        if self.f_goal_set == 0:  # origin
            m.addConstr(sx[-1] == goal_state[0], "sxf")
            m.addConstr(sy[-1] == goal_state[1], "syf")
            m.addConstr(sz[-1] == goal_state[2], "szf")
            m.addConstr(vx[-1] == goal_state[3], "vxf")
            m.addConstr(vy[-1] == goal_state[4], "vyf")
            m.addConstr(vz[-1] == goal_state[5], "vzf")
        elif self.f_goal_set == 1:  # stationary point or periodic line
            m.addConstr(sx[-1] == goal_state[0], "sxf")
            m.addConstr(vx[-1] == goal_state[3], "vxf")
            m.addConstr(vy[-1] == goal_state[4], "vyf")
        elif self.f_goal_set == 2:  # ellipse
            m.addConstr(vy[-1] + 2 * n * sx[-1] == 0, "ellipse1")
            m.addConstr(sy[-1] - (2 / n) * vx[-1] == 0, "ellipse2")

        # Set dynamic speed limit
        for t in range(tau):
            # Define the norms:
            m.addConstr(sxabs[t] == gp.abs_(sx[t]))
            m.addConstr(syabs[t] == gp.abs_(sy[t]))
            m.addConstr(snorm[t] == gp.max_(sxabs[t], syabs[t]),
                        "snorm" + str(t))
            m.addConstr(vxabs[t] == gp.abs_(vx[t]))
            m.addConstr(vyabs[t] == gp.abs_(vy[t]))
            m.addConstr(vnorm[t] == gp.max_(vxabs[t], vyabs[t]),
                        "vnorm" + str(t))

            # Speed limit constraint:
            m.addConstr(vnorm[t] <= self.kappa_speed * snorm[t])

        # Collision Avoidance Constraint
        if self.f_collision_avoidance:
            for t in range(tau):
                m.addConstr(snorm[t] >= self.collision_dist)
            if initial_state[0] < self.collision_dist or initial_state[
                    1] < self.collision_dist:
                print(
                    "\nERROR: Initial position is too close! Collision constraint violated!\n"
                )

        # # Final point within [1km-5km] of target
        # m.addConstr( snorm[-1] <= 2000 )
        # m.addConstr( snorm[-1] >= 1000 )

        # Terminal constraint: inner polygonal approx on outer ellipse bound
        Nout = Nout + 1
        aout = self.semiminor_out
        bout = self.semiminor_out * 2
        theta = np.linspace(0, 2 * np.pi, Nout)
        for j in range(0, Nout - 1):
            x0 = aout * np.cos(theta[j])
            y0 = bout * np.sin(theta[j])
            x1 = aout * np.cos(theta[j + 1])
            y1 = bout * np.sin(theta[j + 1])
            alphax = y0 - y1
            alphay = x1 - x0
            gamma = alphay * y1 + alphax * x1
            m.addConstr(alphax * sx[-1] + alphay * sy[-1] >= gamma,
                        "OPA" + str(j))

        # Terminal constraint: outer polygonal approx on inner ellipse bound
        if self.f_collision_avoidance:
            a_in = self.semiminor_in
            b_in = self.semiminor_in * 2
            theta = np.linspace(0, 2 * np.pi, Nin + 1)
            big_M = 100000
            for j in range(0, Nin):
                x0 = a_in * np.cos(theta[j])
                y0 = b_in * np.sin(theta[j])
                c1 = (2 * x0 / (a_in**2))
                c2 = (2 * y0 / (b_in**2))
                cmag = np.sqrt(c1**2 + c2**2)
                c1 = c1 / cmag
                c2 = c2 / cmag
                m.addConstr(
                    c1 * sx[-1] + c2 * sy[-1] - c1 * x0 - c2 * y0 -
                    big_M * zeta[j] >= -big_M, "IPA" + str(j))
            m.addConstr(sum(zeta[p] for p in range(Nin)) >= 0.5)

        # Set Dynamics
        for t in range(tau - 1):
            # Dynamics
            m.addConstr(sx[t + 1] == sx[t] + vx[t] * self.dt_plan,
                        "Dsx_" + str(t))
            m.addConstr(sy[t + 1] == sy[t] + vy[t] * self.dt_plan,
                        "Dsy_" + str(t))
            m.addConstr(sz[t + 1] == sz[t] + vz[t] * self.dt_plan,
                        "Dsz_" + str(t))
            m.addConstr(
                vx[t + 1] == vx[t] + sx[t] * 3 * n**2 * self.dt_plan +
                vy[t] * 2 * n * self.dt_plan + Fx[t] * (1 / mc) * self.dt_plan,
                "Dvx_" + str(t))
            m.addConstr(
                vy[t + 1] == vy[t] - vx[t] * 2 * n * self.dt_plan + Fy[t] *
                (1 / mc) * self.dt_plan, "Dvy_" + str(t))
            m.addConstr(
                vz[t + 1] == vz[t] + (-n**2) * sz[t] * self.dt_plan + Fz[t] *
                (1 / mc) * self.dt_plan, "Dvz_" + str(t))

        # Set Objective ( minimize: sum(Fx^2 + Fy^2 + Fz^2) )
        obj = Fx[0] * Fx[0] + Fy[0] * Fy[0] + Fz[0] * Fz[0]
        for t in range(1, tau):
            obj = obj + Fx[t] * Fx[t] + Fy[t] * Fy[t] + Fz[t] * Fz[t]

        m.setObjective(obj, GRB.MINIMIZE)
        m.setParam('OutputFlag', False)

        # Optimize and report on results
        m.optimize()

        # Save desired trajectory
        self.xstar = np.zeros([6, tau])
        self.ustar = np.zeros([3, tau])
        self.snorm = np.zeros([tau])
        self.vnorm = np.zeros([tau])

        for t in range(tau):  # TODO: find quicker way to do this
            self.xstar[0, t] = m.getVarByName("sx" + str(t)).x
            self.xstar[1, t] = m.getVarByName("sy" + str(t)).x
            self.xstar[3, t] = m.getVarByName("vx" + str(t)).x
            self.xstar[4, t] = m.getVarByName("vy" + str(t)).x
            self.ustar[0, t] = m.getVarByName("Fx" + str(t)).x
            self.ustar[1, t] = m.getVarByName("Fy" + str(t)).x
            self.ustar[2, t] = m.getVarByName("Fz" + str(t)).x
            self.snorm[t] = m.getVarByName("snorm" + str(t)).x
            self.vnorm[t] = m.getVarByName("vnorm" + str(t)).x
Example #12
 def to_gurobi(self, model):
     return model.addConstr(
         self.output.to_gurobi(model) == grb.max_(
             self.in_a.to_gurobi(model), self.in_b.to_gurobi(model)))
Example #13
    def to_gurobi(self, model):
        c_name = 'ReLU_{n}_{layer}_{row}'.format(n=self.net,
                                                 layer=self.layer,
                                                 row=self.row)
        ret_constr = None

        if self.input.getLo() >= 0:
            # relu must be active
            ret_constr = model.addConstr(
                self.output.to_gurobi(model) == self.input.to_gurobi(model),
                name=c_name)
        elif self.input.getHi() <= 0:
            # relu must be inactive
            ret_constr = model.addConstr(self.output.to_gurobi(model) == 0,
                                         name=c_name)
        elif fc.use_grb_native:
            ret_constr = model.addConstr(
                self.output.to_gurobi(model) == grb.max_(
                    self.input.to_gurobi(model), 0),
                name=c_name)
        elif fc.use_asymmetric_bounds:
            model.addConstr(self.output.to_gurobi(model) >= 0,
                            name=c_name + '_a')
            model.addConstr(
                self.output.to_gurobi(model) >= self.input.to_gurobi(model),
                name=c_name + '_b')

            M_input = self.input.getHi()
            m_input = self.input.getLo()
            model.addConstr(self.input.to_gurobi(model) -
                            M_input * self.delta.to_gurobi(model) <= 0,
                            name=c_name + '_c')
            model.addConstr(self.input.to_gurobi(model) +
                            (1 - self.delta.to_gurobi(model)) * -m_input >= 0,
                            name=c_name + '_d')

            M_active = max(abs(self.input.getLo()), abs(self.input.getHi()))
            model.addConstr(
                self.output.to_gurobi(model) <= self.input.to_gurobi(model) +
                (1 - self.delta.to_gurobi(model)) * M_active,
                name=c_name + '_e')

            M_output = self.output.getHi()
            ret_constr = model.addConstr(
                self.output.to_gurobi(model) <=
                self.delta.to_gurobi(model) * M_output,
                name=c_name + '_f')
        else:
            bigM = max(abs(self.input.getLo()), abs(self.input.getHi()))
            model.addConstr(self.output.to_gurobi(model) >= 0,
                            name=c_name + '_a')
            model.addConstr(
                self.output.to_gurobi(model) >= self.input.to_gurobi(model),
                name=c_name + '_b')
            model.addConstr(self.input.to_gurobi(model) -
                            bigM * self.delta.to_gurobi(model) <= 0,
                            name=c_name + '_c')
            model.addConstr(self.input.to_gurobi(model) +
                            (1 - self.delta.to_gurobi(model)) * bigM >= 0,
                            name=c_name + '_d')
            model.addConstr(
                self.output.to_gurobi(model) <= self.input.to_gurobi(model) +
                (1 - self.delta.to_gurobi(model)) * bigM,
                name=c_name + '_e')
            ret_constr = model.addConstr(self.output.to_gurobi(model) <=
                                         self.delta.to_gurobi(model) * bigM,
                                         name=c_name + '_f')
        return ret_constr
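A minimal standalone sketch of the use_grb_native branch above, with hypothetical variable names and bounds chosen only for illustration: ReLU is expressed as output == max_(input, 0) through a Gurobi general constraint.

import gurobipy as grb
from gurobipy import GRB

m = grb.Model()
x_in = m.addVar(lb=-5.0, ub=5.0, name="relu_in")
x_out = m.addVar(lb=0.0, ub=5.0, name="relu_out")
m.addConstr(x_out == grb.max_(x_in, 0), name="relu_native")  # out = max(in, 0)
m.addConstr(x_in == -2.0, name="fix_input")
m.setObjective(x_out, GRB.MINIMIZE)
m.optimize()
print(x_out.X)  # 0.0, since the input is fixed at -2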
Example #14
def solveTotalTimeMILP(polygonList,
                       validGrid,
                       velocity,
                       minFluxReqd,
                       pseudo_3D=False,
                       scene=[]):
    '''
        Given an environment filled with surfaces to irradiate and a list of potential vantage points,
        this function solves a mixed-integer linear program which minimizes the total time taken to irradiate the room
        (the total dwell-time plus the time taken to traverse the tour through all dwell-points) while
        ensuring that each surface receives a sufficient amount of radiation to be disinfected properly.
        Arguments: polygonList: a list which defines the obstacles in the 2D environment
                   validGrid: the list of potential vantage points at which the robot can stop
                   velocity: speed of the robot carrying the UV light
                   minFluxReqd: the minimum flux required per unit length for a surface to be irradiated sufficiently
                   pseudo_3D: Boolean value; if True, we simulate a pseudo-3D environment where each polygon has a height, but the robot only moves in a plane
                   scene: object wrapping the environment geometry (used for bounds and obstacle polygons)
    '''
    edgeList, obstacleNodeList, obstacles = get_scene_details(polygonList)
    vReciprocal = 1 / velocity

    t1 = time.time()
    #### Things I've changed
    #gridPointList = get_grid_points_res(x_res = 0.1,y_res = 0.1,xBounds = [0,bbox[2]],yBounds = [0,bbox[3]],minDist = 0.1,obstacles = scene.scene)
    print('len validgrid = {}'.format(len(validGrid)))
    numEdges = len(edgeList)
    numPoints = len(validGrid)
    grid_array = np.zeros((len(validGrid), 2))
    for index, i in enumerate(validGrid):
        grid_array[index, 0] = i[0]
        grid_array[index, 1] = i[1]

    # scene.display_matplotlib(block = False)

    # plt.scatter(grid_array[:,0],grid_array[:,1])
    # plt.show(block = True)
    # validGrid = getGridLines(0.5,0.5,chosenPoints,obstacles)

    # graphEdges, indexDict = getGridEdges(validGrid, chosenPoints)
    print('getting adjacency matrix')
    chosenPointIndices = [i for i in range(numPoints)]
    # adjacencyMatrix, pathDict = getAdjacencyMatrixFromEdges(graphEdges,chosenPoints,chosenPointIndices)
    space = None
    start = None
    goal = None
    x_bound = 1.1 * scene.scene.bounds[2]
    y_bound = 1.1 * scene.scene.bounds[3]
    space = CircleObstacleCSpace(x_bound + 2, y_bound + 2)

    # for poly in list(scene.scene.geoms):
    #     space.addObstacle(Pgon(poly,y_bound))

    space.addObstacle(MultiPgon(scene.scene, y_bound))
    milestones = []
    for l in validGrid:
        milestones.append(l)
    program = CSpaceObstacleSolver1(space,
                                    milestones=milestones,
                                    initial_points=100)
    adjacencyMatrix, pathDict = program.get_adjacency_matrix_from_milestones()
    new_adjacency = np.zeros(shape=(adjacencyMatrix.shape[0] + 1,
                                    adjacencyMatrix.shape[1] + 1))
    new_adjacency[:-1, :-1] = adjacencyMatrix
    adjacencyMatrix = new_adjacency
    velocityAdjacencyMatrix = vReciprocal * adjacencyMatrix
    print('\n\n INF VALUES : \n\n = ', np.isinf(adjacencyMatrix).sum())
    print('getting irradiation matrix')
    irradiationMatrix = get_irradiation_matrix(edgeList,
                                               obstacleNodeList,
                                               validGrid,
                                               obstacles,
                                               height=2,
                                               power=80,
                                               pseudo_3D=pseudo_3D)

    # try:

    # Create a new model
    m = gp.Model("MinimizeTotalTime")
    m.setParam("NodefileStart", 0.5)
    m.setParam("TimeLimit", 60 * 60)
    m.params.MIPGap = 0.01
    m.params.Threads = 19
    m.params.Presolve = 2
    m.params.PreSparsify = 1
    m.params.Cuts = 0
    m.params.MIPFocus = 3
    m.params.ImproveStartTime = 720
    # m.params.Method = 3
    # m.params.NodeMethod = 1
    # print('no problems so far')

    # m.params.Method = 3
    print("numPoints = {} , adjacency_shape = {}".format(
        numPoints, adjacencyMatrix.shape))
    # Creating variables
    # timeVarDict variables are named from 'C0' to 'CN', where N=numPoints-1
    timeVarDict = m.addMVar(numPoints, vtype=GRB.CONTINUOUS, name='timeVars')
    # edgeVarDict variables are named from 'CN1' to CN2' where N1=numPoints and N2=numPoints+(numPoints+1)^2 - 1
    edgeVarDict = m.addMVar((numPoints + 1, numPoints + 1),
                            vtype=GRB.BINARY,
                            name='edgeVars')
    # networkFlowDict variables are named from 'CN3' to 'CN4', where N3 = numPoints + (numPoints+1)^2 and N4 = numPoints + 2*(numPoints+1)^2 - 1
    networkFlowDict = m.addMVar((numPoints + 1, numPoints + 1),
                                vtype=GRB.CONTINUOUS,
                                name='networkVars')

    slackVars = m.addMVar(irradiationMatrix.shape[0],
                          vtype=GRB.CONTINUOUS,
                          name='slackVars')
    # Setting objective
    numPointOnes = np.ones(numPoints)
    obj = gp.MLinExpr()
    obj = numPointOnes @ timeVarDict
    # print('gets here')
    # Assuming the last node (index - numPoints) is a "dummy" node, whose distance from all other nodes is zero
    # The path length in the objective includes only "real" edges, not involving the dummy node. Therefore, this optimization finds the shortest path, not tour
    for i in range(numPoints):
        obj += velocityAdjacencyMatrix[:, i] @ edgeVarDict[:, i]
    obj += 100 * slackVars.sum()
    print('multiplies \n\n')
    m.setObjective(obj, GRB.MINIMIZE)
    # Setting constraints
    # Setting bounds for each time variable
    numPointZeros = np.zeros(numPoints)
    TIME_UPPER_LIM = 3000000
    m.addConstr(timeVarDict >= numPointZeros, "Time_sign_constraints")
    numPointM = np.full(numPoints + 1, TIME_UPPER_LIM)
    # print('gets here2 ')
    for i in range(numPoints):
        m.addConstr(timeVarDict[i] <= edgeVarDict[i, :] @ numPointM,
                    "Time_upper-bound_constraint" + str(i))
        # Setting bounds for each network flow variable
    numPointPlusZeros = np.zeros(numPoints + 1)
    print('after multiplication 1')
    for i in range(numPoints + 1):
        m.addConstr(networkFlowDict[i, :] >= numPointPlusZeros,
                    "Flow_sign_constraint" + str(i))
        ubVector = np.full(1, TIME_UPPER_LIM)
        m.addConstr(
            networkFlowDict[i, :] <= edgeVarDict[i, :] * TIME_UPPER_LIM,
            "Flow_upper-bound_constraint" + str(i))
        # No edges allowed from a vertex to itself
    for i in range(numPoints + 1):
        m.addConstr(edgeVarDict[i, i] == 0,
                    "No_loop_edges_constraint" + str(i))
        # No flow allowed from a vertex to itself
    for i in range(numPoints + 1):
        m.addConstr(networkFlowDict[i, i] == 0,
                    "No_loop_flows_constraint" + str(i))
        # Making sure tour starts at the dummy node (which has index numPoints)
    print('gets here?')
    numPointPlusOnes = np.ones(numPoints + 1)
    m.addConstr(numPointPlusOnes @ edgeVarDict[numPoints, :] == 1,
                "Start_at_dummy_node")
    # Adding constraints for required flux value for each edge
    minFluxVector = np.full(numEdges, minFluxReqd)
    # Multiply by -1 since the original irradiation matrix stores all negative values
    sparseIrradiationMatrix = sp.csr_matrix(-1 * irradiationMatrix)
    # print('gets here 3')
    m.addConstr(
        sparseIrradiationMatrix @ timeVarDict + slackVars >= minFluxVector,
        "Edge_flux_constraints")
    # ensure all slacks are greater than or equal to zero
    m.addConstr(slackVars >= 0, 'slack_constraints')
    # Ensuring each selected vertex has degree 2
    print('gets here 4')
    for i in range(numPoints + 1):
        m.addConstr(
            numPointPlusOnes @ edgeVarDict[i, :] == numPointPlusOnes
            @ edgeVarDict[:, i], "Ensure_degree_2_constraint" + str(i))
        m.addConstr(numPointPlusOnes @ edgeVarDict[i, :] <= 1,
                    "In-flux_limit_constraint" + str(i))
        # Bounding flow values
    tmp_matrix_sum = gp.LinExpr()
    for this_one in edgeVarDict.tolist():
        tmp_matrix_sum += sum(this_one)
    flowBound = m.addVar(vtype=GRB.CONTINUOUS)
    print('almost there')
    # print(edgeVarDict.vararr.sum())

    m.addConstr(flowBound == tmp_matrix_sum - 1)

    maximum = m.addVar(vtype=GRB.CONTINUOUS)
    networkFlowFlatList = []
    for i in networkFlowDict.tolist():
        networkFlowFlatList.extend(i)
    m.addConstr(maximum == gp.max_(networkFlowFlatList))
    m.addConstr(maximum - flowBound <= 0)
    # Ensuring equal-density flow across the tour
    print('passes through here')
    for i in range(numPoints):
        m.addConstr(
            numPointPlusOnes @ networkFlowDict[:, i] -
            numPointPlusOnes @ networkFlowDict[i, :] == numPointPlusOnes
            @ edgeVarDict[i, :], "Equal_density-flow_constraint" + str(i))
    # Optimizing model
    # m.write('..\Week9\model_matrix.lp')
    # print('Going to optimize')
    m.optimize()
    print('finished optimizing')
    variableValues = list()
    # Getting solution
    # for v in m.getVars():
    #     print('%s %g' % (v.varName, v.x))
    #     variableValues.append(v.x)
    timeValues = timeVarDict.x.flatten()
    edgeValues = edgeVarDict.x.flatten()
    print('\n\n\n slack values = {} \n\n\n'.format(slackVars.x.sum()))
    # pdb.set_trace()
    # timeValues = m.getVarByName('timeVars').x
    # edgeValues = m.getVarByName('edgeVars').x
    print('Obj: %g' % m.objVal)
    return timeValues, edgeValues, irradiationMatrix, pathDict, m.objVal, adjacencyMatrix