def solve(self, p):
    model = miosqp.MIOSQP()
    model.setup(p.P, p.q, p.A, p.l, p.u, p.i_idx, p.i_l, p.i_u,
                self.options)
    res_miosqp = model.solve()
    return res_miosqp
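# Standalone sketch of the setup/solve pattern the wrapper above relies on,
# using only the miosqp calls and settings keys that appear elsewhere in this
# file; the toy problem data below are illustrative, not taken from any of
# these projects. The object `p` passed to solve() carries the same fields
# (P, q, A, l, u, i_idx, i_l, i_u).
import numpy as np
import scipy.sparse as spa
import miosqp

# quadratic objective defined by (P, q), linear constraints l <= A x <= u,
# variables listed in i_idx are integer with bounds [i_l, i_u]
P = spa.csc_matrix(np.array([[4., 1.], [1., 2.]]))
q = np.array([1., 1.])
A = spa.csc_matrix(np.array([[1., 1.], [1., 0.], [0., 1.]]))
l = np.array([1., 0., 0.])
u = np.array([2., 1., 1.])
i_idx = np.array([0])   # x[0] is integer
i_l = np.array([0])     # integer lower bound
i_u = np.array([1])     # integer upper bound

miosqp_settings = {'eps_int_feas': 1e-03, 'max_iter_bb': 1000,
                   'tree_explor_rule': 1, 'branching_rule': 0,
                   'verbose': False, 'print_interval': 1}
osqp_settings = {'eps_abs': 1e-03, 'eps_rel': 1e-03,
                 'eps_prim_inf': 1e-04, 'verbose': False}

model = miosqp.MIOSQP()
model.setup(P, q, A, l, u, i_idx, i_l, i_u, miosqp_settings, osqp_settings)
res = model.solve()
print(res.status, res.upper_glob, res.x, res.run_time)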
def __init__(self, n_inputs, n_outputs, n_tasks, args):
    super(Net, self).__init__()
    nl, nh = args.n_layers, args.n_hiddens
    self.margin = args.memory_strength
    self.is_cifar = ('cifar10' in args.data_file)
    m = miosqp.MIOSQP()
    self.solver = m
    if self.is_cifar:
        self.net = ResNet18(n_outputs, bias=args.bias)
    else:
        self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])

    self.ce = nn.CrossEntropyLoss()
    self.n_outputs = n_outputs
    self.normalize = args.normalize

    self.opt = optim.SGD(self.parameters(), args.lr)

    self.n_memories = args.n_memories
    self.n_sampled_memories = args.n_sampled_memories
    self.n_constraints = args.n_constraints
    self.gpu = args.cuda
    self.batch_size = args.batch_size
    self.n_iter = args.n_iter
    self.slack = args.slack
    self.change_th = args.change_th  # gradient direction change threshold to re-select constraints

    # allocate ring buffer
    self.memory_data = torch.FloatTensor(self.n_memories, n_inputs)
    self.memory_labs = torch.LongTensor(self.n_memories)

    # allocate selected memory
    self.sampled_memory_data = None
    self.sampled_memory_labs = None
    self.sampled_memory_taskids = None
    self.sampled_memory_age = None
    self.subselect = args.subselect  # if 1, first select from recent memory and then add to sampled memories

    # allocate selected constraints
    self.constraints_data = None
    self.constraints_labs = None

    # old grads to measure changes
    self.old_mem_grads = None

    if args.cuda:
        self.memory_data = self.memory_data.cuda()
        self.memory_labs = self.memory_labs.cuda()

    # allocate temporary synaptic memory
    self.grad_dims = []
    for param in self.parameters():
        self.grad_dims.append(param.data.numel())
    # we keep a few samples per task and use their gradients
    # if args.cuda:
    #     self.grads = self.grads.cuda()

    # allocate counters
    self.observed_tasks = []
    self.old_task = -1
    self.mem_cnt = 0
def __init__(self, n_inputs, n_outputs, n_tasks, args):
    super(Net, self).__init__()
    nl, nh = args.n_layers, args.n_hiddens
    self.margin = args.memory_strength
    self.is_cifar = ('cifar10' in args.data_file)
    m = miosqp.MIOSQP()
    self.solver = m
    if self.is_cifar:
        self.net = ResNet18(n_outputs, bias=args.bias)
    else:
        self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])

    self.ce = nn.CrossEntropyLoss()
    self.n_outputs = n_outputs

    self.opt = optim.SGD(self.parameters(), args.lr)

    self.n_memories = args.n_memories  # number of memories per task
    self.n_sampled_memories = args.n_sampled_memories  # number of sampled memories per task
    self.n_constraints = args.n_constraints
    self.gpu = args.cuda
    self.batch_size = args.batch_size
    self.n_iter = args.n_iter

    # allocate ring buffer
    self.memory_data = torch.FloatTensor(self.n_memories, n_inputs)
    self.memory_labs = torch.LongTensor(self.n_memories)
    self.added_index = self.n_sampled_memories

    # allocate buffer for the current task
    self.sampled_memory_data = None
    self.sampled_memory_labs = None

    # allocate buffer for each task
    self.sampled_task_data = {}
    self.sampled_task_labs = {}

    # allocate selected constraints
    self.constraints_data = None
    self.constraints_labs = None
    self.cluster_distance = 0

    # old grads to measure changes
    self.old_mem_grads = None

    if args.cuda:
        self.memory_data = self.memory_data.cuda()
        self.memory_labs = self.memory_labs.cuda()

    # allocate counters
    self.observed_tasks = []
    self.old_task = -1
    self.mem_cnt = 0
    self.n_task = 0
    self.n_old_task = 0
    self.sample_size_list = []
    self.task_buffer_size = 0

    # for cross distillation
    self.distill = args.distill
    self.teacher = None
    self.T = args.T
    self.alpha = args.alpha
def _solve_MIQP(self, k):
    n = self._data_len
    m = self._X.shape[1]
    i_idx = np.random.choice(np.arange(0, m), k)
    beta = self._X

    P = csc_matrix(2 * np.dot(beta.T, beta))
    q = -2 * np.dot(self._X.T, self._y)
    A = csc_matrix(np.ones((m, m)))
    l = k * np.ones(m)
    u = k * np.ones(m)
    i_l = np.zeros(k, dtype=int)
    i_u = np.ones(k, dtype=int)

    miosqp_settings = {
        # integer feasibility tolerance
        'eps_int_feas': 1e-03,
        # maximum number of iterations
        'max_iter_bb': 1000,
        # tree exploration rule
        #   [0] depth first
        #   [1] two-phase: depth first until first incumbent and then best bound
        'tree_explor_rule': 1,
        # branching rule
        #   [0] max fractional part
        'branching_rule': 0,
        'verbose': False,
        'print_interval': 1
    }
    osqp_settings = {
        'eps_abs': 1e-03,
        'eps_rel': 1e-03,
        'eps_prim_inf': 1e-04,
        'verbose': False
    }

    model = miosqp.MIOSQP()
    model.setup(P, q, A, l, u, i_idx, i_l, i_u,
                miosqp_settings, osqp_settings)
    result = model.solve()

    argx = np.argsort(result.x)[::-1][:k]
    return result.upper_glob, result.x, argx
def solve(n_vec, m_vec, p_vec, repeat, dns_level, seed, solver='gurobi'):
    """
    Solve random optimization problems
    """
    print("Solving random problems with solver %s\n" % solver)

    # Define statistics to record
    std_solve_time = np.zeros(len(n_vec))
    avg_solve_time = np.zeros(len(n_vec))
    min_solve_time = np.zeros(len(n_vec))
    max_solve_time = np.zeros(len(n_vec))

    n_prob = len(n_vec)

    # Store also OSQP time
    if solver == 'miosqp':
        # Add OSQP solve time statistics
        avg_osqp_solve_time = np.zeros(len(n_vec))

    # reset random seed
    np.random.seed(seed)

    for i in range(n_prob):
        # Get dimensions
        n = n_vec[i]
        m = m_vec[i]
        p = p_vec[i]

        print("problem n = %i, m = %i, p = %i" % (n, m, p))

        # Define vector of cpu times
        solve_time_temp = np.zeros(repeat)

        # Store also OSQP time
        if solver == 'miosqp':
            osqp_solve_time_temp = np.zeros(repeat)

        for j in tqdm(range(repeat)):
            # Generate random vector of indices
            i_idx = np.random.choice(np.arange(0, n), p, replace=False)

            # Generate random matrices
            Pt = spa.random(n, n, density=dns_level)
            P = spa.csc_matrix(np.dot(Pt, Pt.T))
            q = sp.randn(n)
            A = spa.random(m, n, density=dns_level)
            u = 2 + sp.rand(m)
            l = -2 + sp.rand(m)

            # Enforce [0, 1] bounds on the integer variables
            i_l = np.zeros(p)
            i_u = np.ones(p)
            # A, l, u = miosqp.add_bounds(i_idx, 0., 1., A, l, u)

            if solver == 'gurobi':
                # Solve with gurobi
                prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)
                res_gurobi = prob.solve(solver=mpbpy.GUROBI,
                                        verbose=False, Threads=1)
                if res_gurobi.status != 'optimal':
                    import ipdb
                    ipdb.set_trace()
                solve_time_temp[j] = 1e3 * res_gurobi.cputime

            elif solver == 'miosqp':
                # Define problem settings
                miosqp_settings = {
                    # integer feasibility tolerance
                    'eps_int_feas': 1e-03,
                    # maximum number of iterations
                    'max_iter_bb': 1000,
                    # tree exploration rule
                    #   [0] depth first
                    #   [1] two-phase: depth first until first incumbent and then best bound
                    'tree_explor_rule': 1,
                    # branching rule
                    #   [0] max fractional part
                    'branching_rule': 0,
                    'verbose': False,
                    'print_interval': 1}

                osqp_settings = {'eps_abs': 1e-03,
                                 'eps_rel': 1e-03,
                                 'eps_prim_inf': 1e-04,
                                 'verbose': False}

                model = miosqp.MIOSQP()
                model.setup(P, q, A, l, u, i_idx, i_l, i_u,
                            miosqp_settings, osqp_settings)
                res_miosqp = model.solve()

                # DEBUG (check if solutions match)
                # prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)
                # res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False)
                # if (np.linalg.norm(res_gurobi.x - res_miosqp.x) /
                #         np.linalg.norm(res_gurobi.x)) > 1e-02:
                #     import ipdb; ipdb.set_trace()

                if res_miosqp.status != miosqp.MI_SOLVED:
                    import ipdb
                    ipdb.set_trace()

                # Solution time
                solve_time_temp[j] = 1e3 * res_miosqp.run_time

                # Store OSQP time as a percentage of the total run time
                if solver == 'miosqp':
                    osqp_solve_time_temp[j] = \
                        100 * (res_miosqp.osqp_solve_time / res_miosqp.run_time)

        # Get time statistics
        std_solve_time[i] = np.std(solve_time_temp)
        avg_solve_time[i] = np.mean(solve_time_temp)
        max_solve_time[i] = np.max(solve_time_temp)
        min_solve_time[i] = np.min(solve_time_temp)

        # Store also OSQP time
        if solver == 'miosqp':
            avg_osqp_solve_time[i] = np.mean(osqp_solve_time_temp)

    # Create pandas dataframe for the results
    df_dict = {'n': n_vec,
               'm': m_vec,
               'p': p_vec,
               't_min': min_solve_time,
               't_max': max_solve_time,
               't_avg': avg_solve_time,
               't_std': std_solve_time}

    # Store also OSQP time
    if solver == 'miosqp':
        df_dict.update({'t_osqp_avg': avg_osqp_solve_time})

    timings = pd.DataFrame(df_dict)

    return timings
def compute_mpc_input(self, x0, u_prev, solver='gurobi'):
    """
    Compute MPC input at initial state x0 with the specified solver
    """
    qp = self.qp_matrices
    N = qp.N

    # Update objective
    q = 2. * (qp.q_x.dot(x0) + qp.q_u)

    # Update bounds
    SA_tildex0 = qp.SA_tilde.dot(x0)
    qp.u[:6 * N] = SA_tildex0
    # qp.l[:6 * N] = -SA_tildex0

    if solver == 'gurobi':
        # Solve problem with gurobi
        prob = mpbpy.QuadprogProblem(qp.P, q, qp.A, qp.l, qp.u,
                                     qp.i_idx, qp.i_l, qp.i_u, x0=u_prev)
        res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False, Threads=1)
        u = res_gurobi.x
        obj_val = res_gurobi.obj_val
        solve_time = res_gurobi.cputime

    elif solver == 'miosqp':
        if self.solver is None:
            # Define problem settings
            miosqp_settings = {
                'eps_int_feas': 1e-02,  # integer feasibility tolerance
                'max_iter_bb': 2000,    # maximum number of branch-and-bound iterations
                'tree_explor_rule': 1,  # tree exploration rule
                                        #   [0] depth first
                                        #   [1] two-phase: depth first until first incumbent,
                                        #       then best bound
                'branching_rule': 0,    # branching rule
                                        #   [0] max fractional part
                'verbose': False,
                'print_interval': 1}

            osqp_settings = {'eps_abs': 1e-03,
                             'eps_rel': 1e-03,
                             'eps_prim_inf': 1e-04,
                             # 'rho': 0.001,
                             # 'rho': 0.1,
                             'verbose': False}

            self.solver = miosqp.MIOSQP()
            self.solver.setup(qp.P, q, qp.A, qp.l, qp.u,
                              qp.i_idx, qp.i_l, qp.i_u,
                              miosqp_settings, osqp_settings)
        else:
            self.solver.update_vectors(q, qp.l, qp.u)

        self.solver.set_x0(u_prev)
        res_miosqp = self.solver.solve()

        # DEBUG: check if gurobi gives the same solution
        # N.B. They do not match when the norm of the difference
        # of the objective functions is below the tolerance
        #
        # prob = mpbpy.QuadprogProblem(qp.P, q, qp.A, qp.l, qp.u, qp.i_idx)
        # res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False, x0=u_prev)
        # if np.linalg.norm(res_miosqp.x - res_gurobi.x) > 1e-02:
        #     print("Norm of difference of solution = %.4e" %
        #           np.linalg.norm(res_miosqp.x - res_gurobi.x))
        #     import ipdb; ipdb.set_trace()

        if res_miosqp.status != miosqp.MI_SOLVED:
            import ipdb
            ipdb.set_trace()

        u = res_miosqp.x
        obj_val = res_miosqp.upper_glob
        solve_time = res_miosqp.run_time
        osqp_solve_time = 100 * res_miosqp.osqp_solve_time / res_miosqp.run_time

    # Get first input
    u0 = u[:6]

    if solver == 'miosqp':
        return u0, obj_val, solve_time, u, \
            osqp_solve_time, \
            res_miosqp.osqp_iter_avg
    else:
        return u0, obj_val, solve_time, u, 0, 0
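# Hedged sketch of the setup-once / update-and-resolve pattern used above:
# between MPC steps only the vectors (q, l, u) change, so the solver object is
# kept and warm started from the previous input. `solver` is assumed to be a
# miosqp.MIOSQP instance that has already been set up (as in the sketch near
# the top of this file); only update_vectors, set_x0 and solve, which all
# appear above, are used. The helper name is illustrative, not part of any
# project here.
def resolve_step(solver, q_new, l_new, u_new, x_prev):
    """Re-solve an already set-up MIQP after an MPC-style data update."""
    solver.update_vectors(q_new, l_new, u_new)  # refresh objective/bound vectors
    solver.set_x0(x_prev)                       # warm start branch-and-bound
    res = solver.solve()
    return res.x, res.upper_glob, res.run_time

# e.g. inside an MPC loop: x_prev, obj_val, t = resolve_step(solver, q_k, l_k, u_k, x_prev)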
def __call__(self, cardinality=None, threshold=None):
    if cardinality is threshold is None:
        raise ValueError("Set either cardinality or threshold.")

    if not self.initialized:
        self.initialize()

    # The number of constraints is the upper and lower bound on each weight,
    # plus one for sum(w_i) <= 1, plus nconformers for the threshold
    # constraints, plus 1 for a cardinality constraint.
    # We set first the weight upper and lower bounds, then the sum constraint,
    # then the binary variable bounds, then the coupling constraints, followed
    # by the threshold constraints and finally the cardinality constraint.
    # A_row effectively contains the constraint indices,
    # A_col holds which variables are involved in each constraint.
    A_data = [1] * (2 * self.nconformers)
    A_row = list(range(self.nconformers)) + [self.nconformers] * self.nconformers
    A_col = list(range(self.nconformers)) * 2
    nconstraints = self.nconformers + 1

    i_l = np.zeros(self.nconformers, np.int32)
    i_u = np.ones(self.nconformers, np.int32)
    i_idx = np.arange(self.nconformers, 2 * self.nconformers, dtype=np.int32)

    # Introduce an implicit cardinality constraint
    # 0 <= zi - wi <= 1
    A_data += [-1] * self.nconformers + [1] * self.nconformers
    # The wi and zi indices
    start_row = self.nconformers + 1
    A_row += list(range(start_row, start_row + self.nconformers)) * 2
    A_col += list(range(2 * self.nconformers))
    nconstraints += self.nconformers

    if threshold is not None:
        # Introduce threshold constraint
        # 0 <= wi - t * zi <= 1
        A_data += [1] * self.nconformers + [-threshold] * self.nconformers
        start_row += self.nconformers
        A_row += list(range(start_row, start_row + self.nconformers)) * 2
        A_col += list(range(2 * self.nconformers))
        nconstraints += self.nconformers

    if cardinality is not None:
        # Introduce explicit cardinality constraint
        # 0 <= sum(zi) <= cardinality
        A_data += [1] * self.nconformers
        A_row += [nconstraints] * self.nconformers
        A_col += list(range(self.nconformers, self.nconformers * 2))
        nconstraints += 1

    l = np.zeros(nconstraints)
    u = np.ones(nconstraints)
    if cardinality is not None:
        u[-1] = cardinality

    A = sparse.csc_matrix((A_data, (A_row, A_col)))

    miqp = miosqp.MIOSQP()
    miqp.setup(self.P, self.q, A, l, u, i_idx, i_l, i_u,
               self.MIOSQP_SETTINGS, self.OSQP_SETTINGS)
    result = miqp.solve()

    self.weights = np.asarray(result.x[:self.nconformers])
    self.obj_value = 2 * result.upper_glob + np.inner(self.target, self.target)
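# Illustrative sketch (not part of the solver above): rebuild the constraint
# matrix described in the comments of __call__ for a toy case with
# nconformers = 2 and both a threshold and a cardinality constraint, to make
# the row layout explicit. Variable order is [w0, w1, z0, z1]; `nconf` and
# `t` are placeholder names for this sketch only.
import numpy as np
from scipy import sparse

nconf = 2
t = 0.2

A_data = [1] * (2 * nconf)                  # w_i bound rows + sum(w_i) row
A_row = list(range(nconf)) + [nconf] * nconf
A_col = list(range(nconf)) * 2

A_data += [-1] * nconf + [1] * nconf        # coupling rows: 0 <= z_i - w_i <= 1
A_row += list(range(nconf + 1, 2 * nconf + 1)) * 2
A_col += list(range(2 * nconf))

A_data += [1] * nconf + [-t] * nconf        # threshold rows: 0 <= w_i - t*z_i <= 1
A_row += list(range(2 * nconf + 1, 3 * nconf + 1)) * 2
A_col += list(range(2 * nconf))

A_data += [1] * nconf                       # cardinality row: 0 <= sum(z_i) <= c
A_row += [3 * nconf + 1] * nconf
A_col += list(range(nconf, 2 * nconf))

A = sparse.csc_matrix((A_data, (A_row, A_col)))
print(A.toarray())   # 8 rows: 2 weight bounds, 1 sum, 2 coupling, 2 threshold, 1 cardinality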
def optimization(self, dynamic_map, target_lane_index, Xp, params, x_tf):
    """
    Solve the trajectory optimization problem.

    x_tf: final longitudinal position of the target front vehicle
    """
    # Cost function
    if target_lane_index == -1:
        target_lane = dynamic_map.jmap.reference_path
    else:
        target_lane = dynamic_map.mmap.lanes[int(target_lane_index)]

    # y_des is the centre of the target lane, x_tf is the final longitudinal
    # position of the target front vehicle
    y_des = target_lane.map_lane.central_path_points[0].position.y
    obj_fun = CostFunction(params, self.var_num, y_des, x_tf)
    qp_P = obj_fun.P
    qp_q = obj_fun.q

    # Optimization loop
    flag = 1
    count = 1
    max_iter = params.max_iter
    Np = params.Np
    nx = params.nx

    while flag:
        print('================== iter={} ===================='.format(count))
        if count > max_iter:
            print('Maximum iteration number!')
            break

        # Model linearization, based on the solution from the previous step
        Ae, be = self.model_linearization(Xp, params)
        qp_A, qp_l, qp_u, i_idx, i_l, i_u = self.get_constraint_matrix(
            Ae, be, params, dynamic_map, target_lane_index)

        # P and A are both in the scipy sparse CSC format
        qp_P = spa.csc_matrix(qp_P)
        qp_A = spa.csc_matrix(qp_A)

        # Solver settings
        miosqp_settings = {
            'eps_int_feas': 1e-03,
            'max_iter_bb': 1000,
            'tree_explor_rule': 1,
            'branching_rule': 0,
            'verbose': False,
            'print_interval': 1
        }
        osqp_settings = {
            'eps_abs': 1e-03,
            'eps_rel': 1e-03,
            'eps_prim_inf': 1e-04,
            'verbose': False
        }

        mysolver = miosqp.MIOSQP()
        mysolver.setup(qp_P, qp_q, qp_A, qp_l, qp_u, i_idx, i_l, i_u,
                       miosqp_settings, osqp_settings)

        # Set initial solution: a good initial guess can speed up the
        # branch-and-bound search significantly (via set_x0)
        solution_init = np.zeros(self.var_num)  # all-zero placeholder warm start
        mysolver.set_x0(solution_init)

        # Solve the problem
        res_miosqp = mysolver.solve()

        # Solution
        X = res_miosqp.x

        # Iteration bookkeeping
        if count == 1:
            Xp = X
        count = count + 1

        if count > 1:
            x_differ = np.zeros((Np + 1, 1))
            y_differ = np.zeros((Np + 1, 1))
            for i in range(Np + 1):
                x_differ[i] = X[i * nx + 0] - Xp[i * nx + 0]
                y_differ[i] = X[i * nx + 1] - Xp[i * nx + 1]

            # Stop condition
            if max([abs(fi) for fi in x_differ]) <= 0.05 and \
                    max([abs(fj) for fj in y_differ]) <= 0.05:
                flag = 0
            else:
                Xp = X

    return X