def main(_):
  """Smoke test: fixing a variable removes it from the model and the
  reduced problem still yields a feasible solution."""
  milp = get_sample('milp-cauction-100-filtered', 'train', 0)
  # Value the feasible solution assigns to variable 'x1'.
  x1_value = milp.feasible_solution['x1']
  scip_instance = SCIPMIPInstance.fromMIPInstance(milp.mip)
  # SCIP prefixes transformed variable names with 't_'.
  reduced = scip_instance.fix({'t_x1': x1_value})
  assert 't_x1' not in reduced.varname2var
  reduced.get_feasible_solution()
def sample(i):
  """Solve one randomly partially-fixed MILP and report solver statistics.

  Picks the (i mod train-set-size)-th training instance, fixes all but K
  (K ~ Uniform[20, 50)) randomly chosen integer/binary variables to their
  values in the known feasible solution, and solves the remaining
  sub-problem with SCIP (seeded with `i` for reproducibility).

  Args:
    i: sample index; also used as the RNG / SCIP randomization seed.

  Returns:
    ConfigDict with `solving_time`, `determinstic_time` (sic) and `nnodes`.
  """
  seed = i
  np.random.seed(seed)
  milp = get_sample(DATASET, 'train', i % LENGTH_MAP[DATASET]['train'])
  mip = SCIPMIPInstance.fromMIPInstance(milp.mip)
  all_integer_vars = []
  feasible_ass = milp.feasible_solution
  for vname, var in mip.varname2var.items():
    if var.vtype() in ['INTEGER', 'BINARY']:
      # BUG FIX: the original used vname.lstrip('t_'), which strips the
      # *characters* 't' and '_' from the left (mangling e.g. 't_t1' to
      # '1'). Remove exactly the SCIP transform prefix 't_' instead.
      if vname.startswith('t_'):
        vname = vname[len('t_'):]
      all_integer_vars.append(vname)
  # K variables stay free; the rest are fixed to the feasible assignment.
  K = min(len(all_integer_vars), np.random.randint(20, 50))
  # Note: `j` (not `i`) so the comprehension does not shadow the parameter.
  fixed_ass = {
      all_integer_vars[j]: feasible_ass[all_integer_vars[j]]
      for j in np.random.choice(
          len(all_integer_vars), len(all_integer_vars) - K, replace=False)
  }
  model = mip.fix(fixed_ass)
  # Seed SCIP's internal randomization so runs are reproducible.
  model.setBoolParam('randomization/permutevars', True)
  model.setIntParam('randomization/permutationseed', seed)
  model.setIntParam('randomization/randomseedshift', seed)
  model.optimize()
  solving_stats = ConfigDict(model.getSolvingStats())
  # 'determinstic_time' (sic) key kept misspelled for backward
  # compatibility with existing consumers of these results.
  results = ConfigDict(
      solving_time=solving_stats.solvingtime,
      determinstic_time=solving_stats.deterministictime,
      nnodes=model.getNNodes(),
  )
  return results
def _sample(self, choice=None):
  """Select a MILP instance and an associated (solution, objective) pair.

  The curriculum (if active) may override both the instance choice and the
  starting solution/objective; otherwise a random instance is drawn when
  `choice` is None, and the instance's precomputed feasible solution and
  objective are returned.

  Args:
    choice: optional explicit instance index; ignored if the curriculum
      supplies its own choice.

  Returns:
    Tuple (milp, solution, objective).
  """
  graph_start_idx = self._graph_start_idx
  status = self._pick_sample_for_curriculum(choice)
  if status is not None:
    choice, sol, obj = status
  else:
    # BUG FIX: the original only initialized sol/obj inside the
    # `choice is None` branch, so a non-curriculum call with an explicit
    # `choice` hit an UnboundLocalError below. Initialize them on every
    # non-curriculum path.
    sol, obj = None, None
    if choice is None:
      choice = self._rnd_state.choice(
          range(graph_start_idx, graph_start_idx + self._get_n_graphs()))
  self._milp_choice = choice
  milp = get_sample(self._dataset, self._dataset_type, choice)
  if sol is None:
    # Fall back to the instance's stored feasible solution.
    return milp, milp.feasible_solution, milp.feasible_objective
  else:
    return milp, sol, obj
def main(_):
  """Benchmark model-copy time, then repeatedly solve with 500 randomly
  fixed variables, printing the objective of each solve."""
  milp = get_sample('milp-cauction-300-filtered', 'train', 102)
  mip = SCIPMIPInstance.fromMIPInstance(milp.mip)
  # Time 10 copies; average over the last 5 to skip warm-up noise.
  copy_times = []
  for _ in range(10):
    with U.Timer() as timer:
      model = mip.get_scip_model()
    copy_times.append(timer.to_seconds())
  print(f'Avg time to copy the model: {np.mean(copy_times[5:])}')
  feasible = milp.feasible_solution
  for _ in tqdm(range(20)):
    # Fix a fresh random subset of 500 variables to their feasible values.
    chosen = np.random.permutation(list(feasible.keys()))[:500]
    fixed_ass = {k: feasible[k] for k in chosen}
    ass, obj = fix_and_solve(model, fixed_ass)
    print(obj)
    # Reset the transformed problem so the model can be re-fixed/re-solved.
    model.freeTransform()
def __init__(
    self,
    shell_class,
    shell_config,
    agent_class,
    agent_config,
    env_class,
    env_config,
    seed,
    dataset,
    dataset_type,
    graph_start_idx,
    batch_size=1,  # num_envs
    use_parallel_envs=False,
    use_threaded_envs=False,
    create_shell=True,
    **config):
  """Builds a batch of environments (and optionally a shell) for one
  MIP instance drawn from `dataset`/`dataset_type` at `graph_start_idx`."""
  self.config = ConfigDict(seed=seed, **config)
  self.batch_size = batch_size
  self.rng = np.random.RandomState(seed)
  self.env_config = env_config
  # get the mip instance
  self.k = env_config.k

  def _make_env_config():
    # Per-env copy so each env gets its own mutable config.
    cfg = ConfigDict(**env_config)
    cfg.update({
        'graph_start_idx': graph_start_idx,
        'dataset': dataset,
        'dataset_type': dataset_type,
        'n_local_moves': int(1e10),  # infinity
    })
    return cfg

  # init shell and environments
  env_configs = [_make_env_config() for _ in range(batch_size)]
  milp = get_sample(dataset, dataset_type, graph_start_idx)
  self.mip = milp.mip
  self._optimal_lp_sol = milp.optimal_lp_sol
  if use_parallel_envs:
    self._env = ParallelBatchedEnv(
        batch_size,
        env_class,
        env_configs,
        seed,
        use_threads=use_threaded_envs)
  else:
    self._env = SerialBatchedEnv(batch_size, env_class, env_configs, seed)
  if not create_shell:
    return
  # disable sync — the shell must be restored from a checkpoint instead.
  shell_config['sync_period'] = None
  assert shell_config['restore_from']
  self._shell = shell_class(
      action_spec=self._env.action_spec(),
      obs_spec=self._env.observation_spec(),
      agent_class=agent_class,
      agent_config=agent_config,
      batch_size=batch_size,
      seed=seed,
      verbose=False,
      **shell_config,
  )