def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--state_size', type=int, default=128,
                        help='Size of the variable states in RUN-CSP')
    parser.add_argument('-k', '--kappa', type=float, default=1.0,
                        help='The parameter kappa for the loss function')
    parser.add_argument('-e', '--epochs', type=int, default=25,
                        help='Number of training epochs')
    parser.add_argument('-t', '--t_max', type=int, default=30,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-b', '--batch_size', type=int, default=10,
                        help='Batch size for training')
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Model directory in which the trained model is stored')
    parser.add_argument('-d', '--data_path',
                        help='A path to a training set of graphs in the DIMACS graph format')
    args = parser.parse_args()

    print('loading graphs...')
    names, graphs = data_utils.load_graphs(args.data_path)
    random.shuffle(graphs)

    print('Converting graphs to CSP instances')
    # encode each graph as a CSP over the binary NAND relation (independent set)
    instances = [CSP_Instance.graph_to_csp_instance(g, is_language, 'NAND') for g in graphs]

    # combine instances into batches
    train_batches = CSP_Instance.batch_instances(instances, args.batch_size)

    # construct and train a new network; pass kappa through to the loss function
    network = Max_IS_Network(args.model_dir, state_size=args.state_size, kappa=args.kappa)
    train(network, train_batches, t_max=args.t_max, epochs=args.epochs)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--epochs', type=int, default=20,
                        help='Number of training epochs')
    parser.add_argument('-t', '--t_max', type=int, default=25,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-b', '--batch_size', type=int, default=64,
                        help='Batch size for training')
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Model directory in which the trained model is stored')
    parser.add_argument('-d', '--data_path',
                        help='A path to a training set of graphs in the DIMACS graph format')
    parser.add_argument('--n_colors', type=int, default=3,
                        help='Number of colors')
    args = parser.parse_args()

    language = Constraint_Language.get_coloring_language(args.n_colors)

    print('loading graphs...')
    names, graphs = data_utils.load_graphs(args.data_path)
    random.shuffle(graphs)

    print('Converting graphs to CSP instances')
    instances = [CSP_Instance.graph_to_csp_instance(g, language, 'NEQ') for g in tqdm(graphs)]

    # combine instances into batches
    train_batches = CSP_Instance.batch_instances(instances, args.batch_size)

    # construct and train new network
    network = RUN_CSP(args.model_dir, language)
    train(network, train_batches, epochs=args.epochs, t_max=args.t_max)
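# Usage sketch for the coloring training script above. The file name
# train_coloring.py and the paths are illustrative assumptions, not taken
# from this code base:
#
#   python train_coloring.py -d ./data/graphs -m ./models/coloring_3 --n_colors 3 -e 20
#
# This would train a fresh RUN_CSP network on 3-coloring instances built from
# the graphs under ./data/graphs and store the model in ./models/coloring_3.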
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--batch_size', type=int, default=64,
                        help='Batch size used during training')
    parser.add_argument('-e', '--epochs', type=int, default=20,
                        help='Number of training epochs')
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Model directory in which the trained model is stored')
    parser.add_argument('-t', '--t_max', type=int, default=100,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-i', '--n_instances', type=int, default=400,
                        help='Number of instances for training')
    args = parser.parse_args()

    language = mc_weighted_language

    print(f'Generating {args.n_instances} training instances')
    graphs = [get_random_graph() for _ in range(args.n_instances)]
    instances = [CSP_Instance.graph_to_weighted_mc_instance(g) for g in tqdm(graphs)]

    # combine instances into batches
    train_batches = CSP_Instance.batch_instances(instances, args.batch_size)

    # construct and train a new network
    net = RUN_CSP(args.model_dir, language=language)
    train(net, train_batches, t_max=args.t_max, epochs=args.epochs)
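# get_random_graph is not defined in this excerpt. A plausible stand-in for
# experimentation is sketched below; it is an assumption, not the
# repository's implementation. Weighted Max-Cut instances need edge weights,
# so random +/-1 weights are attached here.

import networkx as nx
import random

def get_random_graph(n_min=50, n_max=100, p=0.1):
    # sample a graph size uniformly, then draw an Erdos-Renyi G(n, p) graph
    n = random.randint(n_min, n_max)
    g = nx.erdos_renyi_graph(n, p)
    for u, v in g.edges:
        g[u][v]['weight'] = random.choice([-1, 1])
    return g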
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--state_size', type=int, default=128,
                        help='Size of the variable states in RUN-CSP')
    parser.add_argument('-e', '--epochs', type=int, default=25,
                        help='Number of training epochs')
    parser.add_argument('-t', '--t_max', type=int, default=30,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-b', '--batch_size', type=int, default=10,
                        help='Batch size for training')
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Model directory in which the trained model is stored')
    parser.add_argument('-d', '--data_path',
                        help='A path to a training set of formulas in the DIMACS cnf format')
    args = parser.parse_args()

    print('loading cnf formulas...')
    names, formulas = data_utils.load_formulas(args.data_path)
    random.shuffle(formulas)

    print('Converting formulas to CSP instances')
    instances = [CSP_Instance.cnf_to_instance(f) for f in formulas]

    # combine instances into batches
    train_batches = CSP_Instance.batch_instances(instances, args.batch_size)

    # construct and train new network
    network = Max_2SAT_Network(args.model_dir, state_size=args.state_size)
    train(network, train_batches, t_max=args.t_max, epochs=args.epochs)
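# For reference, a minimal example of the standard DIMACS cnf format this
# script expects (2 variables, 2 clauses; each clause has at most two
# literals for Max-2SAT, and every clause line is terminated by 0):
#
#   p cnf 2 2
#   1 -2 0
#   -1 2 0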
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--state_size', type=int, default=128,
                        help='Size of the variable states in RUN-CSP')
    parser.add_argument('-b', '--batch_size', type=int, default=10,
                        help='Batch size used during training')
    parser.add_argument('-e', '--epochs', type=int, default=25,
                        help='Number of training epochs')
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Model directory in which the trained model is stored')
    parser.add_argument('-t', '--t_max', type=int, default=30,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-d', '--data_path',
                        help='A path to a training set of graphs in the DIMACS graph format')
    args = parser.parse_args()

    # 2-coloring with NEQ constraints corresponds to unweighted Max-Cut
    language = Constraint_Language.get_coloring_language(2)

    print('loading graphs...')
    names, graphs = data_utils.load_graphs(args.data_path)
    instances = [CSP_Instance.graph_to_csp_instance(g, language, 'NEQ') for g in graphs]

    # combine instances into batches
    train_batches = CSP_Instance.batch_instances(instances, args.batch_size)

    # construct and train a new network
    network = RUN_CSP(args.model_dir, language=language, state_size=args.state_size)
    train(network, train_batches, t_max=args.t_max, epochs=args.epochs)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Path to the trained RUN-CSP instance')
    parser.add_argument('-v', '--n_variables', type=int, default=100,
                        help='Number of variables in each evaluation instance')
    parser.add_argument('--c_min', type=int, default=100,
                        help='Minimum number of clauses in each evaluation instance')
    parser.add_argument('--c_max', type=int, default=600,
                        help='Maximum number of clauses in each evaluation instance')
    parser.add_argument('-i', '--n_instances', type=int, default=100,
                        help='Number of instances for evaluation')
    parser.add_argument('-t', '--t_max', type=int, default=40,
                        help='Number of network iterations t_max')
    parser.add_argument('-a', '--attempts', type=int, default=64,
                        help='Number of attempts to boost results')
    args = parser.parse_args()

    # load the trained RUN_CSP instance and its constraint language
    network = RUN_CSP.load(args.model_dir)
    language = network.language

    print(f'Generating {args.n_instances} evaluation instances')
    eval_instances = [CSP_Instance.generate_random(args.n_variables,
                                                   np.random.randint(args.c_min, args.c_max),
                                                   language)
                      for _ in tqdm(range(args.n_instances))]

    # evaluate the network with boosted predictions
    evaluate_boosted(network, eval_instances, args.t_max, args.attempts)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Path to the trained RUN-CSP instance')
    parser.add_argument('-t', '--t_max', type=int, default=100,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-a', '--attempts', type=int, default=64,
                        help='Attempts for each graph')
    parser.add_argument('-d', '--data_path', default=None,
                        help='Path to the evaluation data. Expects a directory with graphs in DIMACS format.')
    args = parser.parse_args()

    network = Max_IS_Network.load(args.model_dir)

    print('loading graphs...')
    names, graphs = data_utils.load_graphs(args.data_path)
    # keep the graph names so results can be attributed to instances
    instances = [CSP_Instance.graph_to_csp_instance(g, is_language, 'NAND', name=n)
                 for n, g in zip(names, graphs)]

    evaluate_boosted(network, instances, args.t_max, attempts=args.attempts)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Path to the trained RUN-CSP instance')
    parser.add_argument('-t', '--t_max', type=int, default=100,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-a', '--attempts', type=int, default=64,
                        help='Attempts for each formula')
    parser.add_argument('-d', '--data_path', default=None,
                        help='Path to the evaluation data. Expects a directory with formulas in DIMACS cnf format.')
    args = parser.parse_args()

    network = Max_2SAT_Network.load(args.model_dir)

    print('loading cnf formulas...')
    names, formulas = data_utils.load_formulas(args.data_path)

    print('Converting formulas to CSP instances')
    instances = [CSP_Instance.cnf_to_instance(f, name=n) for n, f in zip(names, formulas)]

    evaluate_boosted(network, instances, args.t_max, attempts=args.attempts)
def predict_boosted(self, instance, iterations, attempts):
    """
    Generate predictions with boosted performance by making multiple runs in parallel and using the best result.
    :param instance: A CSP_Instance object.
    :param iterations: The number of iterations that RUN-CSP performs on each instance.
    :param attempts: The number of parallel runs.
    :return: The predictions of the run with the fewest conflicts.
    """
    # duplicate the instance and generate predictions for all copies in parallel
    combined = CSP_Instance.merge([instance for _ in range(attempts)])
    output_dict = self.predict(combined, iterations=iterations)

    # soft assignments for all iterations
    phi = output_dict['phi']
    phi = np.reshape(phi, (attempts, instance.n_variables, iterations, instance.language.domain_size))

    # compute hard assignments
    assignments = np.argmax(phi, axis=3)

    # compute the number of conflicts in each attempt at each iteration
    conf = np.zeros([attempts, iterations], np.int64)
    for r in instance.language.relations:
        # binary encoding of whether each constraint has a conflict
        edge_conf = output_dict['edge_conflicts'][r]
        edge_conf = np.reshape(edge_conf, [attempts, len(instance.clauses[r]), iterations])
        conf += np.int64(np.sum(edge_conf, axis=1))

    # select the solution with the fewest conflicts as the final output
    best = np.unravel_index(np.argmin(conf, axis=None), conf.shape)
    best_assignment = assignments[best[0], :, best[1]]
    best_conflicts = conf[best]
    best_conflict_ratio = best_conflicts / instance.n_clauses

    output = {'assignment': best_assignment,
              'conflicts': best_conflicts,
              'conflict_ratio': best_conflict_ratio,
              'all_assignments': assignments,
              'all_conflicts': conf}
    return output
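# Minimal usage sketch for predict_boosted, assuming a trained RUN_CSP model
# on disk and a single CSP_Instance. The helper name boosted_solve, the model
# path argument, and the chosen iteration/attempt counts are illustrative
# assumptions, not prescribed by the code above.

def boosted_solve(model_dir, instance, t_max=100, attempts=64):
    # load the trained network and run `attempts` parallel copies of the instance
    net = RUN_CSP.load(model_dir)
    output = net.predict_boosted(instance, iterations=t_max, attempts=attempts)
    # report the best run; conflict_ratio is conflicts divided by the number of clauses
    print(f"{output['conflicts']} conflicts, ratio {output['conflict_ratio']:.4f}")
    return output['assignment']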
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model_dir', type=str,
                        help='The model directory of a trained network')
    parser.add_argument('-t', '--t_max', type=int, default=100,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-a', '--attempts', type=int, default=64,
                        help='Attempts for each graph')
    parser.add_argument('-d', '--data_path', default=None,
                        help='A path to a set of evaluation graphs in the NetworkX adj_list format. If left unspecified, random instances are used.')
    parser.add_argument('-v', '--n_variables', type=int, default=400,
                        help='Number of variables in each evaluation instance. Only used when --data_path is not specified.')
    parser.add_argument('--degree', type=int, default=3,
                        help='Degree of the random regular evaluation graphs. Only used when --data_path is not specified.')
    parser.add_argument('-i', '--n_instances', type=int, default=100,
                        help='Number of instances for evaluation. Only used when --data_path is not specified.')
    parser.add_argument('-s', '--save_path', type=str,
                        help='Path to a csv file to store results')
    args = parser.parse_args()

    language = mc_weighted_language

    if args.data_path is not None:
        print('loading graphs...')
        paths = glob.glob(args.data_path)
        graphs = [data_utils.load_col_graph(p) for p in tqdm(paths)]
        names = [os.path.basename(p) for p in paths]
        instances = [CSP_Instance.graph_to_weighted_mc_instance(g, name=name)
                     for g, name in zip(graphs, names)]
    else:
        print(f'Generating {args.n_instances} evaluation instances')
        graphs = [nx.random_regular_graph(args.degree, args.n_variables)
                  for _ in range(args.n_instances)]
        instances = [CSP_Instance.graph_to_csp_instance(g, language, 'NEQ')
                     for g in tqdm(graphs)]

    net = RUN_CSP.load(args.model_dir)

    # evaluate with boosting; optionally store results as csv
    if args.save_path is None:
        evaluate_boosted(net, instances, args.t_max, attempts=args.attempts)
    else:
        evaluate_and_save(args.save_path, net, instances, args.t_max, attempts=args.attempts)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Path to the trained RUN-CSP instance')
    parser.add_argument('-t', '--t_max', type=int, default=100,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-a', '--attempts', type=int, default=64,
                        help='Attempts for each graph')
    parser.add_argument('-d', '--data_path', default=None,
                        help='Path to the evaluation data. Expects a directory with graphs in DIMACS format.')
    parser.add_argument('-v', '--n_variables', type=int, default=400,
                        help='Number of variables in each evaluation instance. Only used when --data_path is not specified.')
    parser.add_argument('-c', '--n_clauses', type=int, default=1000,
                        help='Number of clauses in each evaluation instance. Only used when --data_path is not specified.')
    parser.add_argument('-i', '--n_instances', type=int, default=100,
                        help='Number of instances for evaluation. Only used when --data_path is not specified.')
    args = parser.parse_args()

    # load the trained network and its constraint language
    network = RUN_CSP.load(args.model_dir)
    language = network.language

    if args.data_path is not None:
        print('loading graphs...')
        names, graphs = data_utils.load_graphs(args.data_path)
        instances = [CSP_Instance.graph_to_csp_instance(g, language, 'NEQ', name=n)
                     for n, g in zip(names, graphs)]
    else:
        print(f'Generating {args.n_instances} evaluation instances')
        instances = [CSP_Instance.generate_random(args.n_variables, args.n_clauses, language)
                     for _ in tqdm(range(args.n_instances))]

    evaluate_boosted(network, instances, args.t_max, attempts=args.attempts)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--language_config_path', type=str,
                        help='The path to a json file that specifies the constraint language')
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Path to the model directory where the trained RUN-CSP instance will be stored')
    parser.add_argument('-v', '--n_variables', type=int, default=100,
                        help='Number of variables in each training instance.')
    parser.add_argument('--c_min', type=int, default=100,
                        help='Minimum number of clauses in each training instance.')
    parser.add_argument('--c_max', type=int, default=600,
                        help='Maximum number of clauses in each training instance.')
    parser.add_argument('-i', '--n_instances', type=int, default=4000,
                        help='Number of instances for training.')
    parser.add_argument('-t', '--t_max', type=int, default=30,
                        help='Number of iterations t_max for which RUN-CSP runs on each instance')
    parser.add_argument('-s', '--state_size', type=int, default=128,
                        help='Size of the variable states in RUN-CSP')
    parser.add_argument('-b', '--batch_size', type=int, default=10,
                        help='Batch size used during training')
    parser.add_argument('-e', '--epochs', type=int, default=25,
                        help='Number of training epochs')
    args = parser.parse_args()

    print(f'Loading constraint language from {args.language_config_path}')
    language = Constraint_Language.load(args.language_config_path)

    # create RUN_CSP instance for given constraint language
    network = RUN_CSP(args.model_dir, language, args.state_size)

    print(f'Generating {args.n_instances} training instances')
    train_instances = [CSP_Instance.generate_random(args.n_variables,
                                                    np.random.randint(args.c_min, args.c_max),
                                                    language)
                       for _ in tqdm(range(args.n_instances))]

    # combine instances into batches
    train_batches = CSP_Instance.batch_instances(train_instances, args.batch_size)

    # train and store the network
    train(network, train_batches, args.t_max, args.epochs)
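# Illustrative sketch of a constraint-language config for the -l flag above.
# The exact JSON schema expected by Constraint_Language.load is defined
# elsewhere in this code base; the structure below (a domain size plus a map
# from relation names to lists of allowed value tuples) is an assumption for
# illustration only.

import json

example_language = {
    'domain_size': 2,
    'relations': {
        'NEQ': [[0, 1], [1, 0]],  # binary not-equal, i.e. Max-Cut over {0, 1}
        'AND': [[1, 1]],          # hypothetical relation satisfied only by (1, 1)
    },
}

with open('example_language.json', 'w') as f:
    json.dump(example_language, f, indent=2)
# then train with: -l example_language.json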