def main(force_test=False):
    """Entry point: parse CLI arguments and launch training.

    When HYPERPARAM_SEARCH is enabled, runs one sequential training loop per
    generated hyperparameter trial instead of a single run.

    Args:
        force_test: when True, override args to run in test mode.
    """
    mp.set_start_method("spawn", force=True)

    # Grid-search parser when tuning; plain argparse otherwise.
    if HYPERPARAM_SEARCH:
        parser = HyperOptArgumentParser(strategy='grid_search')
    else:
        parser = argparse.ArgumentParser(description='Order embedding arguments')
    utils.parse_optimizer(parser)
    parse_encoder(parser)
    args = parser.parse_args()
    print(args)

    args.n_workers = 1
    if force_test:
        args.test = True

    # Currently due to parallelism in multi-gpu training, this code performs
    # sequential hyperparameter tuning.
    # All gpus are used for every run of training in hyperparameter search.
    if HYPERPARAM_SEARCH:
        trials = args.trials(HYPERPARAM_SEARCH_N_TRIALS)
        for trial_idx, hparam_trial in enumerate(trials):
            print("Running hyperparameter search trial", trial_idx)
            print(hparam_trial)
            train_loop(hparam_trial)
    else:
        train_loop(args)
def main():
    """Compute and save an alignment matrix between a query and a target graph.

    Graphs are loaded from pickle files given via --query_path / --target_path;
    when a path is omitted, a random G(n, p) graph is generated instead.  The
    resulting matrix is written to results/alignment.npy and a heatmap plot is
    saved to plots/alignment.png.
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pattern.
    os.makedirs("plots/", exist_ok=True)
    os.makedirs("results/", exist_ok=True)

    parser = argparse.ArgumentParser(description='Alignment arguments')
    utils.parse_optimizer(parser)
    parse_encoder(parser)
    parser.add_argument('--query_path', type=str, help='path of query graph',
        default="")
    parser.add_argument('--target_path', type=str, help='path of target graph',
        default="")
    args = parser.parse_args()
    args.test = True

    # SECURITY NOTE: pickle.load executes arbitrary code from the file being
    # loaded — only pass paths to trusted pickle files.
    if args.query_path:
        with open(args.query_path, "rb") as f:
            query = pickle.load(f)
    else:
        query = nx.gnp_random_graph(8, 0.25)
    if args.target_path:
        with open(args.target_path, "rb") as f:
            target = pickle.load(f)
    else:
        target = nx.gnp_random_graph(16, 0.25)

    model = build_model(args)
    mat = gen_alignment_matrix(model, query, target,
        method_type=args.method_type)

    np.save("results/alignment.npy", mat)
    print("Saved alignment matrix in results/alignment.npy")

    plt.imshow(mat, interpolation="nearest")
    plt.savefig("plots/alignment.png")
    print("Saved alignment matrix plot in plots/alignment.png")
import pickle
import torch.multiprocessing as mp
from sklearn.decomposition import PCA

from common import data
from common import models
from common import utils
from subgraph_matching.config import parse_encoder

# Now we load the model and a dataset to analyze embeddings on, here ENZYMES.
from subgraph_matching.train import make_data_source

# Build the parser but parse an empty argument string so that only defaults
# are used (typical for a notebook-style analysis script).
# NOTE(review): argparse, os, and torch are used below but not imported in
# this chunk — presumably imported elsewhere in the file; confirm.
parser = argparse.ArgumentParser()
utils.parse_optimizer(parser)
parse_encoder(parser)
args = parser.parse_args("")
# Checkpoints live one directory up relative to this analysis script.
args.model_path = os.path.join("..", args.model_path)
print("Using dataset {}".format(args.dataset))

# Instantiate the order-embedding model on the available device, switch to
# eval mode (disables dropout/batch-norm updates), and load trained weights.
model = models.OrderEmbedder(1, args.hidden_dim, args)
model.to(utils.get_device())
model.eval()
model.load_state_dict(
    torch.load(args.model_path, map_location=utils.get_device()))

# NOTE(review): the comment above mentions ENZYMES but this loads "wn18" —
# confirm which dataset is actually intended here.
train, test, task = data.load_dataset("wn18")

from collections import Counter