# Example #1
def main():
    """Script entry point: parse CLI args, load the config, seed all RNGs,
    then train or test with the runner named in the config."""
    args = parse_arguments()
    config = get_config(args.config_file, is_test=args.test)

    # Seed every RNG source so experiments are reproducible.
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)
    config.use_gpu = config.use_gpu and torch.cuda.is_available()

    # log info
    log_file = os.path.join(config.save_dir, "log_exp_{}.txt".format(config.run_id))
    logger = setup_logging(args.log_level, log_file)
    logger.info("Writing log file to {}".format(log_file))
    logger.info("Exp instance id = {}".format(config.run_id))
    logger.info("Exp comment = {}".format(args.comment))
    logger.info("Config =")
    print(">" * 80)
    pprint(config)
    print("<" * 80)

    # Run the experiment
    try:
        # NOTE(review): eval() on a config-supplied name executes arbitrary
        # code; a dict mapping runner names to classes would be safer. Kept
        # as-is to stay compatible with existing config files.
        runner = eval(config.runner)(config)
        if not args.test:
            runner.train()
        else:
            runner.test()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # still propagate; genuine failures are logged with a traceback.
        logger.error(traceback.format_exc())

    sys.exit(0)
# Example #2
from utils.data_helper import create_graphs
import torch
from utils.arg_helper import get_config
from torch.utils.data import DataLoader
from random import shuffle
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from classifier.losses import MulticlassClassificationLoss

class Bunch:
    """Lightweight namespace object: every keyword argument passed to the
    constructor becomes an attribute of the instance."""

    def __init__(self, **attrs):
        for name, value in attrs.items():
            setattr(self, name, value)



# Module-level experiment config shared by the helpers in this file.
# NOTE(review): is_test='false' passes a *string*, which is truthy — if
# get_config interprets it as a boolean this actually enables test mode;
# confirm whether the literal False was intended.
config = get_config('config/gran_PROTEINS.yaml', is_test='false')
# Only use the GPU when both the config asks for it and CUDA is available.
config.use_gpu = config.use_gpu and torch.cuda.is_available()




def data_to_bunch(data):
    """Convert a data batch into a Bunch-style object.

    NOTE(review): the function body is truncated in this excerpt — only the
    first statement is visible; the remainder lives outside this view.
    """
    # for k in data[0].keys():
    #     l = data[0][k]

    #     if hasattr(l, 'shape'):
    #         print(k, " ", l.shape)
    #     else:
    #         print(k, " ", l)

    # Ground-truth node count of the first batch element — presumably used
    # to size the structures built below (truncated); verify downstream.
    num_nodes = data[0]['num_nodes_gt']
# Example #3
    # Report the total parameter count of the model (product of each
    # parameter tensor's shape, summed over all parameters).
    print('number of parameters : {}'.format(
        sum([np.prod(x.shape) for x in model.parameters()])))
    # Train unless --test was requested; in test mode only evaluate.
    if not args.test:
        trained_model = train_graph_generation(args, config, train_loader,
                                               val_loader, test_loader, model)
    else:
        test_model = test(args, config, model, dataset)

if __name__ == '__main__':
    # Process command-line arguments, then call main().
    args = parse_arguments()
    config = get_config(args.config_file, is_test=args.test)
    p_name = utils.project_name(config.dataset.name)

    # Load the W&B API key from settings.json (if present) BEFORE it is
    # consumed below. The original read settings.json only *after*
    # wandb.init(), so args.wandb_apikey was still unset/stale when the
    # WANDB_API_KEY env var was exported.
    if os.path.isfile("settings.json"):
        with open('settings.json') as f:
            data = json.load(f)
        args.wandb_apikey = data.get("wandbapikey")

    if args.wandb:
        os.environ['WANDB_API_KEY'] = args.wandb_apikey
        wandb.init(project='{}'.format(p_name),
                   name='{}-{}'.format(args.namestr, args.model_name))

    # Fix random seed for reproducibility.
    seed_everything(args.seed)

    main(args, config)
# Example #4
    )
    # Adam optimizer with the configured initial LR and weight decay;
    # betas/eps are the PyTorch defaults, stated explicitly.
    optimizer = optim.Adam(model.parameters(),
                           lr=config.train.lr_init,
                           betas=(0.9, 0.999),
                           eps=1e-8,
                           weight_decay=config.train.weight_decay)
    # Run the training loop; checkpoints and samples are emitted at the
    # configured intervals, with sigma_list taken from the config.
    fit(model,
        optimizer,
        mc_sampler,
        train_dl,
        max_node_number=config.dataset.max_node_num,
        max_epoch=config.train.max_epoch,
        config=config,
        save_interval=config.train.save_interval,
        sample_interval=config.train.sample_interval,
        sigma_list=config.train.sigmas,
        sample_from_sigma_delta=0.0,
        test_dl=test_dl)

    # After training, generate samples with the trained model.
    sample_main(config, args)

if __name__ == "__main__":
    # Script entry point: load the experiment config and launch training.
    # torch.autograd.set_detect_anomaly(True)
    args = parse_arguments('train_ego_small.yaml')
    ori_config_dict = get_config(args)
    # Work on a copy so the original config dict stays untouched.
    # NOTE(review): .copy() is shallow — process_config may still mutate
    # nested structures shared with ori_config_dict; confirm this is OK.
    config_dict = edict(ori_config_dict.copy())
    process_config(config_dict)
    print(config_dict)
    train_main(config_dict, args)