Example #1
from graphgym.model_builder import create_model  # import paths assume GraphGym's standard layout
from graphgym.utils.comp_budget import params_count

def get_stats():
    # Count the parameters of a minimal 1-in/1-out model built on CPU
    model = create_model(to_device=False, dim_in=1, dim_out=1)
    return params_count(model)
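As a point of reference, a minimal stand-in for params_count takes only a few lines; the sketch below simply sums element counts over all parameter tensors, which may differ from GraphGym's exact accounting (e.g. trainable-only counting).

import torch.nn as nn

def params_count_sketch(model: nn.Module) -> int:
    # Minimal stand-in (not GraphGym's implementation): total number of
    # elements across every parameter tensor in the model
    return sum(p.numel() for p in model.parameters())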
Example #2
    # Argument flags below are reconstructed; the dest names follow how
    # args is used later in this snippet
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', dest='cfg_file', required=True,
                        help='Path to the config file')
    parser.add_argument('--ckpt', dest='ckpt_file', required=True,
                        help='Path to the model checkpoint')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # Load config file
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_cfg(cfg)

    # Set PyTorch environment
    torch.set_num_threads(cfg.num_threads)
    out_dir_parent = cfg.out_dir
    random.seed(cfg.seed)
    np.random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)
    auto_select_device()

    # Set learning environment
    datasets = create_dataset()
    loaders = create_loader(datasets)
    model = create_model(datasets)
    ckpt = torch.load(args.ckpt_file)
    model.load_state_dict(ckpt['model_state'])
    model.eval()  # switch to inference mode before evaluating

    # Disable gradient tracking for the evaluation loop
    with torch.no_grad():
        for loader in loaders:
            for batch in loader:
                batch.to(torch.device(cfg.device))
                pred, true = model(batch)
                print(
                    torch.argmax(torch.nn.functional.softmax(pred, dim=1),
                                 dim=1).tolist(), f'({true.tolist()})')
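The three seeding calls above are a common reproducibility idiom; a reusable helper is sketched below. The name seed_everything is illustrative, not part of GraphGym's API.

import random

import numpy as np
import torch

def seed_everything(seed: int) -> None:
    # Illustrative helper: apply one seed to Python's random module,
    # NumPy, and PyTorch, mirroring the seeding block above
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)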
Example #3
dump_cfg(cfg)
setup_printing()
auto_select_device()
print(f'using device {cfg.device}')
# Set learning environment
datasets = create_dataset()
# create a loader for the train split and for any other defined splits
loaders = create_loader(datasets)
# create a logger for each loader, i.e. report metrics on train/test/val splits
meters = create_logger(datasets, loaders)
# for the unsupervised case, specify dim_out explicitly via the config,
# since there are no labels to infer the output shape from
if cfg.dataset.task_type == 'community':
    model = create_model(datasets, dim_out=cfg.dataset.num_communities)
else:
    model = create_model(datasets)
optimizer = create_optimizer(model.parameters())
scheduler = create_scheduler(optimizer)
# Print model info
logging.info(model)
logging.info(cfg)
cfg.params = params_count(model)
logging.info(f'Num parameters: {cfg.params}')
# Start training
if cfg.train.mode == 'standard':
    train(meters, loaders, model, optimizer, scheduler)
else:
    # NOTE: the import "from graphgym.contrib.train import *" is needed
    # to properly import train loop implementations, although an IDE may
    # flag it as unused; custom modes are dispatched through the registry
    train_dict[cfg.train.mode](meters, loaders, model, optimizer, scheduler)
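For the 'community' branch above, the output dimensionality must come in through the config. Below is a minimal sketch of one way to set it programmatically, reusing the yacs-style merge_from_list pattern from Example #2; the value 5 is purely illustrative.

# Illustrative config override (values are placeholders, not recommendations)
cfg.merge_from_list(['dataset.task_type', 'community',
                     'dataset.num_communities', 5])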