Example #1
def run(m_idx, run_idx, params=params, list_mode='all'):
    """Train model m_idx on each (train, test) dataloader pair, once per seed."""
    dset = 0
    print('model number %s' % m_idx)
    print('model name %s' % gm.model_list[m_idx].__name__)
    for dl_train, dl_test in zip(train, test):
        print('dset %s;' % dset)
        t0 = time.time()
        path = os.path.join(save_path, 'run%s' % run_idx, 'model' + str(m_idx))
        pathlib.Path(os.path.join(path, 'data')).mkdir(parents=True,
                                                       exist_ok=True)
        pathlib.Path(os.path.join(path, 'models')).mkdir(parents=True,
                                                         exist_ok=True)
        for seed in seeds:
            np.random.seed(seed)
            torch.manual_seed(seed)
            model = gm.model_list[m_idx](*params)
            opt = torch.optim.Adam(model.parameters(), lr=lr)
            one_run(dset,
                    seed,
                    n_epochs,
                    model,
                    opt,
                    dl_train,
                    dl_test,
                    path,
                    cuda=False,
                    list_mode=list_mode)
        t = time.time()
        print('total running time for one ds %s seconds' % str(t - t0))
        dset += 1
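As a point of reference, a driver loop like the one below could call run for every model over several repeated runs. The run count and the list_mode value here are illustrative assumptions, not part of the original code.

# Hypothetical driver, for illustration only.
n_runs = 3  # assumed number of repetitions per model
for run_idx in range(n_runs):
    for m_idx in range(len(gm.model_list)):
        run(m_idx, run_idx, list_mode='all')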
Example #2
def run(m_idx, run_idx):
    """Train model m_idx on each (train_5, val_5) dataset pair, once per seed."""
    dset = 0
    print('model number %s' % m_idx)
    print('model name %s' % gm.model_list_double[m_idx].__name__)
    for dpath_train, dpath_val in zip(train_5, val_5):
        print('dset %s;' % dset)
        t0 = time.time()
        dl_train = load_dl(
            os.path.join(prefix, dpath_train), double=True)
        dl_val = load_dl(
            os.path.join(prefix, dpath_val), double=True)
        path = os.path.join(
            args.directory, 'run%s' % run_idx, 'model' + str(m_idx))
        pathlib.Path(
            os.path.join(path, 'data')).mkdir(parents=True, exist_ok=True)
        pathlib.Path(
            os.path.join(path, 'models')).mkdir(parents=True, exist_ok=True)
        for seed in seeds:
            np.random.seed(seed)
            torch.manual_seed(seed)
            model = gm.model_list_double[m_idx](*params)
            opt = torch.optim.Adam(model.parameters(), lr=lr)
            one_run(
                dset,
                seed,
                n_epochs,
                model,
                opt,
                dl_train,
                dl_val,
                path,
                cuda=False)
        t = time.time()
        print('total running time for one ds %s seconds' % str(t - t0))
        dset += 1
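Note the seeding pattern shared by these examples: both the NumPy and PyTorch RNGs are reset before each model is built, so every (dataset, seed) combination is reproducible. A minimal standalone sketch of the same idea, with a helper name of our own and an optional CUDA branch that the originals (which run on CPU) do not need:

import numpy as np
import torch

def set_seed(seed):
    # Seed every RNG the training loop touches so results are repeatable.
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)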
Example #3
def cur_run(m_idx, run_idx, params=params):
    """
    For using a curriculum of dataloaders.
    """
    dlist = [load_dl(os.path.join(cur_prefix, p), double=True)
             for p in train_cur]
    dl_val = load_dl(os.path.join(cur_prefix, val), double=True)
    dset = 0
    path = os.path.join(
        args.directory, 'run%s' % run_idx, 'model' + str(m_idx))
    pathlib.Path(
        os.path.join(path, 'data')).mkdir(parents=True, exist_ok=True)
    pathlib.Path(
        os.path.join(path, 'models')).mkdir(parents=True, exist_ok=True)
    for seed in seeds:
        t0 = time.time()
        np.random.seed(seed)
        torch.manual_seed(seed)
        model = gm.model_list_double[m_idx](*params)
        opt = torch.optim.Adam(model.parameters(), lr=lr)
        # res = model(g1, g2)
        one_run(
            dset,
            seed,
            n_epochs,
            model,
            opt,
            dlist,
            dl_val,
            path,
            cuda=False)
        t = time.time()
        print('total running time for one seed %s seconds' % str(t - t0))
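Unlike the earlier variants, cur_run passes one_run the whole list of training dataloaders (dlist), leaving it to one_run to walk through them as a curriculum. One simple scheme, sketched purely as an illustration (the even epoch split and the assumption that the model returns a scalar loss are ours, not taken from one_run), is to give each stage an equal share of the epoch budget:

def train_curriculum(model, opt, dlist, n_epochs):
    # Illustrative sketch only: spend an equal number of epochs on each
    # curriculum stage, visiting the stages in order.
    epochs_per_stage = max(1, n_epochs // len(dlist))
    for dl in dlist:
        for _ in range(epochs_per_stage):
            for batch in dl:
                opt.zero_grad()
                loss = model(*batch)  # assumes the model returns a scalar loss
                loss.backward()
                opt.step()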
Example #4
            # Excerpt from inside the per-model, per-seed training loop;
            # op is os.path and copytree is presumably shutil.copytree.
            opt = torch.optim.Adam(model.parameters(), lr=hparams['lr'])
            mpath = op.join(path, m_str)
            pathlib.Path(op.join(mpath, 'data')).mkdir(parents=True,
                                                       exist_ok=True)
            if not preload_model:
                pathlib.Path(op.join(mpath, 'models')).mkdir(parents=True,
                                                             exist_ok=True)
            else:
                # Reuse the model checkpoints saved by a previous experiment.
                srcmpath = op.join(srcpath, m_str, 'models')
                copytree(srcmpath, op.join(mpath, 'models'))
            one_run(i,
                    seed,
                    hparams['n_epochs'],
                    model,
                    opt,
                    train_dls,
                    test_dls,
                    mpath,
                    cuda=cuda,
                    n_obj=n_obj,
                    preload=preload_model)
            log(logfile,
                'run completed, results saved in {}\n'.format(mpath))
        log(logfile,
            'training time for one seed %s\n' % (time.time() - t0))
# close log file
log(
    logfile,
    'finished experiment {} at {}'.format(expe_idx,
                                          str(datetime.datetime.now())))
logfile.close()
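The log helper used in this excerpt is not shown, so its implementation is an assumption here. One plausible minimal version simply echoes the message to stdout and appends it to the open log file:

def log(f, message):
    # Hypothetical sketch of the logging helper used above.
    print(message, end='')
    f.write(message)
    f.flush()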