Example #1
def makeTabularTrainer(**config):
    cfg = {'dataset':HEPMASS,'network':SmallNN,'net_config': {},
        'loader_config': {'amnt_labeled':20+5000,'amnt_dev':5000,'lab_BS':20},
        'opt_config': {'lr':1e-4},#{'lr':.1, 'momentum':.9, 'weight_decay':1e-4, 'nesterov':True},
        'num_epochs':200,
        'unlab_loader_config':{'batch_size':2000,'num_workers':4,'pin_memory':True},
        'trainer_config':{'log_dir':os.path.expanduser('~/tb-experiments/UCI/'),'log_args':{'minPeriod':.1, 'timeFrac':3/10}},
        }
    recursively_update(cfg,config)
    
    trainset = cfg['dataset'](train=True)
    testset = cfg['dataset'](train=False)
    print(f"Trainset: {len(trainset)}, Testset: {len(testset)}")
    device = torch.device('cuda')
    model = cfg['network'](num_classes=trainset.num_classes,dim_in=trainset.dim,**cfg['net_config']).to(device)
    dataloaders = {}
    dataloaders['lab'], dataloaders['dev'] = getLabLoader(trainset,**cfg['loader_config'])
    dataloaders['train'] = dataloaders['Train'] = dataloaders['lab']
    
    full_data_loader = DataLoader(trainset,shuffle=True,**cfg['unlab_loader_config'])
    dataloaders['_unlab'] = imap(lambda z: z[0], full_data_loader)
    dataloaders['test'] = DataLoader(testset,batch_size=cfg['loader_config']['lab_BS'],shuffle=False)
    dataloaders = {k:LoaderTo(v,device) for k,v in dataloaders.items()}
    opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
    lr_sched = lambda e: 1.#cosLr(cfg['num_epochs'])
    return cfg['trainer'](model,dataloaders,opt_constr,lr_sched,**cfg['trainer_config'])
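A minimal usage sketch of the override mechanism above, assuming recursively_update performs a nested dict merge and that the returned trainer exposes a .train(num_epochs) method as in example 11. The Classifier trainer class is borrowed from example 5; note the defaults above contain no 'trainer' key, so the caller must supply one.

trainer = makeTabularTrainer(trainer=Classifier,           # required: not in the defaults
                             num_epochs=50,
                             opt_config={'lr': 3e-4},       # overrides only 'lr'
                             loader_config={'lab_BS': 50})  # other loader defaults assumed kept
trainer.train(50)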
Example #2
def makeTrainer(*, task='homo', device='cuda', lr=3e-3, bs=75, num_epochs=500, network=MolecLieResNet,
                net_config={'k':1536,'nbhd':100,'act':'swish','group':lieGroups.T(3),
                'bn':True,'aug':True,'mean':True,'num_layers':6}, recenter=False,
                subsample=False, trainer_config={'log_dir':None,'log_suffix':''}):#,'log_args':{'timeFrac':1/4,'minPeriod':0}}):
    # Create Training set and model
    device = torch.device(device)
    with FixedNumpySeed(0):
        datasets, num_species, charge_scale = QM9datasets()
        if subsample: datasets.update(split_dataset(datasets['train'],{'train':subsample}))
    ds_stats = datasets['train'].stats[task]
    if recenter:
        m = datasets['train'].data['charges']>0
        pos = datasets['train'].data['positions'][m]
        mean,std = pos.mean(dim=0),1#pos.std()
        for ds in datasets.values():
            ds.data['positions'] = (ds.data['positions']-mean[None,None,:])/std
    model = network(num_species,charge_scale,**net_config).to(device)
    # Create train and Val(Test) dataloaders and move elems to gpu
    dataloaders = {key:LoaderTo(DataLoader(dataset,batch_size=bs,num_workers=0,
                    shuffle=(key=='train'),pin_memory=False,collate_fn=collate_fn,drop_last=True),
                    device) for key,dataset in datasets.items()}
    # subsampled training dataloader for faster logging of training performance
    dataloaders['Train'] = islice(dataloaders['train'],len(dataloaders['test']))#islice(dataloaders['train'],len(dataloaders['train'])//10)
    
    # Initialize optimizer and learning rate schedule
    opt_constr = functools.partial(Adam, lr=lr)
    cos = cosLr(num_epochs)
    lr_sched = lambda e: min(e / (.01 * num_epochs), 1) * cos(e)
    return MoleculeTrainer(model,dataloaders,opt_constr,lr_sched,
                            task=task,ds_stats=ds_stats,**trainer_config)
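The schedule built at the end of makeTrainer above combines a linear warm-up over the first 1% of epochs with the cosine decay from cosLr. A standalone sketch of that product, assuming cosLr(num_epochs) is the usual half-cosine decay from 1 to 0:

import math

def warmup_cosine(e, num_epochs):
    # linear ramp from 0 to 1 over the first 1% of training, then flat at 1
    warmup = min(e / (.01 * num_epochs), 1)
    # assumed cosLr shape: half-cosine decay from 1 at epoch 0 to 0 at num_epochs
    cosine = .5 * (1 + math.cos(math.pi * e / num_epochs))
    return warmup * cosine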
Example #3
def makeTrainer(*,
                network=CHNN,
                net_cfg={},
                lr=3e-3,
                n_train=800,
                regen=False,
                dataset=RigidBodyDataset,
                body=ChainPendulum(3),
                C=5,
                dtype=torch.float32,
                device=torch.device("cuda"),
                bs=200,
                num_epochs=100,
                trainer_config={},
                opt_cfg={'weight_decay': 1e-5}):
    # Create Training set and model
    angular = not issubclass(network, (CH, CL))
    splits = {"train": n_train, "test": 200}
    with FixedNumpySeed(0):
        dataset = dataset(n_systems=n_train + 200,
                          regen=regen,
                          chunk_len=C,
                          body=body,
                          angular_coords=angular)
        datasets = split_dataset(dataset, splits)

    dof_ndim = dataset.body.D if angular else dataset.body.d
    model = network(dataset.body.body_graph,
                    dof_ndim=dof_ndim,
                    angular_dims=dataset.body.angular_dims,
                    **net_cfg)
    model = model.to(device=device, dtype=dtype)
    # Create train and Dev(Test) dataloaders and move elems to gpu
    dataloaders = {
        k: LoaderTo(DataLoader(v,
                               batch_size=min(bs, splits[k]),
                               num_workers=0,
                               shuffle=(k == "train")),
                    device=device,
                    dtype=dtype)
        for k, v in datasets.items()
    }
    dataloaders["Train"] = dataloaders["train"]
    # Initialize optimizer and learning rate schedule
    opt_constr = lambda params: AdamW(params, lr=lr, **opt_cfg)
    lr_sched = cosLr(num_epochs)
    return IntegratedDynamicsTrainer(model,
                                     dataloaders,
                                     opt_constr,
                                     lr_sched,
                                     log_args={
                                         "timeFrac": 1 / 4,
                                         "minPeriod": 0.0
                                     },
                                     **trainer_config)
Example #4
def makeTrainer(config):
    cfg = {
        'dataset': CIFAR10,
        'network': iCNN,
        'net_config': {},
        'loader_config': {
            'amnt_dev': 5000,
            'lab_BS': 32,
            'pin_memory': True,
            'num_workers': 3
        },
        'opt_config': {
            'lr': .0003,
        },  # 'momentum':.9, 'weight_decay':1e-4,'nesterov':True},
        'num_epochs': 100,
        'trainer_config': {},
        'parallel': False,
    }
    recursively_update(cfg, config)
    train_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.RandomHorizontalFlip()])
    trainset = cfg['dataset'](
        '~/datasets/{}/'.format(cfg['dataset']),
        flow=True,
    )
    device = torch.device('cuda')
    fullCNN = cfg['network'](num_classes=trainset.num_classes,
                             **cfg['net_config']).to(device)
    if cfg['parallel']: fullCNN = multigpu_parallelize(fullCNN, cfg)
    dataloaders = {}
    dataloaders['train'], dataloaders['dev'] = getLabLoader(
        trainset, **cfg['loader_config'])
    dataloaders['Train'] = islice(dataloaders['train'],
                                  10000 // cfg['loader_config']['lab_BS'])
    if len(dataloaders['dev']) == 0:
        testset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                                 train=False,
                                 flow=True)
        dataloaders['test'] = DataLoader(
            testset,
            batch_size=cfg['loader_config']['lab_BS'],
            shuffle=False)
    dataloaders = {k: LoaderTo(v, device)
                   for k, v in dataloaders.items()}  # LoaderTo(v,device)
    opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
    lr_sched = cosLr(cfg['num_epochs'])
    return Flow(fullCNN, dataloaders, opt_constr, lr_sched,
                **cfg['trainer_config'])
Example #5
def makeTrainer(*,
                dataset=YAHOO,
                network=SmallNN,
                num_epochs=15,
                bs=5000,
                lr=1e-3,
                optim=AdamW,
                device='cuda',
                trainer=Classifier,
                split={
                    'train': 20,
                    'val': 5000
                },
                net_config={},
                opt_config={'weight_decay': 1e-5},
                trainer_config={
                    'log_dir': os.path.expanduser('~/tb-experiments/UCI/'),
                    'log_args': {
                        'minPeriod': .1,
                        'timeFrac': 3 / 10
                    }
                },
                save=False):

    # Prep the datasets splits, model, and dataloaders
    with FixedNumpySeed(0):
        datasets = split_dataset(dataset(), splits=split)
        datasets['_unlab'] = dmap(lambda mb: mb[0], dataset())
        datasets['test'] = dataset(train=False)
        #print(datasets['test'][0])
    device = torch.device(device)
    model = network(num_classes=datasets['train'].num_classes,
                    dim_in=datasets['train'].dim,
                    **net_config).to(device)

    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(datasets[k])),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False), device)
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']
    opt_constr = partial(optim, lr=lr, **opt_config)
    lr_sched = cosLr(num_epochs)  #lambda e:1#
    return trainer(model, dataloaders, opt_constr, lr_sched, **trainer_config)
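The '_unlab' entry above relies on dmap, whose implementation is not shown here. A minimal sketch of the assumed semantics, a lazy map over a map-style dataset that strips the label so the unlabeled loader yields inputs only:

from torch.utils.data import Dataset

class MappedDataset(Dataset):
    """Sketch of a dmap-style wrapper (assumed, not the library's own code)."""
    def __init__(self, fn, dataset):
        self.fn, self.dataset = fn, dataset
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, i):
        return self.fn(self.dataset[i])  # e.g. fn = lambda mb: mb[0] drops labels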
Example #6
def makeTrainer(*,
                dataset=MnistRotDataset,
                network=ImgLieResnet,
                num_epochs=100,
                bs=50,
                lr=3e-3,
                aug=True,
                optim=Adam,
                device='cuda',
                trainer=Classifier,
                split={'train': 12000},
                small_test=False,
                net_config={},
                opt_config={},
                trainer_config={'log_dir': None}):

    # Prep the datasets splits, model, and dataloaders
    datasets = split_dataset(dataset(f'~/datasets/{dataset}/'), splits=split)
    datasets['test'] = dataset(f'~/datasets/{dataset}/', train=False)
    device = torch.device(device)
    model = network(num_targets=datasets['train'].num_targets,
                    **net_config).to(device)
    if aug:
        model = torch.nn.Sequential(datasets['train'].default_aug_layers(),
                                    model)
    model, bs = try_multigpu_parallelize(model, bs)

    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=bs,
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False), device)
        for k, v in datasets.items()
    }
    dataloaders['Train'] = islice(dataloaders['train'],
                                  1 + len(dataloaders['train']) // 10)
    if small_test:
        dataloaders['test'] = islice(dataloaders['test'],
                                     1 + len(dataloaders['train']) // 10)
    # Add some extra defaults if SGD is chosen
    opt_constr = partial(optim, lr=lr, **opt_config)
    lr_sched = cosLr(num_epochs)
    return trainer(model, dataloaders, opt_constr, lr_sched, **trainer_config)
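try_multigpu_parallelize above returns both a possibly-wrapped model and an adjusted batch size; its source is not shown. A heavily hedged sketch of what such a helper plausibly does, wrapping the model in DataParallel and scaling the batch size when more than one GPU is visible:

import torch

def try_multigpu_parallelize_sketch(model, bs):
    # a guess at the helper's intent, not its actual implementation
    n_gpus = torch.cuda.device_count()
    if n_gpus > 1:
        model = torch.nn.DataParallel(model)
        bs = bs * n_gpus  # keep the per-GPU batch size roughly constant
    return model, bs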
Example #7
def makeTrainer(*,network,net_cfg,lr=1e-2,n_train=5000,regen=False,
                dtype=torch.float32,device=torch.device('cuda'),bs=200,num_epochs=2,
                trainer_config={'log_dir':'data_scaling_study_final'}):
    # Create Training set and model
    splits = {'train':n_train,'val':min(n_train,2000),'test':2000}
    dataset = SpringDynamics(n_systems=100000, regen=regen)
    with FixedNumpySeed(0):
        datasets = split_dataset(dataset,splits)
    model = network(**net_cfg).to(device=device,dtype=dtype)
    # Create train and Dev(Test) dataloaders and move elems to gpu
    dataloaders = {k:LoaderTo(DataLoader(v,batch_size=min(bs,n_train),num_workers=0,shuffle=(k=='train')),
                                device=device,dtype=dtype) for k,v in datasets.items()}
    dataloaders['Train'] = islice(dataloaders['train'],len(dataloaders['val']))
    # Initialize optimizer and learning rate schedule
    opt_constr = lambda params: Adam(params, lr=lr)
    lr_sched = cosLr(num_epochs)
    return IntegratedDynamicsTrainer2(model,dataloaders,opt_constr,lr_sched,
                                    log_args={'timeFrac':1/4,'minPeriod':0.0},**trainer_config)
Example #8
def makeTrainer(config):
    cfg = {
        'dataset': CIFAR10,
        'network': layer13s,
        'net_config': {},
        'loader_config': {
            'amnt_dev': 5000,
            'lab_BS': 20,
            'pin_memory': True,
            'num_workers': 2
        },
        'opt_config': {
            'lr': .3e-4
        },  # , 'momentum':.9, 'weight_decay':1e-4,'nesterov':True},
        'num_epochs': 100,
        'trainer_config': {},
    }
    recursively_update(cfg, config)
    trainset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                              flow=True)
    device = torch.device('cuda')
    fullCNN = torch.nn.Sequential(
        trainset.default_aug_layers(),
        cfg['network'](num_classes=trainset.num_classes,
                       **cfg['net_config']).to(device))
    dataloaders = {}
    dataloaders['train'], dataloaders['dev'] = getLabLoader(
        trainset, **cfg['loader_config'])
    dataloaders['Train'] = islice(dataloaders['train'],
                                  10000 // cfg['loader_config']['lab_BS'])
    if len(dataloaders['dev']) == 0:
        testset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                                 train=False)
        dataloaders['test'] = DataLoader(
            testset,
            batch_size=cfg['loader_config']['lab_BS'],
            shuffle=False)
    dataloaders = {k: LoaderTo(v, device) for k, v in dataloaders.items()}
    opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])  # torch.optim.SGD(params, **cfg['opt_config'])
    lr_sched = cosLr(cfg['num_epochs'])
    return iClassifier(fullCNN, dataloaders, opt_constr, lr_sched,
                       **cfg['trainer_config'])
Example #9
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    device = torch.device('cuda')
    datasets, num_species, charge_scale = QM9datasets()
    dataloaders = {
        key: LoaderTo(
            DataLoader(dataset,
                       batch_size=5,
                       num_workers=0,
                       shuffle=(key == 'train'),
                       pin_memory=True,
                       collate_fn=collate_fn), device)
        for key, dataset in datasets.items()
    }
    for mb in dataloaders['train']:
        self.mb = mb
        break
    # meanstd = datasets['train'].stats['homo']
    self.model = MolecLieResNet(num_species,
                                charge_scale,
                                nbhd=10,
                                mean=True,
                                radius=1.5,
                                liftsamples=6).to(device)
Example #10
def make_trainer(
        chunk_len: int,
        angular: Union[Tuple, bool],
        body,
        bs: int,
        dataset,
        dt: float,
        lr: float,
        n_train: int,
        n_val: int,
        n_test: int,
        net_cfg: dict,
        network,
        num_epochs: int,
        regen: bool,
        seed: int = 0,
        device=torch.device("cuda"),
        dtype=torch.float32,
        trainer_config={},
):
    # Create Training set and model
    splits = {"train": n_train, "val": n_val, "test": n_test}
    dataset = dataset(
        n_systems=n_train + n_val + n_test,
        regen=regen,
        chunk_len=chunk_len,
        body=body,
        dt=dt,
        integration_time=10,
        angular_coords=angular,
    )
    # dataset=CartpoleDataset(batch_size=500,regen=regen)
    with FixedNumpySeed(seed):
        datasets = split_dataset(dataset, splits)
    model = network(G=dataset.body.body_graph, **net_cfg).to(device=device,
                                                             dtype=dtype)

    # Create train and Dev(Test) dataloaders and move elems to gpu
    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, splits[k]),
                       num_workers=0,
                       shuffle=(k == "train")),
            device=device,
            dtype=dtype,
        )
        for k, v in datasets.items()
    }
    dataloaders["Train"] = dataloaders["train"]
    # Initialize optimizer and learning rate schedule
    opt_constr = lambda params: Adam(params, lr=lr)
    lr_sched = cosLr(num_epochs)
    return IntegratedDynamicsTrainer(model,
                                     dataloaders,
                                     opt_constr,
                                     lr_sched,
                                     log_args={
                                         "timeFrac": 1 / 4,
                                         "minPeriod": 0.0
                                     },
                                     **trainer_config)
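A hedged usage sketch of make_trainer: the body, dataset, and network names are borrowed from example 3 (ChainPendulum / RigidBodyDataset / CHNN), .train() follows example 11, and the dt, chunk_len, and angular values are illustrative only.

trainer = make_trainer(chunk_len=5, angular=False, body=ChainPendulum(3),
                       bs=200, dataset=RigidBodyDataset, dt=0.1, lr=3e-3,
                       n_train=800, n_val=100, n_test=100, net_cfg={},
                       network=CHNN, num_epochs=100, regen=False)
trainer.train(100)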
Example #11
        avg_logits = logits2average_depth(filtered_logits,labels[None,:,None,None])
        return avg_logits

if __name__=='__main__':
    eps_start=1e-3
    r = 5
    ds=16
    niters=1
    device = torch.device('cuda')
    model = CRFdepthUpsampler(r=r,eps=eps_start,niters=niters).to(device)
    trainset= StereoUpsampling05(downsize=ds,val=False,use_vgg=False)
    valset = StereoUpsampling05(downsize=ds,val=True,use_vgg=False)
    train_loader = DataLoader(trainset,batch_size=1,shuffle=True)
    val_loader = DataLoader(valset,batch_size=1,shuffle=True)
    dataloaders = {'train':train_loader,'train_':train_loader,'val':val_loader}
    dataloaders = {k:LoaderTo(v,device) for k,v in dataloaders.items()}
    opt_constr = lambda params: torch.optim.Adam(params, lr=3e-3,betas=(.9,.9))
    trialname = 'upsampling/r_{}_{}_niters{}'.format(r,ds,niters)
    trainer = Dupsampling(model,dataloaders,opt_constr,log_suffix=trialname,log_args={'minPeriod':.1})
    trainer.train(100)

# if __name__=='__main__':
#     device = torch.device('cuda')
#     eps_start=1e-2
#     r = 50
#     model = CRFdepthRefiner(r=r,eps=eps_start).to(device)
#     trainset=MBStereo14Unary(downsize=8)
#     train_loader = DataLoader(trainset,batch_size=1,shuffle=True)
#     dataloaders = {'train':train_loader,'train_':train_loader}
#     dataloaders = {k:LoaderTo(v,device) for k,v in dataloaders.items()}
#     opt_constr = lambda params: torch.optim.Adam(params, lr=3e-3,betas=(.9,.9))
Example #12
def make_trainer(
        train_data,
        test_data,
        bs=5000,
        split={
            'train': 200,
            'val': 5000
        },
        network=RealNVPTabularWPrior,
        net_config={},
        num_epochs=15,
        optim=AdamW,
        lr=1e-3,
        opt_config={'weight_decay': 1e-5},
        swag=False,
        swa_config={
            'swa_dec_pct': .5,
            'swa_start_pct': .75,
            'swa_freq_pct': .05,
            'swa_lr_factor': .1
        },
        swag_config={
            'subspace': 'covariance',
            'max_num_models': 20
        },
        #                 subspace='covariance', max_num_models=20,
        trainer=SemiFlow,
        trainer_config={
            'log_dir': os.path.expanduser('~/tb-experiments/UCI/'),
            'log_args': {
                'minPeriod': .1,
                'timeFrac': 3 / 10
            }
        },
        dev='cuda',
        save=False):
    with FixedNumpySeed(0):
        datasets = split_dataset(train_data, splits=split)
        datasets['_unlab'] = dmap(lambda mb: mb[0], train_data)
        datasets['test'] = test_data

    device = torch.device(dev)

    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(datasets[k])),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False), device)
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']

    #     model = network(num_classes=train_data.num_classes, dim_in=train_data.dim, **net_config).to(device)
    #     swag_model = SWAG(model_cfg.base,
    #                     subspace_type=args.subspace, subspace_kwargs={'max_rank': args.max_num_models},
    #                     *model_cfg.args, num_classes=num_classes, **model_cfg.kwargs)

    #     swag_model.to(args.device)
    opt_constr = partial(optim, lr=lr, **opt_config)
    model = network(num_classes=train_data.num_classes,
                    dim_in=train_data.dim,
                    **net_config).to(device)
    if swag:
        swag_model = RealNVPTabularSWAG(dim_in=train_data.dim,
                                        **net_config,
                                        **swag_config)
        #         swag_model = SWAG(RealNVPTabular,
        #                           subspace_type=subspace, subspace_kwargs={'max_rank': max_num_models},
        #                           num_classes=train_data.num_classes, dim_in=train_data.dim,
        #                           num_coupling_layers=coupling_layers,in_dim=dim_in,**net_config)
        #         swag_model.to(device)
        #         swag_model = SWAG(RealNVPTabular, num_classes=train_data.num_classes, dim_in=train_data.dim,
        #                         swag=True, **swag_config, **net_config)
        #         model.to(device)
        swag_model.to(device)
        swa_config['steps_per_epoch'] = len(dataloaders['_unlab'])
        swa_config['num_epochs'] = num_epochs
        lr_sched = swa_learning_rate(**swa_config)
        #         lr_sched = cosLr(num_epochs)
        return trainer(model,
                       dataloaders,
                       opt_constr,
                       lr_sched,
                       swag_model=swag_model,
                       **swa_config,
                       **trainer_config)
    else:
        #         model = network(num_classes=train_data.num_classes, dim_in=train_data.dim, **net_config).to(device)
        lr_sched = cosLr(num_epochs)
        #     lr_sched = lambda e:1
        return trainer(model, dataloaders, opt_constr, lr_sched,
                       **trainer_config)