Example #1
def simpleFlowTrial(strict=False):
    def makeTrainer(config):
        cfg = {
            'dataset': CIFAR10,
            'network': iCNN,
            'net_config': {},
            'loader_config': {
                'amnt_dev': 5000,
                'lab_BS': 32,
                'pin_memory': True,
                'num_workers': 3
            },
            'opt_config': {
                'lr': 3e-4,
            },  # SGD alternatives: 'momentum': .9, 'weight_decay': 1e-4, 'nesterov': True
            'num_epochs': 100,
            'trainer_config': {},
            'parallel': False,
        }
        recursively_update(cfg, config)
        train_transforms = transforms.Compose(
            [transforms.ToTensor(),
             transforms.RandomHorizontalFlip()])  # note: defined but not applied in this example
        # the dataset class formats to its name here, giving e.g. ~/datasets/CIFAR10/
        trainset = cfg['dataset'](
            '~/datasets/{}/'.format(cfg['dataset']),
            flow=True,
        )
        device = torch.device('cuda')
        fullCNN = cfg['network'](num_classes=trainset.num_classes,
                                 **cfg['net_config']).to(device)
        if cfg['parallel']:
            fullCNN = multigpu_parallelize(fullCNN, cfg)
        dataloaders = {}
        dataloaders['train'], dataloaders['dev'] = getLabLoader(
            trainset, **cfg['loader_config'])
        # subsampled training loader for faster logging of training performance
        dataloaders['Train'] = islice(dataloaders['train'],
                                      10000 // cfg['loader_config']['lab_BS'])
        if len(dataloaders['dev']) == 0:
            testset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                                     train=False,
                                     flow=True)
            dataloaders['test'] = DataLoader(
                testset,
                batch_size=cfg['loader_config']['lab_BS'],
                shuffle=False)
        dataloaders = {k: LoaderTo(v, device)
                       for k, v in dataloaders.items()}
        opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
        lr_sched = cosLr(cfg['num_epochs'])
        return Flow(fullCNN, dataloaders, opt_constr, lr_sched,
                    **cfg['trainer_config'])

    return train_trial(makeTrainer, strict)
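How the returned trial is invoked is not shown here, but Examples #3 and #5 below call the object produced by train_trial with a config dict whose entries override the defaults via recursively_update. A hypothetical invocation following that pattern (the override keys are illustrative):

Trial = simpleFlowTrial()
Trial({'num_epochs': 10, 'opt_config': {'lr': 1e-3}})  # merged into cfg by recursively_update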
Example #2
def simpleiClassifierTrial(strict=False):
    def makeTrainer(config):
        cfg = {
            'dataset': CIFAR10,
            'network': layer13s,
            'net_config': {},
            'loader_config': {
                'amnt_dev': 5000,
                'lab_BS': 20,
                'pin_memory': True,
                'num_workers': 2
            },
            'opt_config': {
                'lr': 3e-5
            },  # SGD alternatives: 'momentum': .9, 'weight_decay': 1e-4, 'nesterov': True
            'num_epochs': 100,
            'trainer_config': {},
        }
        recursively_update(cfg, config)
        trainset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                                  flow=True)
        device = torch.device('cuda')
        fullCNN = torch.nn.Sequential(
            trainset.default_aug_layers(),
            cfg['network'](num_classes=trainset.num_classes,
                           **cfg['net_config'])).to(device)  # move aug layers and network together
        dataloaders = {}
        dataloaders['train'], dataloaders['dev'] = getLabLoader(
            trainset, **cfg['loader_config'])
        # subsampled training loader for faster logging of training performance
        dataloaders['Train'] = islice(dataloaders['train'],
                                      10000 // cfg['loader_config']['lab_BS'])
        if len(dataloaders['dev']) == 0:
            testset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                                     train=False, flow=True)  # match the trainset construction
            dataloaders['test'] = DataLoader(
                testset,
                batch_size=cfg['loader_config']['lab_BS'],
                shuffle=False)
        dataloaders = {k: LoaderTo(v, device) for k, v in dataloaders.items()}
        opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
        # alternative: torch.optim.SGD(params, **cfg['opt_config'])
        lr_sched = cosLr(cfg['num_epochs'])
        return iClassifier(fullCNN, dataloaders, opt_constr, lr_sched,
                           **cfg['trainer_config'])

    return train_trial(makeTrainer, strict)
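Both examples above iterate dataloaders['Train'] afresh every epoch, so the islice used here is presumably a re-iterable variant from the supporting library; a plain itertools.islice would be exhausted after the first pass. A minimal sketch under that assumption (the class name is hypothetical):

import itertools

class ReiterableIslice:
    """Yields at most n batches from loader on each fresh iteration (assumed behavior)."""
    def __init__(self, loader, n):
        self.loader, self.n = loader, n
    def __iter__(self):
        return itertools.islice(iter(self.loader), self.n)
    def __len__(self):
        return min(self.n, len(self.loader))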
Example #3
    }  # closes a dict built in the truncated portion of this snippet
    # subsampled training dataloader for faster logging of training performance
    dataloaders['Train'] = islice(dataloaders['train'],
                                  len(dataloaders['train']) // 10)

    # Initialize optimizer and learning rate schedule:
    # linear warmup over the first 1% of epochs, multiplied into a cosine decay
    opt_constr = functools.partial(Adam, lr=lr)
    cos = cosLr(num_epochs)
    lr_sched = lambda e: min(e / (.01 * num_epochs), 1) * cos(e)
    return MoleculeTrainer(model,
                           dataloaders,
                           opt_constr,
                           lr_sched,
                           task=task,
                           ds_stats=ds_stats,
                           **trainer_config)


Trial = train_trial(makeTrainer)
if __name__ == '__main__':
    defaults = copy.deepcopy(makeTrainer.__kwdefaults__)
    defaults['trainer_config']['early_stop_metric'] = 'Train_MAE'
    print(
        Trial(
            argupdated_config(defaults,
                              namespace=(moleculeTrainer, lieGroups))))

    # thestudy = Study(simpleTrial, argupdated_config(config_spec, namespace=__init__),
    #                  study_name="point2d", base_log_dir=log_dir)
    # thestudy.run(ordered=False)
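cosLr itself is not shown in these snippets; its call sites (cosLr(num_epochs) returning a function of the epoch) are consistent with a standard cosine-annealing multiplier. A self-contained sketch of the warmup-times-cosine schedule used above, with that assumed form of cosLr:

import math

def cosLr(num_epochs):
    # assumed form: LR multiplier decaying from 1 to 0 over num_epochs
    return lambda e: 0.5 * (1 + math.cos(math.pi * e / num_epochs))

num_epochs = 100
cos = cosLr(num_epochs)
lr_sched = lambda e: min(e / (.01 * num_epochs), 1) * cos(e)

# ramps linearly over the first epoch (1% of 100 epochs), then follows the cosine decay
print([round(lr_sched(e), 3) for e in (0, 0.5, 1, 50, 100)])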
Example #4
    device = torch.device('cuda')
    model = cfg['network'](num_classes=trainset.num_classes, dim_in=trainset.dim,
                           **cfg['net_config']).to(device)
    dataloaders = {}
    dataloaders['lab'], dataloaders['dev'] = getLabLoader(trainset, **cfg['loader_config'])
    dataloaders['train'] = dataloaders['Train'] = dataloaders['lab']

    full_data_loader = DataLoader(trainset, shuffle=True, **cfg['unlab_loader_config'])
    dataloaders['_unlab'] = imap(lambda z: z[0], full_data_loader)  # keep only inputs from the unlabeled stream
    dataloaders['test'] = DataLoader(testset, batch_size=cfg['loader_config']['lab_BS'],
                                     shuffle=False)
    dataloaders = {k: LoaderTo(v, device) for k, v in dataloaders.items()}
    opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
    lr_sched = lambda e: 1.  # constant schedule; alternative: cosLr(cfg['num_epochs'])
    return cfg['trainer'](model, dataloaders, opt_constr, lr_sched, **cfg['trainer_config'])


PI_trial = train_trial(makeTabularTrainer, strict=True)

uci_pi_spec = {
    'network': SmallNN,
    'net_config': {'k': [256, 512]},  # the original dict listed 'net_config' twice; entries merged here
    'dataset': [MINIBOONE, HEPMASS],
    'opt_config': {'lr': [1e-3, 3e-3, 1e-4]},
    'loader_config': {'amnt_labeled': 20 + 5000, 'amnt_dev': 5000, 'lab_BS': 20},
    # 'loader_config': {'lab_BS': 200},
    # 'unlab_loader_config': {'batch_size': 2000},
    'num_epochs': 50,  # earlier runs: 100, 5, 800
    'trainer': PiModel,
    'trainer_config': {
        'log_dir': os.path.expanduser('~/tb-experiments/UCI/t3layer_pi_uci3/'),
        'cons_weight': [20, 30, 50],  # also tried: [1, .1, .3, 3]; 'advEps': [10, 3, 1, .3]
    },
}
uci_pi_spec2 = {
    'network': SmallNN,
    'net_config': {},
    'dataset': [MINIBOONE],
    # 'loader_config': {'lab_BS': 200},
    'opt_config': {'lr': lambda cfg: 3e-3 if cfg['dataset'] == HEPMASS else 3e-5},
    'loader_config': {'amnt_labeled': 20 + 5000, 'amnt_dev': 5000, 'lab_BS': 20},
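The list-valued entries in these specs ('dataset', 'lr', 'k', 'cons_weight') read as hyperparameter grids, matching the commented-out Study usage in Example #3. A hypothetical expansion of top-level list entries into concrete configs (the library's actual sampler is not shown and may differ, e.g. by also descending into nested dicts):

import itertools

def expand_spec(spec):
    """Cross product over top-level list-valued entries (illustrative only)."""
    grid_keys = [k for k, v in spec.items() if isinstance(v, list)]
    for combo in itertools.product(*(spec[k] for k in grid_keys)):
        cfg = dict(spec)
        cfg.update(zip(grid_keys, combo))
        yield cfg

# uci_pi_spec has one top-level list ('dataset' with 2 entries), so this yields 2 configs;
# nested grids like opt_config['lr'] would need a recursive variant.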
Example #5
    model = network(num_classes=datasets['train'].num_classes,
                    dim_in=datasets['train'].dim,
                    **net_config).to(device)

    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(datasets[k])),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False), device)
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']
    opt_constr = partial(optim, lr=lr, **opt_config)
    lr_sched = cosLr(num_epochs)  # alternative: lambda e: 1
    return trainer(model, dataloaders, opt_constr, lr_sched, **trainer_config)


tabularTrial = train_trial(makeTrainer)

if __name__ == '__main__':
    defaults = copy.deepcopy(makeTrainer.__kwdefaults__)
    cfg = argupdated_config(defaults,
                            namespace=(tabular_datasets, flows, archs,
                                       trainers))
    cfg.pop('local_rank')  # command-line-only key, not a makeTrainer argument
    trainer = makeTrainer(**cfg)
    # alternative entry point: tabularTrial()
    trainer.train(cfg['num_epochs'])
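Every example wraps its loaders in LoaderTo(v, device) before handing them to a trainer. Its implementation is not part of these snippets; a minimal stand-in consistent with how it is used (iterable, sized, moving each batch to the device) might look like:

class LoaderTo:
    """Wraps a dataloader so every yielded batch lands on `device` (assumed behavior)."""
    def __init__(self, loader, device):
        self.loader, self.device = loader, device
    def __iter__(self):
        for batch in self.loader:
            if isinstance(batch, (tuple, list)):
                yield type(batch)(b.to(self.device) for b in batch)
            else:
                yield batch.to(self.device)
    def __len__(self):
        return len(self.loader)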