Example #1
import os
import torch
from torch.utils.data import DataLoader
# HEPMASS, SmallNN, recursively_update, getLabLoader, LoaderTo, imap, and cosLr
# are assumed to come from the surrounding project rather than a standard library.

def makeTabularTrainer(**config):
    cfg = {'dataset': HEPMASS, 'network': SmallNN, 'net_config': {},
           'loader_config': {'amnt_labeled': 20+5000, 'amnt_dev': 5000, 'lab_BS': 20},
           'opt_config': {'lr': 1e-4},  # alternative: {'lr':.1, 'momentum':.9, 'weight_decay':1e-4, 'nesterov':True}
           'num_epochs': 200,
           'unlab_loader_config': {'batch_size': 2000, 'num_workers': 4, 'pin_memory': True},
           'trainer_config': {'log_dir': os.path.expanduser('~/tb-experiments/UCI/'),
                              'log_args': {'minPeriod': .1, 'timeFrac': 3/10}},
           }
    recursively_update(cfg,config)
    
    trainset = cfg['dataset'](train=True)
    testset = cfg['dataset'](train=False)
    print(f"Trainset: {len(trainset)}, Testset: {len(testset)}")
    device = torch.device('cuda')
    model = cfg['network'](num_classes=trainset.num_classes,
                           dim_in=trainset.dim, **cfg['net_config']).to(device)
    dataloaders = {}
    dataloaders['lab'], dataloaders['dev'] = getLabLoader(trainset, **cfg['loader_config'])
    dataloaders['train'] = dataloaders['Train'] = dataloaders['lab']  # aliases for the labeled loader

    full_data_loader = DataLoader(trainset, shuffle=True, **cfg['unlab_loader_config'])
    dataloaders['_unlab'] = imap(lambda z: z[0], full_data_loader)  # drop labels for the unlabeled stream
    dataloaders['test'] = DataLoader(testset, batch_size=cfg['loader_config']['lab_BS'], shuffle=False)
    dataloaders = {k: LoaderTo(v, device) for k, v in dataloaders.items()}  # move every batch to the device
    opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
    lr_sched = lambda e: 1.  # constant schedule; alternative: cosLr(cfg['num_epochs'])
    # note: there is no default 'trainer' entry above, so the trainer class
    # must be supplied through the config override.
    return cfg['trainer'](model, dataloaders, opt_constr, lr_sched, **cfg['trainer_config'])
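A minimal usage sketch for this factory. The trainer class and the .train call are assumptions about the surrounding project's trainer API (Classifier is borrowed from Example #2 below); nested keys are merged into the defaults by recursively_update:

trainer = makeTabularTrainer(trainer=Classifier,            # no 'trainer' default above, so one must be passed
                             loader_config={'lab_BS': 50})  # nested override merged into the defaults
trainer.train(200)  # assumed trainer method: run for the given number of epochs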
Example #2
def makeTrainer():
    # net_config, loader_config, opt_config, sched_config, trainer_config,
    # and all_hypers are module-level config dicts defined elsewhere in the file.
    device = torch.device('cuda')
    CNN = smallCNN(**net_config).to(device)
    fullCNN = nn.Sequential(C10augLayers(), CNN)  # CIFAR-10 augmentation layers in front of the CNN
    trainset, testset = CIFAR10(False, '~/datasets/cifar10/')

    dataloaders = {}
    dataloaders['train'], dataloaders['dev'] = getLabLoader(trainset, **loader_config)
    dataloaders = {k: loader_to(device)(v) for k, v in dataloaders.items()}  # move batches onto the device

    opt_constr = lambda params: optim.SGD(params, **opt_config)
    lr_sched = cosLr(**sched_config)
    return Classifier(fullCNN,dataloaders,opt_constr,lr_sched,**trainer_config,tracked_hypers=all_hypers)
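Since this variant reads every setting from the module-level dicts, calling it takes no arguments. A hedged sketch (the .train method and epoch count are assumptions about the trainer API):

trainer = makeTrainer()
trainer.train(100)  # epoch count is illustrative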
Example #3
def makeTrainer(config):
    cfg = {
        'dataset': CIFAR10,
        'network': iCNN,
        'net_config': {},
        'loader_config': {
            'amnt_dev': 5000,
            'lab_BS': 32,
            'pin_memory': True,
            'num_workers': 3
        },
        'opt_config': {
            'lr': .0003,
        },  # alternative: {'lr':.1, 'momentum':.9, 'weight_decay':1e-4, 'nesterov':True}
        'num_epochs': 100,
        'trainer_config': {},
        'parallel': False,
    }
    recursively_update(cfg, config)
    # note: train_transforms is built here but never applied to the dataset.
    train_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.RandomHorizontalFlip()])
    # str(cfg['dataset']) is assumed to yield the dataset name (e.g. 'CIFAR10'),
    # so the class itself can be formatted into the path.
    trainset = cfg['dataset'](
        '~/datasets/{}/'.format(cfg['dataset']),
        flow=True,
    )
    device = torch.device('cuda')
    fullCNN = cfg['network'](num_classes=trainset.num_classes,
                             **cfg['net_config']).to(device)
    if cfg['parallel']: fullCNN = multigpu_parallelize(fullCNN, cfg)
    dataloaders = {}
    dataloaders['train'], dataloaders['dev'] = getLabLoader(
        trainset, **cfg['loader_config'])
    # 'Train' is a fixed ~10k-sample slice of the training stream, used for evaluation
    dataloaders['Train'] = islice(dataloaders['train'],
                                  10000 // cfg['loader_config']['lab_BS'])
    if len(dataloaders['dev']) == 0:
        testset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                                 train=False,
                                 flow=True)
        dataloaders['test'] = DataLoader(
            testset,
            batch_size=cfg['loader_config']['lab_BS'],
            shuffle=False)
    dataloaders = {k: LoaderTo(v, device) for k, v in dataloaders.items()}
    opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
    lr_sched = cosLr(cfg['num_epochs'])
    return Flow(fullCNN, dataloaders, opt_constr, lr_sched,
                **cfg['trainer_config'])
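All of these factories lean on recursively_update to merge user overrides into nested defaults. A minimal sketch of the merge it is assumed to perform (the real helper lives in the surrounding project):

def recursively_update(base, override):
    # merge override into base in place, descending into nested dicts
    for k, v in override.items():
        if isinstance(v, dict) and isinstance(base.get(k), dict):
            recursively_update(base[k], v)
        else:
            base[k] = v

cfg = {'opt_config': {'lr': 3e-4}, 'num_epochs': 100}
recursively_update(cfg, {'opt_config': {'lr': 1e-3}})
assert cfg == {'opt_config': {'lr': 1e-3}, 'num_epochs': 100}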
Example #4
def makeTrainer(config):
    cfg = {
        'dataset': CIFAR10,
        'network': layer13s,
        'net_config': {},
        'loader_config': {
            'amnt_dev': 5000,
            'lab_BS': 20,
            'pin_memory': True,
            'num_workers': 2
        },
        'opt_config': {
            'lr': 3e-5
        },  # alternative: {'lr':.1, 'momentum':.9, 'weight_decay':1e-4, 'nesterov':True}
        'num_epochs': 100,
        'trainer_config': {},
    }
    recursively_update(cfg, config)
    trainset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                              flow=True)
    device = torch.device('cuda')
    # prepend the dataset's default augmentation layers to the network
    fullCNN = torch.nn.Sequential(
        trainset.default_aug_layers(),
        cfg['network'](num_classes=trainset.num_classes,
                       **cfg['net_config']).to(device))
    dataloaders = {}
    dataloaders['train'], dataloaders['dev'] = getLabLoader(
        trainset, **cfg['loader_config'])
    # 'Train' is a fixed ~10k-sample slice of the training stream, used for evaluation
    dataloaders['Train'] = islice(dataloaders['train'],
                                  10000 // cfg['loader_config']['lab_BS'])
    if len(dataloaders['dev']) == 0:
        testset = cfg['dataset']('~/datasets/{}/'.format(cfg['dataset']),
                                 train=False)
        dataloaders['test'] = DataLoader(
            testset,
            batch_size=cfg['loader_config']['lab_BS'],
            shuffle=False)
    dataloaders = {k: LoaderTo(v, device) for k, v in dataloaders.items()}
    opt_constr = lambda params: torch.optim.Adam(params, **cfg['opt_config'])
    # alternative: torch.optim.SGD(params, **cfg['opt_config'])
    lr_sched = cosLr(cfg['num_epochs'])
    return iClassifier(fullCNN, dataloaders, opt_constr, lr_sched,
                       **cfg['trainer_config'])
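For context on the schedule used in Examples #3 and #4: cosLr(num_epochs) returns a per-epoch learning-rate multiplier. A self-contained sketch of how such a multiplier could be wired into plain PyTorch with LambdaLR (the half-cosine decay form is an assumption about cosLr):

import math
import torch

def cos_lr(num_epochs):
    # assumed cosLr behaviour: multiplier decaying from 1 to 0 along a half cosine
    return lambda epoch: 0.5 * (1 + math.cos(math.pi * min(epoch / num_epochs, 1)))

model = torch.nn.Linear(10, 2)
opt = torch.optim.Adam(model.parameters(), lr=3e-5)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=cos_lr(100))
for epoch in range(100):
    # ... one training epoch ...
    sched.step()  # multiplies the base lr by cos_lr(100)(epoch + 1)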