Example no. 1
def makeTrainer(*, task='homo', device='cuda', lr=3e-3, bs=75, num_epochs=500, network=MolecLieResNet,
                net_config={'k': 1536, 'nbhd': 100, 'act': 'swish', 'group': lieGroups.T(3),
                            'bn': True, 'aug': True, 'mean': True, 'num_layers': 6},
                recenter=False, subsample=False,
                trainer_config={'log_dir': None, 'log_suffix': ''}):  # ,'log_args':{'timeFrac':1/4,'minPeriod':0}}
    # Create Training set and model
    device = torch.device(device)
    with FixedNumpySeed(0):
        datasets, num_species, charge_scale = QM9datasets()
        if subsample: datasets.update(split_dataset(datasets['train'],{'train':subsample}))
    ds_stats = datasets['train'].stats[task]
    if recenter:
        m = datasets['train'].data['charges']>0
        pos = datasets['train'].data['positions'][m]
        mean,std = pos.mean(dim=0),1#pos.std()
        for ds in datasets.values():
            ds.data['positions'] = (ds.data['positions']-mean[None,None,:])/std
    model = network(num_species,charge_scale,**net_config).to(device)
    # Create train and Val(Test) dataloaders and move elems to gpu
    dataloaders = {key:LoaderTo(DataLoader(dataset,batch_size=bs,num_workers=0,
                    shuffle=(key=='train'),pin_memory=False,collate_fn=collate_fn,drop_last=True),
                    device) for key,dataset in datasets.items()}
    # subsampled training dataloader for faster logging of training performance
    dataloaders['Train'] = islice(dataloaders['train'],len(dataloaders['test']))#islice(dataloaders['train'],len(dataloaders['train'])//10)
    
    # Initialize optimizer and learning rate schedule
    opt_constr = functools.partial(Adam, lr=lr)
    cos = cosLr(num_epochs)
    lr_sched = lambda e: min(e / (.01 * num_epochs), 1) * cos(e)
    return MoleculeTrainer(model,dataloaders,opt_constr,lr_sched,
                            task=task,ds_stats=ds_stats,**trainer_config)
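# Hedged sketch (not the repo's actual oil.utils cosLr implementation) of the
# learning-rate multiplier built above: a linear warmup over the first 1% of
# epochs, multiplied by a half-cosine that decays from 1 to 0 over num_epochs.
import math

def cos_lr_sketch(num_epochs):
    return lambda e: 0.5 * (1 + math.cos(math.pi * e / num_epochs))

def warmup_cos_lr_sketch(num_epochs):
    cos = cos_lr_sketch(num_epochs)
    return lambda e: min(e / (0.01 * num_epochs), 1) * cos(e)

# e.g. with num_epochs=500: multiplier is 0 at epoch 0, ~1 by epoch 5, 0.5 at epoch 250
assert abs(warmup_cos_lr_sketch(500)(250) - 0.5) < 1e-9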
Example no. 2
def load(config):

    dataset = SpringDynamics(
        n_systems=config.n_systems,
        root_dir=config.data_path,
        space_dim=config.space_dim,
        num_particles=config.num_particles,
        chunk_len=config.chunk_len,
        load_preprocessed=config.load_preprocessed,
    )

    splits = {
        "train": config.n_train,
        "val": min(config.n_train, config.n_val),
        "test": config.n_test,
    }

    with FixedNumpySeed(config.data_seed):
        datasets = split_dataset(dataset, splits)

    dataloaders = {
        k: DataLoader(
            v,
            batch_size=min(config.batch_size, config.n_train),
            num_workers=0,
            shuffle=(k == "train"),
        )
        for k, v in datasets.items()
    }

    dataloaders["Train"] = islice(dataloaders["train"],
                                  len(dataloaders["val"]))

    return dataloaders, "spring_dynamics"
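# Hedged note on the extra "Train" loader above: itertools.islice just truncates
# the (shuffled) training loader to the same number of batches as the validation
# loader, so per-epoch training metrics are logged on a cheap subsample. Note
# that islice yields a single-use iterator, not a re-iterable loader.
from itertools import islice

train_batches = list(range(100))            # stand-in for dataloaders["train"]
logging_loader = islice(train_batches, 10)  # stand-in for len(dataloaders["val"]) == 10
assert list(logging_loader) == list(range(10))
assert list(logging_loader) == []           # exhausted after one pass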
Example no. 3
def makeTrainer(
    *,
    dataset=Inertia,
    network=EMLP,
    num_epochs=300,
    ndata=1000 + 2000,
    seed=2021,
    aug=False,
    bs=500,
    lr=3e-3,
    device='cuda',
    split={
        'train': -1,
        'val': 1000,
        'test': 1000
    },
    net_config={
        'num_layers': 3,
        'ch': 384,
        'group': None
    },
    log_level='info',
    trainer_config={
        'log_dir': None,
        'log_args': {
            'minPeriod': .02,
            'timeFrac': .75
        },
        'early_stop_metric': 'val_MSE'
    },
    save=False,
):

    logging.getLogger().setLevel(log_levels[log_level])  # log_levels: name -> logging-level mapping, assumed defined at module scope in the original script
    # Prep the datasets splits, model, and dataloaders
    with FixedNumpySeed(seed), FixedPytorchSeed(seed):
        base_dataset = dataset(ndata)
        datasets = split_dataset(base_dataset, splits=split)
    if net_config['group'] is None: net_config['group'] = base_dataset.symmetry
    model = network(base_dataset.rep_in, base_dataset.rep_out, **net_config)
    if aug: model = base_dataset.default_aug(model)
    model = Standardize(model, datasets['train'].stats)
    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(v)),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False))
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']
    opt_constr = objax.optimizer.Adam
    lr_sched = lambda e: lr  #*min(1,e/(num_epochs/10)) # Learning rate warmup
    return RegressorPlus(model, dataloaders, opt_constr, lr_sched,
                         **trainer_config)
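# Hedged sketch of what split_dataset is assumed to do in these trainers:
# partition a dataset into named, disjoint subsets, where an int value is an
# example count, a float in (0, 1) a fraction, and -1 means "all remaining
# examples". The real implementation in the repo may differ in details.
import numpy as np
import torch
from torch.utils.data import Subset, TensorDataset

def split_dataset_sketch(dataset, splits):
    n = len(dataset)
    perm = np.random.permutation(n)
    sizes = {k: int(v * n) if isinstance(v, float) else v for k, v in splits.items()}
    rest = n - sum(s for s in sizes.values() if s != -1)
    out, start = {}, 0
    for k, s in sizes.items():
        s = rest if s == -1 else s
        out[k] = Subset(dataset, perm[start:start + s].tolist())
        start += s
    return out

# Example: 3000 points split as {'train': -1, 'val': 1000, 'test': 1000}
ds = TensorDataset(torch.randn(3000, 4), torch.randn(3000, 1))
parts = split_dataset_sketch(ds, {'train': -1, 'val': 1000, 'test': 1000})
assert [len(parts[k]) for k in ('train', 'val', 'test')] == [1000, 1000, 1000]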
Example no. 4
def makeTrainer(*,
                network=CHNN,
                net_cfg={},
                lr=3e-3,
                n_train=800,
                regen=False,
                dataset=RigidBodyDataset,
                body=ChainPendulum(3),
                C=5,
                dtype=torch.float32,
                device=torch.device("cuda"),
                bs=200,
                num_epochs=100,
                trainer_config={},
                opt_cfg={'weight_decay': 1e-5}):
    # Create Training set and model
    angular = not issubclass(network, (CH, CL))
    splits = {"train": n_train, "test": 200}
    with FixedNumpySeed(0):
        dataset = dataset(n_systems=n_train + 200,
                          regen=regen,
                          chunk_len=C,
                          body=body,
                          angular_coords=angular)
        datasets = split_dataset(dataset, splits)

    dof_ndim = dataset.body.D if angular else dataset.body.d
    model = network(dataset.body.body_graph,
                    dof_ndim=dof_ndim,
                    angular_dims=dataset.body.angular_dims,
                    **net_cfg)
    model = model.to(device=device, dtype=dtype)
    # Create train and Dev(Test) dataloaders and move elems to gpu
    dataloaders = {
        k: LoaderTo(DataLoader(v,
                               batch_size=min(bs, splits[k]),
                               num_workers=0,
                               shuffle=(k == "train")),
                    device=device,
                    dtype=dtype)
        for k, v in datasets.items()
    }
    dataloaders["Train"] = dataloaders["train"]
    # Initialize optimizer and learning rate schedule
    opt_constr = lambda params: AdamW(params, lr=lr, **opt_cfg)
    lr_sched = cosLr(num_epochs)
    return IntegratedDynamicsTrainer(model,
                                     dataloaders,
                                     opt_constr,
                                     lr_sched,
                                     log_args={
                                         "timeFrac": 1 / 4,
                                         "minPeriod": 0.0
                                     },
                                     **trainer_config)
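# LoaderTo (from the oil library) is assumed to wrap a DataLoader so that each
# minibatch is moved to the target device (and optionally cast to a dtype) as it
# is yielded; the objax examples call it without a device, presumably converting
# to framework arrays instead. A hedged, torch-only sketch for tuple batches:
import torch
from torch.utils.data import DataLoader, TensorDataset

class LoaderToSketch:
    def __init__(self, loader, device=None, dtype=None):
        self.loader, self.device, self.dtype = loader, device, dtype
    def __len__(self):
        return len(self.loader)
    def __iter__(self):
        for mb in self.loader:
            yield tuple(x.to(device=self.device, dtype=self.dtype) for x in mb)

# Usage sketch: wrap a CPU loader so batches come out as float64 tensors
loader = LoaderToSketch(DataLoader(TensorDataset(torch.randn(8, 3)), batch_size=4),
                        device='cpu', dtype=torch.float64)
assert next(iter(loader))[0].dtype == torch.float64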
Example no. 5
def makeTrainer(*,
                network=EMLP,
                num_epochs=500,
                seed=2020,
                aug=False,
                bs=50,
                lr=1e-3,
                device='cuda',
                net_config={
                    'num_layers': 3,
                    'ch': rep,  # 'rep' is assumed to be defined at module scope in the original script
                    'group': Cube()
                },
                log_level='info',
                trainer_config={
                    'log_dir': None,
                    'log_args': {
                        'minPeriod': .02,
                        'timeFrac': 50
                    }
                },
                save=False):
    levels = {
        'critical': logging.CRITICAL,
        'error': logging.ERROR,
        'warn': logging.WARNING,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG
    }
    logging.getLogger().setLevel(levels[log_level])
    # Prep the datasets splits, model, and dataloaders
    with FixedNumpySeed(seed), FixedPytorchSeed(seed):
        datasets = {
            'train': InvertedCube(train=True),
            'test': InvertedCube(train=False)
        }
    model = Standardize(
        network(datasets['train'].rep_in, datasets['train'].rep_out,
                **net_config), datasets['train'].stats)
    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(v)),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False))
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']
    opt_constr = objax.optimizer.Adam
    lr_sched = lambda e: lr * cosLr(num_epochs)(e)
    return ClassifierPlus(model, dataloaders, opt_constr, lr_sched,
                          **trainer_config)
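# FixedNumpySeed / FixedPytorchSeed are assumed to be context managers that seed
# the RNG on entry and restore the previous RNG state on exit, making dataset
# generation and splitting reproducible without clobbering global randomness.
# A hedged sketch of the NumPy variant:
import numpy as np

class FixedNumpySeedSketch:
    def __init__(self, seed):
        self.seed = seed
    def __enter__(self):
        self.saved_state = np.random.get_state()  # remember the current RNG state
        np.random.seed(self.seed)
    def __exit__(self, *exc):
        np.random.set_state(self.saved_state)     # restore it on exit

with FixedNumpySeedSketch(0):
    a = np.random.rand(3)
with FixedNumpySeedSketch(0):
    b = np.random.rand(3)
assert np.allclose(a, b)  # same seed -> same draws inside the block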
Example no. 6
def makeTrainer(
    *,
    dataset=DoubleSpringPendulum,
    network=EMLPode,
    num_epochs=2000,
    ndata=5000,
    seed=2021,
    aug=False,
    bs=500,
    lr=3e-3,
    device='cuda',
    split={
        'train': 500,
        'val': .1,
        'test': .1
    },
    net_config={
        'num_layers': 3,
        'ch': 128,
        'group': O2eR3()
    },
    log_level='warn',
    trainer_config={
        'log_dir': None,
        'log_args': {
            'minPeriod': .02,
            'timeFrac': .75
        },
    },  #'early_stop_metric':'val_MSE'},
    save=False,
):

    logging.getLogger().setLevel(levels[log_level])  # 'levels': name -> logging-level dict, as defined in the preceding makeTrainer example
    # Prep the datasets splits, model, and dataloaders
    with FixedNumpySeed(seed), FixedPytorchSeed(seed):
        base_ds = dataset(n_systems=ndata, chunk_len=5)
        datasets = split_dataset(base_ds, splits=split)
    if net_config['group'] is None: net_config['group'] = base_ds.symmetry
    model = network(base_ds.rep_in, base_ds.rep_in, **net_config)  # ODE dynamics map the state rep to itself, so rep_in is used for both input and output
    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(v)),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False))
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']
    #equivariance_test(model,dataloaders['train'],net_config['group'])
    opt_constr = objax.optimizer.Adam
    lr_sched = lambda e: lr  #*cosLr(num_epochs)(e)#*min(1,e/(num_epochs/10))
    return IntegratedODETrainer(model, dataloaders, opt_constr, lr_sched,
                                **trainer_config)
Example no. 7
def get_dsmb(dsclass):
    seed = 2021
    bs = 50
    with FixedNumpySeed(seed), FixedPytorchSeed(seed):
        ds = dsclass(100)
    dataloader = DataLoader(ds,
                            batch_size=min(bs, len(ds)),
                            num_workers=0,
                            pin_memory=False)
    mb = next(iter(dataloader))
    mb = jax.device_put(mb[0].numpy()), jax.device_put(mb[1].numpy())
    return ds, mb
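# The mb conversion above is the usual torch -> JAX hand-off: .numpy() exposes
# the CPU tensor's buffer and jax.device_put copies it onto the default JAX
# device. A minimal, self-contained round trip (assumes jax and torch installed):
import jax
import jax.numpy as jnp
import torch

t = torch.arange(6, dtype=torch.float32).reshape(2, 3)
x = jax.device_put(t.numpy())
assert jnp.allclose(x, jnp.arange(6, dtype=jnp.float32).reshape(2, 3))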
Example no. 8
 def __init__(self, bobs=2, m=1, l=1,k=10):
     self.body_graph = BodyGraph()#nx.Graph()
     self.arg_string = f"n{bobs}m{m or 'r'}l{l}"
     with FixedNumpySeed(0):
         ms = [.6+.8*np.random.rand() for _ in range(bobs)] if m is None else bobs*[m]
     self.ms = copy.deepcopy(ms)
     ls = bobs*[l]
     self.ks = torch.tensor((bobs-1)*[k]).float()
     self.locs = torch.zeros(bobs,3)
     self.locs[:,0] = 1*torch.arange(bobs).float()
     for i in range(bobs):
         self.body_graph.add_extended_nd(i, m=ms.pop(), d=0,tether=(self.locs[i],ls.pop()))
     self.n = bobs
     self.D = 2*self.n # Spherical coordinates, phi, theta per bob
     self.angular_dims = range(self.D)
Example no. 9
def makeTrainer(*,
                dataset=YAHOO,
                network=SmallNN,
                num_epochs=15,
                bs=5000,
                lr=1e-3,
                optim=AdamW,
                device='cuda',
                trainer=Classifier,
                split={
                    'train': 20,
                    'val': 5000
                },
                net_config={},
                opt_config={'weight_decay': 1e-5},
                trainer_config={
                    'log_dir': os.path.expanduser('~/tb-experiments/UCI/'),
                    'log_args': {
                        'minPeriod': .1,
                        'timeFrac': 3 / 10
                    }
                },
                save=False):

    # Prep the datasets splits, model, and dataloaders
    with FixedNumpySeed(0):
        datasets = split_dataset(dataset(), splits=split)
        datasets['_unlab'] = dmap(lambda mb: mb[0], dataset())
        datasets['test'] = dataset(train=False)
        #print(datasets['test'][0])
    device = torch.device(device)
    model = network(num_classes=datasets['train'].num_classes,
                    dim_in=datasets['train'].dim,
                    **net_config).to(device)

    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(datasets[k])),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False), device)
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']
    opt_constr = partial(optim, lr=lr, **opt_config)
    lr_sched = cosLr(num_epochs)  #lambda e:1#
    return trainer(model, dataloaders, opt_constr, lr_sched, **trainer_config)
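# dmap is assumed to be a lazy "dataset map": it applies a function to every item
# of a dataset. Above it keeps only mb[0] (the inputs) from a fresh copy of the
# dataset, producing the unlabeled pool '_unlab' used for semi-supervised
# training. A hedged sketch of such a wrapper:
class DmapSketch:
    def __init__(self, fn, dataset):
        self.fn, self.dataset = fn, dataset
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, i):
        return self.fn(self.dataset[i])

# e.g. dropping labels from (input, label) pairs
assert DmapSketch(lambda mb: mb[0], [(1, 'a'), (2, 'b')])[1] == 2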
Example no. 10
def makeTrainer(*,network,net_cfg,lr=1e-2,n_train=5000,regen=False,
                dtype=torch.float32,device=torch.device('cuda'),bs=200,num_epochs=2,
                trainer_config={'log_dir':'data_scaling_study_final'}):
    # Create Training set and model
    splits = {'train':n_train,'val':min(n_train,2000),'test':2000}
    dataset = SpringDynamics(n_systems=100000, regen=regen)
    with FixedNumpySeed(0):
        datasets = split_dataset(dataset,splits)
    model = network(**net_cfg).to(device=device,dtype=dtype)
    # Create train and Dev(Test) dataloaders and move elems to gpu
    dataloaders = {k:LoaderTo(DataLoader(v,batch_size=min(bs,n_train),num_workers=0,shuffle=(k=='train')),
                                device=device,dtype=dtype) for k,v in datasets.items()}
    dataloaders['Train'] = islice(dataloaders['train'],len(dataloaders['val']))
    # Initialize optimizer and learning rate schedule
    opt_constr = lambda params: Adam(params, lr=lr)
    lr_sched = cosLr(num_epochs)
    return IntegratedDynamicsTrainer2(model,dataloaders,opt_constr,lr_sched,
                                    log_args={'timeFrac':1/4,'minPeriod':0.0},**trainer_config)
Example no. 11
 def __init__(self, links=2, beams=False, m=None, l=None):
     self.body_graph = BodyGraph()  #nx.Graph()
     self.arg_string = f"n{links}{'b' if beams else ''}m{m or 'r'}l{l or 'r'}"
     assert not beams, "beams temporarily not supported"
     with FixedNumpySeed(0):
         ms = [.6 + .8 * np.random.rand()
               for _ in range(links)] if m is None else links * [m]
         ls = [.6 + .8 * np.random.rand()
               for _ in range(links)] if l is None else links * [l]
     self.ms = copy.deepcopy(ms)
     self.body_graph.add_extended_nd(0,
                                     m=ms.pop(),
                                     d=0,
                                     tether=(torch.zeros(2), ls.pop()))
     for i in range(1, links):
         self.body_graph.add_extended_nd(i, m=ms.pop(), d=0)
         self.body_graph.add_edge(i - 1, i, l=ls.pop())
     self.D = self.n = links
     self.angular_dims = range(links)
Example no. 12
 def __init__(self, mass=3, l=1, q=.3, magnets=2):
     with FixedNumpySeed(0):
         mass = np.random.rand() * .8 + 2.4 if mass is None else mass
     self.ms = [mass]
     self.arg_string = f"m{mass or 'r'}l{l}q{q}mn{magnets}"
     self.body_graph = BodyGraph()
     self.body_graph.add_extended_nd(0,
                                     m=mass,
                                     d=0,
                                     tether=(torch.zeros(3), l))
     self.q = q  # magnetic moment magnitude
     theta = torch.linspace(0, 2 * np.pi, magnets + 1)[:-1]
     self.magnet_positions = torch.stack(
         [
             0.1 * theta.cos(), 0.1 * theta.sin(),
             -(1.05) * l * torch.ones_like(theta)
         ],
         dim=-1,
     )
     self.magnet_dipoles = q * torch.stack(
         [0 * theta, 0 * theta,
          torch.ones_like(theta)], dim=-1)  # +z direction
Example no. 13
def load(config, **unused_kwargs):

    with FixedNumpySeed(config.data_seed):
        datasets, num_species, charge_scale = QM9datasets(
            os.path.join(config.data_dir, "qm9"))
        if config.subsample_trainset != 1.0:
            datasets.update(
                split_dataset(datasets["train"],
                              {"train": config.subsample_trainset}))
        if config.batch_fit != 0:
            datasets.update(
                split_dataset(datasets["train"], {"train": config.batch_fit}))
            datasets["test"] = datasets["train"]
            datasets["valid"] = datasets["train"]

    ds_stats = datasets["train"].stats[config.task]

    if config.recenter:
        m = datasets["train"].data["charges"] > 0
        pos = datasets["train"].data["positions"][m]
        mean, std = pos.mean(dim=0), pos.std()
        for ds in datasets.values():
            ds.data["positions"] = (ds.data["positions"] -
                                    mean[None, None, :]) / std

    dataloaders = {
        key: DataLoader(
            dataset,
            batch_size=config.batch_size,
            num_workers=0,
            shuffle=(key == "train"),
            pin_memory=False,
            collate_fn=collate_fn,
            drop_last=config.batch_fit == 0,
        )
        for key, dataset in datasets.items()
    }

    return dataloaders, num_species, charge_scale, ds_stats, f"QM9_{config.task}"
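# Hedged illustration of the recentering step above: QM9 molecules are padded to
# a fixed atom count and padded slots carry charge 0, so the charges > 0 mask
# selects real atoms only; their mean position (and std) then normalize the
# positions of every split. Fake data stands in for the real QM9 tensors:
import torch

positions = torch.randn(4, 29, 3)             # (molecules, max_atoms, xyz)
charges = torch.randint(0, 10, (4, 29))       # 0 marks padding slots
real_atom_pos = positions[charges > 0]        # (num_real_atoms, 3)
mean, std = real_atom_pos.mean(dim=0), real_atom_pos.std()
recentered = (positions - mean[None, None, :]) / std
assert recentered.shape == positions.shape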
Example no. 14
# Snippet starts mid-function: the tail end of a makeTrainer (cf. Example no. 4),
# followed by the training script's __main__ block.
    opt_constr = lambda params: AdamW(params, lr=lr, **opt_cfg)
    lr_sched = cosLr(num_epochs)
    return IntegratedDynamicsTrainer(model,
                                     dataloaders,
                                     opt_constr,
                                     lr_sched,
                                     log_args={
                                         "timeFrac": 1 / 4,
                                         "minPeriod": 0.0
                                     },
                                     **trainer_config)


#Trial = train_trial(makeTrainer)
if __name__ == "__main__":
    with FixedNumpySeed(0):
        defaults = copy.deepcopy(makeTrainer.__kwdefaults__)
        defaults["save"] = False
        namespace = (datasets, systems, models)
        cfg = argupdated_config(defaults, namespace=namespace)
        cfg.pop('local_rank')
        save = cfg.pop('save')
        trainer = makeTrainer(**cfg)
        trainer.train(cfg['num_epochs'])
        if save: print(f"saved at: {trainer.save_checkpoint()}")
        rollouts = trainer.test_rollouts(
            angular_to_euclidean=not issubclass(cfg['network'], (CH, CL)))
        print(
            f"rollout error GeoMean {rollouts[0][:,1:].log().mean().exp():.3E}"
        )
        fname = f"rollout_errs_{cfg['network']}_{cfg['body']}.np"
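# The rollout error printed above is a geometric mean, computed as
# exp(mean(log(x))); unlike an arithmetic mean it is not dominated by a single
# badly-diverged rollout. Minimal check of the identity:
import torch

errs = torch.tensor([1e-3, 1e-2, 1e-1])
geo_mean = errs.log().mean().exp()
assert torch.isclose(geo_mean, torch.tensor(1e-2))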
Example no. 15
def make_trainer(
        chunk_len: int,
        angular: Union[Tuple, bool],
        body,
        bs: int,
        dataset,
        dt: float,
        lr: float,
        n_train: int,
        n_val: int,
        n_test: int,
        net_cfg: dict,
        network,
        num_epochs: int,
        regen: bool,
        seed: int = 0,
        device=torch.device("cuda"),
        dtype=torch.float32,
        trainer_config={},
):
    # Create Training set and model
    splits = {"train": n_train, "val": n_val, "test": n_test}
    dataset = dataset(
        n_systems=n_train + n_val + n_test,
        regen=regen,
        chunk_len=chunk_len,
        body=body,
        dt=dt,
        integration_time=10,
        angular_coords=angular,
    )
    # dataset=CartpoleDataset(batch_size=500,regen=regen)
    with FixedNumpySeed(seed):
        datasets = split_dataset(dataset, splits)
    model = network(G=dataset.body.body_graph, **net_cfg).to(device=device,
                                                             dtype=dtype)

    # Create train and Dev(Test) dataloaders and move elems to gpu
    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, splits[k]),
                       num_workers=0,
                       shuffle=(k == "train")),
            device=device,
            dtype=dtype,
        )
        for k, v in datasets.items()
    }
    dataloaders["Train"] = dataloaders["train"]
    # Initialize optimizer and learning rate schedule
    opt_constr = lambda params: Adam(params, lr=lr)
    lr_sched = cosLr(num_epochs)
    return IntegratedDynamicsTrainer(model,
                                     dataloaders,
                                     opt_constr,
                                     lr_sched,
                                     log_args={
                                         "timeFrac": 1 / 4,
                                         "minPeriod": 0.0
                                     },
                                     **trainer_config)
Example no. 16
def make_trainer(
        train_data,
        test_data,
        bs=5000,
        split={
            'train': 200,
            'val': 5000
        },
        network=RealNVPTabularWPrior,
        net_config={},
        num_epochs=15,
        optim=AdamW,
        lr=1e-3,
        opt_config={'weight_decay': 1e-5},
        swag=False,
        swa_config={
            'swa_dec_pct': .5,
            'swa_start_pct': .75,
            'swa_freq_pct': .05,
            'swa_lr_factor': .1
        },
        swag_config={
            'subspace': 'covariance',
            'max_num_models': 20
        },
        #                 subspace='covariance', max_num_models=20,
        trainer=SemiFlow,
        trainer_config={
            'log_dir': os.path.expanduser('~/tb-experiments/UCI/'),
            'log_args': {
                'minPeriod': .1,
                'timeFrac': 3 / 10
            }
        },
        dev='cuda',
        save=False):
    with FixedNumpySeed(0):
        datasets = split_dataset(train_data, splits=split)
        datasets['_unlab'] = dmap(lambda mb: mb[0], train_data)
        datasets['test'] = test_data

    device = torch.device(dev)

    dataloaders = {
        k: LoaderTo(
            DataLoader(v,
                       batch_size=min(bs, len(datasets[k])),
                       shuffle=(k == 'train'),
                       num_workers=0,
                       pin_memory=False), device)
        for k, v in datasets.items()
    }
    dataloaders['Train'] = dataloaders['train']

    #     model = network(num_classes=train_data.num_classes, dim_in=train_data.dim, **net_config).to(device)
    #     swag_model = SWAG(model_cfg.base,
    #                     subspace_type=args.subspace, subspace_kwargs={'max_rank': args.max_num_models},
    #                     *model_cfg.args, num_classes=num_classes, **model_cfg.kwargs)

    #     swag_model.to(args.device)
    opt_constr = partial(optim, lr=lr, **opt_config)
    model = network(num_classes=train_data.num_classes,
                    dim_in=train_data.dim,
                    **net_config).to(device)
    if swag:
        swag_model = RealNVPTabularSWAG(dim_in=train_data.dim,
                                        **net_config,
                                        **swag_config)
        #         swag_model = SWAG(RealNVPTabular,
        #                           subspace_type=subspace, subspace_kwargs={'max_rank': max_num_models},
        #                           num_classes=train_data.num_classes, dim_in=train_data.dim,
        #                           num_coupling_layers=coupling_layers,in_dim=dim_in,**net_config)
        #         swag_model.to(device)
        #         swag_model = SWAG(RealNVPTabular, num_classes=train_data.num_classes, dim_in=train_data.dim,
        #                         swag=True, **swag_config, **net_config)
        #         model.to(device)
        swag_model.to(device)
        swa_config['steps_per_epoch'] = len(dataloaders['_unlab'])
        swa_config['num_epochs'] = num_epochs
        lr_sched = swa_learning_rate(**swa_config)
        #         lr_sched = cosLr(num_epochs)
        return trainer(model,
                       dataloaders,
                       opt_constr,
                       lr_sched,
                       swag_model=swag_model,
                       **swa_config,
                       **trainer_config)
    else:
        #         model = network(num_classes=train_data.num_classes, dim_in=train_data.dim, **net_config).to(device)
        lr_sched = cosLr(num_epochs)
        #     lr_sched = lambda e:1
        return trainer(model, dataloaders, opt_constr, lr_sched,
                       **trainer_config)