Example #1
 def _get_rollout_mse(self):
     assert self.traj_data is not None
     ts, true_zs, sys_params = self.traj_data
     z0 = true_zs[:, 0]
     with Eval(self.model), torch.no_grad():
         pred_zs = self._rollout_model(z0, ts, sys_params)
     return (pred_zs - true_zs).pow(2).mean().item()
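All of these snippets wrap inference in an Eval context manager together with torch.no_grad(). The project's own Eval is not shown on this page; a minimal sketch of the usual pattern (switch the model to eval mode on entry, restore its previous mode on exit) might look like this:

 import torch.nn as nn

 class Eval:
     """Switch a model to eval mode for the duration of a block and
     restore its previous training/eval state afterwards."""
     def __init__(self, model: nn.Module):
         self.model = model
     def __enter__(self):
         self.was_training = self.model.training  # remember the current mode
         self.model.train(False)  # eval mode: fixes dropout and batchnorm stats
         return self.model
     def __exit__(self, *exc):
         self.model.train(self.was_training)  # restore the previous mode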
Example #2
 def logStuff(self, step, minibatch=None):
     """ Handles Logging and any additional needs for subclasses,
         should have no impact on the training """
     with Eval(self.model), torch.no_grad():
         if minibatch is not None:
             z = self.model[1].body(minibatch[0])
             reconstructed = self.model[1].body.inverse(z)
             rel_err = (torch.mean((minibatch[0] - reconstructed)**2).sqrt() /
                        torch.mean(minibatch[0]**2).sqrt()).cpu().data.numpy()
             if rel_err < 0.03:
                 self.hypers['ld_weight'] *= .8
             elif rel_err > 0.06:
                 self.hypers['ld_weight'] *= 1.25
             self.rel_err = rel_err
             p = self.rel_err**self.hypers['kp']
             self.logger.add_scalars(
                 'info', {
                     'recons_err': rel_err,
                     'ld_weight': self.hypers['ld_weight'],
                     'p': p
                 }, step)
     # if hasattr(self.model,'sample') and minibatch is not None:
     #     with Eval(self.model):
     #         self.model.nll(minibatch[0]) # Forward through network to populate shape info
     #         with torch.no_grad():
     #             fake_images = self.model.sample(32).cpu().data
     #     img_grid = vutils.make_grid(fake_images, normalize=False,range=(0,1))
     #     self.logger.add_image('samples', img_grid, step)
     super().logStuff(step, minibatch)
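The ld_weight update in the middle of this method is a dead-band controller on the relative reconstruction error: shrink the weight when the error is comfortably low, grow it when it is too high, and leave it alone in between. Factored out as a standalone helper (thresholds and factors copied from the code above; the function itself is hypothetical):

 def adapt_ld_weight(weight, rel_err, low=0.03, high=0.06):
     """Dead-band controller: multiply the weight by 0.8 when the
     reconstruction error is below `low`, by 1.25 when above `high`."""
     if rel_err < low:
         return weight * 0.8
     if rel_err > high:
         return weight * 1.25
     return weight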
Example #3
 def evalAverageMetrics(self, loader, metrics):
     num_total, loss_totals = 0, 0
     with Eval(self.model), torch.no_grad():
         for minibatch in loader:
             try:
                 mb_size = minibatch[0].shape[0]
             except AttributeError:
                 mb_size = 1
             loss_totals += mb_size * metrics(minibatch)
             num_total += mb_size
     if num_total == 0: raise KeyError("dataloader is empty")
     return loss_totals / num_total
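evalAverageMetrics computes a size-weighted average of an arbitrary metric over a loader, so per-batch values from unevenly sized batches combine correctly. A hypothetical usage sketch (trainer and model are assumed names, not part of the example):

 # Average MSE over a validation loader, weighting each batch by its size.
 mse = lambda mb: ((model(mb[0]) - mb[1])**2).mean().item()
 val_mse = trainer.evalAverageMetrics(trainer.dataloaders['val'], mse)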
Example #4
 def test_rollouts(self, angular_to_euclidean=False, pert_eps=1e-4):
     #self.model.cpu().double()
     dataloader = self.dataloaders["test"]
     rel_errs = []
     pert_rel_errs = []
     with Eval(self.model), torch.no_grad():
         for mb in dataloader:
             z0, T = mb[0]  # assume timesteps evenly spaced for now
             #z0 = z0.cpu().double()
             T = T[0]
             body = dataloader.dataset.body
             long_T = body.dt * torch.arange(
                 10 * body.integration_time // body.dt).to(
                     z0.device, z0.dtype)
             zt_pred = self.model.integrate(z0,
                                            long_T,
                                            tol=1e-7,
                                            method='dopri5')
             bs, Nlong, *rest = zt_pred.shape
             # add conversion from angular to euclidean
             if angular_to_euclidean:
                 z0 = body.body2globalCoords(z0)
                 flat_pred = body.body2globalCoords(
                     zt_pred.reshape(bs * Nlong, *rest))
                 zt_pred = flat_pred.reshape(bs, Nlong,
                                             *flat_pred.shape[1:])
             zt = dataloader.dataset.body.integrate(z0, long_T)
             perturbation = pert_eps * torch.randn_like(
                 z0)  # perturbation does not respect constraints
             z0_perturbed = project_onto_constraints(body.body_graph,
                                                     z0 + perturbation,
                                                     tol=1e-5)  # project
             zt_pert = body.integrate(z0_perturbed, long_T)
             # (bs, T, 2, n, 2)
             rel_error = (
                 (zt_pred - zt)**2).sum(-1).sum(-1).sum(-1).sqrt() / (
                     (zt_pred + zt)**2).sum(-1).sum(-1).sum(-1).sqrt()
             rel_errs.append(rel_error)
             pert_rel_error = (
                 (zt_pert - zt)**2).sum(-1).sum(-1).sum(-1).sqrt() / (
                     (zt_pert + zt)**2).sum(-1).sum(-1).sum(-1).sqrt()
             pert_rel_errs.append(pert_rel_error)
         rel_errs = torch.cat(rel_errs, dim=0)  # (D, T)
         pert_rel_errs = torch.cat(pert_rel_errs, dim=0)  # (D, T)
         both = (rel_errs, pert_rel_errs, zt_pred, zt_pert)
     return both
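Both relative errors above use the same normalization: the L2 norm of the trajectory difference divided by the L2 norm of the trajectory sum, reduced over everything except the batch and time axes. As a standalone helper (hypothetical name, same reductions as in the code above):

 def traj_rel_err(za, zb):
     """Per-(batch, time) relative error: ||za - zb|| / ||za + zb||,
     with the norms taken over the trailing state dimensions."""
     num = ((za - zb)**2).sum(-1).sum(-1).sum(-1).sqrt()
     den = ((za + zb)**2).sum(-1).sum(-1).sum(-1).sqrt()
     return num / den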
Example #5
 def logStuff(self, step, minibatch=None):
     bpd_func = lambda mb: (self.model.nll(mb).mean().cpu().data.numpy() /
                            mb.shape[-1] + np.log(256)) / np.log(2)
     acc_func = lambda mb: self.model.prior.classify(
         self.model(mb[0])).type_as(mb[1]).eq(mb[1]).cpu().data.numpy().mean()
     metrics = {}
     with Eval(self.model), torch.no_grad():
         #metrics['Train_bpd'] = self.evalAverageMetrics(self.dataloaders['unlab'],bpd_func)
         metrics['val_bpd'] = self.evalAverageMetrics(
             imap(lambda z: z[0], self.dataloaders['val']), bpd_func)
         metrics['Train_Acc'] = self.evalAverageMetrics(
             self.dataloaders['Train'], acc_func)
         metrics['val_Acc'] = self.evalAverageMetrics(
             self.dataloaders['val'], acc_func)
         metrics['test_Acc'] = self.evalAverageMetrics(
             self.dataloaders['test'], acc_func)
         if minibatch is not None:
             metrics['Unlab_loss(mb)'] = self.model.nll(
                 minibatch[1]).mean().cpu().data.numpy()
     self.logger.add_scalars('metrics', metrics, step)
     super().logStuff(step, minibatch)
Example #6
 def metrics(self, loader):
     nll_func = lambda mb: self.loss(mb).cpu().data.numpy()
     with Eval(self.model):
         nll = self.evalAverageMetrics(loader, nll_func)
     return {'bpd': (nll + np.log(256)) / np.log(2)}
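Examples #5 and #6 both convert a negative log-likelihood in nats to bits-per-dim: divide by the number of dimensions (already done upstream of #6), add log 256 to account for 8-bit dequantization, and divide by log 2. A hypothetical helper making that conversion explicit:

 import numpy as np

 def nats_to_bpd(nll_nats, dims=1):
     """Convert a per-example NLL in nats to bits-per-dim, assuming the
     inputs were quantized to 256 levels before dequantization."""
     return (nll_nats / dims + np.log(256)) / np.log(2)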
Example #7
 def getModelOutputs(self, loader, output_func):
     output = []
     with Eval(self.swag_model), torch.no_grad():
         for minibatch in loader:
             output.append(output_func(minibatch))
     return np.concatenate(output)
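A hypothetical usage of getModelOutputs, collecting class probabilities from the SWAG model over a test loader (trainer and the softmax output function are assumptions, not part of the example):

 probs = trainer.getModelOutputs(
     trainer.dataloaders['test'],
     lambda mb: torch.softmax(trainer.swag_model(mb[0]), dim=-1).cpu().numpy())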