Example 1
def log_kv_or_listkv(dic, prefix):
    for k, v in dic.items():
        if isinstance(v, (list, tuple)):
            for i, val in enumerate(v):
                if isinstance(val, (float, int)):
                    logger.logkv("{}/{}/{}".format(prefix, k, i), val)
        elif isinstance(v, (float, int)):
            logger.logkv("{}/{}".format(prefix, k), v)
Example 2
    def ecb(epoch):

        params = list(system.parameters())
        vparams = parameters_to_vector(params)

        error = (vparams - true_params).norm().item()

        logger.logkv('test/log10_paramerror', np.log10(error))

        return
Example 3
File: ceemnl.py  Project: sisl/CEEM
    def ecb(epoch):
        torch.save(sys.state_dict(),
                   os.path.join(logger.get_dir(), 'ckpts', 'model_{}.th'.format(epoch)))
        y_pred = gen_ypred_model(sys, val_u, val_y)
        rms = compute_rms(val_y[:, 25:], y_pred[:, 25:], y_std)
        val_rmse = float(rms.mean())
        logger.logkv('test/val_rmse', val_rmse)
        if val_rmse < tracker.best_val_rmse:
            tracker.best_val_rmse = val_rmse
            torch.save(sys.state_dict(), os.path.join(logger.get_dir(), 'ckpts', 'best_model.th'))
        return
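The callback relies on a mutable `tracker` captured from the enclosing scope to remember the best validation RMSE across epochs. Its definition is not part of the snippet; a minimal stand-in, assuming only the `best_val_rmse` attribute is needed:

from types import SimpleNamespace

# Hypothetical stand-in for the `tracker` object the callback closes over.
tracker = SimpleNamespace(best_val_rmse=float('inf'))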
Example 4
    def tcb(epoch):

        with torch.no_grad():
            tgt_test_pr = system.step_derivs(t_test, x_test)
            error = float(torch.nn.functional.mse_loss(tgt_test_pr, tgt_test))

        logger.logkv('test/log10_error', np.log10(error))

        # despite its name, this variant keeps a window of the last 100 entries
        last_10_errors._arr.append(np.log10(error))

        if len(last_10_errors._arr) > 100:
            last_10_errors._arr = last_10_errors._arr[-100:]

            l10err = torch.tensor(last_10_errors._arr)

            # converged once the spread of recent log10 errors is small;
            # returning True presumably signals the trainer to stop
            convcrit = float((l10err.min() - l10err.max()).abs())
            logger.logkv('test/log10_convcrit', np.log10(convcrit))
            if convcrit < 1e-3:
                return True

        return False
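Examples 4 and 5 share one convergence test: keep a sliding window of recent error values (log10-transformed in Example 4, raw in Example 5) and return True, stopping training, once the spread between the window's minimum and maximum falls below a threshold. The `last_10_errors` holder is defined outside both snippets; a minimal sketch, assuming only a mutable `._arr` list is required:

class ErrorWindow:
    """Hypothetical stand-in for `last_10_errors`: a mutable list holder."""

    def __init__(self):
        self._arr = []

last_10_errors = ErrorWindow()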
Example 5
    def tcb(epoch):

        params = list(system.parameters())
        vparams = parameters_to_vector(params)

        error = (vparams - true_params).norm().item()

        last_10_errors._arr.append(float(error))

        logger.logkv('test/log10_error', np.log10(error))

        if len(last_10_errors._arr) > 10:
            last_10_errors._arr = last_10_errors._arr[-10:]

            l10err = torch.tensor(last_10_errors._arr)

            convcrit = float((l10err.min() - l10err.max()).abs())
            logger.logkv('test/log10_convcrit', np.log10(convcrit))
            if convcrit < 1e-4:
                return True

        return False
Example 6
    def ecb(k):

        logger.logkv('train/epoch', k)
        logger.logkv('train/elapsed_time', time() - start_time)

        # update the learning rate from the schedule and log it
        lr = lr_sched(k)
        learner_opt_kwargs['lr'] = lr

        logger.logkv('train/lr', lr)

        torch.save(system.state_dict(),
                   os.path.join(logger.get_dir(), 'ckpts', 'best_model.th'))
        torch.save(xsm, os.path.join(logger.get_dir(), 'ckpts', 'best_xsm.th'))

        logger.logkv('train/mu', float(system.logmu.exp()))
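`lr_sched` is a closure-captured schedule mapping the epoch index to a learning rate; its definition lies outside the snippet. Example 12 uses the hyperbolic multiplier `500. / (500 + k)` with `LambdaLR`, so a plausible sketch is the same decay scaled by an assumed base rate:

# Hypothetical schedule; base_lr and the 500-epoch half-life are assumptions
# mirroring the LambdaLR decay in Example 12.
base_lr = 1e-3

def lr_sched(k):
    return base_lr * 500.0 / (500.0 + k)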
Example 7
    def train(self, params, callbacks=()):  # tuple default avoids the mutable-default pitfall

        vparams0 = parameters_to_vector(params).clone()

        xsms = []

        t_start = timeit.default_timer()

        for k in range(self._max_k):

            with utils.Timer() as timer:  # `timer`, not `time`, to avoid shadowing the time module
                ## E-step
                xfilt, xfiltr, wfilt, meanll = self._fapf.filter(self._y)
                xsm = self._fapf.FFBSi(xfilt, wfilt)
                xsms.append(xsm)
                if self._xlen_cutoff and len(xsms) > self._xlen_cutoff:
                    xsms = xsms[-self._xlen_cutoff:]

            logger.logkv('train/Etime', timer.dt)

            ## M-step

            with utils.Timer() as timer:
                obj = lambda: -self.recursive_Q(xsms, self._y, 0, 0.)

                self._optimizer(obj, params)

            logger.logkv('train/Mtime', timer.dt)

            logger.logkv('train/elapsedtime', timeit.default_timer() - t_start)

            ## log the current value of Q

            Q = float(self._fapf.Q_MCEM(self._y, xsms[-1]))
            logger.logkv('train/Q', Q)

            for callback in callbacks:
                callback(k)

            logger.dumpkvs()

        return params
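Example 7 also shows the contract behind the callbacks on this page: `train` calls each callback with the iteration index `k` after the M-step, then flushes the logger with `dumpkvs`. Wiring an epoch callback into it might look like the following, where `trainer` and `system` are hypothetical names for objects built elsewhere:

# Hypothetical wiring of an epoch callback into the training loop.
def ecb(k):
    logger.logkv('train/epoch', k)

params = trainer.train(list(system.parameters()), callbacks=[ecb])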
Example 8
    def ecb(epoch):

        logger.logkv('time/epoch', epoch)

        params = list(system.parameters())
        vparams = parameters_to_vector(params)

        error = (vparams - true_params).norm().item()

        logger.logkv('test/log10_paramerror', np.log10(error))

        logger.logkv('time/epochtime',
                     timeit.default_timer() - timer['start_time'])

        timer['start_time'] = timeit.default_timer()

        with torch.no_grad():
            tgt_test_pr = system.step_derivs(t_test, x_test)
            error = float(torch.nn.functional.mse_loss(tgt_test_pr, tgt_test))

        logger.logkv('test/log10_error', np.log10(error))

        return
Example 9
def train_net(net,
              train_data,
              train_trgt,
              test_data,
              test_trgt,
              valid_data,
              valid_trgt,
              y_std,
              lr,
              H,
              logdir,
              n_epochs=1000):

    T = train_data.shape[1]

    opt = torch.optim.Adam(net.parameters(), lr=lr)
    scheduler_off = 1000.
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        opt, lambda epoch: scheduler_off / (scheduler_off + epoch))

    best_val_loss = np.inf

    t0 = time.time()
    for e in range(n_epochs):
        logger.logkv('epoch', e)
        # train
        ll = []
        for t in range(T - H + 1):
            opt.zero_grad()

            u = train_data[:, t:t + H]
            y = train_trgt[:, t + H - 1]
            y_pred = net(u)
            assert y.size() == y_pred.size()
            loss = compute_rms(y.unsqueeze(0), y_pred.unsqueeze(0), y_std)
            loss.backward()
            opt.step()
            ll.append(float(loss))
        mean_train_loss = np.mean(ll)
        logger.logkv('log10_train_loss', np.log10(mean_train_loss))

        scheduler.step()

        for param_group in opt.param_groups:
            logger.logkv('log10_lr', np.log10(param_group['lr']))

        if e % 100 == 0:
            # validation
            ll = []
            for t in range(T - H):
                with torch.no_grad():
                    u = valid_data[:, t:t + H]
                    y = valid_trgt[:, t + H - 1]
                    y_pred = net(u)
                    loss = compute_rms(y.unsqueeze(0), y_pred.unsqueeze(0),
                                       y_std)
                    ll.append(float(loss))
            mean_val_loss = np.mean(ll)
            logger.logkv('log10_val_loss', np.log10(mean_val_loss))

            # Test
            ll = []
            for t in range(T - H):
                with torch.no_grad():
                    u = test_data[:, t:t + H]
                    y = test_trgt[:, t + H - 1]
                    y_pred = net(u)
                    loss = compute_rms(y.unsqueeze(0), y_pred.unsqueeze(0),
                                       y_std)
                ll.append(float(loss))
            mean_test_loss = np.mean(ll)
            logger.logkv('log10_test_loss', np.log10(mean_test_loss))

            # Save
            if mean_val_loss < best_val_loss:
                torch.save(net.state_dict(),
                           os.path.join(logdir, 'best_net.th'))
                best_val_loss = mean_val_loss

        if time.time() - t0 > 2:  # flush buffered metrics at most every ~2 seconds
            t0 = time.time()
            logger.dumpkvs()

    return net
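`train_net` slides a width-`H` window over the series: each step trains on inputs `u[:, t:t+H]` against the target at the window's last index `t + H - 1`, giving `T - H + 1` updates per epoch. An invocation sketch with made-up shapes (4 series, `T = 200` steps, 2 input and 3 output channels; `compute_rms` and `logger` come from the project):

import torch

B, T, du, dy, H = 4, 200, 2, 3, 32
# Hypothetical data; real experiments load recorded trajectories instead.
train_u, train_y = torch.randn(B, T, du), torch.randn(B, T, dy)
val_u, val_y = torch.randn(B, T, du), torch.randn(B, T, dy)
test_u, test_y = torch.randn(B, T, du), torch.randn(B, T, dy)
y_std = train_y.std(dim=(0, 1))  # per-channel target scale for compute_rms

# Any module mapping a (B, H, du) window to a (B, dy) prediction works.
net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(H * du, dy))

net = train_net(net, train_u, train_y, test_u, test_y, val_u, val_y,
                y_std, lr=1e-3, H=H, logdir='logs/run0', n_epochs=100)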
Example 10
    def ecb(epoch):
        logger.logkv('test/rho', float(system._rho))
        logger.logkv('test/sigma', float(system._sigma))
        logger.logkv('test/beta', float(system._beta))

        logger.logkv('test/rho_pcterr_log10',
                     float(torch.log10((true_system._rho - system._rho).abs() / true_system._rho)))
        logger.logkv(
            'test/sigma_pcterr_log10',
            float(torch.log10((true_system._sigma - system._sigma).abs() / true_system._sigma)))
        logger.logkv(
            'test/beta_pcterr_log10',
            float(torch.log10((true_system._beta - system._beta).abs() / true_system._beta)))

        return
Example 11
    def ecb(epoch):
        logger.logkv('test/rho', float(system._rho))
        logger.logkv('test/sigma', float(system._sigma))
        logger.logkv('test/beta', float(system._beta))

        logger.logkv('test/rho_pcterr_log10',
                     float(torch.log10((true_system._rho - system._rho).abs() / true_system._rho)))
        logger.logkv(
            'test/sigma_pcterr_log10',
            float(torch.log10((true_system._sigma - system._sigma).abs() / true_system._sigma)))
        logger.logkv(
            'test/beta_pcterr_log10',
            float(torch.log10((true_system._beta - system._beta).abs() / true_system._beta)))

        logger.logkv('time/epochtime', timeit.default_timer() - timer['start_time'])

        timer['start_time'] = timeit.default_timer()

        return
Example 12
def run(seed, lr, method, noise, damped, smoketest=False):
    """
    Args:
        seed (int): seed
        lr (float): init learning rate
        method (str): training method in ['qdd', 'nqqd', 'del+mnorm', 'del+logdet']
        noise (float): noise level, one of {0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0}
        damped (bool): use damped pendulum data
        smoketest (bool): if True, run only 2 epochs
    """
    torch.set_default_dtype(torch.float64)
    dtype = torch.get_default_dtype()

    torch.manual_seed(seed)

    # load the data
    dataset = 'damped_' if damped else ''
    noisedict = {
        0.01: '0p01',
        0.05: '0p05',
        0.10: '0p10',
        0.20: '0p20',
        0.30: '0p30',
        0.40: '0p40',
        0.50: '0p50',
        1.0: '1p0'
    }
    dataset += 'dubpen_%s_smoothed.td' % noisedict[noise]
    dataset = './datasets/' + dataset

    data = torch.load('./datasets/%sdubpen_qddot.td' %
                      ('damped_' if damped else ''))
    data_ = torch.load(dataset)
    dt = 0.05
    logdir = 'data/%s_%s_%.1e_%.3f_%d' % ('damped' if damped else 'undamped',
                                          method, lr, noise, seed)
    logger.setup(logdir, action='d')

    inds = torch.randperm(16)

    Btr = 8
    Bte = 4
    Bva = 4

    train_data_ = data_[inds[:Btr]]
    test_data_ = data_[inds[Btr:Btr + Bte]]
    val_data_ = data_[inds[Btr + Bte:Btr + Bte + Bva]]
    train_data = data[inds[:Btr]]
    test_data = data[inds[Btr:Btr + Bte]]
    val_data = data[inds[Btr + Bte:Btr + Bte + Bva]]

    t_, smq, smdq, smddq = train_data_[:]
    ttest_, smqtest, smdqtest, smddqtest = test_data_[:]
    tval_, smqval, smdqval, smddqval = val_data_[:]
    t, q, dq, ddq = train_data[:]
    ttest, qtest, dqtest, ddqtest = test_data[:]
    tval, qval, dqval, ddqval = val_data[:]

    B, T, qdim = q.shape

    # create the appropriate dataloader
    if 'del' in method:
        smq_1 = smq[:, :-2]
        smq_2 = smq[:, 1:-1]
        smq_3 = smq[:, 2:]
        smq_B = torch.stack([smq_1, smq_2, smq_3], dim=2).reshape(-1, 3,
                                                                  2).detach()
        print(smq_B.shape, smq.shape)
        dataset = TensorDataset(smq_B)
        dataloader = DataLoader(dataset, batch_size=256, shuffle=True)
    elif method == 'qdd':
        dataset = TensorDataset(smq.reshape(-1, 1, 2), smdq.reshape(-1, 1, 2),
                                smddq.reshape(-1, 1, 2))
        dataloader = DataLoader(dataset, batch_size=256, shuffle=True)
    elif method == 'nqqd':
        x = torch.cat([smq, smdq], dim=-1)
        inp = x[:, :-1]
        out = x[:, 1:]
        inp = inp.reshape(-1, 1, 4)
        out = out.reshape(-1, 1, 4)
        dataset = TensorDataset(inp, out)
        dataloader = DataLoader(dataset, batch_size=256, shuffle=True)
    else:
        raise NotImplementedError

    # set up logdir and model
    if damped:
        system = ForcedSMM(qdim=qdim, dt=dt)
    else:
        system = StructuredMechanicalModel(qdim=qdim, dt=dt)

    # create the appropriate closure
    def qddcrit(system, smq_, smdq_, smddq_):

        ddq_ = system.compute_qddot(smq_, smdq_, create_graph=True)
        ddq_loss = torch.nn.functional.mse_loss(ddq_, smddq_)

        return ddq_loss

    def nqqdcrit(system, inp, out):

        out_ = system.step(torch.ones_like(inp)[..., 0], inp)
        nqqd_loss = torch.nn.functional.mse_loss(out_, out)

        return nqqd_loss

    if 'del' in method:
        dyncrit = DELCriterion(t_)

        if 'logdet' in method:
            bc = LogDetBarrierCriterion
            bcf = LogDetBarrierCriterion.mineig
        else:
            bc = MxNormBarrierCriterion
            bcf = MxNormBarrierCriterion.mmxnorm

        # initialize the barrier criterion, and find an appropriate coefficient between it and DELcrit
        lb = bcf(system, smq).detach() * 0.99  # interior point init

        barriercrit = bc(lb)
        with torch.no_grad():
            dyncritloss = dyncrit(system, smq)
            barriercritloss = barriercrit(system, smq)

            # mu makes the two criteria ~equal at initialization
            mu = float(dyncritloss / barriercritloss)

        barriercrit = bc(lb, mu=mu, x_override=smq)

        crit = GroupCriterion([dyncrit, barriercrit])
    elif method == 'qdd':
        crit = qddcrit
    elif method == 'nqqd':
        crit = nqqdcrit
    else:
        raise NotImplementedError

    # setup optimizer, scheduler
    opt = torch.optim.Adam(system.parameters(), lr=lr)
    sched = torch.optim.lr_scheduler.LambdaLR(opt, lambda k: 500. / (500 + k))

    # train
    best_val_loss = np.inf
    best_val_loss_test_qddot_loss = np.inf

    next_params = ptv(system.parameters()).detach()

    for epoch in range(2 if smoketest else 500):

        # train with SGD
        for batch in dataloader:

            prev_params = next_params

            opt.zero_grad()

            loss = crit(system, *batch)

            loss.backward()

            opt.step()

            if 'del' in method:
                # backtracking line search: halve the parameter step while the
                # criterion evaluates to NaN
                n_ls = 0

                while True:

                    next_params = ptv(system.parameters()).detach()

                    del_params = next_params - prev_params

                    with torch.no_grad():
                        c = crit(system, smq)

                    if torch.isnan(c):
                        next_params = prev_params + 0.5 * del_params
                        vtp(next_params, system.parameters())

                        n_ls += 1
                    else:
                        break

        sched.step()

        with torch.no_grad():
            val_sqmddqloss = qddcrit(system, smqval, smdqval, smddqval)
            train_qdd_loss = qddcrit(system, q, dq, ddq)
            test_qdd_loss = qddcrit(system, qtest, dqtest, ddqtest)
            val_qdd_loss = qddcrit(system, qval, dqval, ddqval)

        # select best model using validation error
        if val_sqmddqloss < best_val_loss:
            best_val_loss = float(val_sqmddqloss)
            best_val_loss_test_qddot_loss = float(test_qdd_loss)

            torch.save(
                system.state_dict(),
                os.path.join(logger.get_dir(), 'ckpts', 'best_model.th'))

        logger.logkv("train/epoch", epoch)
        logger.logkv("train/loss", float(loss))
        logger.logkv("train/log10lr",
                     np.log10(float(opt.param_groups[0]['lr'])))

        logger.logkv("eval/val_sqmddqloss", float(val_sqmddqloss))
        logger.logkv("eval/train_qdd_loss", float(train_qdd_loss))
        logger.logkv("eval/test_qdd_loss", float(test_qdd_loss))
        logger.logkv("eval/val_qdd_loss", float(val_qdd_loss))

        logger.logkv("eval/best_val_loss", float(best_val_loss))
        logger.logkv("eval/best_val_loss_test_qddot_loss",
                     float(best_val_loss_test_qddot_loss))

        logger.dumpkvs()
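Per its docstring, `run` ties everything on this page together; a quick check might be:

# Hypothetical smoke test: 2 epochs of the 'qdd' method on undamped data.
run(seed=0, lr=1e-3, method='qdd', noise=0.01, damped=False, smoketest=True)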