Example #1
def run_irl(world, car, reward, theta, data):
    def gen():
        # Load each demonstration's initial states and controls into the
        # shared Theano variables, yielding once per data point.
        for point in data:
            for c, x0, u in zip(world.cars, point['x0'], point['u']):
                c.traj.x0.set_value(x0)
                for cu, uu in zip(c.traj.u, u):
                    cu.set_value(uu)
            yield

    # Reward of the car's trajectory under the current reward parameters.
    r = car.traj.reward(reward)
    # Gradient and Hessian of the reward w.r.t. the demonstrated controls.
    g = utils.grad(r, car.traj.u)
    H = utils.hessian(r, car.traj.u)
    I = tt.eye(utils.shape(H)[0])
    # Regularize the Hessian so that -H stays positive definite.
    reg = utils.vector(1)
    reg.set_value([1e-1])
    H = H - reg[0] * I
    # IRL objective: g^T H^{-1} g + log det(-H).
    L = tt.dot(g, tt.dot(tn.MatrixInverse()(H), g)) + tt.log(tn.Det()(-H))
    # Exhaust the generator once so the shared variables hold valid data.
    for _ in gen():
        pass
    optimizer = utils.Maximizer(L, [theta],
                                gen=gen,
                                method='gd',
                                eps=0.1,
                                debug=True,
                                iters=1000,
                                inf_ignore=10)
    optimizer.maximize()
    print(theta.get_value())
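The objective L above has the closed form g^T H^{-1} g + log det(-H), evaluated at the demonstrated controls. A toy NumPy check of the same expression (hypothetical values, only to illustrate the formula):

import numpy as np

# Toy gradient and regularized Hessian of the reward at the demonstration.
g = np.array([0.3, -0.1])
H = np.array([[-2.0, 0.2],
              [0.2, -1.5]])  # negative definite, so det(-H) > 0

# Same expression as the Theano graph builds symbolically.
L = g @ np.linalg.inv(H) @ g + np.log(np.linalg.det(-H))
print(L)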
Example #2
def test_hess():
    x0 = torch.tensor([-1.0, 1.0], requires_grad=True)
    fx = rosenbrock(*x0)

    # Hessian via automatic differentiation vs. the closed form.
    auto_ddx = U.hessian(fx, x0)
    analytical_ddx = hess_rosenbrock(*x0)

    # A small tolerance is safer than exact float equality here.
    assert torch.norm(analytical_ddx - auto_ddx) < 1e-6

    print("hess test success")
Example #3
def image_ridge_line(image):
    # `np` is numpy; `rms_image` is assumed to be defined elsewhere in
    # the project.
    rms = rms_image(image)
    im = image.image.copy()
    im_shape = np.shape(im)
    # Zero out everything below 5 x the image RMS.
    im[im < 5. * rms] = 0
    # (2, imsize, imsize) array with gradient (in x & y directions)
    grad = np.gradient(im, edge_order=2)
    from utils import hessian
    # (2, 2, imsize, imsize) array with second derivatives
    hess = hessian(im)
    det_ar = np.zeros(im_shape)
    for i in range(im_shape[0]):
        for j in range(im_shape[1]):
            det_ar[i, j] = np.linalg.det(hess[:, :, i, j])
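The hessian imported from utils is not shown here. A plausible minimal sketch (an assumption, composing np.gradient twice so the result has the (2, 2, imsize, imsize) shape indexed above):

import numpy as np

def hessian(im):
    # First derivatives along each axis: a list of two (imsize, imsize) arrays.
    grads = np.gradient(im, edge_order=2)
    hess = np.empty((2, 2) + np.shape(im))
    for k, g in enumerate(grads):
        # Row k holds d/dx_j (d im / dx_k) for j = 0, 1.
        hess[k] = np.gradient(g, edge_order=2)
    return hess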
Example #4
def train_normal(num_epochs,
                 model,
                 dset_train,
                 batch_size,
                 grad_clip,
                 logging_freq,
                 optim="sgd",
                 **kwargs):
    # `w_optimizer`, `criterion`, `AverageMeter`, and `wandb` are assumed
    # to be defined in the enclosing module.
    train_loader = torch.utils.data.DataLoader(dset_train,
                                               batch_size=batch_size,
                                               shuffle=True)

    model.train()
    for epoch in range(num_epochs):

        epoch_loss = AverageMeter()
        for batch_idx, batch in enumerate(train_loader):
            x, y = batch
            w_optimizer.zero_grad()

            y_pred = model(x)
            loss = criterion(y_pred, y)
            # retain_graph so the Hessian can be taken from the same graph.
            loss.backward(retain_graph=True)

            epoch_loss.update(loss.item())
            if optim == "newton":
                linear_weight = list(model.weight_params())[0]
                hessian_newton = torch.inverse(
                    hessian(loss * 1, linear_weight,
                            linear_weight).reshape(linear_weight.size()[1],
                                                   linear_weight.size()[1]))
                with torch.no_grad():
                    for w in model.weight_params():
                        w = w.subtract_(torch.matmul(w.grad, hessian_newton))
            elif optim == "sgd":
                # Clip gradients to `grad_clip` before the optimizer step.
                torch.nn.utils.clip_grad_norm_(model.weight_params(), grad_clip)
                w_optimizer.step()
            else:
                raise NotImplementedError

            wandb.log({
                "Train loss": epoch_loss.avg,
                "Epoch": epoch,
                "Batch": batch_idx
            })

            if batch_idx % logging_freq == 0:
                print("Epoch: {}, Batch: {}, Loss: {}, Alphas: {}".format(
                    epoch, batch_idx, epoch_loss.avg, model.fc1.alphas.data))
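The two-argument hessian helper used above is external to this snippet. A rough sketch of a compatible implementation, assuming it returns the second derivatives of the loss w.r.t. a pair of parameter tensors (hypothetical; the project's real helper may differ):

import torch

def hessian(loss, w1, w2):
    # d loss / d w1, kept in the graph so it can be differentiated again.
    (g,) = torch.autograd.grad(loss, w1, create_graph=True)
    rows = [torch.autograd.grad(g_i, w2, retain_graph=True)[0].reshape(-1)
            for g_i in g.reshape(-1)]
    # Shape w1.shape + w2.shape, matching the .reshape(...) call above.
    return torch.stack(rows).reshape(w1.shape + w2.shape)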
Example #5
    def apply_step(self, *args):
        loss_g, loss_h = args[:2]

        for x in self.params:
            # Gradient of loss_g and Hessian of loss_h w.r.t. this parameter.
            g = jacobian(loss_g, x)
            h = hessian(loss_h, x)

            with torch.no_grad():
                g = g.reshape((-1, 1))
                h = h.reshape((g.shape[0], g.shape[0]))
                # Approximately solve h @ dx = g with conjugate gradient,
                # then take a damped Newton step.
                dx = conjugate_gradient(h,
                                        g,
                                        n_iterations=self.n_cg,
                                        tol=self.tol).reshape(x.shape)
                x.add_(dx, alpha=-self.lr)
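conjugate_gradient is imported from elsewhere in the project. A minimal sketch with the same call signature (an assumption), solving h @ dx = g for a symmetric positive-definite h:

import torch

def conjugate_gradient(h, g, n_iterations=10, tol=1e-10):
    x = torch.zeros_like(g)
    r = g.clone()   # residual g - h @ x, with x = 0 initially
    p = r.clone()   # search direction
    rs_old = (r * r).sum()
    for _ in range(n_iterations):
        hp = h @ p
        alpha = rs_old / (p * hp).sum()
        x = x + alpha * p
        r = r - alpha * hp
        rs_new = (r * r).sum()
        if rs_new.sqrt() < tol:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    return x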