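These examples are excerpted from a test suite built on torchdyn and PyTorch Lightning. Names such as moons_trainloader, testlearner, testintloss, small_mlp, vector_field, basis, batch_size, and device are pytest fixtures supplied by the suite's conftest and are not defined here. A plausible shared import preamble, reconstructed from the names used below (the exact module paths vary across torchdyn versions and are an assumption):

import copy
from copy import deepcopy

import torch
import torch.nn as nn
import torch.utils.data as data
import pytorch_lightning as pl
from torch.distributions import MultivariateNormal

from torchdyn.models import (NeuralODE, Augmenter, CNF, Stable,
                             DepthCat, GalLinear, hutch_trace)
from torchdyn.datasets import ToyDataset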
Example #1
def test_integral_adjoint_integral_autograd(testintloss):
    """Compare ODE Adjoint vs Autograd gradients (with integral loss), s := [0, 1], adaptive-step"""
    f = nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 2))
    aug = Augmenter(1, 1)
    torch.manual_seed(0)
    model_autograd = NeuralODE(f,
                               solver='dopri5',
                               sensitivity='autograd',
                               atol=1e-5,
                               rtol=1e-5,
                               intloss=testintloss()).to(device)
    torch.manual_seed(0)
    model_adjoint = NeuralODE(f,
                              solver='dopri5',
                              sensitivity='adjoint',
                              atol=1e-5,
                              rtol=1e-5,
                              intloss=testintloss()).to(device)

    torch.manual_seed(0)
    x = torch.randn(batch_size, 2).to(device)
    x = x.requires_grad_(True)
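    # Augmenter(1, 1) prepends one zero channel to the state; under autograd
    # sensitivity the integral loss is accumulated there, so a[:, 0] reads it back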
    a = model_autograd(aug(x))
    loss = a[:, 0].sum()
    loss.backward()
    g_autograd = deepcopy(x.grad)

    torch.manual_seed(0)
    x = torch.randn(batch_size, 2).to(device)
    x = x.requires_grad_(True)
    a = model_adjoint(x)
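    # the terminal loss is deliberately zero: with sensitivity='adjoint' and an
    # intloss, the integral-loss gradient is injected by the adjoint's backward
    # pass, and backward() here only serves to trigger it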
    loss = 0. * a.sum()
    loss.backward()
    g_adjoint = deepcopy(x.grad)
    assert torch.abs(g_autograd - g_adjoint).norm(dim=1, p=2).mean() < 5e-3
Example #2
def test_trajectory(moons_trainloader, small_mlp, testlearner, device):
    model = NeuralODE(small_mlp)
    learn = testlearner(model, trainloader=moons_trainloader)
    trainer = pl.Trainer(min_epochs=500, max_epochs=500)
    trainer.fit(learn)
    s_span = torch.linspace(0, 1, 100)

    x, _ = next(iter(moons_trainloader))
    trajectory = model.trajectory(x, s_span)
    assert len(trajectory) == 100
Example #3
def test_default_run_gallinear(moons_trainloader, testlearner, basis):
    f = nn.Sequential(nn.Linear(2, 8), nn.Tanh(), DepthCat(1),
                      GalLinear(8, 2, expfunc=basis))
    model = NeuralODE(f)
    learn = testlearner(model, trainloader=moons_trainloader)
    trainer = pl.Trainer(min_epochs=1000, max_epochs=1000)
    trainer.fit(learn)
    assert trainer.logged_metrics['train_loss'] < 1e-1
Example #4
def test_adjoint_autograd():
    """Compare ODE Adjoint vs Autograd gradients, s := [0, 1], adaptive-step"""
    d = ToyDataset()
    X, yn = d.generate(n_samples=512, dataset_type='moons', noise=.4)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    X_train = torch.Tensor(X).to(device)
    y_train = torch.LongTensor(yn.long()).to(device)
    train = data.TensorDataset(X_train, y_train)
    trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
    f = nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 2))

    model = NeuralODE(f,
                      solver='dopri5',
                      sensitivity='adjoint',
                      atol=1e-5,
                      rtol=1e-8).to(device)
    x, y = next(iter(trainloader))
    # adjoint gradients
    y_hat = model(x)
    loss = nn.CrossEntropyLoss()(y_hat, y)
    loss.backward()
    adj_grad = torch.cat([p.grad.flatten() for p in model.parameters()])
    # autograd gradients
    model.zero_grad()
    model.sensitivity = 'autograd'
    y_hat = model(x)
    loss = nn.CrossEntropyLoss()(y_hat, y)
    loss.backward()
    bp_grad = torch.cat([p.grad.flatten() for p in model.parameters()])
    assert (torch.abs(bp_grad - adj_grad) <= 1e-3
            ).all(), f'Gradient error: {torch.abs(bp_grad - adj_grad).sum()}'
Example #5
def test_deepcopy(small_mlp, device):
    model = NeuralODE(small_mlp)
    x = torch.rand(1, 2)
    copy_before_forward = copy.deepcopy(model)
    assert type(copy_before_forward) == NeuralODE

    # do a forward+backward pass
    y = model(x)
    loss = y.sum()
    loss.backward()
    copy_after_forward = copy.deepcopy(model)
    assert type(copy_after_forward) == NeuralODE
Example #6
def test_stable_neural_de(testlearner):
    """Stable: basic functionality"""
    d = ToyDataset()
    X, yn = d.generate(n_samples=512, dataset_type='moons', noise=.4)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    X_train = torch.Tensor(X).to(device)
    y_train = torch.LongTensor(yn.long()).to(device)
    train = data.TensorDataset(X_train, y_train)
    trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
    f = Stable(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
    model = NeuralODE(f).to(device)
    learn = testlearner(model, trainloader=trainloader)
    trainer = pl.Trainer(min_epochs=10, max_epochs=30)
    trainer.fit(learn)
Example #7
def test_cnf_vanilla():
    device = torch.device('cpu')
    net = nn.Sequential(
            nn.Linear(2, 512),
            nn.ELU(),
            nn.Linear(512, 2)
        )
    defunc = CNF(net)
    nde = NeuralODE(defunc, solver='dopri5', s_span=torch.linspace(0, 1, 2), atol=1e-5, rtol=1e-5, sensitivity='adjoint')
    model = nn.Sequential(Augmenter(augment_idx=1, augment_dims=1),
                          nde).to(device)
    x = torch.randn((512, 2)).to(device)
    out = model(x)
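    # the channel added by the Augmenter accumulates the divergence (log-density
    # change) of the flow, hence one extra output dimension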
    assert out.shape[1] == x.shape[1] + 1
Example #8
def test_hutch_vanilla():
    device = torch.device('cpu')
    net = nn.Sequential(
            nn.Linear(2, 512),
            nn.ELU(),
            nn.Linear(512, 2)
        )
    noise_dist = MultivariateNormal(torch.zeros(2).to(device), torch.eye(2).to(device))
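    # hutch_trace swaps the exact Jacobian trace for Hutchinson's stochastic
    # estimator E[eps^T J eps], with eps drawn from noise_dist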
    defunc = nn.Sequential(CNF(net, trace_estimator=hutch_trace, noise_dist=noise_dist))

    nde = NeuralODE(defunc, solver='dopri5', s_span=torch.linspace(0, 1, 2), atol=1e-5, rtol=1e-5, sensitivity='adjoint')
    model = nn.Sequential(Augmenter(augment_idx=1, augment_dims=1),
                          nde).to(device)
    x = torch.randn((512, 2)).to(device)
    out = model(x)
    assert out.shape[1] == x.shape[1] + 1
Example #9
def test_default_run(moons_trainloader, vector_field, testlearner, device):
    model = NeuralODE(vector_field)
    learn = testlearner(model, trainloader=moons_trainloader)
    trainer = pl.Trainer(min_epochs=500, max_epochs=500)
    trainer.fit(learn)
    assert trainer.logged_metrics['train_loss'] < 1e-1
Example #10
def test_repr(small_mlp):
    model = NeuralODE(small_mlp)
    model_repr = repr(model)
    assert isinstance(model_repr, str) and 'NFE' in model_repr
Example #11
class OdeLstmCell(nn.Module):
    def __init__(self, input_size, hidden_size, solver_type="dopri5"):
        super(OdeLstmCell, self).__init__()
        self.solver_type = solver_type
        self.fixed_step_solver = solver_type.startswith("fixed_")
        self.lstm = nn.LSTMCell(input_size, hidden_size)

        self.f_node = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, hidden_size),
        )
        self.input_size = input_size
        self.hidden_size = hidden_size
        if not self.fixed_step_solver:
            self.node = NeuralODE(self.f_node, solver=solver_type)
        else:
            options = {
                "fixed_euler": self.euler,
                "fixed_heun": self.heun,
                "fixed_rk4": self.rk4,
            }
            if solver_type not in options:
                raise ValueError(
                    "Unknown solver type '{:}'".format(solver_type))
            self.node = options[self.solver_type]

    def forward(self, input, hx, ts):
        new_h, new_c = self.lstm(input, hx)
        if self.fixed_step_solver:
            new_h = self.solve_fixed(new_h, ts)
        else:
            indices = torch.argsort(ts)
            batch_size = ts.size(0)
            device = input.device
            s_sort = ts[indices]
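            # add a tiny strictly increasing offset so no two evaluation times
            # coincide; the adaptive solver requires distinct, sorted time points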
            s_sort = s_sort + torch.linspace(
                0, 1e-4, batch_size, device=device)
            trajectory = self.node.trajectory(new_h, s_sort)
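            # gather, for each batch element, the state at its own elapsed time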
            new_h = trajectory[indices,
                               torch.arange(batch_size, device=device)]

        return (new_h, new_c)

    def solve_fixed(self, x, ts):
        ts = ts.view(-1, 1)
        for _ in range(3):  # 3 unfolds
            x = self.node(x, ts * (1.0 / 3))
        return x

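    # explicit Euler step: y <- y + dt * f(y)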
    def euler(self, y, delta_t):
        dy = self.f_node(y)
        return y + delta_t * dy

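    # Heun's method (explicit trapezoidal rule): average of slopes at both ends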
    def heun(self, y, delta_t):
        k1 = self.f_node(y)
        k2 = self.f_node(y + delta_t * k1)
        return y + delta_t * 0.5 * (k1 + k2)

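    # classic fourth-order Runge-Kutta step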
    def rk4(self, y, delta_t):
        k1 = self.f_node(y)
        k2 = self.f_node(y + k1 * delta_t * 0.5)
        k3 = self.f_node(y + k2 * delta_t * 0.5)
        k4 = self.f_node(y + k3 * delta_t)

        return y + delta_t * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0
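
A minimal usage sketch for OdeLstmCell, not part of the original source; the sizes, the solver choice, and the random inputs are illustrative assumptions. One call advances the LSTM state by one event and then evolves the hidden state for each sample's elapsed time:

import torch

cell = OdeLstmCell(input_size=4, hidden_size=16, solver_type="fixed_rk4")
batch_size = 8
h = torch.zeros(batch_size, 16)   # hidden state, evolved by the ODE between events
c = torch.zeros(batch_size, 16)   # cell state, updated only by the LSTM gates
x_t = torch.randn(batch_size, 4)  # observation at the current event
ts = torch.rand(batch_size)       # elapsed time since each sample's previous event
h, c = cell(x_t, (h, c), ts)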