Example #1
    def __init__(self, input_size, hidden_size):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        
        # integration interval s in [0, 1] with two evaluation points
        self.sspan = torch.linspace(0.0, 1.0, 2)

        # separate Neural ODE flows for the hidden and cell states
        self.ode_WHH = nodefunc(hidden_size)
        self.nodeWHH = NeuralDE(self.ode_WHH, sensitivity="adjoint", solver="dopri5", rtol=0.01, atol=0.01, s_span=self.sspan)
        self.ode_WCC = nodefunc(hidden_size)
        self.nodeWCC = NeuralDE(self.ode_WCC, sensitivity="adjoint", solver="dopri5", rtol=0.01, atol=0.01, s_span=self.sspan)
           
        self.tanh = nn.Tanh()
        self.sig = nn.Sigmoid()

        # LSTM-style gates: WM* project the input, WI* project the hidden state
        self.WMF = nn.Linear(input_size, hidden_size)  # forget gate
        self.WIF = nn.Linear(hidden_size, hidden_size)

        self.WMI = nn.Linear(input_size, hidden_size)  # input gate
        self.WII = nn.Linear(hidden_size, hidden_size)

        self.WMC = nn.Linear(input_size, hidden_size)  # cell candidate
        self.WIC = nn.Linear(hidden_size, hidden_size)

        self.WMO = nn.Linear(input_size, hidden_size)  # output gate
        self.WIO = nn.Linear(hidden_size, hidden_size)
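
Every snippet here hands a nodefunc module to NeuralDE without showing its definition. A minimal sketch of what such a vector field could look like, assuming it is a small MLP whose width matches the integrated state (the two-argument variant nodefunc(input_size, hidden_size) used in Example #4 would size itself from input_size + 2 * hidden_size instead):

import torch
import torch.nn as nn

# hypothetical stand-in for the nodefunc used throughout these examples;
# the real definition is not part of the excerpts
class nodefunc(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, hidden_size),
        )

    def forward(self, x):
        # vector field f(x) that NeuralDE integrates over s_span
        return self.net(x)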
Example #2
class NODE(nn.Module):
    def __init__(self, hidden_size, sequence_size, other_module):
        super().__init__()
        self.sequence_size = sequence_size

        self.other_mod = other_module

        # vector field on the augmented state (hidden_size + 10 channels)
        self.odef = nodefunc(hidden_size + 10)
        self.sspan = torch.linspace(0.0, 1.0, 2)
        self.node = NeuralDE(self.odef,
                             sensitivity="adjoint",
                             solver="dopri5",
                             rtol=0.01,
                             atol=0.01,
                             s_span=self.sspan)
        # ANODE-style augmentation: append 10 learned channels, then project back
        self.encode = Augmenter(augment_func=nn.Linear(hidden_size, 10))
        self.decode = nn.Linear(hidden_size + 10, hidden_size)

        self.loss_func = nn.CrossEntropyLoss()

    def process_first(self, xfirst):
        # integrate the first input over sequence_size points and decode
        # every point of the resulting trajectory
        sspan = torch.linspace(0.0, 1.0, self.sequence_size)
        x = torch.squeeze(xfirst, 0)
        enc = self.encode(x)
        traj = self.node.trajectory(enc, sspan)  # (sequence_size, batch, hidden_size + 10)
        out = self.decode(traj)
        out = torch.unsqueeze(out, 1)
        return out

    def forward(self, x, h):
        return self.other_mod(x, h)
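
Since the excerpt leaves usage implicit, here is a hypothetical driver for the wrapper above; the sizes and the GRU cell standing in for other_module are illustrative only:

# hypothetical usage of the NODE wrapper above (sizes are illustrative)
rnn_cell = nn.GRUCell(32, 32)        # stands in for other_module
model = NODE(hidden_size=32, sequence_size=16, other_module=rnn_cell)
xfirst = torch.randn(1, 8, 32)       # (1, batch, hidden_size)
out = model.process_first(xfirst)    # (sequence_size, 1, batch, hidden_size)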
Example #3
    def __init__(self):
        super().__init__()

        # GCN layer used as the vector field of the Neural DE
        self.func = GCNLayer(input_size=64, output_size=64)

        self.conv1 = SplineConv(dataset.num_features, 64, dim=1, kernel_size=2).to(device)
        # fixed-step RK4 over s in [0, 1] with 3 evaluation points
        self.neuralDE = NeuralDE(self.func, solver='rk4', s_span=torch.linspace(0, 1, 3)).to(device)
        self.conv2 = SplineConv(64, dataset.num_classes, dim=1, kernel_size=2).to(device)
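
The forward pass is not part of this excerpt. A plausible sketch in the style of torchdyn's graph examples, assuming GCNLayer holds the graph structure internally and F is torch.nn.functional:

    # hypothetical forward pass (not shown in the original snippet)
    def forward(self, x, edge_index, edge_attr):
        x = F.elu(self.conv1(x, edge_index, edge_attr))
        x = self.neuralDE(x)  # flow the node features through the DE
        x = self.conv2(x, edge_index, edge_attr)
        return F.log_softmax(x, dim=1)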
Example #4
class LSTMINNODELayer(nn.Module):

    def __init__(self, input_size, hidden_size):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sspan = torch.linspace(0.0, 1.0, 2)
        # the vector field acts on the concatenation [x, hidden, cell]
        self.ode_func = nodefunc(input_size, hidden_size)
        self.neural_ode = NeuralDE(self.ode_func, sensitivity="adjoint", solver="dopri5", rtol=0.01, atol=0.01, s_span=self.sspan)

    def forward(self, x, hidden, cell):
        # integrate the concatenated state, then split it back into parts
        nx = torch.cat([x, hidden, cell], dim=2)
        y = self.neural_ode(nx)
        self.neural_ode.reset()
        output = y[:, :, :self.input_size]
        hidden = y[:, :, self.input_size:(self.input_size + self.hidden_size)]
        cell = y[:, :, (self.input_size + self.hidden_size):]
        return output, hidden, cell
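
A hypothetical shape check for one step of the layer above; the nodefunc it wraps must accept the concatenated width input_size + 2 * hidden_size:

# illustrative shapes only; nodefunc must handle width 8 + 16 + 16 = 40
layer = LSTMINNODELayer(input_size=8, hidden_size=16)
x = torch.randn(1, 4, 8)      # (seq, batch, input_size)
h = torch.randn(1, 4, 16)     # (seq, batch, hidden_size)
c = torch.randn(1, 4, 16)
out, h, c = layer(x, h, c)    # out: (1, 4, 8)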
Example #5
    def __init__(self, input_size, hidden_size):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        
        self.sspan = torch.linspace(0.0, 1.0, 2)
        # Neural ODE standing in for the usual hidden-to-hidden transition
        self.ode_func = nodefunc(hidden_size)
        self.nodeWHH = NeuralDE(self.ode_func, sensitivity="adjoint", solver="dopri5", rtol=0.01, atol=0.01, s_span=self.sspan)
        
        self.WIH = nn.Linear(input_size, hidden_size)   # input -> hidden
        self.WHH = nn.Linear(hidden_size, hidden_size)  # hidden -> hidden
        self.WHO = nn.Linear(hidden_size, input_size)   # hidden -> output
        self.tanh = nn.Tanh()
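
The excerpt stops before the forward pass. One plausible wiring of the layers above, offered as a sketch rather than the original author's code:

    # hypothetical forward step: not part of the original excerpt
    def forward(self, x, h):
        h = self.tanh(self.WIH(x) + self.nodeWHH(self.WHH(h)))
        return self.WHO(h), h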
Example #6
    def __init__(self, x_dim, u_dim, dt, hyperparams):
        self._hyperparams = hyperparams
        self.dt = dt
        self.x_dim = x_dim
        self.u_dim = u_dim

        lnn = DeepLagrangianNetwork(
            x_dim // 2,
            hyperparams["hidden_size"],
            angular_dims=hyperparams["angular_dims"],
            input_matrix=InputMatrixLayer(x_dim // 2, u_dim,
                                          np.array(hyperparams["input_mat"])),
        )

        self.model = NeuralDE(func=lnn, solver="dopri8")  # torch.nn.Module
        self.learner = LNNLearner(self.model, self.dt, self.x_dim,
                                  hyperparams["lr"])  # PyTorch Lightning module
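
Since the comment marks LNNLearner as a PyTorch Lightning module, training would presumably be driven by a Trainer. A hypothetical sketch, with learner being the LNNLearner built above and train_loader assumed to exist:

import pytorch_lightning as pl

# hypothetical training driver
trainer = pl.Trainer(max_epochs=100)
trainer.fit(learner, train_loader)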
Example #7
        # inverse dynamics yields the mass matrix H, Coriolis term c and
        # gravity g; forward dynamics then solves H * qddot = u - c - g
        q_qd_qdd = torch.cat([q, q_dot, qddot], dim=1)

        tau, H, c, g = self.inverse_dyn(q_qd_qdd)
        qddot_pred = torch.einsum("ijk, ij -> ik", H.inverse(),
                                  (self.input_matrix(u) - c - g))

        return torch.cat([q_dot, qddot_pred, torch.zeros_like(u)], dim=1)

    def discrete_predict(self, xu):
        # the system is autonomous, so the vector field ignores the time argument
        def func(t, xu):
            return self.forward(xu)

        sol = torchdyn.odeint(func, xu, torch.linspace(0, 0.01, 10))

        return sol[-1]


if __name__ == "__main__":
    from torchdyn.models import NeuralDE

    n_dof = 2
    batch = 2
    network = DeepLagrangianNetwork(n_dof, 64)

    model = NeuralDE(func=network, solver="dopri5")

    test_input = torch.ones(batch, n_dof * 3)
    # model.defunc(0, x) evaluates the wrapped vector field at depth s = 0,
    # so the two printed tensors should match
    print(model.defunc(0, test_input))
    print(network(test_input))
Example #8
class NeuralOde(torch.nn.Module):
    """
    A wrapper of the continuous neural network that represents the ODE.

    """
    def __init__(self, cartpole, controller, method='dopri5'):
        super().__init__()
        self.cartpole, self.controller = cartpole, controller

        self.model_of_dyn_system = NeuralDE(
            controller, sensitivity='adjoint', solver=method
        ).to(device)

    def final_state_loss(self, state):
        _, dx, theta = state[:, 0], state[:, 1], state[:, 2]

        # wrap theta into [-pi, +pi]
        theta = pi_mod(theta + math.pi) - math.pi

        # penalize the pole angle much more heavily than the cart velocity
        return 4 * theta**2 + torch.abs(dx)

    def train(self, n_epochs=100, batch_size=200, lr_patience=10,
              early_stop_patience=20, epsilon=0.1):
        optimizer = torch.optim.Adam(
            self.model_of_dyn_system.parameters(), lr=0.1)

        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, 'min', patience=lr_patience, factor=0.5)

        steps_since_plat, last_plat = 0, 0
        for i in range(n_epochs):
            optimizer.zero_grad()

            # set up a batch of random starting states
            start_state = self.cartpole.sample_state(batch_size).to(device)

            # run simulation
            final_state = self.model_of_dyn_system(start_state)

            # evaluate performance
            loss = self.final_state_loss(final_state)
            step_loss = loss.mean()
            print("epoch: {}, loss: {}: ".format(i, step_loss))

            loss.sum().backward()
            optimizer.step()
            scheduler.step(step_loss)

            # if stuck on a plateau, stop early
            delta_loss = abs(last_plat - step_loss.data)
            if ((steps_since_plat >= early_stop_patience) and
                (delta_loss <= epsilon)):
                break
            elif delta_loss > epsilon:
                last_plat, steps_since_plat = step_loss.data, 0
            steps_since_plat += 1

    def trajectory(self, state, T=1, time_steps=200):
        """
        Trajectory of the system from t = 0 to t = T.
        """

        state = state.to(device)
        t = torch.linspace(0, T, time_steps).to(device)

        # integrate and remove batch dim
        traj = self.model_of_dyn_system.trajectory(state, t)
        return traj.detach().cpu()[:, 0, :]
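
For completeness, a hypothetical end-to-end driver for the wrapper above; cartpole and controller are assumed to come from the surrounding project:

# hypothetical usage of the NeuralOde wrapper
node = NeuralOde(cartpole, controller)
node.train(n_epochs=50, batch_size=200)
start = cartpole.sample_state(1)
traj = node.trajectory(start, T=1, time_steps=200)  # (time_steps, state_dim)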