def __init__(self, s, hidden_size, output_size, collision_margin=None):
        """Two-layer MLP over the relative state of an object pair.

        The network consumes the difference s_o1 - s_o2 (dimension ``s``).
        Without a collision margin the head emits one extra unit
        (``output_size + 1``); with a margin it emits exactly
        ``output_size`` units.
        """
        super(RelationEncoder, self).__init__()
        self.collision_margin = collision_margin
        self.output_size = output_size

        # Input is s_o1 - s_o2; head width depends on the margin mode.
        head_size = output_size if collision_margin is not None else output_size + 1
        self.model = nn.Sequential(
            nn.Linear(s, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, head_size),
        )

        self.round = tut.Round()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

        # Xavier-normal weights and zero biases for every linear layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0.0)
# Example #2
    def __init__(self, s, rel_size, hidden_size, output_size, collision_margin=None, SN=False):
        """Relation embedding: encode pairwise relations, then embed the state.

        Args:
            s: dimensionality of a single object state.
            rel_size: size of the relation code produced by ``model_rel``.
            hidden_size: hidden width of both MLPs.
            output_size: size of the final embedding.
            collision_margin: stored on the module; does not change the
                architecture in this variant.
            SN: if True, wrap both MLPs in spectral normalization.
        """
        super(Rel_Embedding, self).__init__()
        self.collision_margin = collision_margin
        self.output_size = output_size

        # Relation MLP over the concatenated pair context (3 * s inputs);
        # the extra +1 output is presumably an on/off gate unit -- TODO confirm.
        self.model_rel = nn.Sequential(
            nn.Linear(3*s, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, rel_size + 1),
        )

        # Embedding MLP over the state concatenated with the relation code.
        self.model = nn.Sequential(
            nn.Linear(s + rel_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
        )

        # Xavier-normal weights, zero biases for all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)
        if SN:
            self.model = SpectralNorm(self.model)
            # BUG FIX: was SpectralNorm(self.model_u); this class defines no
            # ``model_u`` attribute, so SN=True raised AttributeError.
            self.model_rel = SpectralNorm(self.model_rel)

        self.round = tut.Round()
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()
# Example #3
    def __init__(self,
                 state_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_timesteps,
                 deriv_in_state=False,
                 num_sys=0,
                 alpha=1,
                 init_scale=1):
        """Set up encoder/decoder mappings and latent linear dynamics.

        Args:
            state_dim: dimensionality of a single observed state vector.
            nf_particle: accepted but unused in this constructor.
            nf_effect: accepted but unused in this constructor.
            g_dim: size of the Koopman latent (observable) space.
            u_dim: size of the input/control code.
            n_timesteps: number of stacked time steps fed to the encoder.
            deriv_in_state: if True (and n_timesteps > 2), widen the encoder
                input by first- and second-difference features.
            num_sys: accepted but unused in this constructor.
            alpha: width multiplier forwarded to encoder/decoder nets.
            init_scale: initial scale for the latent dynamics operator.
        """

        super(KoopmanOperators, self).__init__()

        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        # Each extra derivative order contributes one fewer time step.
        if deriv_in_state and n_timesteps > 2:
            first_deriv_dim = n_timesteps - 1
            sec_deriv_dim = n_timesteps - 2
        else:
            first_deriv_dim = 0
            sec_deriv_dim = 0
        ''' state '''
        input_particle_dim = state_dim * (
            n_timesteps + first_deriv_dim + sec_deriv_dim
        )  #+ g_dim # g_dim added for recursive sampling

        # Encoders into the latent space; composed_mapping emits 2 * g_dim
        # units (presumably a pair of codes -- TODO confirm against caller).
        self.mapping = encoderNet(input_particle_dim, g_dim, ALPHA=alpha)
        self.composed_mapping = encoderNet(input_particle_dim,
                                           g_dim * 2,
                                           ALPHA=alpha)
        self.inv_mapping = decoderNet(state_dim, g_dim, ALPHA=alpha, SN=False)
        # Forward/backward latent linear dynamics plus input dynamics.
        self.dynamics = dynamics(g_dim, init_scale)
        self.backdynamics = dynamics_back(g_dim, self.dynamics)
        self.u_dynamics = u_dynamics(g_dim, u_dim)

        # self.gru_u_mapping = nn.GRU(input_particle_dim, u_dim, num_layers = 1, batch_first=True)
        # self.linear_u_mapping = nn.Linear(u_dim, u_dim * 2)
        #
        # Maps stacked states to 2 * u_dim values (presumably distribution
        # parameters for the input code -- TODO confirm).
        self.nlinear_u_mapping = nn.Sequential(
            nn.Linear(input_particle_dim, state_dim), nn.ReLU(),
            nn.Linear(state_dim, u_dim * 2))

        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        # Aliases onto methods defined elsewhere in this class.
        self.system_identify = self.fit_block_diagonal
        # self.system_identify_with_A = self.fit_with_A
        # self.system_identify_with_compositional_A = self.fit_with_compositional_A
        # self.system_identify = self.fit_across_objects
        self.simulate = self.rollout
        self.simulate_no_input = self.rollout_no_input
# Example #4
    def __init__(self, b, ud):
        """Linear input-dynamics: project a ``ud``-sized code into ``b`` dims.

        ``linear_u_mapping`` provides the reverse projection.
        """
        super(u_dynamics, self).__init__()
        self.u_dynamics = nn.Linear(ud, b, bias=False)
        self.linear_u_mapping = nn.Linear(b, ud)

        # Start from a small constant weight matrix (every entry 0.001).
        self.u_dynamics.weight.data = torch.full_like(
            self.u_dynamics.weight.data, 0.001)

        self.round = tut.Round()
# Example #5
    def __init__(self, input_size, u_size, hidden_size, output_size, SN=False):
        """Input-embedding module with a separate input/gate head.

        ``model_u`` maps the state to ``input_size + 1`` units
        (NOTE(review): commented-out variants in the original used
        ``u_size + 1`` here -- confirm the intended head width), while
        ``model`` embeds the state into ``output_size`` units.
        """
        super(U_Embedding, self).__init__()

        self.input_size = input_size
        self.u_size = u_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        self.model_u = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, input_size + 1),
        )
        self.model = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
        )

        # Xavier-normal weights and zero biases for all linear layers.
        for layer in self.modules():
            if not isinstance(layer, nn.Linear):
                continue
            nn.init.xavier_normal_(layer.weight)
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0.0)

        # Optionally spectral-normalize both heads.
        # TODO: adopt Sandesh's SpectralNorm.
        if SN:
            self.model = SpectralNorm(self.model)
            self.model_u = SpectralNorm(self.model_u)

        self.round = tut.Round()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
    def __init__(self,
                 input_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 r_dim,
                 u_dim,
                 with_interactions=False,
                 residual=False,
                 collision_margin=None,
                 n_timesteps=None):
        """Assemble state/input(/relation) encoders and a composed embedding.

        Args:
            input_dim: dimensionality of a raw object state.
            nf_particle: hidden width for state/embedding networks.
            nf_effect: hidden width for input/relation encoders.
            g_dim: output size of the composed embedding.
            r_dim: relation-code size (only used when with_interactions).
            u_dim: input-code size.
            with_interactions: if True, also build relation encoder and
                propagator and widen the composed-embedding input by r_dim.
            residual: accepted but unused in this constructor.
            collision_margin: forwarded to the input/relation encoders.
            n_timesteps: stored on the module for use elsewhere.
        """
        #TODO: added r_dim
        super(EncoderNetwork, self).__init__()

        eo_dim = nf_particle  # NOTE(review): unused in this constructor
        es_dim = nf_particle
        self.with_interactions = with_interactions
        self.collision_margin = collision_margin
        self.n_timesteps = n_timesteps
        '''Encoder Networks'''
        self.s_encoder = StateEncoder(input_dim,
                                      hidden_size=nf_particle,
                                      output_size=es_dim)

        self.u_encoder = InputEncoder(input_dim,
                                      hidden_size=nf_effect,
                                      output_size=u_dim,
                                      collision_margin=collision_margin)
        self.u_propagator = InputPropagator(es_dim, u_dim, output_size=u_dim)

        self.embedding = Embedding(es_dim, hidden_size=nf_particle, g=g_dim)

        # Composed embedding consumes state code + input code (+ relation code).
        input_emb_dim = es_dim + u_dim

        if with_interactions:
            self.rel_encoder = RelationEncoder(
                input_dim,
                hidden_size=nf_effect,
                output_size=r_dim,
                collision_margin=collision_margin)
            self.rel_propagator = RelationPropagator(es_dim,
                                                     r_dim,
                                                     output_size=r_dim)
            input_emb_dim += r_dim

        self.composed_embedding = ComposedEmbedding(input_emb_dim,
                                                    hidden_size=nf_particle,
                                                    output_size=g_dim)

        self.round = tut.Round()
        self.softmax = nn.Softmax(dim=-1)
    def __init__(self, b, s, ud):
        """Input dynamics with a nonlinear state-to-input mapping.

        ``nlinear_u_mapping`` reuses ``linear_u_mapping`` as its final
        layer (same module object, shared parameters).
        """
        super(u_dynamics, self).__init__()
        self.u_dynamics = nn.Linear(ud*b, b, bias=False)
        self.linear_u_mapping = nn.Linear(s*2, ud)
        # State -> widened hidden -> input code.
        self.nlinear_u_mapping = nn.Sequential(
            nn.Linear(s, s*2),
            nn.CELU(),
            self.linear_u_mapping,
        )

        # Small constant initialization for the input-dynamics weights.
        self.u_dynamics.weight.data = torch.full_like(
            self.u_dynamics.weight.data, 0.001)

        self.round = tut.Round()
        self.count = 0
    def __init__(self,
                 input_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 r_dim,
                 u_dim,
                 n_chan,
                 with_interactions=False,
                 residual=False,
                 collision_margin=None,
                 n_timesteps=None):
        """Channelized encoder variant built from embedding modules.

        Args:
            input_dim: dimensionality of a raw object state.
            nf_particle: hidden width for the state embedding.
            nf_effect: hidden width for the input/relation embeddings.
            g_dim: output size of every embedding head.
            r_dim: accepted but unused in this constructor.
            u_dim: accepted but unused in this constructor.
            n_chan: channel count forwarded to the embedding modules.
            with_interactions: if True, also build a relation embedding.
            residual: accepted but unused in this constructor.
            collision_margin: forwarded to the input/relation embeddings.
            n_timesteps: stored on the module for use elsewhere.
        """
        #TODO: added r_dim
        super(EncoderNetwork, self).__init__()

        # eo_dim = nf_particle
        # es_dim = nf_particle
        self.with_interactions = with_interactions
        self.collision_margin = collision_margin
        self.n_timesteps = n_timesteps
        '''Encoder Networks'''
        self.embedding = Embedding(input_dim,
                                   n_chan,
                                   hidden_size=nf_particle,
                                   g=g_dim)
        self.u_embedding = U_Embedding(input_dim,
                                       n_chan,
                                       hidden_size=nf_effect,
                                       output_size=g_dim,
                                       collision_margin=collision_margin)

        if with_interactions:
            self.rel_embedding = Rel_Embedding(
                input_dim,
                n_chan,
                hidden_size=nf_effect,
                output_size=g_dim,
                collision_margin=collision_margin)

        # TODO: There's the Gumbel Softmax, if this wasn't working. Check the Tracking code if they do KL divergence in that case
        self.softmax = nn.Softmax(dim=-1)
        self.round = tut.Round()
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
# Example #9
    def __init__(self,
                 s,
                 c,
                 hidden_size,
                 output_size,
                 collision_margin=None,
                 SN=False):
        """Input embedding over a state of ``c`` channels.

        Args:
            s: flattened state dimensionality.
            c: number of channels; the main head emits output_size * c units.
            hidden_size: hidden width of both MLPs.
            output_size: per-channel embedding size (also stored as u_dim).
            collision_margin: stored; does not change the architecture here.
            SN: accepted but unused in this variant.
        """
        super(U_Embedding, self).__init__()
        self.collision_margin = collision_margin
        self.output_size = output_size
        self.c = c
        self.u_dim = output_size
        self.sigmoid = nn.Sigmoid()
        self.gumbel_sigmoid = tut.STGumbelSigmoid()
        # Scalar gate head over the state.
        self.model_u = nn.Sequential(nn.Linear(s, hidden_size // 2), nn.ReLU(),
                                     nn.Linear(hidden_size // 2, 1))

        # Main embedding head: one output_size-sized code per channel.
        self.model = nn.Sequential(
            nn.Linear(s, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size * c),
        )

        # Xavier-normal weights, zero biases for all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)

        self.round = tut.Round()
        # FIX: removed a redundant second ``self.sigmoid = nn.Sigmoid()``
        # (already assigned above).
        self.relu = nn.ReLU()
# Example #10
    def __init__(self, state_dim, nf_particle, nf_effect, g_dim, u_dim, n_timesteps, n_blocks=1, residual=False, deriv_in_state=False, fixed_A=False, fixed_B=False, num_sys=0):
        """Koopman-operator model built on simple propagation networks.

        Args:
            state_dim: dimensionality of a single observed state.
            nf_particle: particle hidden width of the propagation nets.
            nf_effect: effect hidden width of the propagation nets.
            g_dim: Koopman latent size (split into ``n_blocks`` blocks).
            u_dim: input/control code size.
            n_timesteps: number of stacked time steps in the encoder input.
            n_blocks: number of diagonal blocks in the latent operators.
            residual: forwarded to the propagation networks.
            deriv_in_state: if True (and n_timesteps > 2), widen the encoder
                input by first- and second-difference features.
            fixed_A: learn A as a free parameter instead of fitting it.
            fixed_B: learn B as a free parameter instead of fitting it.
            num_sys: if > 0, keep ``num_sys`` candidate A matrices plus a
                selector network over the latent code.
        """
        super(KoopmanOperators, self).__init__()

        self.residual = residual
        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        # Each extra derivative order contributes one fewer time step.
        if deriv_in_state and n_timesteps > 2:
            first_deriv_dim = n_timesteps - 1
            sec_deriv_dim = n_timesteps - 2
        else:
            first_deriv_dim = 0
            sec_deriv_dim = 0

        ''' state '''
        # TODO: state_dim * n_timesteps if not hankel. Pass hankel as parameter.
        input_particle_dim = state_dim * (n_timesteps + first_deriv_dim + sec_deriv_dim) #+ g_dim #TODO: g_dim added for recursive sampling

        self.mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=g_dim, output_action_dim = u_dim, tanh=False,  # use tanh to enforce the shape of the code space
            residual=residual) # g * 2

        self.composed_mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=2 * g_dim, output_action_dim = u_dim, tanh=False,  # use tanh to enforce the shape of the code space
            residual=residual) # g * 2

        # Recurrent and feed-forward maps from stacked states to 2 * u_dim
        # values (presumably distribution parameters -- TODO confirm).
        self.gru_u_mapping = nn.GRU(input_particle_dim, u_dim, num_layers = 1, batch_first=True)
        self.linear_u_mapping = nn.Linear(u_dim, u_dim * 2)
        #
        self.nlinear_u_mapping = nn.Sequential(nn.Linear(input_particle_dim, state_dim),
                                          nn.ReLU(),
                                          nn.Linear(state_dim, u_dim * 2))

        # self.nlinear_u_mapping = nn.Sequential(nn.Linear(g_dim, state_dim),
        #                                        nn.ReLU(),
        #                                        nn.Linear(state_dim, u_dim * 2))

        # the state for decoding phase is replaced with code of g_dim
        input_particle_dim = g_dim
        # print('state_decoder', 'node', input_particle_dim, 'edge', input_relation_dim)
        self.inv_mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=state_dim, tanh=False, residual=residual, spectral_norm=True) # TRUE

        ''' dynamical system coefficient: A'''

        # self.system_identify = self.fit
        # self.simulate = self.rollout
        # self.step = self.linear_forward

        # Identity regularizer (per-block) for the fitted A matrices.
        self.A_reg = torch.eye(g_dim // n_blocks).unsqueeze(0)

        if fixed_A:
            # self.A = nn.Parameter( torch.randn((1, n_blocks, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
            # A initialized to identity per block.
            self.A = nn.Parameter(torch.zeros(1, n_blocks, g_dim // n_blocks, g_dim // n_blocks, requires_grad=True) + torch.eye(g_dim // n_blocks)[None, None])
        if fixed_B:
            self.B = nn.Parameter( torch.randn((1, n_blocks, u_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
        if num_sys > 0:
            # self.A = nn.Parameter( torch.randn((1, num_sys, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
            # Candidate A matrices, each initialized to identity; a small MLP
            # selects among them from the latent code.
            self.A = nn.Parameter(torch.zeros(1, num_sys, g_dim // n_blocks, g_dim // n_blocks, requires_grad=True) + torch.eye(g_dim // n_blocks)[None, None])
            # ids = torch.arange(0,self.A.shape[-1])
            # self.A[..., ids, ids] = 1
            # self.A = nn.Parameter(self.A)
            self.selector_fc = nn.Sequential(nn.Linear(g_dim, g_dim),
                                              nn.ReLU(),
                                              nn.Linear(g_dim, num_sys),
                                            nn.ReLU())


        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        # Aliases onto methods defined elsewhere in this class.
        self.system_identify = self.fit_block_diagonal
        self.system_identify_with_A = self.fit_with_A
        self.system_identify_with_compositional_A = self.fit_with_compositional_A
        # self.system_identify = self.fit_across_objects
        self.simulate = self.rollout_block_diagonal
        self.step = self.linear_forward_block_diagonal
        self.num_blocks = n_blocks
    def __init__(self,
                 state_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_timesteps,
                 deriv_in_state=False,
                 num_sys=0,
                 alpha=1,
                 init_scale=1):
        """Koopman model with encoder/decoder nets and forward/backward dynamics.

        Args:
            state_dim: dimensionality of a single observed state.
            nf_particle: accepted but unused in this constructor.
            nf_effect: accepted but unused in this constructor.
            g_dim: Koopman latent size.
            u_dim: input/control code size.
            n_timesteps: number of stacked time steps in the encoder input.
            deriv_in_state: if True (and n_timesteps > 2), widen the encoder
                input by first- and second-difference features.
            num_sys: accepted but unused here (see commented-out block below).
            alpha: width multiplier forwarded to encoder/decoder nets.
            init_scale: initial scale for the latent dynamics operator.
        """

        super(KoopmanOperators, self).__init__()

        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        # Each extra derivative order contributes one fewer time step.
        if deriv_in_state and n_timesteps > 2:
            first_deriv_dim = n_timesteps - 1
            sec_deriv_dim = n_timesteps - 2
        else:
            first_deriv_dim = 0
            sec_deriv_dim = 0
        ''' state '''
        input_particle_dim = state_dim * (
            n_timesteps + first_deriv_dim + sec_deriv_dim
        )  #+ g_dim # g_dim added for recursive sampling

        # Encoders into the latent space; composed_mapping emits 2 * g_dim
        # units (presumably a pair of codes -- TODO confirm against caller).
        self.mapping = encoderNet(input_particle_dim, g_dim, ALPHA=alpha)
        self.composed_mapping = encoderNet(input_particle_dim,
                                           g_dim * 2,
                                           ALPHA=alpha)
        self.inv_mapping = decoderNet(state_dim, g_dim, ALPHA=alpha)
        # Forward and backward latent linear dynamics.
        self.dynamics = dynamics(g_dim, init_scale)
        self.backdynamics = dynamics_back(g_dim, self.dynamics)

        # self.gru_u_mapping = nn.GRU(input_particle_dim, u_dim, num_layers = 1, batch_first=True)
        # self.linear_u_mapping = nn.Linear(u_dim, u_dim * 2)
        #
        # Maps stacked states to 2 * u_dim values (presumably distribution
        # parameters for the input code -- TODO confirm).
        self.nlinear_u_mapping = nn.Sequential(
            nn.Linear(input_particle_dim, state_dim), nn.ReLU(),
            nn.Linear(state_dim, u_dim * 2))

        # if num_sys > 0:
        #     # self.A = nn.Parameter( torch.randn((1, num_sys, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks) **2))
        #     self.A = torch.zeros(1, num_sys, g_dim, g_dim)
        #     ids = torch.arange(0,self.A.shape[-1])
        #     self.A[..., ids, ids] = 1
        #     self.A = nn.Parameter(self.A, requires_grad=True)
        #     self.selector_fc = nn.Sequential(nn.Linear(g_dim, g_dim),
        #                                       nn.ReLU(),
        #                                       nn.Linear(g_dim, num_sys),
        #                                     nn.ReLU())

        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        # Aliases onto methods defined elsewhere in this class.
        self.system_identify = self.fit_block_diagonal
        self.system_identify_with_A = self.fit_with_A
        self.system_identify_with_compositional_A = self.fit_with_compositional_A
        # self.system_identify = self.fit_across_objects
        self.simulate = self.rollout_block_diagonal
        self.step = self.linear_forward_block_diagonal