Example no. 1
0
    def __init__(self,
                 state_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_timesteps,
                 deriv_in_state=False,
                 num_sys=0,
                 alpha=1,
                 init_scale=1):
        """Koopman operator module: state encoder/decoder plus latent dynamics.

        Args:
            state_dim: dimensionality of a single observed state vector.
            nf_particle: unused in this variant — kept for interface parity.
            nf_effect: unused in this variant — kept for interface parity.
            g_dim: dimensionality of the Koopman observable (latent) space.
            u_dim: dimensionality of the input/control code.
            n_timesteps: number of stacked history steps fed to the encoder.
            deriv_in_state: if True (and n_timesteps > 2), append first and
                second discrete derivatives of the history to the encoder input.
            num_sys: unused in this variant — kept for interface parity.
            alpha: width multiplier forwarded to the encoder/decoder nets.
            init_scale: initialization scale for the latent dynamics.
        """
        super(KoopmanOperators, self).__init__()

        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        # Optionally augment the encoder input with discrete derivatives of
        # the state history (requires at least three timesteps).
        with_derivs = deriv_in_state and n_timesteps > 2
        n_first = n_timesteps - 1 if with_derivs else 0
        n_second = n_timesteps - 2 if with_derivs else 0

        # Encoder input: flattened state history (plus optional derivatives).
        enc_in_dim = state_dim * (n_timesteps + n_first + n_second)

        # State <-> observable mappings and the latent dynamics.
        self.mapping = encoderNet(enc_in_dim, g_dim, ALPHA=alpha)
        self.composed_mapping = encoderNet(enc_in_dim, g_dim * 2, ALPHA=alpha)
        self.inv_mapping = decoderNet(state_dim, g_dim, ALPHA=alpha, SN=False)
        self.dynamics = dynamics(g_dim, init_scale)
        self.backdynamics = dynamics_back(g_dim, self.dynamics)
        self.u_dynamics = u_dynamics(g_dim, u_dim)

        # History -> 2*u_dim output; presumably (mean, logvar) of the input
        # code u — TODO confirm against the sampling code.
        self.nlinear_u_mapping = nn.Sequential(
            nn.Linear(enc_in_dim, state_dim),
            nn.ReLU(),
            nn.Linear(state_dim, u_dim * 2),
        )

        # Discrete-relaxation utilities.
        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        # Aliases for the identification / simulation entry points.
        self.system_identify = self.fit_block_diagonal
        self.simulate = self.rollout
        self.simulate_no_input = self.rollout_no_input
    def __init__(self,
                 input_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 r_dim,
                 u_dim,
                 n_timesteps,
                 n_chan,
                 with_interactions=False,
                 residual=False,
                 collision_margin=None):
        """Encoder network bundling the object, input, and relation embeddings.

        Args:
            input_dim: dimensionality of the raw per-object input.
            nf_particle: hidden size of the object embedding.
            nf_effect: hidden size of the input/relational embeddings.
            g_dim: output (code) dimensionality of every embedding.
            r_dim: unused here — TODO confirm (kept for interface parity).
            u_dim: unused here — kept for interface parity.
            n_chan: number of input channels forwarded to the embeddings.
            with_interactions: if True, also build a relational embedding.
            residual: unused here — kept for interface parity.
            collision_margin: forwarded to the input/relational embeddings.
            n_timesteps: stored for use by the rest of the module.
        """
        # TODO: added r_dim
        super(EncoderNetwork, self).__init__()

        self.with_interactions = with_interactions
        self.collision_margin = collision_margin
        self.n_timesteps = n_timesteps

        # Encoder networks: per-object embedding and input (u) embedding.
        self.embedding = Embedding(input_dim,
                                   n_chan,
                                   hidden_size=nf_particle,
                                   g=g_dim)
        self.u_embedding = U_Embedding(input_dim,
                                       n_chan,
                                       hidden_size=nf_effect,
                                       output_size=g_dim,
                                       collision_margin=collision_margin)

        # Relational embedding only when interactions are modelled.
        if with_interactions:
            self.rel_embedding = Rel_Embedding(
                input_dim,
                n_chan,
                hidden_size=nf_effect,
                output_size=g_dim,
                collision_margin=collision_margin)

        # TODO: There's the Gumbel Softmax, if this wasn't working. Check the
        # Tracking code if they do KL divergence in that case.
        self.softmax = nn.Softmax(dim=-1)
        self.round = tut.Round()
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
Example no. 3
0
 def __init__(self, n_objects, dims):
     """Set up the NTM tracker: output head, relaxation utilities, NTM cell.

     Args:
         n_objects: number of objects tracked in parallel.
         dims: dict with integer entries 'confidence', 'layer', 'pose',
             'shape', 'appearance', 'tracker_state' and 'features'.
     """
     super(NTM, self).__init__()
     self.n_objects = n_objects
     conf_dim, layer_dim, pose_dim, shape_dim, app_dim = dims[
         'confidence'], dims['layer'], dims['pose'], dims['shape'], dims[
             'appearance']
     # Output vector concatenates all per-object attribute groups.
     dim_y = conf_dim + layer_dim + pose_dim + shape_dim + app_dim
     self.dims = dims
     self.dim_h_o = dims['tracker_state']
     # BUGFIX: the original also built a 3-layer CELU MLP and assigned it to
     # self.fcn, only to overwrite it on the very next line — the MLP was
     # constructed and immediately discarded. Keep only the effective
     # single-linear output head (runtime behavior unchanged).
     self.fcn = nn.Linear(self.dim_h_o, dim_y)
     self.softmax = nn.Softmax(dim=1)
     self.st_gumbel_sigmoid = ut.STGumbelSigmoid()
     self.st_gumbel_softmax = ut.STGumbelSoftmax(1)
     self.permutation_matrix_calculator = ut.PermutationMatrixCalculator()
     # NTMCell(dim, feat_dim, n_objects) — see its definition for H, W.
     self.ntm_cell = NTMCell(self.dim_h_o, dims['features'], n_objects)
     # Per-rollout mutable state: timestep counter, attention, memory.
     self.t = 0
     self.att = None
     self.mem = None
Example no. 4
0
    def __init__(self, state_dim, nf_particle, nf_effect, g_dim, u_dim, n_timesteps, n_blocks=1, residual=False, deriv_in_state=False, fixed_A=False, fixed_B=False, num_sys=0):
        """Koopman module with block-diagonal system matrices (A, B).

        Args:
            state_dim: dimensionality of one observed state vector.
            nf_particle: hidden size of the propagation networks (particles).
            nf_effect: hidden size of the propagation networks (effects).
            g_dim: dimensionality of the Koopman observable (latent) space.
            u_dim: dimensionality of the input/control code.
            n_timesteps: number of stacked history steps fed to the encoder.
            n_blocks: number of diagonal blocks in A (and B).
            residual: use residual connections in the propagation networks.
            deriv_in_state: if True (and n_timesteps > 2), append first and
                second discrete derivatives of the history to the encoder input.
            fixed_A: learn a single global A parameter instead of fitting it.
            fixed_B: learn a single global B parameter instead of fitting it.
            num_sys: if > 0, learn a bank of `num_sys` candidate A matrices
                plus a selector network choosing among them.
        """
        super(KoopmanOperators, self).__init__()

        self.residual = residual
        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        # Optional discrete derivatives of the history enlarge the encoder
        # input (requires at least three timesteps).
        if deriv_in_state and n_timesteps > 2:
            first_deriv_dim = n_timesteps - 1
            sec_deriv_dim = n_timesteps - 2
        else:
            first_deriv_dim = 0
            sec_deriv_dim = 0

        ''' state '''
        # TODO: state_dim * n_timesteps if not hankel. Pass hankel as parameter.
        input_particle_dim = state_dim * (n_timesteps + first_deriv_dim + sec_deriv_dim) #+ g_dim #TODO: g_dim added for recursive sampling

        # State history -> g_dim observable code.
        self.mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=g_dim, output_action_dim = u_dim, tanh=False,  # use tanh to enforce the shape of the code space
            residual=residual) # g * 2

        # Same encoder shape but with a doubled (2*g_dim) output.
        self.composed_mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=2 * g_dim, output_action_dim = u_dim, tanh=False,  # use tanh to enforce the shape of the code space
            residual=residual) # g * 2


        # Input-code (u) extractors; each produces 2*u_dim — presumably
        # (mean, logvar) for sampling u — TODO confirm with the caller.
        self.gru_u_mapping = nn.GRU(input_particle_dim, u_dim, num_layers = 1, batch_first=True)
        self.linear_u_mapping = nn.Linear(u_dim, u_dim * 2)
        #
        self.nlinear_u_mapping = nn.Sequential(nn.Linear(input_particle_dim, state_dim),
                                          nn.ReLU(),
                                          nn.Linear(state_dim, u_dim * 2))

        # self.nlinear_u_mapping = nn.Sequential(nn.Linear(g_dim, state_dim),
        #                                        nn.ReLU(),
        #                                        nn.Linear(state_dim, u_dim * 2))

        # the state for decoding phase is replaced with code of g_dim
        input_particle_dim = g_dim
        # print('state_decoder', 'node', input_particle_dim, 'edge', input_relation_dim)
        # Observable code -> reconstructed state (spectrally normalized).
        self.inv_mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=state_dim, tanh=False, residual=residual, spectral_norm=True) # TRUE

        ''' dynamical system coefficient: A'''

        # self.system_identify = self.fit
        # self.simulate = self.rollout
        # self.step = self.linear_forward

        # Identity regularizer for A.
        # NOTE(review): plain tensor, not a registered buffer — it will not
        # follow the module across .to(device) or appear in state_dict;
        # confirm that is intended.
        self.A_reg = torch.eye(g_dim // n_blocks).unsqueeze(0)

        if fixed_A:
            # A initialized at the identity (one shared block).
            # self.A = nn.Parameter( torch.randn((1, n_blocks, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
            self.A = nn.Parameter(torch.zeros(1, n_blocks, g_dim // n_blocks, g_dim // n_blocks, requires_grad=True) + torch.eye(g_dim // n_blocks)[None, None])
        if fixed_B:
            self.B = nn.Parameter( torch.randn((1, n_blocks, u_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
        # NOTE(review): if both fixed_A and num_sys > 0 are set, the bank
        # below silently replaces the fixed A defined above — presumably the
        # two flags are mutually exclusive; confirm at call sites.
        if num_sys > 0:
            # Bank of num_sys candidate A matrices, each initialized at identity.
            # self.A = nn.Parameter( torch.randn((1, num_sys, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
            self.A = nn.Parameter(torch.zeros(1, num_sys, g_dim // n_blocks, g_dim // n_blocks, requires_grad=True) + torch.eye(g_dim // n_blocks)[None, None])
            # ids = torch.arange(0,self.A.shape[-1])
            # self.A[..., ids, ids] = 1
            # self.A = nn.Parameter(self.A)
            # Selector scoring which candidate A to use for a given code.
            self.selector_fc = nn.Sequential(nn.Linear(g_dim, g_dim),
                                              nn.ReLU(),
                                              nn.Linear(g_dim, num_sys),
                                            nn.ReLU())


        # Discrete-relaxation utilities.
        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        # Aliases for the identification / simulation entry points.
        self.system_identify = self.fit_block_diagonal
        self.system_identify_with_A = self.fit_with_A
        self.system_identify_with_compositional_A = self.fit_with_compositional_A
        # self.system_identify = self.fit_across_objects
        self.simulate = self.rollout_block_diagonal
        self.step = self.linear_forward_block_diagonal
        self.num_blocks = n_blocks
    def __init__(self,
                 state_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_timesteps,
                 deriv_in_state=False,
                 num_sys=0,
                 alpha=1,
                 init_scale=1):
        """Koopman operator module (block-diagonal fitting/rollout variant).

        Args:
            state_dim: dimensionality of a single observed state vector.
            nf_particle: unused in this variant — kept for interface parity.
            nf_effect: unused in this variant — kept for interface parity.
            g_dim: dimensionality of the Koopman observable (latent) space.
            u_dim: dimensionality of the input/control code.
            n_timesteps: number of stacked history steps fed to the encoder.
            deriv_in_state: if True (and n_timesteps > 2), append first and
                second discrete derivatives of the history to the encoder input.
            num_sys: unused in this variant — kept for interface parity.
            alpha: width multiplier forwarded to the encoder/decoder nets.
            init_scale: initialization scale for the latent dynamics.
        """
        super(KoopmanOperators, self).__init__()

        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        # Optionally augment the encoder input with discrete derivatives of
        # the state history (requires at least three timesteps).
        with_derivs = deriv_in_state and n_timesteps > 2
        n_first = n_timesteps - 1 if with_derivs else 0
        n_second = n_timesteps - 2 if with_derivs else 0

        # Encoder input: flattened state history (plus optional derivatives).
        enc_in_dim = state_dim * (n_timesteps + n_first + n_second)

        # State <-> observable mappings and the latent dynamics.
        self.mapping = encoderNet(enc_in_dim, g_dim, ALPHA=alpha)
        self.composed_mapping = encoderNet(enc_in_dim, g_dim * 2, ALPHA=alpha)
        self.inv_mapping = decoderNet(state_dim, g_dim, ALPHA=alpha)
        self.dynamics = dynamics(g_dim, init_scale)
        self.backdynamics = dynamics_back(g_dim, self.dynamics)

        # History -> 2*u_dim output; presumably (mean, logvar) of the input
        # code u — TODO confirm against the sampling code.
        self.nlinear_u_mapping = nn.Sequential(
            nn.Linear(enc_in_dim, state_dim),
            nn.ReLU(),
            nn.Linear(state_dim, u_dim * 2),
        )

        # Discrete-relaxation utilities.
        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        # Aliases for the identification / simulation entry points.
        self.system_identify = self.fit_block_diagonal
        self.system_identify_with_A = self.fit_with_A
        self.system_identify_with_compositional_A = self.fit_with_compositional_A
        self.simulate = self.rollout_block_diagonal
        self.step = self.linear_forward_block_diagonal