Example #1
    def __init__(self,
                 state_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_timesteps,
                 deriv_in_state=False,
                 num_sys=0,
                 alpha=1,
                 init_scale=1):

        super(KoopmanOperators, self).__init__()

        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        if deriv_in_state and n_timesteps > 2:
            first_deriv_dim = n_timesteps - 1
            sec_deriv_dim = n_timesteps - 2
        else:
            first_deriv_dim = 0
            sec_deriv_dim = 0
        ''' state '''
        input_particle_dim = state_dim * (
            n_timesteps + first_deriv_dim + sec_deriv_dim
        )  # + g_dim (added for recursive sampling)

        self.mapping = encoderNet(input_particle_dim, g_dim, ALPHA=alpha)
        self.composed_mapping = encoderNet(input_particle_dim,
                                           g_dim * 2,
                                           ALPHA=alpha)
        self.inv_mapping = decoderNet(state_dim, g_dim, ALPHA=alpha, SN=False)
        self.dynamics = dynamics(g_dim, init_scale)
        self.backdynamics = dynamics_back(g_dim, self.dynamics)
        self.u_dynamics = u_dynamics(g_dim, u_dim)

        # self.gru_u_mapping = nn.GRU(input_particle_dim, u_dim, num_layers = 1, batch_first=True)
        # self.linear_u_mapping = nn.Linear(u_dim, u_dim * 2)
        #
        self.nlinear_u_mapping = nn.Sequential(
            nn.Linear(input_particle_dim, state_dim), nn.ReLU(),
            nn.Linear(state_dim, u_dim * 2))

        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        self.system_identify = self.fit_block_diagonal
        # self.system_identify_with_A = self.fit_with_A
        # self.system_identify_with_compositional_A = self.fit_with_compositional_A
        # self.system_identify = self.fit_across_objects
        self.simulate = self.rollout
        self.simulate_no_input = self.rollout_no_input
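
The dimension bookkeeping above (input_particle_dim = state_dim * (n_timesteps + first_deriv_dim + sec_deriv_dim)) corresponds to stacking a short state history together with its first and second finite-difference derivatives. A minimal sketch of that stacking, assuming a (batch, n_timesteps, state_dim) history tensor; the function name and shapes are illustrative, not taken from the example:

import torch

def stack_state_with_derivs(history):
    # history: (B, n_timesteps, state_dim)
    first = history[:, 1:] - history[:, :-1]     # (B, n_timesteps - 1, state_dim)
    second = first[:, 1:] - first[:, :-1]        # (B, n_timesteps - 2, state_dim)
    # Flatten and concatenate: state_dim * (T + (T - 1) + (T - 2)) features per sample.
    return torch.cat([history.flatten(1), first.flatten(1), second.flatten(1)], dim=-1)

x = torch.randn(8, 3, 4)                         # B=8, n_timesteps=3, state_dim=4
print(stack_state_with_derivs(x).shape)          # torch.Size([8, 24]) == 4 * (3 + 2 + 1)
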
Example #2

    def __init__(self,
                 s,
                 c,
                 hidden_size,
                 output_size,
                 collision_margin=None,
                 SN=False):
        super(Rel_Embedding, self).__init__()
        self.collision_margin = collision_margin
        self.output_size = output_size
        self.c = c
        self.g = output_size
        self.sigmoid = nn.Sigmoid()
        self.gumbel_sigmoid = tut.STGumbelSigmoid()
        # Input is s_o1 - s_o2
        # if collision_margin is None:

        self.model_rel = nn.Sequential(
            nn.Linear(3 * s, hidden_size // 2), nn.ReLU(),
            nn.Linear(hidden_size // 2, hidden_size // 2), nn.ReLU(),
            nn.Linear(hidden_size // 2, 1))

        self.model = nn.Sequential(
            nn.Linear(3 * s, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size * c),
        )

        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)
        # if SN:
        #     self.model = SpectralNorm(self.model)
        #     self.model_rel = SpectralNorm(self.model_u)

        self.round = tut.Round()
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()
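
The constructor's comment says the relation input is s_o1 - s_o2, while both MLPs take 3 * s features. One plausible construction that matches that width, purely an assumption for illustration (the forward pass of Rel_Embedding is not shown here), concatenates the two object states with their difference:

import torch

def pair_features(s1, s2):
    # s1, s2: (B, s) states of two objects; output: (B, 3 * s), matching
    # the nn.Linear(3 * s, ...) layers above. This layout is an assumption.
    return torch.cat([s1, s2, s1 - s2], dim=-1)

s1, s2 = torch.randn(8, 4), torch.randn(8, 4)
print(pair_features(s1, s2).shape)    # torch.Size([8, 12])
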
Example #3
    def __init__(self, n_objects, dims):
        super(NTM, self).__init__()
        self.n_objects = n_objects
        conf_dim = dims['confidence']
        layer_dim = dims['layer']
        pose_dim = dims['pose']
        shape_dim = dims['shape']
        app_dim = dims['appearance']
        dim_y = conf_dim + layer_dim + pose_dim + shape_dim + app_dim
        self.dims = dims
        self.dim_h_o = dims['tracker_state']
        self.fcn = nn.Sequential(nn.Linear(self.dim_h_o, self.dim_h_o),
                                 nn.CELU(),
                                 nn.Linear(self.dim_h_o, self.dim_h_o),
                                 nn.CELU(), nn.Linear(self.dim_h_o, dim_y))
        # Note: the MLP above is immediately overridden by a single linear layer.
        self.fcn = nn.Linear(self.dim_h_o, dim_y)
        self.softmax = nn.Softmax(dim=1)
        self.st_gumbel_sigmoid = ut.STGumbelSigmoid()
        self.st_gumbel_softmax = ut.STGumbelSoftmax(1)
        self.permutation_matrix_calculator = ut.PermutationMatrixCalculator()
        # dim, feat_dim, n_objects, H, W
        self.ntm_cell = NTMCell(self.dim_h_o, dims['features'], n_objects)
        self.t = 0
        self.att = None
        self.mem = None
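
The ut.STGumbelSigmoid and ut.STGumbelSoftmax helpers used throughout these examples are not shown. As a rough sketch of what a straight-through Gumbel-Softmax typically does (an assumption, not the repository's implementation): add Gumbel noise, relax with a softmax, take a hard one-hot in the forward pass, and let gradients flow through the soft sample.

import torch
import torch.nn.functional as F

def st_gumbel_softmax(logits, tau=1.0, dim=-1):
    # Sample Gumbel noise and relax with a softmax at temperature tau.
    gumbel = -torch.log(-torch.log(torch.rand_like(logits) + 1e-20) + 1e-20)
    y_soft = F.softmax((logits + gumbel) / tau, dim=dim)
    # Hard one-hot forward, soft gradients backward (straight-through estimator).
    index = y_soft.argmax(dim, keepdim=True)
    y_hard = torch.zeros_like(y_soft).scatter_(dim, index, 1.0)
    return y_hard - y_soft.detach() + y_soft

print(st_gumbel_softmax(torch.randn(2, 5), tau=0.5))   # one-hot rows, differentiable w.r.t. logits
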
Example #4
    def __init__(self,
                 state_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_timesteps,
                 n_blocks=1,
                 residual=False,
                 deriv_in_state=False,
                 fixed_A=False,
                 fixed_B=False,
                 num_sys=0):
        super(KoopmanOperators, self).__init__()

        self.residual = residual
        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        if deriv_in_state and n_timesteps > 2:
            first_deriv_dim = n_timesteps - 1
            sec_deriv_dim = n_timesteps - 2
        else:
            first_deriv_dim = 0
            sec_deriv_dim = 0

        ''' state '''
        # TODO: state_dim * n_timesteps if not hankel. Pass hankel as parameter.
        input_particle_dim = state_dim * (
            n_timesteps + first_deriv_dim + sec_deriv_dim
        )  # + g_dim  # TODO: g_dim added for recursive sampling

        self.mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=g_dim, output_action_dim=u_dim,
            tanh=False,  # use tanh to enforce the shape of the code space
            residual=residual)  # g * 2

        self.composed_mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=2 * g_dim, output_action_dim=u_dim,
            tanh=False,  # use tanh to enforce the shape of the code space
            residual=residual)  # g * 2


        self.gru_u_mapping = nn.GRU(input_particle_dim, u_dim, num_layers = 1, batch_first=True)
        self.linear_u_mapping = nn.Linear(u_dim, u_dim * 2)
        #
        self.nlinear_u_mapping = nn.Sequential(nn.Linear(input_particle_dim, state_dim),
                                          nn.ReLU(),
                                          nn.Linear(state_dim, u_dim * 2))

        # self.nlinear_u_mapping = nn.Sequential(nn.Linear(g_dim, state_dim),
        #                                        nn.ReLU(),
        #                                        nn.Linear(state_dim, u_dim * 2))

        # the state for decoding phase is replaced with code of g_dim
        input_particle_dim = g_dim
        # print('state_decoder', 'node', input_particle_dim, 'edge', input_relation_dim)
        self.inv_mapping = SimplePropagationNetwork(
            input_particle_dim=input_particle_dim, nf_particle=nf_particle,
            nf_effect=nf_effect, output_dim=state_dim, tanh=False, residual=residual, spectral_norm=True) # TRUE

        ''' dynamical system coefficient: A'''

        # self.system_identify = self.fit
        # self.simulate = self.rollout
        # self.step = self.linear_forward

        self.A_reg = torch.eye(g_dim // n_blocks).unsqueeze(0)

        if fixed_A:
            # self.A = nn.Parameter( torch.randn((1, n_blocks, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
            self.A = nn.Parameter(
                torch.zeros(1, n_blocks, g_dim // n_blocks, g_dim // n_blocks,
                            requires_grad=True)
                + torch.eye(g_dim // n_blocks)[None, None])
        if fixed_B:
            self.B = nn.Parameter(
                torch.randn((1, n_blocks, u_dim // n_blocks, g_dim // n_blocks),
                            requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
        if num_sys > 0:
            # self.A = nn.Parameter( torch.randn((1, num_sys, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks)))
            self.A = nn.Parameter(
                torch.zeros(1, num_sys, g_dim // n_blocks, g_dim // n_blocks,
                            requires_grad=True)
                + torch.eye(g_dim // n_blocks)[None, None])
            # ids = torch.arange(0,self.A.shape[-1])
            # self.A[..., ids, ids] = 1
            # self.A = nn.Parameter(self.A)
            self.selector_fc = nn.Sequential(nn.Linear(g_dim, g_dim),
                                             nn.ReLU(),
                                             nn.Linear(g_dim, num_sys),
                                             nn.ReLU())


        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        self.system_identify = self.fit_block_diagonal
        self.system_identify_with_A = self.fit_with_A
        self.system_identify_with_compositional_A = self.fit_with_compositional_A
        # self.system_identify = self.fit_across_objects
        self.simulate = self.rollout_block_diagonal
        self.step = self.linear_forward_block_diagonal
        self.num_blocks = n_blocks
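
fit_block_diagonal, rollout_block_diagonal and linear_forward_block_diagonal are bound here but defined elsewhere in the class. As a generic illustration only (an assumption, not the repository's method), a linear operator can be identified from paired observables with a batched least-squares solve:

import torch

def fit_linear_operator(G, G_next):
    # G, G_next: (B, T, g_dim) observables at consecutive time steps.
    # Solves min_A ||G @ A - G_next||^2 independently for each batch element.
    return torch.linalg.lstsq(G, G_next).solution    # (B, g_dim, g_dim)

G = torch.randn(4, 20, 6)
A = fit_linear_operator(G[:, :-1], G[:, 1:])
print(A.shape)                                       # torch.Size([4, 6, 6])
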
Example #5

    def __init__(self,
                 state_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_timesteps,
                 deriv_in_state=False,
                 num_sys=0,
                 alpha=1,
                 init_scale=1):

        super(KoopmanOperators, self).__init__()

        self.n_timesteps = n_timesteps
        self.u_dim = u_dim

        if deriv_in_state and n_timesteps > 2:
            first_deriv_dim = n_timesteps - 1
            sec_deriv_dim = n_timesteps - 2
        else:
            first_deriv_dim = 0
            sec_deriv_dim = 0
        ''' state '''
        input_particle_dim = state_dim * (
            n_timesteps + first_deriv_dim + sec_deriv_dim
        )  # + g_dim (added for recursive sampling)

        self.mapping = encoderNet(input_particle_dim, g_dim, ALPHA=alpha)
        self.composed_mapping = encoderNet(input_particle_dim,
                                           g_dim * 2,
                                           ALPHA=alpha)
        self.inv_mapping = decoderNet(state_dim, g_dim, ALPHA=alpha)
        self.dynamics = dynamics(g_dim, init_scale)
        self.backdynamics = dynamics_back(g_dim, self.dynamics)

        # self.gru_u_mapping = nn.GRU(input_particle_dim, u_dim, num_layers = 1, batch_first=True)
        # self.linear_u_mapping = nn.Linear(u_dim, u_dim * 2)
        #
        self.nlinear_u_mapping = nn.Sequential(
            nn.Linear(input_particle_dim, state_dim), nn.ReLU(),
            nn.Linear(state_dim, u_dim * 2))

        # if num_sys > 0:
        #     # self.A = nn.Parameter( torch.randn((1, num_sys, g_dim // n_blocks, g_dim // n_blocks), requires_grad=True) * .1 + (1 / (g_dim // n_blocks) **2))
        #     self.A = torch.zeros(1, num_sys, g_dim, g_dim)
        #     ids = torch.arange(0,self.A.shape[-1])
        #     self.A[..., ids, ids] = 1
        #     self.A = nn.Parameter(self.A, requires_grad=True)
        #     self.selector_fc = nn.Sequential(nn.Linear(g_dim, g_dim),
        #                                       nn.ReLU(),
        #                                       nn.Linear(g_dim, num_sys),
        #                                     nn.ReLU())

        self.softmax = nn.Softmax(dim=-1)
        self.st_gumbel_softmax = tut.STGumbelSoftmax(-1)
        self.round = tut.Round()
        self.st_gumbel_sigmoid = tut.STGumbelSigmoid()

        self.system_identify = self.fit_block_diagonal
        self.system_identify_with_A = self.fit_with_A
        self.system_identify_with_compositional_A = self.fit_with_compositional_A
        # self.system_identify = self.fit_across_objects
        self.simulate = self.rollout_block_diagonal
        self.step = self.linear_forward_block_diagonal
Example #6
    def __init__(self, input_size, out_ch, dyn_dim):
        super(ImageDecoder, self).__init__()

        self.dyn_dim = dyn_dim
        init_ch = 256
        self.init_dec_cnn = nn.Sequential(
            nn.Conv2d(input_size, init_ch, 1),
            nn.CELU(),
            nn.BatchNorm2d(init_ch),
        )

        # TO X CNN Maps appearance to object. 1 x 1
        self.to_x = nn.Sequential(
            nn.Conv2d(init_ch, 128 * 2 * 2, 1),  # 2, 2
            nn.PixelShuffle(2),
            nn.CELU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128 * 2 * 2, 1),  # 4, 4
            nn.PixelShuffle(2),
            nn.CELU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(128),
            # --
            nn.Conv2d(128, 64 * 2 * 2, 1),  # 8, 8
            nn.PixelShuffle(2),
            nn.CELU(),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(64),
            # --
            nn.Conv2d(64, 32 * 2 * 2, 1),  # 16, 16
            nn.PixelShuffle(2),
            nn.CELU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 32, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(32),
            # --
            nn.Conv2d(32, 16 * 2 * 2, 1),  # 32, 32
            nn.PixelShuffle(2),
            nn.CELU(),
            nn.BatchNorm2d(16),
            nn.Conv2d(16, 16, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(16),
            # --
            nn.Conv2d(16, out_ch, 3, 1, 1),  # output channels; if the input is RGB, out_ch=3
        )

        # Note: this second definition overrides the PixelShuffle-based decoder above.
        self.to_x = nn.Sequential(
            nn.Conv2d(init_ch, 128, 3, 1, 1),  # 2, 2
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.CELU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, 1, 1),  # 4, 4
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.CELU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(128),
            # --
            nn.Conv2d(128, 64, 3, 1, 1),  # 8, 8
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.CELU(),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(64),
            # --
            nn.Conv2d(64, 32, 3, 1, 1),  # 16, 16
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.CELU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 32, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(32),
            # --
            nn.Conv2d(32, 16, 3, 1, 1),  # 32, 32
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.CELU(),
            nn.BatchNorm2d(16),
            nn.Conv2d(16, 16, 3, 1, 1),
            nn.CELU(),
            nn.BatchNorm2d(16),
            # --
            nn.Conv2d(16, out_ch, 3, 1, 1),  # output channels; if the input is RGB, out_ch=3
        )
        self.st_gumbel_sigmoid = ut.STGumbelSigmoid()
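
Both decoder variants above double the spatial resolution at each stage, one with a 1x1 convolution to r*r times the channels followed by PixelShuffle(2), the other with a 3x3 convolution followed by bilinear upsampling. A small shape check, with the input sizes assumed for illustration:

import torch
import torch.nn as nn

x = torch.randn(1, 256, 2, 2)    # assumed (batch, init_ch, H, W)

pixel_shuffle_up = nn.Sequential(nn.Conv2d(256, 128 * 2 * 2, 1), nn.PixelShuffle(2))
bilinear_up = nn.Sequential(nn.Conv2d(256, 128, 3, 1, 1),
                            nn.UpsamplingBilinear2d(scale_factor=2))

print(pixel_shuffle_up(x).shape)  # torch.Size([1, 128, 4, 4])
print(bilinear_up(x).shape)       # torch.Size([1, 128, 4, 4])
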