Example #1
    def __init__(self, in_channels, feat_dim, nf_particle, nf_effect, g_dim,
                 I_factor=10, psteps=1, n_timesteps=1, ngf=8, image_size=[64, 64]):
        super().__init__()
        out_channels = 1
        n_layers = int(np.log2(image_size[0])) - 1

        # Positional encoding buffers
        x = torch.linspace(-1, 1, image_size[0])
        y = torch.linspace(-1, 1, image_size[1])
        x_grid, y_grid = torch.meshgrid(x, y, indexing='ij')  # explicit 'ij' keeps the historical behaviour and avoids the deprecation warning
        # Add as constant, with extra dims for N and C
        self.register_buffer('x_grid_enc', x_grid.view((1, 1, 1) + x_grid.shape))
        self.register_buffer('y_grid_enc', y_grid.view((1, 1, 1) + y_grid.shape))

        # Temporal encoding buffers
        if n_timesteps > 1:
            t = torch.linspace(-1, 1, n_timesteps)
            # Register as a constant (non-trainable) buffer; no extra dims needed here
            self.register_buffer('t_grid', t)

        # Set state dim with config, depending on how many time-steps we want to take into account
        self.image_size = image_size
        self.n_timesteps = n_timesteps
        self.state_dim = feat_dim
        self.I_factor = I_factor
        self.psteps = psteps
        self.g_dim = g_dim

        # self.att_image_encoder = AttImageEncoder(in_channels, feat_dim, ngf, n_layers)
        self.image_encoder = ImageEncoder(in_channels, feat_dim * 4, ngf, n_layers)  # feat_dim * 2 if sample here
        self.image_decoder = ImageDecoder(g_dim, out_channels, ngf, n_layers)
        # self.image_decoder = SimpleSBImageDecoder(feat_dim, out_channels, ngf, n_layers, image_size)
        self.koopman = KoopmanOperators(self.state_dim * 4, nf_particle, nf_effect, g_dim, n_timesteps)
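A minimal sketch of how the x_grid_enc / y_grid_enc buffers are typically consumed: broadcast over the batch and concatenated to the input as two extra coordinate channels. ImageEncoder is not shown here, so the (N, T, C, H, W) layout and the exact wiring are assumptions.

import torch

def append_coord_channels(frames, x_grid_enc, y_grid_enc):
    # frames: (N, T, C, H, W); the grids were registered as (1, 1, 1, H, W)
    N, T = frames.shape[:2]
    coords = torch.cat([
        x_grid_enc.expand(N, T, 1, *x_grid_enc.shape[-2:]),
        y_grid_enc.expand(N, T, 1, *y_grid_enc.shape[-2:]),
    ], dim=2)
    return torch.cat([frames, coords], dim=2)  # channel dim grows by 2

# Usage with the same grid construction as in __init__
H = W = 64
x_grid, y_grid = torch.meshgrid(torch.linspace(-1, 1, H),
                                torch.linspace(-1, 1, W), indexing='ij')
frames = torch.zeros(2, 4, 1, H, W)
out = append_coord_channels(frames,
                            x_grid.view(1, 1, 1, H, W),
                            y_grid.view(1, 1, 1, H, W))  # -> (2, 4, 3, H, W)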
Example #2
    def __init__(self, in_channels, feat_dim, nf_particle, nf_effect, g_dim, u_dim,
                 n_objects, I_factor=10, n_blocks=1, psteps=1, n_timesteps=1, ngf=8, image_size=[64, 64]):
        super().__init__()
        out_channels = 1
        n_layers = int(np.log2(image_size[0])) - 1

        self.u_dim = u_dim

        # Temporal encoding buffers
        if n_timesteps > 1:
            t = torch.linspace(-1, 1, n_timesteps)
            # Register as a constant (non-trainable) buffer; no extra dims needed here
            self.register_buffer('t_grid', t)

        # Set state dim with config, depending on how many time-steps we want to take into account
        self.image_size = image_size
        self.n_timesteps = n_timesteps
        self.state_dim = feat_dim
        self.I_factor = I_factor
        self.psteps = psteps
        self.g_dim = g_dim
        self.n_objects = n_objects

        self.softmax = nn.Softmax(dim=-1)
        self.sigmoid = nn.Sigmoid()

        self.linear_g = nn.Linear(g_dim, g_dim)
        self.linear_u = nn.Linear(g_dim, u_dim)
        self.initial_conditions = nn.Sequential(nn.Linear(feat_dim * n_timesteps * 2, feat_dim * n_timesteps),
                                                nn.ReLU(),
                                                nn.Linear(feat_dim * n_timesteps, g_dim * 2))

        self.image_encoder = ImageEncoder(in_channels, feat_dim * 2, n_objects, ngf, n_layers)  # feat_dim * 2 if sample here
        self.image_decoder = ImageDecoder(g_dim, out_channels, ngf, n_layers)
        self.koopman = KoopmanOperators(feat_dim * 2, nf_particle * 2, nf_effect * 2, g_dim, u_dim, n_timesteps, n_blocks)
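The initial_conditions head doubles its output width (g_dim * 2), which suggests a mean / log-variance split followed by reparameterised sampling of the initial Koopman embedding. The sketch below assumes exactly that; the split, the sampling step and the dimensions are illustrative, not taken from the model code.

import torch
import torch.nn as nn

feat_dim, n_timesteps, g_dim = 16, 4, 32
initial_conditions = nn.Sequential(
    nn.Linear(feat_dim * n_timesteps * 2, feat_dim * n_timesteps),
    nn.ReLU(),
    nn.Linear(feat_dim * n_timesteps, g_dim * 2))

feats = torch.randn(8, feat_dim * n_timesteps * 2)        # flattened per-object features
mu, logvar = initial_conditions(feats).chunk(2, dim=-1)   # assumed mean / log-variance split
g0 = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)  # sampled initial embedding, (8, g_dim)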
Example #3
    def __init__(self,
                 in_channels,
                 feat_dim,
                 nf_particle,
                 nf_effect,
                 g_dim,
                 u_dim,
                 n_objects,
                 I_factor=10,
                 n_blocks=1,
                 psteps=1,
                 n_timesteps=1,
                 ngf=8,
                 image_size=[64, 64]):
        super().__init__()
        out_channels = 1
        n_layers = int(np.log2(image_size[0])) - 1

        self.u_dim = u_dim

        # Temporal encoding buffers
        if n_timesteps > 1:
            t = torch.linspace(-1, 1, n_timesteps)
            # Register as a constant (non-trainable) buffer; no extra dims needed here
            self.register_buffer('t_grid', t)

        # Set state dim with config, depending on how many time-steps we want to take into account
        self.image_size = image_size
        self.n_timesteps = n_timesteps
        self.state_dim = feat_dim
        self.I_factor = I_factor
        self.psteps = psteps
        self.g_dim = g_dim
        self.n_objects = n_objects

        self.softmax = nn.Softmax(dim=-1)
        self.sigmoid = nn.Sigmoid()

        # self.linear_g = nn.Linear(g_dim, g_dim)

        # self.initial_conditions = nn.Sequential(nn.Linear(feat_dim * n_timesteps * 2, feat_dim * n_timesteps),
        #                                         nn.ReLU(),
        #                                         nn.Linear(feat_dim * n_timesteps, g_dim * 2))

        # feat_dyn_dim: slice of the feature vector treated as dynamic state;
        # the remaining dimensions presumably carry static appearance/content
        feat_dyn_dim = feat_dim // 6
        self.feat_dyn_dim = feat_dyn_dim
        self.content = None
        self.reverse = False
        self.hankel = True  # stack the last n_timesteps embeddings for the input head below
        self.ini_alpha = 1
        self.incr_alpha = 0.5

        # self.linear_u = nn.Linear(g_dim + u_dim, u_dim * 2)
        # self.linear_u_2_f = nn.Linear(feat_dyn_dim * n_timesteps, u_dim * 2)
        # self.linear_u_T_f = nn.Linear(feat_dyn_dim * n_timesteps, u_dim)

        if self.hankel:
            # self.linear_u_2_g = nn.Linear(g_dim * self.n_timesteps, u_dim * 2)
            # self.linear_u_2_g = nn.Sequential(nn.Linear(g_dim * self.n_timesteps, g_dim),
            #                                    nn.ReLU(),
            #                                    nn.Linear(g_dim, u_dim * 2))
            # self.linear_u_all_g = nn.Sequential(nn.Linear(g_dim * self.n_timesteps, g_dim),
            #                                   nn.ReLU(),
            #                                   nn.Linear(g_dim, u_dim + 1))
            # self.gru_u_all_g = nn.GRU(g_dim, u_dim + 1, num_layers = 2, batch_first=True)
            self.linear_u_1_g = nn.Sequential(
                nn.Linear(g_dim * self.n_timesteps, g_dim), nn.ReLU(),
                nn.Linear(g_dim, u_dim))
        else:
            self.linear_u_2_g = nn.Linear(g_dim, u_dim * 2)

        # Mean / log-variance heads over the encoder output (per-object feature posterior)
        self.linear_f_mu = nn.Linear(feat_dim * 2, feat_dim)
        self.linear_f_logvar = nn.Linear(feat_dim * 2, feat_dim)

        self.image_encoder = ImageEncoder(
            in_channels, feat_dim * 2, n_objects, ngf,
            n_layers)  # feat_dim * 2 if sample here
        self.image_decoder = ImageDecoder(feat_dim, out_channels, ngf,
                                          n_layers)
        self.koopman = KoopmanOperators(feat_dyn_dim, nf_particle, nf_effect,
                                        g_dim, u_dim, n_timesteps, n_blocks)
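With self.hankel enabled, linear_u_1_g takes g_dim * self.n_timesteps inputs, i.e. a window of stacked Koopman embeddings. The sketch below shows that input path; stacking the last n_timesteps embeddings and bounding u with the module's sigmoid are assumptions about how the head is used.

import torch
import torch.nn as nn

g_dim, u_dim, n_timesteps = 32, 4, 3
linear_u_1_g = nn.Sequential(
    nn.Linear(g_dim * n_timesteps, g_dim), nn.ReLU(),
    nn.Linear(g_dim, u_dim))

g_window = torch.randn(8, n_timesteps, g_dim)          # last n_timesteps embeddings per object
u = torch.sigmoid(linear_u_1_g(g_window.flatten(1)))   # (8, u_dim) control/input signal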