Code example #1
0
File: encoder.py  Project: MingyuKim87/MLwM
    def forward(self, inputs):
        '''Encode support/query images into a sampled latent representation.

        Args:
            inputs : images of shape
                (num_tasks, n_way, k_shot, num_channel, img_size, img_size)

        Returns:
            hidden_reparameterization : reparameterized sample from the
                latent posterior; reshaped to a 1-channel square "image"
                when ``self.is_image_feature`` is set
            kl_loss : KL divergence between the posterior and a standard
                normal prior, summed over all elements
        '''
        # Input shape (height == width for these images)
        num_task, n_way, k_shot, num_channel, height, width = inputs.shape

        # Merge (task, way, shot) into one batch dimension so the conv
        # stack sees (num_task * n_way * k_shot, num_channel, height, width)
        hidden = inputs.view(-1, num_channel, height, width)

        # Conv embedding for meta learning; ReLU only after conv layers,
        # other layers (e.g. pooling/batch-norm) are applied as-is
        for layer in self._layers:
            if isinstance(layer, nn.Conv2d):
                hidden = F.relu(layer(hidden))
            else:
                hidden = layer(hidden)

        # Flatten conv features back to (num_task, n_way, k_shot, feature_dim)
        hidden = hidden.view(num_task, n_way, k_shot, -1)

        hidden_mu = self._last_layer_mu(hidden)    # (num_task, n_way, k_shot, output_dim)
        hidden_rho = self._last_layer_rho(hidden)  # (num_task, n_way, k_shot, output_dim)

        # Posterior N(mu, softplus(rho)). F.softplus computes
        # log1p(exp(x)) but is numerically stable: the naive form
        # overflows in float32 once rho exceeds ~88.
        dist_hidden = torch.distributions.Normal(hidden_mu,
                                                 F.softplus(hidden_rho))
        dist_prior = torch.distributions.Normal(0, 1)

        # KL(posterior || N(0, 1)), summed over every element
        kl_loss = torch.distributions.kl.kl_divergence(dist_hidden,
                                                       dist_prior).sum()

        # Reparameterized sample keeps gradients flowing to mu / rho
        hidden_reparameterization = dist_hidden.rsample()

        # Optionally reshape the flat feature into a 1-channel square
        # "image"; assumes output_dim is a perfect square — TODO confirm
        if self.is_image_feature:
            output_dim = hidden_reparameterization.size(-1)
            feature_side = int(math.sqrt(output_dim))
            hidden_reparameterization = hidden_reparameterization.view(
                num_task, n_way, k_shot, 1, feature_side, -1)

        return hidden_reparameterization, kl_loss
Code example #2
0
File: encoder.py  Project: MingyuKim87/MLwM
    def forward(self, inputs):
        '''Encode images through a stochastic conv stack and a stochastic FC head.

        Args:
            inputs : images of shape
                (num_tasks, n_way, k_shot, num_channel, img_size, img_size)

        Returns:
            tuple of (encoded features, accumulated KL loss over all
            Bayesian layers including the final FC layer)
        '''
        # Unpack the 6-D input shape (height == width here)
        n_task, n_way, k_shot, n_channel, height, width = inputs.shape

        # Collapse (task, way, shot) into one batch dimension for the convs
        hidden = inputs.view(-1, n_channel, height, width)

        # Accumulate the KL contribution of every Bayesian layer
        total_kl = 0
        for layer in self._layers:
            if not hasattr(layer, 'kl_loss'):
                # Deterministic layer (e.g. pooling): apply as-is
                hidden = layer(hidden)
                continue
            # Bayesian layer: ReLU activation plus its KL term
            hidden = F.relu(layer(hidden))
            total_kl = total_kl + layer.kl_loss()

        # Restore (task, way, shot) structure, then apply the stochastic head
        hidden = hidden.view(n_task, n_way, k_shot, -1)
        hidden = self._last_layer(hidden)  # (n_task, n_way, k_shot, output_dim)
        total_kl = total_kl + self._last_layer.kl_loss()

        # Side length of the square feature "image" (assumes output_dim
        # is a perfect square — TODO confirm upstream)
        side = int(math.sqrt(hidden.size(-1)))

        # Either an image-shaped feature (1 channel) or a flat vector
        if self.is_image_feature:
            hidden = hidden.view(n_task, n_way, k_shot, 1, side, -1)
        else:
            hidden = hidden.view(n_task, n_way, k_shot, -1)

        return hidden, total_kl
Code example #3
0
File: encoder.py  Project: MingyuKim87/MLwM
    def forward(self, inputs):
        '''Encode flat feature vectors with Bayesian FC layers into an image-like tensor.

        Args:
            inputs : features of shape (num_tasks, n_way, k_shot, feature_dim)

        Returns:
            tuple of (image-shaped encoding, accumulated KL loss)
        '''
        n_task, n_way, k_shot, feat_dim = inputs.shape

        # Flatten the task structure: (n_task * n_way * k_shot, feat_dim)
        hidden = inputs.view(-1, feat_dim)

        # Every layer here is Bayesian: ReLU activation + KL accumulation
        total_kl = 0
        for layer in self._layers:
            hidden = F.relu(layer(hidden))
            total_kl = total_kl + layer.kl_loss()

        # Side length of the square feature "image" (assumes the last
        # dimension is a perfect square — TODO confirm upstream)
        side = int(math.sqrt(hidden.size(-1)))

        # Reshape to a 1-channel image-like tensor per (task, way, shot)
        hidden = hidden.view(n_task, n_way, k_shot, 1, side, -1)

        return hidden, total_kl
Code example #4
0
File: encoder.py  Project: MingyuKim87/MLwM
    def forward(self, inputs):
        '''Encode flat feature vectors with a deterministic FC stack.

        Args:
            inputs : features of shape (num_tasks, n_way, k_shot, feature_dim)

        Returns:
            hidden : encoded features (num_tasks, n_way, k_shot, output_dim)
            kl_loss : always None — this encoder has no stochastic layers
        '''
        # Input shape
        num_task, n_way, k_shot, feature_dim = inputs.shape

        # Flatten task structure: (num_task * n_way * k_shot, feature_dim)
        hidden = inputs.view(-1, feature_dim)

        # Hidden FC layers, each followed by a ReLU
        for layer in self._layers:
            hidden = F.relu(layer(hidden))

        # Restore (task, way, shot) structure before the final projection
        hidden = hidden.view(num_task, n_way, k_shot, -1)
        hidden = self._last_layer(hidden)  # (num_task, n_way, k_shot, output_dim)

        # NOTE: the original computed `output_dim = hidden.size(-1)` here
        # but never used it — dead code removed.

        # Deterministic encoder, hence no KL term
        kl_loss = None

        return hidden, kl_loss
Code example #5
0
File: encoder.py  Project: MingyuKim87/MLwM
    def forward(self, inputs):
        '''Encode images with a deterministic conv stack plus an FC head.

        Args:
            inputs : images of shape
                (num_tasks, n_way, k_shot, num_channel, img_size, img_size)

        Returns:
            tuple of (encoded features, None) — deterministic encoder,
            so there is no KL loss
        '''
        # No stochastic layers, so the KL term is None
        kl_loss = None

        # Unpack the 6-D input shape (height == width here)
        n_task, n_way, k_shot, n_channel, height, width = inputs.shape

        # Collapse (task, way, shot) into one batch dimension for the convs
        hidden = inputs.view(-1, n_channel, height, width)

        # Conv layers get a ReLU; any other layer (e.g. pooling) is applied as-is
        for layer in self._layers:
            if isinstance(layer, nn.Conv2d):
                hidden = F.relu(layer(hidden))
            else:
                hidden = layer(hidden)

        # Restore task structure, then project with the FC head
        hidden = hidden.view(n_task, n_way, k_shot, -1)
        hidden = self._last_layer(hidden)  # (n_task, n_way, k_shot, output_dim)

        # Side of the square feature "image" (assumes output_dim is a
        # perfect square — TODO confirm upstream)
        side = int(math.sqrt(hidden.size(-1)))

        # Optionally reshape to a 1-channel image-like tensor
        if self.is_image_feature:
            hidden = hidden.view(n_task, n_way, k_shot, 1, side, -1)

        return hidden, kl_loss