Example #1
    def forward(self, x):
        # encoder
        # L1
        x = self.conv_1(x)
        size_1 = x.size()
        x, indices_1 = F.max_pool1d_with_indices(x, kernel_size=2)
        # L2
        x = self.conv_2(x)
        size_2 = x.size()
        x, indices_2 = F.max_pool1d_with_indices(x, kernel_size=2)
        # L3
        x = self.conv_3(x)
        size_3 = x.size()
        x, indices_3 = F.max_pool1d_with_indices(x, kernel_size=2)

        # decoder
        # L1 (inverts encoder L3)
        x = F.max_unpool1d(x,
                           kernel_size=2,
                           indices=indices_3,
                           output_size=list(size_3))
        x = self.deconv_1(x)
        # L2 (inverts encoder L2)
        x = F.max_unpool1d(x,
                           kernel_size=2,
                           indices=indices_2,
                           output_size=list(size_2))
        x = self.deconv_2(x)
        # L3 (inverts encoder L1)
        x = F.max_unpool1d(x,
                           kernel_size=2,
                           indices=indices_1,
                           output_size=list(size_1))
        x = self.deconv_3(x)
        return x
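The forward() above references encoder and decoder layers that are not shown. A minimal sketch of a compatible module definition, assuming length-preserving Conv1d layers with arbitrary channel counts (the class name ConvAutoencoder1d, the channel sizes, and kernel_size=3 are assumptions, not part of the original code):

import torch.nn as nn

class ConvAutoencoder1d(nn.Module):
    # Hypothetical container for the forward() above; channel counts and kernel sizes are assumptions.
    def __init__(self, in_channels=1, hidden=16):
        super().__init__()
        # encoder convolutions (padding=1 keeps the sequence length unchanged)
        self.conv_1 = nn.Conv1d(in_channels, hidden, kernel_size=3, padding=1)
        self.conv_2 = nn.Conv1d(hidden, hidden, kernel_size=3, padding=1)
        self.conv_3 = nn.Conv1d(hidden, hidden, kernel_size=3, padding=1)
        # decoder convolutions mirror the encoder and restore the input channel count
        self.deconv_1 = nn.Conv1d(hidden, hidden, kernel_size=3, padding=1)
        self.deconv_2 = nn.Conv1d(hidden, hidden, kernel_size=3, padding=1)
        self.deconv_3 = nn.Conv1d(hidden, in_channels, kernel_size=3, padding=1)

With the forward() from the example attached to this class, an input of shape (N, in_channels, L) with L divisible by 8 (three stride-2 poolings) is reconstructed to its original shape.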
Example #2
    def forward(self, input, size_history, indices_history, kernel_history):

        output = F.max_unpool1d(input,
                                indices_history.pop(),
                                kernel_history.pop(),
                                output_size=size_history.pop())
        output = self.outer_cnn(output)
        output = F.max_unpool1d(output,
                                indices_history.pop(),
                                kernel_history.pop(),
                                output_size=size_history.pop())
        output = self.inner_cnn(output).permute(0, 2, 1)

        output = self.lin(output)

        return output
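The decoder above pops one entry per pooling step from each history list, so the matching encoder must push entries in the same order. A hedged sketch of how those lists could be produced (the function name encode and the kernel sizes are assumptions, not part of the original code):

import torch
import torch.nn.functional as F

def encode(x, kernel_sizes=(2, 2)):
    # For every pooling step, record the pre-pool size, the pooling indices,
    # and the kernel size; the decoder pops them back in reverse order.
    size_history, indices_history, kernel_history = [], [], []
    for k in kernel_sizes:
        size_history.append(x.size())
        x, indices = F.max_pool1d(x, kernel_size=k, return_indices=True)
        indices_history.append(indices)
        kernel_history.append(k)
    return x, size_history, indices_history, kernel_history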
Example #3
    def forward(self, x):
        x_len = x.size(0)
        #variational enc part
        x = self.conv1(x)
        x = F.relu(x)
        x = self.batchnorm1(x)
        x = F.dropout(x, p=self.dropout)
        size1 = x.size()
        kernel_size = x.size()[2]
        x, idx1 = F.max_pool1d(x,
                               stride=1,
                               kernel_size=kernel_size,
                               return_indices=True)
        x = x.view(x_len, -1)
        mu = self.FC_mu(x)
        log_sigma = self.FC_log_sigma(x)
        x = self._sample(mu, log_sigma)
        hidden = x
        #dec part
        x = x.unsqueeze(2)
        x = F.max_unpool1d(x,
                           idx1,
                           stride=1,
                           kernel_size=kernel_size,
                           output_size=size1)
        x = self.batchnorm2(x)
        x = F.relu(x)
        x = self.deconv1(x)

        #class part
        y = F.dropout(hidden, p=self.dropout)
        y = self.fcn(y)
        return x, y, hidden
Example #4
 def forward(self, x, indices):
     """calls MaxUnpool1d using the indices returned previously by HealpixMaxPool
     Args:
         tuple(x (:obj:`torch.tensor`) : [B x Fin x V]
         indices (int)): indices of pixels equiangular maxpooled previously
     Returns:
         [:obj:`torch.tensor`] -- [B x Fin x V_unpool]
     """
     x = F.max_unpool1d(x, indices, self.kernel_size)  # B x Fin x V_unpool
     return x
Example #5
 def backward_pass(m, tensor, weight):
     with torch.no_grad():
         inverted = F.max_unpool1d(tensor,
                                   m.indices,
                                   m.kernel_size,
                                   m.stride,
                                   m.padding,
                                   output_size=m.in_shape)
         del m.indices, m.weights
     return inverted.detach()
Example #6
    def forward(self, inputs, size_history, indices_history, embedded_size):

        combined = torch.zeros(embedded_size, device=self.device)
        for input, cnn, size, indices in zip(inputs, self.cnn_blocks,
                                             size_history, indices_history):
            output = F.max_unpool1d(input, indices, size[-1], output_size=size)
            output = cnn(output)
            combined += output

        output = self.lin(combined.permute(0, 2, 1))

        return output
Example #7
 def test_max_unpool1d(self):
     inp = torch.randn(1, 16, 32, device='cuda', dtype=self.dtype)
     output, indices = F.max_pool1d(inp,
                                    kernel_size=5,
                                    stride=2,
                                    padding=2,
                                    return_indices=True,
                                    ceil_mode=True)
     output = F.max_unpool1d(output,
                             indices,
                             kernel_size=2,
                             stride=2,
                             padding=2)
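For comparison, a minimal round-trip sketch (not part of the original test): when the unpooling parameters mirror the pooling parameters and output_size is passed, the unpooled tensor has the original input shape, with zeros at every non-maximal position.

import torch
import torch.nn.functional as F

inp = torch.randn(1, 16, 32)
pooled, indices = F.max_pool1d(inp, kernel_size=2, stride=2, return_indices=True)
restored = F.max_unpool1d(pooled, indices, kernel_size=2, stride=2,
                          output_size=inp.size())
assert restored.shape == inp.shape  # (1, 16, 32)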
Example #8
    def forward(self, input, size_history, indices_history, kernel_history):

        output = input
        for cnn in self.cnn_blocks:
            output = F.max_unpool1d(output,
                                    indices_history.pop(),
                                    kernel_history.pop(),
                                    output_size=size_history.pop())
            output = cnn(output)

        output = self.lin(output.permute(0, 2, 1))

        return output
Example #9
    def forward(self, x, indices):
        """calls MaxUnpool1d using the indices returned previously by HealpixMaxPool

        Args:
            tuple(x (:obj:`torch.tensor`) : [batch x pixels x features]
            indices (int)): indices of pixels equiangular maxpooled previously

        Returns:
            [:obj:`torch.tensor`] -- [batch x unpooled pixels x features]
        """
        x = x.permute(0, 2, 1)
        x = F.max_unpool1d(x, indices, self.kernel_size)
        x = x.permute(0, 2, 1)
        return x
Example #10
    def forward(self, x, indices):
        """calls pytorch's unpool1d function to create the values while unpooling based on the nearby values
        Parameters
        ----------
        inputs : torch.tensor of shape batch x pixels x features
            Input data
        indices : list
            Indices where the max value was located in unpooled image
        
        Returns
        -------
        x : torch.tensor of shape batch x unpooled pixels x features
            Layer output
        """

        x = x.permute(0, 2, 1)
        x = F.max_unpool1d(x, indices, self.kernel_size)
        x = x.permute(0, 2, 1)
        return x
Example #11
    def forward(self, input, size_history, indices_history, embedded_size):

        output = self.z_lin(input)

        outputs = []
        current_index = 0
        for _, channels in self.kernels_channels:
            outputs.append(output[:, current_index:current_index +
                                  channels].unsqueeze(2))
            current_index += channels

        combined = torch.zeros(embedded_size, device=self.device)
        for input, cnn, size, indices in zip(outputs, self.cnn_blocks,
                                             size_history, indices_history):
            output = F.max_unpool1d(input, indices, size[-1], output_size=size)
            output = cnn(output)
            combined += output

        output = self.fc(combined.permute(0, 2, 1))

        return output
Example #12
 def forward(self, input, indices, output_size=None) -> Tensor:
     input = self.quant_handle(input)
     return F.max_unpool1d(input, indices, self.kernel_size, self.stride,
                           self.padding, output_size)
Example #13
 def forward(self, x, indices, output_size=None):
     return F.max_unpool1d(x, indices, self.kernel_size, self.strides, self.padding, output_size)
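Examples #12 and #13 call F.max_unpool1d from inside a module that stores kernel_size, stride, and padding as attributes, essentially re-implementing nn.MaxUnpool1d. A minimal, hedged sketch of such a wrapper and its use (the class name Unpool1d and its defaults are assumptions):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Unpool1d(nn.Module):
    # Hypothetical wrapper; the class name and default arguments are assumptions.
    def __init__(self, kernel_size, stride=None, padding=0):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride if stride is not None else kernel_size
        self.padding = padding

    def forward(self, x, indices, output_size=None):
        return F.max_unpool1d(x, indices, self.kernel_size, self.stride,
                              self.padding, output_size)

# usage: invert a matching max-pooling step
x = torch.randn(2, 8, 20)
pooled, idx = F.max_pool1d(x, kernel_size=2, return_indices=True)
restored = Unpool1d(kernel_size=2)(pooled, idx, output_size=x.size())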