Code Example #1
File: model.py  Project: tbenst/DeepSynth_Software
    def forward(self, x):
        x = F.relu(self.bn11(self.conv11(x)))
        x = F.relu(self.bn12(self.conv12(x)))
        size1 = x.size()
        x, ind1 = F.max_pool3d(x, kernel_size=2, stride=2, return_indices=True)
        #		x,ind1 = self.pool1(F.relu(self.bn12(self.conv12(x))))

        x = F.relu(self.bn21(self.conv21(x)))
        x = F.relu(self.bn22(self.conv22(x)))
        size2 = x.size()
        x, ind2 = F.max_pool3d(x, kernel_size=2, stride=2, return_indices=True)
        #		x,ind2 = self.pool2(F.relu(self.bn22(self.conv22(x))))

        x = F.relu(self.bn31(self.conv31(x)))
        x = F.relu(self.bn32(self.conv32(x)))

        x = F.max_unpool3d(x, ind2, kernel_size=2, stride=2, output_size=size2)
        #		x = F.relu(self.bn41(self.conv41(self.unpool2(x,ind2))))
        x = F.relu(self.bn41(self.conv41(x)))
        x = F.relu(self.bn42(self.conv42(x)))

        x = F.max_unpool3d(x, ind1, kernel_size=2, stride=2, output_size=size1)
        #		x = F.relu(self.bn51(self.conv51(self.unpool1(x,ind1))))
        x = F.relu(self.bn51(self.conv51(x)))
        x = torch.tanh(self.bn52(self.conv52(x)))

        return x
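
A minimal, self-contained sketch (not from the project) of why the example keeps size1/size2: output_size tells max_unpool3d how to restore a spatial dimension that was odd before pooling, which the indices alone cannot recover.

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 4, 5, 6, 7)                # odd depth on purpose
    size_before = x.size()
    pooled, idx = F.max_pool3d(x, kernel_size=2, stride=2, return_indices=True)
    # Without output_size the unpooled depth would come back as 4, not 5.
    unpooled = F.max_unpool3d(pooled, idx, kernel_size=2, stride=2,
                              output_size=size_before)
    assert unpooled.shape == x.shape
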
Code Example #2
    def forward(self, x):
        x = self.features(x)
        batch_size = x.size(0)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.latentV(x)  # latent vector, size:4096

        # x = x.view(x.size(0),64,4,4,4) # reshape to 4/4/4 cube with 64 channels

        x = x.view(x.size(0), 128, 2, 2, 2)

        x = F.max_unpool3d(x,
                           Variable(
                               torch.Tensor(x.size()).zero_().long().cuda()),
                           kernel_size=2,
                           stride=2)
        deconv1 = nn.ConvTranspose3d(128, 128, 3, padding=1).cuda()
        x = deconv1(x)
        x = F.leaky_relu(x)

        x = F.max_unpool3d(x,
                           Variable(
                               torch.Tensor(x.size()).zero_().long().cuda()),
                           kernel_size=2,
                           stride=2)
        deconv1 = nn.ConvTranspose3d(128, 128, 3, padding=1).cuda()
        x = deconv1(x)
        x = F.leaky_relu(x)

        x = F.max_unpool3d(x,
                           Variable(
                               torch.Tensor(x.size()).zero_().long().cuda()),
                           kernel_size=2,
                           stride=2)
        deconv2 = nn.ConvTranspose3d(128, 64, 3, padding=1).cuda()
        x = deconv2(x)
        x = F.leaky_relu(x)

        x = F.max_unpool3d(x,
                           Variable(
                               torch.Tensor(x.size()).zero_().long().cuda()),
                           kernel_size=2,
                           stride=2)
        deconv3 = nn.ConvTranspose3d(64, 32, 3, padding=1).cuda()
        x = deconv3(x)
        x = F.leaky_relu(x)

        deconv4 = nn.ConvTranspose3d(32, 2, 3, padding=1).cuda()
        x = deconv4(x)

        # x = self.decoding(x) # convert to 3D voxel distribution
        x = x.view(batch_size, 2, 32, 1024)  # 60*2*32*1024  converted to 2d
        return x
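
Note: this forward builds new nn.ConvTranspose3d layers (and moves them to CUDA) on every call, so their weights are randomly re-initialized each pass and are never registered with the module or its optimizer; Variable has also been a no-op wrapper since PyTorch 0.4. A minimal sketch of the usual pattern, with illustrative layer names rather than the project's own:

    import torch.nn as nn
    import torch.nn.functional as F

    class Decoder3d(nn.Module):
        def __init__(self):
            super().__init__()
            # Defined once in __init__, so the parameters are registered and trained.
            self.deconv1 = nn.ConvTranspose3d(128, 128, 3, padding=1)
            self.deconv2 = nn.ConvTranspose3d(128, 64, 3, padding=1)

        def forward(self, x):
            x = F.leaky_relu(self.deconv1(x))
            x = F.leaky_relu(self.deconv2(x))
            return x
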
Code Example #3
    def _suppress_weights(self):
        """Suppresses non-maxima of weights locally.

        Args:
            weights (torch.Tensor): The weights to suppress.
            weights_stride (iterable[int]): The distance between two adjacent
                windows.

        Returns:
            torch.Tensor: The suppressed weights.

        """
        weights = self.sample_weights.weights[None, None, ...]
        pooled, indices = F.max_pool3d(weights,
                                       kernel_size=self.kernel_size,
                                       stride=self.stride,
                                       return_indices=True)
        self._pooled_weights = pooled.squeeze(0).squeeze(0)
        unpooled = F.max_unpool3d(pooled,
                                  indices,
                                  self.kernel_size,
                                  stride=self.stride,
                                  output_size=weights.shape)
        self._sup_weights = unpooled.squeeze(0).squeeze(0)
        sup_weights_flat = self._sup_weights.flatten()
        self._weights_mapping = torch.where(sup_weights_flat > 0)[0]
        self._sup_weights_flat = sup_weights_flat[self._weights_mapping]
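
The pool/unpool pair above acts as a local non-maximum suppression: max_pool3d keeps the largest weight in each window, and max_unpool3d scatters those maxima back to their original positions with zeros everywhere else, which is what the later flattening and torch.where rely on. A tiny standalone sketch of the same idea (shapes are illustrative):

    import torch
    import torch.nn.functional as F

    w = torch.rand(8, 8, 8)                        # some 3D weight volume
    w4d = w[None, None, ...]                       # add batch and channel dims
    pooled, idx = F.max_pool3d(w4d, kernel_size=2, stride=2, return_indices=True)
    sup = F.max_unpool3d(pooled, idx, kernel_size=2, stride=2,
                         output_size=w4d.shape).squeeze(0).squeeze(0)
    # 'sup' equals w at each 2x2x2 window's maximum and is zero elsewhere.
    kept = torch.where(sup.flatten() > 0)[0]       # one surviving index per window
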
Code Example #4
 def backward_pass(m, tensor, weight):
     inverted = F.max_unpool3d(tensor,
                               m.indices,
                               m.kernel_size,
                               m.stride,
                               m.padding,
                               output_size=m.in_shape)
     del m.indices
     return inverted.detach()
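
backward_pass assumes the pooling module m exposed indices, in_shape, kernel_size, stride and padding as attributes when it ran forward. kernel_size, stride and padding already exist on nn.MaxPool3d; one hedged way to stash the other two is a forward hook on a pool created with return_indices=True (a sketch under that assumption, not necessarily the library's own mechanism):

    import torch.nn as nn

    pool = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)

    def save_pool_state(m, inputs, output):
        # With return_indices=True the module returns (pooled, indices).
        m.in_shape = inputs[0].shape
        m.indices = output[1]

    pool.register_forward_hook(save_pool_state)
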
Code Example #5
 def backward(ctx, grad_output):
     input, output = ctx.saved_tensors
     kernel_size, stride, padding, dilation, ceil_mode, return_indices = ctx.hparams
     # output += explain.epsilon
     norm_grad = grad_output / output
     # following EB special case, zero outputs result in relevance of 0 in grad
     norm_grad[output==0] = 0
     # The gradient of each output element is attributed, in a winner-takes-all fashion, to the argmax position of the input
     # input.grad = F.max_unpool3d(torch.ones_like(norm_grad), return_indices, kernel_size, stride, padding, input.shape)
     input.grad = F.max_unpool3d(norm_grad,return_indices,kernel_size,stride,padding,input.shape)
     return (input * input.grad), None, None, None, None, None, None
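
The relevance arriving at each pooled output is divided by the output value, scattered back to the argmax position by max_unpool3d, and multiplied by the input; since the input at the argmax equals the pooled output, each output's relevance passes through unchanged and the total is conserved. A small standalone check of that invariant (illustrative shapes, not part of the autograd Function):

    import torch
    import torch.nn.functional as F

    inp = torch.rand(1, 1, 4, 4, 4)
    out, idx = F.max_pool3d(inp, 2, 2, return_indices=True)
    rel_out = torch.rand_like(out)                 # relevance at the pooled output
    norm = rel_out / out
    norm[out == 0] = 0
    rel_in = inp * F.max_unpool3d(norm, idx, 2, 2, 0, inp.shape)
    # The two sums match up to floating-point error.
    print(rel_out.sum().item(), rel_in.sum().item())
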
Code Example #6
 def test_max_unpool3d(self):
     inp = torch.randn(1, 16, 8, 32, 32, device='cuda', dtype=self.dtype)
     output, indices = F.max_pool3d(inp,
                                    kernel_size=5,
                                    stride=2,
                                    padding=2,
                                    return_indices=True,
                                    ceil_mode=True)
     output = F.max_unpool3d(output,
                             indices,
                             kernel_size=2,
                             stride=2,
                             padding=2)
Code Example #7
 def forward(self, input, indices, output_size=None) -> Tensor:
     input = self.quant_handle(input)
     return F.max_unpool3d(input, indices, self.kernel_size, self.stride,
                           self.padding, output_size)
Code Example #8
 def forward(self, x, indices, output_size=None):
     return F.max_unpool3d(x, indices, self.kernel_size, self.strides, self.padding, output_size)