Example #1
    def forward(self, x, tc):
    #def forward(self, x):

        tc = F.dropout3d(self.bn_0tc(F.relu(self.c0tc(tc))), p=0.1, training=self.training)
        x = F.dropout3d(self.bn_0(F.relu(self.c0(x))), p=0.1, training=self.training)

        x = F.dropout3d(self.bn_02(F.relu(self.c02(x))), p=0.2, training=self.training)
        x = x + tc
        x = F.dropout3d(self.bn_1(F.relu(self.conv_1(x))), p=0.2, training=self.training)
        
        """tc = F.dropout3d(self.bn_0tc(F.relu(self.c0tc(tc))), p=0.1)
        tc = F.dropout3d(self.bn_02tc(F.relu(self.c02tc(tc))), p=0.2)
        tc = F.dropout3d(self.bn_1tc(F.relu(self.conv_1tc(tc))), p=0.2)"""
        

        s1 = x
        x = F.dropout3d(self.bn_2(F.relu(self.conv_2(x))), p=0.3, training=self.training)
        s2 = x
        x = F.dropout3d(self.bn_3(F.relu(self.conv_3(x))), p=0.3, training=self.training)
        s3 = x
        #x = F.dropout2d(self.bn_4(F.relu(self.conv_4(x))), p=0.3)

        s3 = F.dropout3d(self.bn_5(F.relu(self.upsample_1(s3))), p=0.3, training=self.training)
        s3 = s3 + s2
        s3 = F.dropout3d(self.bn_6(F.relu(self.upsample_2(s3))), p=0.3, training=self.training)
        s3 = s3 + s1
        s3 = F.dropout3d(self.bn_7(F.relu(self.upsample_3(s3))), p=0.3, training=self.training)
        s3 = F.dropout3d(self.bn_8(torch.sigmoid(self.upsample_4(s3))), p=0.2, training=self.training)
        return s3
Example #2
    def forward(self, input):
        n, c, h, w, d = input.shape
        x_volumes = x2d_to_volumes(input)
        out_2d, fea_2d = self.denseunet_2d(x_volumes)
        out_3d, fea_3d = dim_tran(out_2d) * 250, dim_tran(fea_2d)
        x_3d = torch.cat((input, out_3d), 1)

        x_3d = F.relu(self.rn(x_3d))
        x_3d = self.up1(x_3d, self.sfs[3].features)
        x_3d = self.up2(x_3d, self.sfs[2].features)
        x_3d = self.up3(x_3d, self.sfs[1].features)
        x_3d = self.up4(x_3d, self.sfs[0].features)

        x_out = F.interpolate(x_3d, size=(h, w, d), mode='trilinear')
        x_out = self.conv1(x_out)
        x_out = F.dropout3d(x_out, p=0.3, training=self.training)
        x_out = F.relu(self.bn1(x_out))

        x_out = x_out + fea_3d
        x_out = self.conv2(x_out)
        x_out_dropout = F.dropout3d(x_out, p=0.1, training=self.training)
        x_out_bn = F.relu(self.bn2(x_out_dropout))
        final_result = self.conv3(x_out_bn)

        return final_result
Example #3
    def forward(self, x, y):
        if self.drop_out:
            x = F.dropout3d(x, training=self.training)
        x = F.relu(self.up_bn(self.up_conv(x)))
        y = F.dropout3d(y, training=self.training)
        x = torch.cat([x, y], dim=1)
        a = x
        for i in self.layer:
            x = i(x)
        x = F.relu(torch.add(x, a))

        return x
Example #4
    def forward(self, x):
        #         print(self.training)
        out = F.dropout3d(self.bn1(F.leaky_relu(self.conv1(x))),
                          p=0.4,
                          training=self.training)
        out = F.dropout3d(self.bn2(F.leaky_relu(self.conv2(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn3(F.leaky_relu(self.conv3(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn4(F.leaky_relu(self.conv4(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn5(F.leaky_relu(self.conv5(out))),
                          p=0.3,
                          training=self.training)

        out = F.dropout3d(self.bn6(F.leaky_relu(self.conv6(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn7(F.leaky_relu(self.conv7(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn8(F.leaky_relu(self.conv8(out))),
                          p=0.3,
                          training=self.training)
        out = torch.sigmoid(self.conv9(out))

        return out
Example #5
    def decode(self, input_s, down_inputs):
        for d, i in zip(self.up, down_inputs[::-1]):
            d.to(self.device)
            # Remember that pooling is optional
            if self.pooling:
                input_s = F.dropout3d(
                    d(
                        torch.cat(
                            (F.interpolate(input_s, size=i.size()[2:]), i),
                            dim=1)), self.dropout, self.training)
            else:
                input_s = F.dropout3d(d(torch.cat((input_s, i), dim=1)),
                                      self.dropout, self.training)

        return input_s
Example #6
 def forward(self, input):
     out = self.conv1(self.relu1(self.batch_norm1(input)))
     out = self.conv2(self.relu2(self.batch_norm2(out)))
     if self.dropout > 0:
         out = F.dropout3d(out, p=self.dropout, training=self.training)
     out = torch.cat([input, out], dim=1)
     return out
Example #7
 def forward(self, x):
     new_feature = super(DenseLayer, self).forward(x)
     if (self.drop_rate > 0):
         new_feature = F.dropout3d(new_feature,
                                   p=self.drop_rate,
                                   training=self.training)
      return torch.cat([x, new_feature], 1)  # each DenseLayer's output is its input concatenated with the newly produced features
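
A minimal, self-contained sketch of the dense-connectivity pattern in Examples #6 and #7 (layer names and shapes here are illustrative, not taken from either source): each layer appends its new features to its input, so the channel count grows by the growth rate at every step.

import torch
import torch.nn as nn

growth_rate, in_channels = 4, 8
layer = nn.Conv3d(in_channels, growth_rate, kernel_size=3, padding=1)
x = torch.randn(1, in_channels, 8, 8, 8)
out = torch.cat([x, layer(x)], dim=1)  # channels grow: 8 -> 12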
Example #8
    def forward(self, inputs):
        if self.training and self.p > 0.:
            if self.method == "uniform":
                # For stepwise training, when p decays toward its minimum value.
                if self.start_p > self.p:
                    self.start_p = self.p
                self.init_value.uniform_(self.start_p, self.p)
            else:
                # Only keep the (0, self.p) range of the Gaussian.
                self.init_value = self.init_value.normal_(
                    self.mean, self.std).clamp(min=0., max=self.p)

            if self.dim == 1:
                outputs = F.dropout(inputs,
                                    self.init_value,
                                    inplace=self.inplace)
            elif self.dim == 2:
                outputs = F.dropout2d(inputs,
                                      self.init_value,
                                      inplace=self.inplace)
            else:
                outputs = F.dropout3d(inputs,
                                      self.init_value,
                                      inplace=self.inplace)
            return outputs
        else:
            return inputs
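
Example #8 draws a fresh drop probability on every forward pass instead of using a fixed one. A minimal sketch of the same idea with plain floats (the function name and bounds are hypothetical, not from the source):

import random
import torch
import torch.nn.functional as F

def random_p_dropout3d(x: torch.Tensor, low: float = 0.0, high: float = 0.3,
                       training: bool = True) -> torch.Tensor:
    # Sample a drop rate for this call only, then apply channel dropout.
    p = random.uniform(low, high)
    return F.dropout3d(x, p=p, training=training)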
Example #9
    def forward(self, x):

        x = self.conv1(x)
        x = self.bn_op_1(x)
        x = F.leaky_relu(x)
        if self.dropout:
            x = F.dropout3d(x, p=0.3, training=self.training)

        x = self.conv2(x)
        x = self.bn_op_2(x)
        x = F.leaky_relu(x)

        if self.dropout:
            x = F.dropout3d(x, p=0.3, training=self.training)

        return x
Example #10
    def forward(self, input):
        feature = super(DenseASPPBlock, self).forward(input)

        if self.drop_rate > 0:
            feature = F.dropout3d(feature, p=self.drop_rate, training=self.training)

        return feature
Example #11
    def encode(self, input_s):
        # We need to keep track of the convolutional outputs, for the skip
        # connections.
        down_inputs = []
        for c in self.down:
            c.to(self.device)
            input_s = F.dropout3d(c(input_s), self.dropout, self.training)
            down_inputs.append(input_s)
            # Remember that pooling is optional
            if self.pooling:
                input_s = F.max_pool3d(input_s, 2)

        self.u.to(self.device)
        input_s = F.dropout3d(self.u(input_s), self.dropout, self.training)

        return down_inputs, input_s
Example #12
    def forward(self, im, features):
        for c, p in zip(self.base_model.convlist, self.base_model.pooling):
            c.to(self.device)
            p.to(self.device)
            im = p(c(im))

        self.base_model.dropout = self.dropout
        im = self.base_model.midconv(im)
        drop = F.dropout3d(im, p=self.dropout, training=self.drop)

        self.global_pooling.to(self.device)
        x = self.global_pooling(drop).view(im.shape[:2])

        for l in self.linear:
            l.to(self.device)
            x = l(x)
            x = F.dropout(x, p=self.dropout, training=self.drop)

        x = torch.cat((x, features.type_as(x)), dim=1)
        self.out.to(self.device)
        output = self.out(x)
        if self.dropout <= 0.5:
            output = F.relu(output)
        else:
            output = F.leaky_relu(output)
        return output
Example #13
 def _add_noise(self, x: torch.Tensor) -> torch.Tensor:
     if self.dropout_p > 0:
         x = F.dropout3d(x, self.dropout_p, training=self.enable_dropout) if self.is_3d else \
             F.dropout2d(x, self.dropout_p, training=self.enable_dropout)
     if self.noise_lvl > 0:
         x.add_(torch.randn_like(x.detach()) * self.noise_lvl)
     return x
Example #14
 def _add_noise(self, x: torch.Tensor) -> torch.Tensor:
     if self.dropout_prob > 0:
         x = F.dropout3d(x, self.dropout_prob, training=self.enable_dropout, inplace=self.inplace) if self.dim == 3 else \
             F.dropout2d(x, self.dropout_prob, training=self.enable_dropout, inplace=self.inplace) if self.dim == 2 else \
             F.dropout(x, self.dropout_prob, training=self.enable_dropout, inplace=self.inplace)
     if self.noise_lvl > 0:
         x = x + (torch.randn_like(x.detach()) * self.noise_lvl)
     return x
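
Both _add_noise variants pair channel dropout with additive Gaussian noise. A self-contained sketch of the pattern (shape and noise level are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 4, 16, 16, requires_grad=True)  # (N, C, D, H, W)
x = F.dropout3d(x, p=0.1, training=True)
# detach() keeps the noise sample itself out of the autograd graph.
x = x + torch.randn_like(x.detach()) * 0.05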
Example #15
 def forward(self, x):
     if not self.first:
         x = self.maxpool(x)
     x = self.bn1(self.conv1(x))
     y = self.relu(self.bn2(self.conv2(x)))
      if self.dropout > 0:
          y = F.dropout3d(y, self.dropout, training=self.training)
      y = self.bn3(self.conv3(y))
     return self.relu(x + y)
Example #16
 def forward(self, x):
     out = self.conv(x)
     if self.drop_rate > 0:
         out = F.dropout3d(out,
                           self.drop_rate,
                           training=self.training,
                           inplace=True)
     out = self.nonlin(self.norm(out))
     return torch.cat([x, out], dim=1)
Example #17
 def forward(self, *prev_features):
     bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
     if self.efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
         bottleneck_output = cp.checkpoint(bn_function, *prev_features)
     else:
         bottleneck_output = bn_function(*prev_features)
     new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
     if self.drop_rate > 0:
         new_features = F.dropout3d(new_features, p=self.drop_rate, training=self.training)
     return new_features
Example #18
    def forward(self, x, device):
        # print('1. ', x.shape)
        h = self.conv1(x, device)
        # print('2. ', h.shape)
        h = relu(h)
        h = self.pool1(h)
        # print('3. ', h.shape)

        h = self.conv2(h, device)
        # print('4. ', h.shape)
        h = relu(h)
        h = self.pool2(h)
        # print('5. ', h.shape)

        h = self.conv3(h, device)
        # print('6. ', h.shape)
        h = relu(h)
        h = self.conv4(h, device)
        # print('7. ', h.shape)
        h = relu(h)
        h = self.conv5(h, device)
        # print('8. ', h.shape)
        h = relu(h)
        h = self.pool3(h)
        # print('9. ', h.shape)

        h = self.pool4(h)
        # print('10. ', h.shape)
        _shape = h.shape
        h = h.view(-1, _shape[1] * _shape[2] * _shape[3] * _shape[4])
        # print('11. ', h.shape)

        h = dropout(h, p=0.2, training=self.training)  # plain dropout: h is 2-D here
        h = self.fc1(h)
        # print('12. ', h.shape)
        h = relu(h)
        h = dropout(h, p=0.2, training=self.training)
        h = self.fc2(h)
        # print('13. ', h.shape)
        h = relu(h)
        y = self.fc3(h)
        # print('14. ', h.shape)
        return y
Example #19
    def forward(self, x):
        a = F.relu(self.down_bn(self.down_conv(x)))
        if self.drop_out:
            x = F.dropout3d(a, training=self.training)
        else:
            x = a
        for i in self.layer:
            x = i(x)
        x = F.relu(torch.add(x, a))

        return x
Example #20
    def forward(self, x):

        x = F.dropout3d(self.bn_0(F.relu(self.c0(x))), p=0.1, training=self.training)
        x = F.dropout3d(self.bn_01(F.relu(self.c01(x))), p=0.2, training=self.training)
        x = F.dropout3d(self.bn_02(F.relu(self.c02(x))), p=0.2, training=self.training)
        x = F.dropout3d(self.bn_1(F.relu(self.conv_1(x))), p=0.2, training=self.training)
        s1 = x
        x = F.dropout3d(self.bn_2(F.relu(self.conv_2(x))), p=0.3, training=self.training)
        s2 = x
        x = F.dropout3d(self.bn_3(F.relu(self.conv_3(x))), p=0.3, training=self.training)
        s3 = x
        #x = F.dropout2d(self.bn_4(F.relu(self.conv_4(x))), p=0.3)

        s3 = F.dropout3d(self.bn_5(F.relu(self.upsample_1(s3))), p=0.3, training=self.training)
        s3 = s3 + s2
        s3 = F.dropout3d(self.bn_6(F.relu(self.upsample_2(s3))), p=0.3, training=self.training)
        s3 = s3 + s1
        s3 = F.dropout3d(self.bn_7(F.relu(self.upsample_3(s3))), p=0.3, training=self.training)
        s3 = F.dropout3d(self.bn_8(self.upsample_4(s3)), p=0.2, training=self.training)
        return s3
Example #21
 def forward(self):
     a = torch.randn(8, 4)
     b = torch.randn(8, 4, 4, 4)
     c = torch.randn(8, 4, 4, 4, 4)
      return len((
          F.dropout(a),
          F.dropout2d(b),
          F.dropout3d(c),
          F.alpha_dropout(a),
          F.feature_alpha_dropout(c),
      ))
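
Example #21 touches the whole dropout family; the variants differ in what they zero. F.dropout masks individual elements, F.dropout2d zeroes whole HxW feature maps, and F.dropout3d zeroes whole DxHxW volumes per channel, scaling survivors by 1/(1-p). A quick illustrative check:

import torch
import torch.nn.functional as F

c = torch.ones(1, 4, 2, 2, 2)  # (N, C, D, H, W)
# Each of the 4 channels comes out either all zeros or all 2.0.
print(F.dropout3d(c, p=0.5, training=True)[0, :, 0, 0, 0])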
Example #22
    def forward(self, x):
        down_list = []
        for c, p in zip(self.convlist, self.pooling):
            c.to(self.device)
            down = c(x)
            drop = F.dropout3d(down, p=self.dropout, training=self.drop)
            down_list.append(drop)
            p.to(self.device)
            x = p(drop)

        x = self.midconv(x)
        x = F.dropout3d(x, p=self.dropout, training=self.drop)

        for d, prev in zip(self.deconvlist, down_list[::-1]):
            interp = F.interpolate(x, size=prev.shape[2:])
            d.to(self.device)
            x = d(torch.cat((prev, interp), dim=1))
            x = F.dropout3d(x, p=self.dropout, training=self.drop)

        self.out.to(self.device)
        output = self.out(x)
        return output
Example #23
 def forward(self, x):
     # print(x.shape)
     x = x.permute(0, 1, 4, 2, 3)
     if not self.first:
         x = self.maxpool(x.squeeze(0))
     x = x.unsqueeze(0)
     x = x.permute(0, 1, 3, 4, 2)
     x = self.bn1(self.conv1(x))
     y = self.relu(self.bn2(self.conv2(x)))
      if self.dropout > 0:
          y = F.dropout3d(y, self.dropout, training=self.training)
      y = self.bn3(self.conv3(y))
     return self.relu(x + y)
Example #24
    def forward(self, x):

        #print("Down---forward", self.meta_loss, self.meta_step_size, self.stop_gradient)
        if not self.first:
            x = maxpool3D(x, kernel_size=2)

        #layer 1 conv, bn
        #print(">>>" ,x, self.conv1.weight, self.conv1.bias)
        x = conv3d(x,
                   self.conv1.weight,
                   self.conv1.bias,
                   meta_loss=self.meta_loss,
                   meta_step_size=self.meta_step_size,
                   stop_gradient=self.stop_gradient)
        x = self.bn1(x)

        #layer 2 conv, bn, relu
        y = conv3d(x,
                   self.conv2.weight,
                   self.conv2.bias,
                   meta_loss=self.meta_loss,
                   meta_step_size=self.meta_step_size,
                   stop_gradient=self.stop_gradient)
        y = self.bn2(y)
        y = relu(y)

        # dropout if required
        if self.dropout > 0:
            y = F.dropout3d(y, self.dropout, training=self.training)

        #layer 3 conv, bn
        z = conv3d(y,
                   self.conv3.weight,
                   self.conv3.bias,
                   meta_loss=self.meta_loss,
                   meta_step_size=self.meta_step_size,
                   stop_gradient=self.stop_gradient)
        z = self.bn3(z)
        z = relu(z)  #was not there

        #final relu
        #k = relu(x + z)

        return z
Example #25
    def forward(self, x):
        #         print(self.training)
        out1 = F.dropout3d(self.bn1(F.leaky_relu(self.conv1(x))),
                           p=0.4,
                           training=self.training)
        #         print('out1 ', out1.shape)
        out2 = F.dropout3d(self.bn2(F.leaky_relu(self.conv2(out1))),
                           p=0.3,
                           training=self.training)
        #         print('out2 ', out2.shape)
        out3 = F.dropout3d(self.bn3(F.leaky_relu(self.conv3(out2))),
                           p=0.3,
                           training=self.training)
        #         print('out3 ', out3.shape)
        out4 = F.dropout3d(self.bn4(F.leaky_relu(self.conv4(out3))),
                           p=0.3,
                           training=self.training)
        #         print('out4 ', out4.shape)
        out5 = F.dropout3d(self.bn5(F.leaky_relu(self.conv5(out4))),
                           p=0.3,
                           training=self.training)
        #         print('out5 ', out5.shape)
        out6 = F.dropout3d(self.bn6(F.leaky_relu(self.conv6(out5))),
                           p=0.3,
                           training=self.training)
        #         print('out6 ', out6.shape)
        out6 = torch.cat([out4, out6], dim=1)
        out7 = F.dropout3d(self.bn7(F.leaky_relu(self.conv7(out6))),
                           p=0.3,
                           training=self.training)
        out7 = torch.cat([out3, out7], dim=1)
        #         print('out7 ', out7.shape)
        out8 = F.dropout3d(self.bn8(F.leaky_relu(self.conv8(out7))),
                           p=0.3,
                           training=self.training)
        out8 = torch.cat([out2, out8], dim=1)
        #         print('out8 ', out8.shape )
        out = torch.sigmoid(self.conv9(out8))

        return out
Example #26
    def forward(self, x):
        #         print(self.training)
        out = F.dropout3d(self.bn1(F.leaky_relu(self.conv1(x))),
                          p=0.4,
                          training=self.training)
        out = F.dropout3d(self.bn2(F.leaky_relu(self.conv2(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn3(F.leaky_relu(self.conv3(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn4(F.leaky_relu(self.conv4(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn5(F.leaky_relu(self.conv5(out))),
                          p=0.3,
                          training=self.training)

        #         out2 = self.stn(out2)
        #         out2 = F.dropout3d(self.m_bn6(F.leaky_relu(self.m_conv6(out))), p=0.3, training=self.training)
        #         out2 = F.dropout3d(self.m_bn7(F.leaky_relu(self.m_conv7(out2))), p=0.3, training=self.training)
        #         out2 = F.dropout3d(self.m_bn8(F.leaky_relu(self.m_conv8(out2))), p=0.3, training=self.training)
        #         out_rois = F.sigmoid(self.conv9(out2))

        out = F.dropout3d(self.bn6(F.leaky_relu(self.conv6(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn7(F.leaky_relu(self.conv7(out))),
                          p=0.3,
                          training=self.training)
        out = F.dropout3d(self.bn8(F.leaky_relu(self.conv8(out))),
                          p=0.3,
                          training=self.training)
        out_frames = torch.sigmoid(self.conv9(out))

        print('out_frames.shape', out_frames.shape)
        out_rois = self.stn(out_frames)

        return out_frames, out_rois
Example #27
    def forward(self, x, meta_loss, meta_step_size, stop_gradient):
        if not self.first:
            x = maxpool3D(x, kernel_size=2)

        #layer 1 conv, bn
        x = conv3d(x,
                   self.conv1.weight,
                   self.conv1.bias,
                   meta_loss=meta_loss,
                   meta_step_size=meta_step_size,
                   stop_gradient=stop_gradient)
        x = self.bn1(x)

        #layer 2 conv, bn, relu
        y = conv3d(x,
                   self.conv2.weight,
                   self.conv2.bias,
                   meta_loss=meta_loss,
                   meta_step_size=meta_step_size,
                   stop_gradient=stop_gradient)
        y = self.bn2(y)
        y = relu(y)

        # dropout if required
        if self.dropout > 0:
            y = F.dropout3d(y, self.dropout, training=self.training)

        #layer 3 conv, bn
        z = conv3d(y,
                   self.conv3.weight,
                   self.conv3.bias,
                   meta_loss=meta_loss,
                   meta_step_size=meta_step_size,
                   stop_gradient=stop_gradient)
        z = self.bn3(z)
        z = relu(z)  #was not there

        return z
Example #28
 def test_dropout3d(self):
     inp = torch.randn(16, 8, 32, 64, 64, device='cuda', dtype=self.dtype)
     output = F.dropout3d(inp, p=0.5, training=True, inplace=False)
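
Forcing training=True, as this test does, is also how Monte Carlo dropout is run at inference: repeated stochastic forward passes over the same input yield a crude uncertainty estimate. A minimal sketch (shapes are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 4, 16, 16)  # (N, C, D, H, W)
samples = torch.stack([F.dropout3d(x, p=0.5, training=True) for _ in range(10)])
mean, var = samples.mean(dim=0), samples.var(dim=0)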
Example #29
 def forward(self, input):
     inputShape = input.shape
     return F.dropout3d(
         input.reshape((inputShape[0], -1, 1, 1, inputShape[-1])), self.p,
         self.training, self.inplace).reshape(inputShape)
Example #30
 def forward(self, x):
     result = f.dropout3d(
         x.reshape((x.shape[0], x.shape[1] * x.shape[2], x.shape[3], 1, 1,
                    x.shape[4])), self.p, self.training, self.inplace)
     return result.reshape(
         (result.shape[0], x.shape[1], x.shape[2], x.shape[3], x.shape[4]))
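
Examples #29 and #30 exploit the fact that dropout3d zeroes entire channels (dim 1): folding the axes that should be dropped together into the channel dimension makes whole slices get kept or zeroed as a unit. A sketch of the same trick under assumed shapes:

import torch
import torch.nn.functional as F

x = torch.randn(2, 6, 5, 4, 3)              # (N, C, D, H, W)
flat = x.reshape(2, -1, 1, 1, x.shape[-1])  # (N, C*D*H, 1, 1, W)
y = F.dropout3d(flat, p=0.5, training=True).reshape(x.shape)
# Each length-3 vector along the last axis is dropped or kept whole.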