Example 1
from functools import partial

import torch.nn as nn
from dpipe import layers  # dpipe (deep_pipe) provides the building blocks used below

# `merge_branches` and `STRUCTURE` are assumed to be defined elsewhere in the
# original module; they are not part of this snippet.


class MidlineNet(nn.Module):  # hypothetical name; the snippet omits the class statement
    def __init__(self):
        super().__init__()

        self.init_block = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            layers.PreActivation2d(32, 32, kernel_size=3, padding=1),
        )

        # no upsampling is needed here because it is done in `merge_branches`
        upsample = nn.Sequential  # same as nn.Identity, but supported by older PyTorch versions
        downsample = partial(nn.MaxPool2d, kernel_size=2)

        self.midline_head = layers.InterpolateToInput(
            nn.Sequential(
                downsample(),

                # unet
                layers.FPN(layers.ResBlock2d,
                           downsample=downsample,
                           upsample=upsample,
                           merge=merge_branches,
                           structure=STRUCTURE,
                           kernel_size=3,
                           padding=1),
                # final logits
                layers.PreActivation2d(32, 1, kernel_size=1),
            ),
            mode='bilinear')

        self.limits_head = layers.InterpolateToInput(
            nn.Sequential(
                layers.ResBlock2d(32, 32, kernel_size=3, padding=1),
                downsample(),
                layers.ResBlock2d(32, 32, kernel_size=3, padding=1),

                # 2D feature map to 1D feature map:
                nn.AdaptiveMaxPool2d((None, 1)),  # (N, 32, H, W) -> (N, 32, H, 1)
                layers.Reshape('0', '1', -1),     # (N, 32, H, 1) -> (N, 32, H)
                nn.Conv1d(32, 8, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Conv1d(8, 1, kernel_size=3, padding=1)),  # one logit per row
            mode='linear',
            axes=0)
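
The snippet omits `forward`; a minimal sketch of how the two heads could be wired, assuming both consume the features produced by `init_block`, is:

    def forward(self, x):
        x = self.init_block(x)          # (N, 32, H, W) features
        midline = self.midline_head(x)  # per-pixel logits, interpolated back to (H, W)
        limits = self.limits_head(x)    # one logit per row, interpolated back to length H
        return midline, limits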
Example 2
def build_encoder(nums_conv, channels, n_features, n_narrow, kernel_sizes,
                  padding, stride, poolings):
    '''
    :param nums_conv: int, number of convolutional blocks
    :param channels: list of ints; its length must be even, giving the in/out channel counts for each convolution
    :param n_features: number of features after the convolutional output is flattened
    :param n_narrow: size of the narrow inner representation
    :param kernel_sizes: kernel sizes of the convolutions
    :param padding: padding of the convolutions
    :param stride: stride of the convolutions
    :param poolings: list of pooling layers applied after the blocks
    :return: the encoder: a CNN followed by a fully connected layer
    '''
    # `convs` is a helper assumed to be defined elsewhere in the original module.
    encoder = nn.Sequential(
        convs(nums_conv,
              channels,
              kernel_sizes,
              [padding] * nums_conv,
              [stride] * nums_conv,
              poolings,
              nn.Sequential(),
              conv_module=nn.Conv2d),
        layers.Reshape('0', -1),  # flatten everything but the batch axis
        nn.Linear(n_features, n_narrow))

    return encoder
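
A minimal usage sketch. All values below are illustrative, and the exact layout of `channels` and `poolings` depends on the `convs` helper, which is not part of the snippet; for two blocks with 2x2 poolings on 28x28 single-channel inputs, the flattened size would be n_features = 32 * 7 * 7:

encoder = build_encoder(nums_conv=2,
                        channels=[1, 16, 16, 32],  # assumed in/out pair per block
                        n_features=32 * 7 * 7,
                        n_narrow=64,
                        kernel_sizes=[3, 3],
                        padding=1,
                        stride=1,
                        poolings=[nn.MaxPool2d(2), nn.MaxPool2d(2)])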
Example 3
# Imports as in Example 1 (functools.partial, torch.nn as nn, dpipe's `layers`).
# `unetConv2`, `unetUp`, `SELayer`, `BasicBlock`, `SoftArgmax` and `init_weights`
# are assumed to be defined elsewhere in the original module.
class Ours_CARNet(nn.Module):
    def __init__(self,
                 n_classes=2,
                 in_channels=3,
                 feature_scale=4,
                 is_batchnorm=True,
                 up_sample_mode='transpose_conv',
                 beta=10):
        super().__init__()
        self.up_sample_mode = up_sample_mode
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale
        self.beta = beta

        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]
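        # with the default feature_scale=4 this gives filters = [16, 32, 64, 128, 256],
        # which matches the 16 input channels of `limits_head` below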

        downsample = partial(nn.MaxPool2d, kernel_size=2)
        self.limits_head = layers.InterpolateToInput(
            nn.Sequential(
                layers.ResBlock2d(16, 32, kernel_size=3, padding=1),
                downsample(),
                layers.ResBlock2d(32, 32, kernel_size=3, padding=1),

                # 2D feature map to 1D feature map
                nn.AdaptiveMaxPool2d((None, 1)),
                layers.Reshape('0', '1', -1),
                nn.Conv1d(32, 8, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Conv1d(8, 1, kernel_size=3, padding=1)),
            mode='linear',
            axes=0)

        # downsampling
        self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2)

        self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)

        self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm)
        self.maxpool3 = nn.MaxPool2d(kernel_size=2)

        self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm)
        self.maxpool4 = nn.MaxPool2d(kernel_size=2)

        self.center = unetConv2(filters[3], filters[4], self.is_batchnorm)

        # upsampling
        self.up_concat4 = unetUp(filters[4], filters[3], self.up_sample_mode)
        self.up_concat3 = unetUp(filters[3], filters[2], self.up_sample_mode)
        self.up_concat2 = unetUp(filters[2], filters[1], self.up_sample_mode)
        self.up_concat1 = unetUp(filters[1], filters[0], self.up_sample_mode)

        # final conv (without any concat)
        self.final_1 = nn.Conv2d(filters[0], 1, 1)
        self.sigmoid = nn.Sigmoid()

        # ===== refine net ===== #
        self.laternal_1 = nn.Conv2d(filters[0], filters[0], 1)
        self.laternal_2 = nn.Conv2d(filters[1], filters[0], 1)
        self.laternal_3 = nn.Conv2d(filters[2], filters[0], 1)
        self.laternal_4 = nn.Conv2d(filters[3], filters[0], 1)
        self.laternal_5 = nn.Conv2d(filters[4], filters[0], 1)

        self.refine_conv_1 = SELayer(filters[0])
        self.refine_conv_2 = nn.Sequential(
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            SELayer(filters[0]))
        self.refine_conv_3 = nn.Sequential(
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            SELayer(filters[0]))
        self.refine_conv_4 = nn.Sequential(
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            SELayer(filters[0]))
        self.refine_conv_5 = nn.Sequential(
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            unetConv2(filters[0], filters[0], self.is_batchnorm),
            SELayer(filters[0]))

        self.refine_final = unetConv2(filters[0] * 5, filters[0],
                                      self.is_batchnorm)
        self.final_2 = nn.Conv2d(filters[0], 1, 1)

        # ===== regression_based_refine ===== #
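        # Each residual stage below halves the resolution; the `downsample_*`
        # modules are the stride-2 projections applied to the shortcut branch
        # (assuming a torchvision-style `BasicBlock`).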
        self.downsample_a = nn.Sequential(
            nn.Conv2d(1, 32, 3, stride=2, padding=1), nn.BatchNorm2d(32))
        self.res_blocks_a = BasicBlock(inplanes=1,
                                       planes=32,
                                       stride=2,
                                       downsample=self.downsample_a)
        self.downsample_b = nn.Sequential(
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.BatchNorm2d(64))
        self.res_blocks_b = BasicBlock(inplanes=32,
                                       planes=64,
                                       stride=2,
                                       downsample=self.downsample_b)
        self.downsample_c = nn.Sequential(
            nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.BatchNorm2d(128))
        self.res_blocks_c = BasicBlock(inplanes=64,
                                       planes=128,
                                       stride=2,
                                       downsample=self.downsample_c)

        self.softargmax = SoftArgmax(beta=self.beta)
        self.res_blocks_mid = nn.Sequential(
            self.res_blocks_a, self.res_blocks_b, self.res_blocks_c)
        self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
        self.regress_mid = nn.Sequential(nn.Linear(128 * 4 * 4, 600),
                                         nn.Linear(600, 400))
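        # note: the (128, 4, 4) pooled features are presumably flattened in
        # `forward` (not shown) before `regress_mid` maps 128*4*4 -> 600 -> 400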

        # initialise weights
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.BatchNorm2d)):
                init_weights(m, init_type='kaiming')
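
A minimal instantiation sketch (`forward` is not part of this snippet, so only construction is shown; `torch` is assumed to be imported):

net = Ours_CARNet(n_classes=2, in_channels=3, feature_scale=4)
x = torch.randn(1, 3, 256, 256)  # dummy RGB batch
# net(x) depends on the omitted `forward`, which wires together the U-Net,
# the refine net and the regression branch defined above.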