Example #1
0
    def Tiramisu(self, params):
        """Build the FC-DenseNet ("One Hundred Layers Tiramisu") model.

        References:
            Simon Jegou et al. The One Hundred Layers Tiramisu: Fully
            Convolutional Densenets for Semantic Segmentation.
        """
        self.ids_inputs = params["INPUTS_IDS_MODEL"]
        self.ids_outputs = params["OUTPUTS_IDS_MODEL"]

        # Channels-first input; spatial dimensions are left variable.
        crop_size = params['IMG_CROP_SIZE']
        image = Input(shape=(crop_size[-1], None, None),
                      name=self.ids_inputs[0])

        # Hoist the hyper-parameters used more than once.
        first_filters = params['TIRAMISU_INIT_FILTERS']
        growth = params['TIRAMISU_GROWTH_RATE']
        n_transitions = params['TIRAMISU_N_TRANSITION_BLOCKS']
        layers_down = params['TIRAMISU_N_LAYERS_DOWN']
        layers_up = params['TIRAMISU_N_LAYERS_UP']
        n_bottleneck = params['TIRAMISU_BOTTLENECK_LAYERS']
        dropout = params['DROPOUT_P']
        init = params['WEIGHTS_INIT']

        # Initial convolution feeding the downsampling path.
        x = Conv2D(first_filters, (3, 3),
                   kernel_initializer=init,
                   padding='same',
                   name='conv_initial')(image)

        # Downsampling path: each transition-down block also yields a
        # skip connection, saved for the matching upsampling stage.
        # Dense connectivity grows the channel count by layers * growth
        # at every stage.
        skips = []
        filters_so_far = first_filters
        for depth in range(n_transitions):
            filters_so_far += layers_down[depth] * growth
            x, skip = self.add_transitiondown_block(x, filters_so_far, 2,
                                                    init,
                                                    layers_down[depth],
                                                    growth,
                                                    dropout)
            skips.append(skip)

        # Bottleneck dense block in the middle of the path
        # (feature maps: 512 input, 592 output).
        x = self.add_dense_block(x, n_bottleneck, growth, dropout, init)

        # Upsampling path: consume the skip connections deepest-first.
        filters_so_far = n_bottleneck * growth
        for depth, skip in enumerate(reversed(skips)):
            x = self.add_transitionup_block(x, skip, filters_so_far,
                                            init,
                                            layers_up[depth], growth,
                                            dropout)
            filters_so_far = layers_up[depth] * growth

        # 1x1 convolution producing per-pixel class scores:
        # (batch_size, classes, width, height).
        x = Conv2D(params['NUM_CLASSES'], (1, 1),
                   kernel_initializer=init,
                   padding='same')(x)

        # Collapse the spatial dims to (batch, classes, width*height),
        # then move classes last so the softmax runs over them.
        x = Lambda(
            lambda t: t.flatten(ndim=3),
            output_shape=lambda s: (s[0], params['NUM_CLASSES'], None))(x)
        x = Permute((2, 1))(x)

        matrix_out = Activation(params['CLASSIFIER_ACTIVATION'],
                                name=self.ids_outputs[0])(x)
        self.model = Model(inputs=image, outputs=matrix_out)
Example #2
0
    def ClassicUpsampling(self, params):
        """Build a U-Net-style encoder/decoder segmentation model.

        Four downsampling stages of stacked 3x3 convolutions, a 512-filter
        bottleneck, and four upsampling stages that concatenate the
        matching encoder features via center-cropped skip connections.

        References:
            Olaf Ronneberger et al. U-net: Convolutional networks for
            biomedical image segmentation.
        """
        self.ids_inputs = params["INPUTS_IDS_MODEL"]
        self.ids_outputs = params["OUTPUTS_IDS_MODEL"]

        # Channels-first input; spatial dimensions are left variable.
        crop = params['IMG_CROP_SIZE']
        image = Input(shape=tuple([crop[-1], None, None]),
                      name=self.ids_inputs[0])

        def _conv_stack(tensor, n_filters, first_name=None):
            # Three stacked 3x3 ReLU convolutions; spatial size preserved.
            tensor = Conv2D(n_filters, (3, 3), activation='relu',
                            padding='same', name=first_name)(tensor)
            tensor = Conv2D(n_filters, (3, 3), activation='relu',
                            padding='same')(tensor)
            return Conv2D(n_filters, (3, 3), activation='relu',
                          padding='same')(tensor)

        def _up_merge(tensor, skip):
            # Upsample x2, zero-pad, then center-crop-concatenate with
            # the encoder skip connection along the channel axis.
            tensor = UpSampling2D(size=(2, 2))(tensor)
            tensor = ZeroPadding2D()(tensor)
            return Concat(cropping=[None, None, 'center', 'center'])(
                [skip, tensor])

        # Downsampling path; keep pre-pool activations as skip connections.
        conv1 = _conv_stack(image, 64, first_name='conv1_1')
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = _conv_stack(pool1, 128)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = _conv_stack(pool2, 128)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = _conv_stack(pool3, 256)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        # Middle of the path (bottleneck).
        conv5 = _conv_stack(pool4, 512)

        # Upsampling path, mirroring the encoder stages.
        conv6 = _conv_stack(_up_merge(conv5, conv4), 256)
        conv7 = _conv_stack(_up_merge(conv6, conv3), 128)
        conv8 = _conv_stack(_up_merge(conv7, conv2), 128)
        conv9 = _conv_stack(_up_merge(conv8, conv1), 64)

        # Final 1x1 classification layer (batch_size, classes, width, height).
        # Fixed: the Keras 1 `border_mode` keyword (a TypeError under
        # Keras 2) is replaced with `padding`, matching every other
        # Conv2D call in this method.
        x = Conv2D(params['NUM_CLASSES'], (1, 1), padding='same')(conv9)

        # Collapse the spatial dims to (batch, classes, width*height),
        # then move classes last so the softmax runs over them.
        x = Lambda(
            lambda x: x.flatten(ndim=3),
            output_shape=lambda s: tuple([s[0], params['NUM_CLASSES'], None]))(
                x)
        x = Permute((2, 1))(x)

        matrix_out = Activation(params['CLASSIFIER_ACTIVATION'],
                                name=self.ids_outputs[0])(x)
        self.model = Model(inputs=image, outputs=matrix_out)