Example #1
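# TF 1.x; Stacker and batchActivation are project-specific helpers
# (batchActivation presumably applies batch normalization plus an activation).
import tensorflow as tf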
def _residual_block_pre_activation(x,
                                   n_channel,
                                   filter_=(3, 3),
                                   name='residual_block_pre_activation'):
    with tf.variable_scope(name):
        stack = Stacker(x)

        stack.add_layer(batchActivation)
        stack.layers_conv2d(n_channel, filter_, (1, 1), 'SAME')

        stack.add_layer(batchActivation)
        stack.layers_conv2d(n_channel, filter_, (1, 1), 'SAME')

        stack.residual_add(x)

        return stack.last_layer
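For reference, a minimal plain-TensorFlow 1.x sketch of the same pre-activation residual block, assuming batchActivation applies batch normalization followed by ReLU (Stacker is a project-specific layer-chaining helper and is not reproduced here):

import tensorflow as tf

def residual_block_pre_activation_sketch(x, n_channel, filter_=(3, 3),
                                         name='residual_block_pre_activation'):
    # Pre-activation ordering: BN -> ReLU -> conv, applied twice, then the
    # identity shortcut is added back (assumes x already has n_channel channels).
    with tf.variable_scope(name):
        h = tf.nn.relu(tf.layers.batch_normalization(x))
        h = tf.layers.conv2d(h, n_channel, filter_, (1, 1), 'SAME')
        h = tf.nn.relu(tf.layers.batch_normalization(h))
        h = tf.layers.conv2d(h, n_channel, filter_, (1, 1), 'SAME')
        return x + h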
Example #2
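    # TF 1.x. Stacker, DynamicDropoutRate, InceptionV2UNetEncoderModule, and
    # CONV_FILTER_3311 are project-specific helpers; pprint is pprint.pprint
    # from the standard library.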
    def build(self):

        self.DynamicDropoutRate = DynamicDropoutRate(self.dropout_rate)
        self.drop_out_tensor = self.DynamicDropoutRate.tensor

        with tf.variable_scope(self.name, reuse=self.reuse):
            self.encoder = InceptionV2UNetEncoderModule(self.x,
                                                        None,
                                                        resize_shape=(201, 201),
                                                        capacity=self.capacity)
            self.encoder.build()
            encode = self.encoder.last_layer
            skip_tensors = self.encoder.skip_tensors[::-1]

            bottom_layer = self.bottom_layer(encode, self.capacity * 128,
                                             self.depth, self.drop_out_tensor)

            pprint(skip_tensors)
            stacker = Stacker(bottom_layer)

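            # bottleneck to 13: upsample by a stride-2 transposed conv and crop
            # the output to 13x13 to match skip_tensors[0]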
            stacker.layers_conv2d_transpose(self.n_channel * 16 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat(
                [stacker.last_layer[:, :13, :13, :], skip_tensors[0]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 16 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 16 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 16 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 16 * 2)
            stacker.bn()
            stacker.relu()

            # 13 to 26
            stacker.layers_conv2d_transpose(self.n_channel * 8 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat([stacker.last_layer, skip_tensors[1]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 8 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 8 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 8 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 8 * 2)
            stacker.bn()
            stacker.relu()

            # 26 to 51 (crop 52 -> 51 to match the skip tensor)
            stacker.layers_conv2d_transpose(self.n_channel * 4 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat(
                [stacker.last_layer[:, :51, :51, :], skip_tensors[2]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 4 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 4 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 4 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 4 * 2)
            stacker.bn()
            stacker.relu()

            # 51 to 101 (crop 102 -> 101 to match the skip tensor)
            stacker.layers_conv2d_transpose(self.n_channel * 2 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat(
                [stacker.last_layer[:, :101, :101, :], skip_tensors[3]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 2 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 2 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 2 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 2 * 2)
            stacker.bn()
            stacker.relu()

            decode = stacker.last_layer

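            # final 3x3, stride-1 conv to per-class logits (CONV_FILTER_3311
            # presumably encodes kernel (3, 3), stride (1, 1))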
            stacker = Stacker(decode, name='to_match')
            stacker.conv2d(self.n_classes, CONV_FILTER_3311)
            self.logit = stacker.last_layer
            self.proba = stacker.sigmoid()
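Because a stride-2 'SAME' transposed convolution doubles odd spatial sizes to even ones (e.g. 51 to 102), each decoder stage above crops the upsampled tensor back to the skip tensor's size before concatenating. A minimal sketch of that crop-and-concat step in plain TensorFlow 1.x (function name hypothetical; assumes static NHWC shapes):

import tensorflow as tf

def crop_and_concat(upsampled, skip):
    # Crop the upsampled tensor to the skip tensor's height/width, then
    # join the two along the channel axis (NHWC layout).
    h, w = skip.get_shape().as_list()[1:3]
    return tf.concat([upsampled[:, :h, :w, :], skip], axis=3)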