Example 1
def discriminator(model, input_image, reuse=False, initial_size=(4, 4), max_size=None, name=None, depth=1, transition=False, alpha_transition=0, **kwargs):

    """Progressive GAN discriminator with optional fade-in transition.
    """

    import tensorflow as tf
    import numpy as np

    with tf.variable_scope(name) as scope:

        convs = []

        if reuse:
            scope.reuse_variables()

        # fromRGB
        convs += [leaky_relu(DnConv(input_image, output_dim=model.get_filter_num(depth), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(input_image.shape[1]), dim=model.dim))]

        for i in range(depth):

            # Convolutional blocks. TODO: Replace with block module.
            convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))]
            convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - 1 - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim))]
            
            # Calculate Next Downsample Ratio
            # Whoever can calculate this in a less dumb way than this gets a Fields Medal.
            if max_size is None:
                downsample_ratio = (2,) * model.dim
            else:
                reference_shape = []
                current_shape = input_image.shape
                for idx, axis_size in enumerate(current_shape):
                    reference_shape += [axis_size // initial_size[idx]]
                downsample_ratio = []
                for size_idx, size in enumerate(max_size):
                    if size // initial_size[size_idx] > min(reference_shape):
                        downsample_ratio += [1]
                    else:
                        downsample_ratio += [2]
                downsample_ratio = tuple(downsample_ratio)

            convs[-1] = DnAveragePooling(convs[-1], downsample_ratio, dim=model.dim)

            if i == 0 and transition:
                transition_conv = DnAveragePooling(input_image, downsample_ratio, dim=model.dim)
                transition_conv = leaky_relu(DnConv(transition_conv, output_dim=model.get_filter_num(depth - 1), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(transition_conv.shape[1]), dim=model.dim))
                convs[-1] = alpha_transition * convs[-1] + (1 - alpha_transition) * transition_conv

        convs += [minibatch_state_concat(convs[-1])]
        convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(3,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))

        output = tf.reshape(convs[-1], [tf.shape(convs[-1])[0], np.prod(initial_size) * model.get_filter_num(0)])

        # Currently erroring
        # discriminate_output = dense(output, output_size=1, name='discriminator_n_fully')

        discriminate_output = tf.layers.dense(output, 1, name='discriminator_n_1_fully')

        return tf.nn.sigmoid(discriminate_output), discriminate_output
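
The "Calculate Next Downsample Ratio" block above decides, per axis, whether to keep average-pooling by 2 or to leave the axis alone once max_size is taken into account. The same arithmetic as a standalone sketch over plain integers, assuming spatial sizes only (no batch or channel axes):

def downsample_ratio(current_shape, initial_size, max_size=None):
    """Mirror of the ratio calculation above, over plain integers."""
    if max_size is None:
        return (2,) * len(initial_size)
    # How many times each axis of the current input exceeds the initial size.
    reference = [current // initial for current, initial in zip(current_shape, initial_size)]
    ratio = []
    for axis, size in enumerate(max_size):
        # An axis whose max_size budget already exceeds the smallest current
        # ratio is left alone (1); otherwise it is pooled by 2.
        if size // initial_size[axis] > min(reference):
            ratio.append(1)
        else:
            ratio.append(2)
    return tuple(ratio)

print(downsample_ratio((64, 64, 8), (4, 4, 4), max_size=(64, 64, 8)))   # (1, 1, 2)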
Example 2
def discriminator(model, input_image, reuse=False, name=None, depth=1, transition=False, **kwargs):

    """
    """

    with tf.variable_scope(name) as scope:

        if reuse:
            scope.reuse_variables()

        if transition:
            transition_conv = DnAveragePooling(input_image, (2,) * model.dim, dim=model.dim)
            transition_conv = leaky_relu(DnConv(transition_conv, output_dim=model.get_filter_num(depth - 1), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(transition_conv.shape[1]), dim=model.dim))

        convs = []

        # fromRGB
        convs += [leaky_relu(DnConv(input_image, output_dim=model.get_filter_num(depth), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(input_image.shape[1]), dim=model.dim))]

        for i in range(depth):

            convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))]

            convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - 1 - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim))]
            convs[-1] = DnAveragePooling(convs[-1], (2,) * model.dim, dim=model.dim)

        convs += [minibatch_state_concat(convs[-1])]
        convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(3,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))

        #for D -- what's going on with the channel number here?
        output = tf.reshape(convs[-1], [tf.shape(convs[-1])[0], 4 * 4 * model.get_filter_num(0)])

        # Currently erroring
        # discriminate_output = dense(output, output_size=1, name='discriminator_n_fully')

        discriminate_output = tf.layers.dense(output, model.get_filter_num(0), name='discriminator_n_1_fully')
        discriminate_output = tf.layers.dense(discriminate_output, 1, name='discriminator_n_2_fully')

        return tf.nn.sigmoid(discriminate_output), discriminate_output
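
minibatch_state_concat is not defined in these excerpts; in progressive GAN discriminators it is usually the minibatch standard-deviation trick, which appends one feature map holding the batch-wide average standard deviation. A NumPy sketch of that idea, as an assumption about what the helper does rather than DeepNeuro's actual implementation:

import numpy as np

def minibatch_stddev_concat(features):
    """Append a constant feature map equal to the mean, over positions and
    channels, of the per-feature standard deviation across the batch."""
    # features: (batch, height, width, channels)
    std_per_feature = features.std(axis=0)                     # (H, W, C)
    mean_std = std_per_feature.mean()                          # single scalar
    stat_map = np.full(features.shape[:-1] + (1,), mean_std, dtype=features.dtype)
    return np.concatenate([features, stat_map], axis=-1)

x = np.random.rand(8, 4, 4, 16).astype(np.float32)
print(minibatch_stddev_concat(x).shape)   # (8, 4, 4, 17)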
Example 3
def unet(model, input_tensor, backend='tensorflow'):

        from keras.layers.merge import concatenate

        left_outputs = []

        for level in range(model.depth):

            filter_num = int(model.max_filter / (2 ** (model.depth - level)) / model.downsize_filters_factor)

            if level == 0:
                left_outputs += [DnConv(input_tensor, filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)]
                left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
            else:
                left_outputs += [DnMaxPooling(left_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
                left_outputs[level] = DnConv(left_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)
                left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)

            if model.dropout is not None and model.dropout != 0:
                left_outputs[level] = DnDropout(model.dropout)(left_outputs[level])

            if model.batch_norm:
                left_outputs[level] = DnBatchNormalization(left_outputs[level])

        right_outputs = [left_outputs[model.depth - 1]]

        for level in range(model.depth):

            filter_num = int(model.max_filter / (2 ** (level)) / model.downsize_filters_factor)

            if level > 0:
                right_outputs += [DnUpsampling(right_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
                right_outputs[level] = concatenate([right_outputs[level], left_outputs[model.depth - level - 1]], axis=model.dim + 1)
                right_outputs[level] = DnConv(right_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_1'.format(level), backend=backend)
                right_outputs[level] = DnConv(right_outputs[level], int(filter_num / 2), model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_2'.format(level), backend=backend)
            else:
                continue

            if model.dropout is not None and model.dropout != 0:
                right_outputs[level] = DnDropout(model.dropout)(right_outputs[level])

            if model.batch_norm:
                right_outputs[level] = DnBatchNormalization()(right_outputs[level])

        output_layer = DnConv(right_outputs[level], 1, (1, ) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='end_conv', backend=backend) 

        # TODO: Brainstorm better way to specify outputs
        if model.input_tensor is not None:
            return output_layer

        return model.model
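
The filter count per level follows max_filter / 2**(depth - level) / downsize_filters_factor on the contracting path and max_filter / 2**level / downsize_filters_factor on the expanding path. A quick sketch of the schedule these expressions produce, with illustrative parameter values:

def unet_filter_schedule(max_filter=512, depth=4, downsize_filters_factor=1):
    """Filter counts per level for the encoder and decoder loops above."""
    encoder = [int(max_filter / (2 ** (depth - level)) / downsize_filters_factor)
               for level in range(depth)]
    decoder = [int(max_filter / (2 ** level) / downsize_filters_factor)
               for level in range(depth)]
    return encoder, decoder

print(unet_filter_schedule())
# ([32, 64, 128, 256], [512, 256, 128, 64]) with the defaults above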
Example 4
    def build_model(self):

        self.output_layer = DnConv(self.inputs,
                                   1,
                                   self.kernel_size,
                                   stride_size=(1, ) * self.dim,
                                   dim=self.dim,
                                   name='minimal_conv',
                                   backend='keras')

        if self.input_tensor is None:

            super(MinimalKerasCNN, self).build()
            return self.model

        else:
            return self.output_layer
Example 5
    def build_model(self):

        output_layer = DnConv(self.inputs,
                              1,
                              self.kernel_size,
                              stride_size=(1, ) * self.dim,
                              dim=self.dim,
                              name='minimal_conv',
                              backend='keras')

        # TODO: Brainstorm better way to specify outputs
        if self.input_tensor is not None:
            return output_layer

        if self.output_type == 'regression':
            self.model = Model(inputs=self.inputs, outputs=output_layer)
            self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate),
                               loss='mean_squared_error',
                               metrics=['mean_squared_error'])

        if self.output_type == 'binary_label':
            act = Activation('sigmoid')(output_layer)
            self.model = Model(inputs=self.inputs, outputs=act)
            self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate),
                               loss=dice_coef_loss,
                               metrics=[dice_coef])

        if self.output_type == 'categorical_label':
            act = Activation('softmax')(output_layer)
            self.model = Model(inputs=self.inputs, outputs=act)
            self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate),
                               loss='categorical_crossentropy',
                               metrics=['categorical_accuracy'])

        super(MinimalKerasCNN, self).build()

        return self.model
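
dice_coef and dice_coef_loss are referenced above but not defined in these excerpts. A common Keras-backend formulation of the soft Dice coefficient, shown here as an assumption for completeness:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    """Soft Dice coefficient: 2 * intersection / (sum of both masks), smoothed."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Negated so that maximizing overlap minimizes the loss.
    return -dice_coef(y_true, y_pred)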
Example 6
def generator(model, latent_var, depth=1, initial_size=4, reuse=False, transition=False, alpha_transition=0, name=None):

    """
    """

    with tf.variable_scope(name) as scope:

        convs = []

        if reuse:
            scope.reuse_variables()

        convs += [tf.reshape(latent_var, [tf.shape(latent_var)[0]] + [1] * model.dim + [model.latent_size])]

        # TODO: refactor the padding on this step. Or replace with a dense layer?
        convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(4,) * model.dim, stride_size=(1,) * model.dim, padding='Other', name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim)), model.dim)

        convs += [tf.reshape(convs[-1], [tf.shape(latent_var)[0]] + [initial_size] * model.dim + [model.get_filter_num(0)])]

        convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)

        for i in range(depth):

            if i == depth - 1 and transition:
                #To RGB
                transition_conv = DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)
                transition_conv = DnUpsampling(transition_conv, (2,) * model.dim, dim=model.dim)

            convs += [DnUpsampling(convs[-1], (2,) * model.dim, dim=model.dim)]
            convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)

            convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)]

        #To RGB
        convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]

        if transition:
            convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1]

        return convs[-1]
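
DnPixelNorm is presumably the pixelwise feature-vector normalization from the progressive GAN paper, in which each spatial position's channel vector is divided by its root-mean-square. A NumPy sketch of the operation, offered as an illustration rather than the DeepNeuro op itself:

import numpy as np

def pixel_norm(x, epsilon=1e-8):
    """Normalize each position's channel vector to unit RMS length."""
    # x: (..., channels); normalization runs over the last (channel) axis.
    return x / np.sqrt(np.mean(np.square(x), axis=-1, keepdims=True) + epsilon)

x = np.random.randn(2, 4, 4, 8)
normed = pixel_norm(x)
print(np.sqrt(np.mean(np.square(normed), axis=-1))[0, 0, 0])   # ~1.0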
Example 7
def generator(model, latent_var, depth=1, initial_size=(4, 4), max_size=None, reuse=False, transition=False, alpha_transition=0, name=None):
    
    """Summary
    
    Parameters
    ----------
    model : TYPE
        Description
    latent_var : TYPE
        Description
    depth : int, optional
        Description
    initial_size : tuple, optional
        Description
    max_size : None, optional
        Description
    reuse : bool, optional
        Description
    transition : bool, optional
        Description
    alpha_transition : int, optional
        Description
    name : None, optional
        Description
    
    Returns
    -------
    TYPE
        Description
    """
    
    import tensorflow as tf

    with tf.variable_scope(name) as scope:

        convs = []

        if reuse:
            scope.reuse_variables()

        convs += [tf.reshape(latent_var, [tf.shape(latent_var)[0]] + [1] * model.dim + [model.latent_size])]

        # TODO: refactor the padding on this step. Or replace with a dense layer?
        with tf.variable_scope('generator_n_conv_1_{}'.format(convs[-1].shape[1])):
            convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=initial_size, stride_size=(1,) * model.dim, padding='Other', dim=model.dim)), model.dim)

        convs += [tf.reshape(convs[-1], [tf.shape(latent_var)[0]] + list(initial_size) + [model.get_filter_num(0)])]

        with tf.variable_scope('generator_n_conv_2_{}'.format(convs[-1].shape[1])):
            convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)

        for i in range(depth):

            # Calculate Next Upsample Ratio
            if max_size is None:
                upsample_ratio = (2,) * model.dim
            else:
                upsample_ratio = []
                for size_idx, size in enumerate(max_size):
                    if size >= convs[-1].shape[size_idx + 1] * 2:
                        upsample_ratio += [2]
                    else:
                        upsample_ratio += [1]
                upsample_ratio = tuple(upsample_ratio)

            # Upsampling, with conversion to RGB if necessary.
            if i == depth - 1 and transition:
                transition_conv = DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]))
                transition_conv = DnUpsampling(transition_conv, upsample_ratio, dim=model.dim)

            convs += [DnUpsampling(convs[-1], upsample_ratio, dim=model.dim)]

            # Convolutional blocks. TODO: Replace with block module.
            with tf.variable_scope('generator_n_conv_1_{}'.format(convs[-1].shape[1])):
                convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)
            with tf.variable_scope('generator_n_conv_2_{}'.format(convs[-1].shape[1])):
                convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)]

        # Conversion to RGB
        convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]

        if transition:
            convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1]

        return convs[-1]
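
The "Calculate Next Upsample Ratio" block doubles an axis only while doubling it would not overshoot max_size. The same rule as a standalone sketch over plain integers (the names here are illustrative):

def upsample_ratio(current_shape, max_size=None, dim=3):
    """Per-axis upsampling factor: 2 while doubling stays within max_size,
    1 once that axis has reached its target resolution."""
    if max_size is None:
        return (2,) * dim
    return tuple(2 if size >= current * 2 else 1
                 for current, size in zip(current_shape, max_size))

# An 8 x 8 x 8 feature map headed for 64 x 64 x 16 keeps doubling in-plane
# but stops along the third axis after one more step.
print(upsample_ratio((8, 8, 8), max_size=(64, 64, 16)))     # (2, 2, 2)
print(upsample_ratio((16, 16, 16), max_size=(64, 64, 16)))  # (2, 2, 1)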
Example 8
    def build_model(self):
        """ A basic implementation of the U-Net proposed in https://arxiv.org/abs/1505.04597
        
            TODO: specify optimizer

            Returns
            -------
            Keras model or tensor
                If input_tensor is provided, this will return a tensor. Otherwise,
                this will return a Keras model.
        """

        left_outputs = []

        for level in range(self.depth):

            filter_num = int(self.max_filter / (2**(self.depth - level)) /
                             self.downsize_filters_factor)

            if level == 0:

                left_outputs += [
                    DnConv(self.inputs,
                           filter_num,
                           kernel_size=self.kernel_size,
                           stride_size=(1, ) * self.dim,
                           activation=self.activation,
                           padding=self.padding,
                           dim=self.dim,
                           name='downsampling_conv_{}_{}'.format(level, 0),
                           backend='keras')
                ]

                if self.dropout is not None and self.dropout != 0:
                    left_outputs[level] = Dropout(self.dropout)(
                        left_outputs[level])

                if self.batch_norm:
                    left_outputs[level] = BatchNormalization()(
                        left_outputs[level])

                for block_num in range(1, self.num_blocks):
                    left_outputs[level] = DnConv(
                        left_outputs[level],
                        filter_num *
                        (self.block_filter_growth_ratio**block_num),
                        kernel_size=self.kernel_size,
                        stride_size=(1, ) * self.dim,
                        activation=self.activation,
                        padding=self.padding,
                        dim=self.dim,
                        name='downsampling_conv_{}_{}'.format(
                            level, block_num),
                        backend='keras')

                    if self.dropout is not None and self.dropout != 0:
                        left_outputs[level] = Dropout(self.dropout)(
                            left_outputs[level])

                    if self.batch_norm:
                        left_outputs[level] = BatchNormalization()(
                            left_outputs[level])
            else:

                left_outputs += [
                    DnMaxPooling(left_outputs[level - 1],
                                 pool_size=self.pool_size,
                                 dim=self.dim,
                                 backend='keras',
                                 padding=self.pooling_padding)
                ]

                for block_num in range(self.num_blocks):
                    left_outputs[level] = DnConv(
                        left_outputs[level],
                        filter_num *
                        (self.block_filter_growth_ratio**block_num),
                        kernel_size=self.kernel_size,
                        stride_size=(1, ) * self.dim,
                        activation=self.activation,
                        padding=self.padding,
                        dim=self.dim,
                        name='downsampling_conv_{}_{}'.format(
                            level, block_num),
                        backend='keras')

                    if self.dropout is not None and self.dropout != 0:
                        left_outputs[level] = Dropout(self.dropout)(
                            left_outputs[level])

                    if self.batch_norm:
                        left_outputs[level] = BatchNormalization()(
                            left_outputs[level])

        right_outputs = [left_outputs[self.depth - 1]]

        for level in range(self.depth):

            filter_num = int(self.max_filter / (2**(level)) /
                             self.downsize_filters_factor)

            if level > 0:
                right_outputs += [
                    DnUpsampling(right_outputs[level - 1],
                                 pool_size=self.pool_size,
                                 dim=self.dim,
                                 backend='keras')
                ]

                if right_outputs[level].shape[1:-1] == left_outputs[
                        self.depth - level - 1].shape[1:-1]:
                    right_outputs[level] = concatenate([
                        right_outputs[level],
                        left_outputs[self.depth - level - 1]
                    ],
                                                       axis=self.dim + 1)
                else:
                    # Workaround for dimension mismatches that arise from odd-numbered patch sizes.
                    # Essentially same-padding; may have performance impacts on networks.
                    # Very tentative.
                    input_tensor = right_outputs[level]
                    concatenate_tensor = left_outputs[self.depth - level - 1]
                    padding = []
                    for dim in range(self.dim):
                        padding += [
                            int(concatenate_tensor.shape[dim + 1]) -
                            int(input_tensor.shape[dim + 1])
                        ]
                    if len(padding) < self.dim:
                        padding += [1] * (self.dim - len(padding))
                    lambda_dict = {
                        0: Lambda(lambda x: x[:, 0:padding[0], :, :, :]),
                        1: Lambda(lambda x: x[:, :, 0:padding[1], :, :]),
                        2: Lambda(lambda x: x[:, :, :, 0:padding[2], :])
                    }
                    for dim in range(self.dim):
                        tensor_slice = [slice(None)] + [
                            slice(padding[dim])
                            if i_dim == dim else slice(None)
                            for i_dim in range(self.dim)
                        ] + [slice(None)]

                        # Causes JSON Serialization Error.
                        # tensor_slice = Lambda(lambda x, tensor_slice=tensor_slice: x[tensor_slice])(input_tensor)

                        tensor_slice = lambda_dict[dim](input_tensor)
                        input_tensor = concatenate(
                            [input_tensor, tensor_slice], axis=dim + 1)
                    right_outputs[level] = concatenate(
                        [input_tensor, concatenate_tensor], axis=self.dim + 1)

                for block_num in range(self.num_blocks):
                    right_outputs[level] = DnConv(
                        right_outputs[level],
                        filter_num //
                        (self.block_filter_growth_ratio**block_num),
                        kernel_size=self.kernel_size,
                        stride_size=(1, ) * self.dim,
                        activation=self.activation,
                        padding=self.padding,
                        dim=self.dim,
                        name='upsampling_conv_{}_{}'.format(level, block_num),
                        backend='keras')

                    if self.dropout is not None and self.dropout != 0:
                        right_outputs[level] = Dropout(self.dropout)(
                            right_outputs[level])

                    if self.batch_norm:
                        right_outputs[level] = BatchNormalization()(
                            right_outputs[level])

        self.output_layer = DnConv(right_outputs[level],
                                   self.output_channels, (1, ) * self.dim,
                                   stride_size=(1, ) * self.dim,
                                   dim=self.dim,
                                   name='end_conv',
                                   backend='keras')

        super(UNet, self).build_model()
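
The shape-matching branch above grows the upsampled tensor along each mismatched spatial axis by concatenating a leading slice of itself, then joins it with the skip connection along the channel axis. A NumPy sketch of that shape arithmetic, assuming a channels-last layout:

import numpy as np

def match_and_concat(upsampled, skip, channel_axis=-1):
    """Pad `upsampled` along each spatial axis by reusing its own leading
    slices until its spatial shape matches `skip`, then concatenate channels."""
    for axis in range(1, upsampled.ndim - 1):          # spatial axes only
        deficit = skip.shape[axis] - upsampled.shape[axis]
        if deficit > 0:
            pad_slice = np.take(upsampled, range(deficit), axis=axis)
            upsampled = np.concatenate([upsampled, pad_slice], axis=axis)
    return np.concatenate([upsampled, skip], axis=channel_axis)

up = np.zeros((1, 12, 12, 12, 8))     # odd-sized patches often leave a 1-voxel gap
skip = np.ones((1, 13, 13, 13, 16))
print(match_and_concat(up, skip).shape)   # (1, 13, 13, 13, 24)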
Example 9
    def build_model(self):
        """ A basic implementation of the U-Net proposed in https://arxiv.org/abs/1505.04597
        
            TODO: specify optimizer

            Returns
            -------
            Keras model or tensor
                If input_tensor is provided, this will return a tensor. Otherwise,
                this will return a Keras model.
        """

        left_outputs = []

        for level in range(self.depth):

            filter_num = int(self.max_filter / (2**(self.depth - level)) /
                             self.downsize_filters_factor)

            if level == 0:

                left_outputs += [
                    DnConv(self.inputs,
                           filter_num,
                           kernel_size=self.kernel_size,
                           stride_size=(1, ) * self.dim,
                           activation=self.activation,
                           padding=self.padding,
                           dim=self.dim,
                           name='downsampling_conv_{}_{}'.format(level, 0),
                           backend='keras')
                ]

                for block_num in range(1, self.num_blocks):
                    left_outputs[level] = DnConv(
                        left_outputs[level],
                        filter_num *
                        (self.block_filter_growth_ratio**block_num),
                        kernel_size=self.kernel_size,
                        stride_size=(1, ) * self.dim,
                        activation=self.activation,
                        padding=self.padding,
                        dim=self.dim,
                        name='downsampling_conv_{}_{}'.format(
                            level, block_num),
                        backend='keras')
            else:

                left_outputs += [
                    DnMaxPooling(left_outputs[level - 1],
                                 pool_size=self.pool_size,
                                 dim=self.dim,
                                 backend='keras')
                ]

                for block_num in range(self.num_blocks):
                    left_outputs[level] = DnConv(
                        left_outputs[level],
                        filter_num *
                        (self.block_filter_growth_ratio**block_num),
                        kernel_size=self.kernel_size,
                        stride_size=(1, ) * self.dim,
                        activation=self.activation,
                        padding=self.padding,
                        dim=self.dim,
                        name='downsampling_conv_{}_{}'.format(
                            level, block_num),
                        backend='keras')

            if self.dropout is not None and self.dropout != 0:
                left_outputs[level] = Dropout(self.dropout)(
                    left_outputs[level])

            if self.batch_norm:
                left_outputs[level] = BatchNormalization()(left_outputs[level])

        right_outputs = [left_outputs[self.depth - 1]]

        for level in range(self.depth):

            filter_num = int(self.max_filter / (2**(level)) /
                             self.downsize_filters_factor)

            if level > 0:
                right_outputs += [
                    DnUpsampling(right_outputs[level - 1],
                                 pool_size=self.pool_size,
                                 dim=self.dim,
                                 backend='keras')
                ]
                right_outputs[level] = concatenate([
                    right_outputs[level], left_outputs[self.depth - level - 1]
                ],
                                                   axis=self.dim + 1)

                for block_num in range(self.num_blocks):
                    right_outputs[level] = DnConv(
                        right_outputs[level],
                        filter_num //
                        (self.block_filter_growth_ratio**block_num),
                        kernel_size=self.kernel_size,
                        stride_size=(1, ) * self.dim,
                        activation=self.activation,
                        padding=self.padding,
                        dim=self.dim,
                        name='upsampling_conv_{}_{}'.format(level, block_num),
                        backend='keras')

            else:
                continue

            if self.dropout is not None and self.dropout != 0:
                right_outputs[level] = Dropout(self.dropout)(
                    right_outputs[level])

            if self.batch_norm:
                right_outputs[level] = BatchNormalization()(
                    right_outputs[level])

        self.output_layer = DnConv(right_outputs[level],
                                   self.output_channels, (1, ) * self.dim,
                                   stride_size=(1, ) * self.dim,
                                   dim=self.dim,
                                   name='end_conv',
                                   backend='keras')

        super(UNet, self).build_model()
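
Within a level, the per-block filter count scales by block_filter_growth_ratio: multiplied on the contracting path and integer-divided on the expanding path. A quick sketch of the counts those expressions yield, with illustrative values:

def block_filter_counts(filter_num, num_blocks, growth_ratio=2):
    """Per-block filter counts inside one level, mirroring the loops above."""
    down = [filter_num * (growth_ratio ** block) for block in range(num_blocks)]
    up = [filter_num // (growth_ratio ** block) for block in range(num_blocks)]
    return down, up

print(block_filter_counts(filter_num=64, num_blocks=3))
# ([64, 128, 256], [64, 32, 16])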
Example 10
    def build_model(self):
        """ A basic implementation of the U-Net proposed in https://arxiv.org/abs/1505.04597
        
            TODO: specify optimizer

            Returns
            -------
            Keras model or tensor
                If input_tensor is provided, this will return a tensor. Otherwise,
                this will return a Keras model.
        """

        left_outputs = []

        for level in range(self.depth):

            filter_num = int(self.max_filter / (2**(self.depth - level)) /
                             self.downsize_filters_factor)

            if level == 0:
                left_outputs += [
                    DnConv(self.inputs,
                           filter_num,
                           kernel_size=self.kernel_size,
                           stride_size=(1, ) * self.dim,
                           activation=self.activation,
                           padding=self.padding,
                           dim=self.dim,
                           name='downsampling_conv_{}_1'.format(level),
                           backend='keras')
                ]
                left_outputs[level] = DnConv(
                    left_outputs[level],
                    2 * filter_num,
                    kernel_size=self.kernel_size,
                    stride_size=(1, ) * self.dim,
                    activation=self.activation,
                    padding=self.padding,
                    dim=self.dim,
                    name='downsampling_conv_{}_2'.format(level),
                    backend='keras')
            else:
                left_outputs += [
                    DnMaxPooling(left_outputs[level - 1],
                                 pool_size=self.pool_size,
                                 dim=self.dim,
                                 backend='keras')
                ]
                left_outputs[level] = DnConv(
                    left_outputs[level],
                    filter_num,
                    kernel_size=self.kernel_size,
                    stride_size=(1, ) * self.dim,
                    activation=self.activation,
                    padding=self.padding,
                    dim=self.dim,
                    name='downsampling_conv_{}_1'.format(level),
                    backend='keras')
                left_outputs[level] = DnConv(
                    left_outputs[level],
                    2 * filter_num,
                    kernel_size=self.kernel_size,
                    stride_size=(1, ) * self.dim,
                    activation=self.activation,
                    padding=self.padding,
                    dim=self.dim,
                    name='downsampling_conv_{}_2'.format(level),
                    backend='keras')

            if self.dropout is not None and self.dropout != 0:
                left_outputs[level] = Dropout(self.dropout)(
                    left_outputs[level])

            if self.batch_norm:
                left_outputs[level] = BatchNormalization()(left_outputs[level])

        right_outputs = [left_outputs[self.depth - 1]]

        for level in range(self.depth):

            filter_num = int(self.max_filter / (2**(level)) /
                             self.downsize_filters_factor)

            if level > 0:
                right_outputs += [
                    DnUpsampling(right_outputs[level - 1],
                                 pool_size=self.pool_size,
                                 dim=self.dim,
                                 backend='keras')
                ]
                right_outputs[level] = concatenate([
                    right_outputs[level], left_outputs[self.depth - level - 1]
                ],
                                                   axis=self.dim + 1)
                right_outputs[level] = DnConv(
                    right_outputs[level],
                    filter_num,
                    kernel_size=self.kernel_size,
                    stride_size=(1, ) * self.dim,
                    activation=self.activation,
                    padding=self.padding,
                    dim=self.dim,
                    name='upsampling_conv_{}_1'.format(level),
                    backend='keras')
                right_outputs[level] = DnConv(
                    right_outputs[level],
                    int(filter_num / 2),
                    kernel_size=self.kernel_size,
                    stride_size=(1, ) * self.dim,
                    activation=self.activation,
                    padding=self.padding,
                    dim=self.dim,
                    name='upsampling_conv_{}_2'.format(level),
                    backend='keras')
            else:
                continue

            if self.dropout is not None and self.dropout != 0:
                right_outputs[level] = Dropout(self.dropout)(
                    right_outputs[level])

            if self.batch_norm:
                right_outputs[level] = BatchNormalization()(
                    right_outputs[level])

        self.output_layer = DnConv(right_outputs[level],
                                   1, (1, ) * self.dim,
                                   stride_size=(1, ) * self.dim,
                                   dim=self.dim,
                                   name='end_conv',
                                   backend='keras')

        # TODO: Brainstorm better way to specify outputs
        if self.input_tensor is None:

            if self.output_type == 'regression':
                self.model = Model(inputs=self.inputs,
                                   outputs=self.output_layer)
                self.model.compile(
                    optimizer=Nadam(lr=self.initial_learning_rate),
                    loss='mean_squared_error',
                    metrics=['mean_squared_error'])

            if self.output_type == 'dice':
                act = Activation('sigmoid')(self.output_layer)
                self.model = Model(inputs=self.inputs, outputs=act)
                self.model.compile(
                    optimizer=Nadam(lr=self.initial_learning_rate),
                    loss=dice_coef_loss,
                    metrics=[dice_coef])

            if self.output_type == 'binary_label':
                act = Activation('sigmoid')(self.output_layer)
                self.model = Model(inputs=self.inputs, outputs=act)
                self.model.compile(
                    optimizer=Nadam(lr=self.initial_learning_rate),
                    loss='binary_crossentropy',
                    metrics=['binary_accuracy'])

            if self.output_type == 'categorical_label':
                act = Activation('softmax')(self.output_layer)
                self.model = Model(inputs=self.inputs, outputs=act)
                self.model.compile(
                    optimizer=Nadam(lr=self.initial_learning_rate),
                    loss='categorical_crossentropy',
                    metrics=['categorical_accuracy'])

            super(UNet, self).build()

            return self.model

        else:

            return self.output_layer
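
Each output_type branch above pairs a final activation with a loss and metric before compiling. The same pairings can be expressed as a single lookup table; a sketch, under the assumption that the caller supplies the Dice loss and metric used above:

from keras.layers import Activation
from keras.models import Model
from keras.optimizers import Nadam

def compile_for_output_type(inputs, output_layer, output_type, initial_learning_rate,
                            dice_loss=None, dice_metric=None):
    """Build and compile a Model with the activation / loss / metrics that the
    if blocks above associate with each output_type."""
    table = {
        'regression':        (None,      'mean_squared_error',       ['mean_squared_error']),
        'dice':              ('sigmoid', dice_loss,                  [dice_metric]),
        'binary_label':      ('sigmoid', 'binary_crossentropy',      ['binary_accuracy']),
        'categorical_label': ('softmax', 'categorical_crossentropy', ['categorical_accuracy']),
    }
    activation, loss, metrics = table[output_type]
    outputs = output_layer if activation is None else Activation(activation)(output_layer)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Nadam(lr=initial_learning_rate), loss=loss, metrics=metrics)
    return model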
Example 11
def unet(model, input_tensor, backend='tensorflow'):

        from keras.layers.merge import concatenate

        left_outputs = []

        for level in range(model.depth):

            filter_num = int(model.max_filter / (2 ** (model.depth - level)) / model.downsize_filters_factor)

            if level == 0:
                left_outputs += [DnConv(input_tensor, filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)]
                left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
            else:
                left_outputs += [DnMaxPooling(left_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
                left_outputs[level] = DnConv(left_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)
                left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)

            if model.dropout is not None and model.dropout != 0:
                left_outputs[level] = DnDropout(model.dropout)(left_outputs[level])

            if model.batch_norm:
                left_outputs[level] = DnBatchNormalization(left_outputs[level])

        right_outputs = [left_outputs[model.depth - 1]]

        for level in range(model.depth):

            filter_num = int(model.max_filter / (2 ** (level)) / model.downsize_filters_factor)

            if level > 0:
                right_outputs += [DnUpsampling(right_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
                right_outputs[level] = concatenate([right_outputs[level], left_outputs[model.depth - level - 1]], axis=model.dim + 1)
                right_outputs[level] = DnConv(right_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_1'.format(level), backend=backend)
                right_outputs[level] = DnConv(right_outputs[level], int(filter_num / 2), model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_2'.format(level), backend=backend)
            else:
                continue

            if model.dropout is not None and model.dropout != 0:
                right_outputs[level] = DnDropout(model.dropout)(right_outputs[level])

            if model.batch_norm:
                right_outputs[level] = DnBatchNormalization()(right_outputs[level])

        output_layer = DnConv(right_outputs[level], 1, (1, ) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='end_conv', backend=backend) 

        # TODO: Brainstorm better way to specify outputs
        if model.input_tensor is not None:
            return output_layer

        return model.model

# def progressive_generator(model, latent_var, progressive_depth=1, name=None, transition=False, alpha_transition=0.0):

#     with tf.variable_scope(name) as scope:

#         convs = []

#         convs += [tf.reshape(latent_var, [model.training_batch_size, 1, 1, model.latent_size])]

#         convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(1, depth), kernel_size=(4, 4), stride_size=(1,) * model.dim, padding='Other', name='generator_n_1_conv', dim=model.dim)))

#         convs += [tf.reshape(convs[-1], [model.training_batch_size, 4, 4, model.get_filter_num(1, depth)])] # why necessary? --andrew
#         convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(1, depth), stride_size=(1,) * model.dim, name='generator_n_2_conv', dim=model.dim)))

#         for i in range(progressive_depth - 1):

#             if i == progressive_depth - 2 and transition:  # redundant conditions? --andrew
#                 #To RGB
#                 # Don't totally understand this yet, diagram out --andrew
#                 transition_conv = DnConv(convs[-1], output_dim=model.channels, kernel_size=(1, 1), stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)
#                 transition_conv = upscale(transition_conv, 2)

#             convs += [upscale(convs[-1], 2)]
#             convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1, depth), stride_size=(1,) * model.dim, name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim)))

#             convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1, depth), stride_size=(1,) * model.dim, name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim)))]

#         #To RGB
#         convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1, 1), stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]

#         if transition:
#             convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1]

#         return convs[-1]
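
All of the progressive-GAN code above fades a newly added resolution block in against an upsampled copy of the previous block using alpha_transition. The blend itself is just a convex combination; a NumPy sketch:

import numpy as np

def fade_in(new_branch, old_branch, alpha):
    """Progressive GAN fade-in: weight shifts from the old, upsampled branch
    to the newly added higher-resolution branch as alpha goes from 0 to 1."""
    return alpha * new_branch + (1.0 - alpha) * old_branch

new = np.ones((1, 8, 8, 3))
old = np.zeros((1, 8, 8, 3))
print(fade_in(new, old, alpha=0.25).mean())   # 0.25 -- mostly the old branch early in training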