def getResnetModel(d):
    n = d.num_blocks
    sf = d.start_filter
    dataset = d.dataset
    activation = d.act
    advanced_act = d.aact
    drop_prob = d.dropout
    if "mnist" in dataset:
        inputShape = (1, 28, 28) if K.image_dim_ordering() == "th" else (28,
                                                                         28, 1)
    else:
        inputShape = (3, 32, 32) if K.image_dim_ordering() == "th" else (32,
                                                                         32, 3)
    channelAxis = 1 if K.image_data_format() == 'channels_first' else -1
    filsize = (3, 3)
    convArgs = {
        "padding": "same",
        "use_bias": False,
        "kernel_regularizer": l2(0.0001),
    }
    bnArgs = {"axis": channelAxis, "momentum": 0.9, "epsilon": 1e-04}

    import copy

    convArgs_real = copy.deepcopy(convArgs)

    # if   d.model == "real":
    if "real" in d.model:
        sf *= 2
        convArgs.update({"kernel_initializer": Orthogonal(float(np.sqrt(2)))})
        convArgs_real = convArgs
    # elif d.model == "complex":
    elif "complex" in d.model:
        convArgs.update({
            "spectral_parametrization": d.spectral_param,
            "kernel_initializer": d.comp_init
        })

    #
    # Input Layer
    #

    I = Input(shape=inputShape)

    #
    # Stage 1
    #

    O = learnConcatRealImagBlock(I, (1, 1), (3, 3), 0, '0', convArgs, bnArgs,
                                 d)
    O = Concatenate(channelAxis)([I, O])
    if d.model == "real":
        O = Conv2D(sf, filsize, name='conv1', **convArgs)(O)
        O = BatchNormalization(name="bn_conv1_2a", **bnArgs)(O)
    elif d.model == "real_dws":
        O = SeparableConv2D(sf, filsize, name='conv1', **convArgs)(O)
        O = BatchNormalization(name="bn_conv1_2a", **bnArgs)(O)
    elif (d.model == "real_group") or (d.model == "real_group_pwc_full") or (
            d.model == "real_group_pwc_group"):
        O_g0 = Lambda(lambda O: O[:, :(O.shape[1] // 2), :, :])(O)
        O_g1 = Lambda(lambda O: O[:, (O.shape[1] // 2):, :, :])(O)
        O_g0 = Conv2D(sf // 2, filsize, name='conv1_g0', **convArgs)(O_g0)
        O_g1 = Conv2D(sf // 2, filsize, name='conv1_g1', **convArgs)(O_g1)
        O_g00 = Lambda(lambda O_g0: O_g0[:, :(O_g0.shape[1] // 2), :, :])(O_g0)
        O_g01 = Lambda(lambda O_g0: O_g0[:, (O_g0.shape[1] // 2):, :, :])(O_g0)
        O_g10 = Lambda(lambda O_g1: O_g1[:, :(O_g1.shape[1] // 2), :, :])(O_g1)
        O_g11 = Lambda(lambda O_g1: O_g1[:, (O_g1.shape[1] // 2):, :, :])(O_g1)
        O = Concatenate(axis=1)([O_g00, O_g11, O_g01, O_g10])
        if d.model == "real_group_pwc_full":
            O = Conv2D(sf, (1, 1), name='conv1_pwc', **convArgs)(O)
        elif d.model == "real_group_pwc_group":
            O_g0 = Lambda(lambda O: O[:, :(O.shape[1] // 2), :, :])(O)
            O_g1 = Lambda(lambda O: O[:, (O.shape[1] // 2):, :, :])(O)
            O_g0 = Conv2D(int(O.shape[1] // 2), (1, 1),
                          name='conv1_pwc_g0',
                          **convArgs_real)(O_g0)
            O_g1 = Conv2D(int(O.shape[1] // 2), (1, 1),
                          name='conv1_pwc_g1',
                          **convArgs_real)(O_g1)
            O = Concatenate(axis=1)([O_g0, O_g1])
        O = BatchNormalization(name="bn_conv1_2a", **bnArgs)(O)
    elif d.model == "complex":
        O = ComplexConv2D(sf, filsize, name='conv1', **convArgs)(O)
        O = ComplexBN(name="bn_conv1_2a", **bnArgs)(O)
    elif (d.model == "complex_concat") or (d.model
                                           == "complex_concat_pwc_group"):
        O = ComplexConvConcat2D(sf // 2, filsize, name='conv1', **convArgs)(O)
        O = ComplexBN(name="bn_conv1_2a", **bnArgs)(O)
        if d.model == "complex_concat_pwc_group":
            O_g0 = Lambda(lambda O: O[:, :(O.shape[1] // 2), :, :])(O)
            O_g1 = Lambda(lambda O: O[:, (O.shape[1] // 2):, :, :])(O)
            O_g0 = Conv2D(int(O.shape[1] // 2), (1, 1),
                          name='conv1_pwc_g0',
                          **convArgs_real)(O_g0)
            O_g1 = Conv2D(int(O.shape[1] // 2), (1, 1),
                          name='conv1_pwc_g1',
                          **convArgs_real)(O_g1)
            O = Concatenate(axis=1)([O_g0, O_g1])
    else:
        raise ValueError("Unknown model type " + d.model)

    if d.aact == "complex_joint_relu":
        O = Lambda(ComplexJointReLU)(O)
    else:
        O = Activation(activation)(O)

    #
    # Stage 2
    #

    for i in range(n):
        O = getResidualBlock(O, filsize, [sf, sf], 2, str(i), 'regular',
                             convArgs, convArgs_real, bnArgs, d)
        if i == n // 2 and d.spectral_pool_scheme == "stagemiddle":
            O = applySpectralPooling(O, d)

    #
    # Stage 3
    #

    O = getResidualBlock(O, filsize, [sf, sf], 3, '0', 'projection', convArgs,
                         convArgs_real, bnArgs, d)
    if d.spectral_pool_scheme == "nodownsample":
        O = applySpectralPooling(O, d)

    for i in range(n - 1):
        O = getResidualBlock(O, filsize, [sf * 2, sf * 2], 3, str(i + 1),
                             'regular', convArgs, convArgs_real, bnArgs, d)
        if i == n // 2 and d.spectral_pool_scheme == "stagemiddle":
            O = applySpectralPooling(O, d)

    #
    # Stage 4
    #

    O = getResidualBlock(O, filsize, [sf * 2, sf * 2], 4, '0', 'projection',
                         convArgs, convArgs_real, bnArgs, d)
    if d.spectral_pool_scheme == "nodownsample":
        O = applySpectralPooling(O, d)

    for i in range(n - 1):
        O = getResidualBlock(O, filsize, [sf * 4, sf * 4], 4, str(i + 1),
                             'regular', convArgs, convArgs_real, bnArgs, d)
        if i == n // 2 and d.spectral_pool_scheme == "stagemiddle":
            O = applySpectralPooling(O, d)

    #
    # Pooling
    #

    if d.spectral_pool_scheme == "nodownsample":
        O = applySpectralPooling(O, d)
        if "mnist" in dataset:
            O = AveragePooling2D(pool_size=(28, 28))(O)
        else:
            O = AveragePooling2D(pool_size=(32, 32))(O)
    else:
        if "mnist" in dataset:
            O = AveragePooling2D(pool_size=(7, 7))(O)
        else:
            O = AveragePooling2D(pool_size=(8, 8))(O)

    #
    # Flatten
    #

    O = Flatten()(O)

    #
    # Dense
    #

    if dataset == 'cifar10':
        O = Dense(10, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    elif dataset == 'cifar100':
        O = Dense(100, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    elif dataset == 'svhn':
        O = Dense(10, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    elif dataset == 'mnist':
        O = Dense(10, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    elif dataset == 'fashion_mnist':
        O = Dense(10, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    else:
        raise ValueError("Unknown dataset " + d.dataset)

    # Return the model
    return Model(I, O)
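
# Usage sketch (not from the source): `d` mimics the parsed-argument object
# read above. All values are assumptions, and the helper functions used
# inside getResnetModel (getResidualBlock, applySpectralPooling, ...) must
# be in scope as in the original module.
from argparse import Namespace

d = Namespace(num_blocks=2, start_filter=12, dataset='cifar10', act='relu',
              aact='relu', dropout=0.0, model='real', spectral_param=False,
              comp_init='complex_independent', spectral_pool_scheme='none')
resnet = getResnetModel(d)
resnet.compile(optimizer='sgd', loss='categorical_crossentropy',
               metrics=['accuracy'])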
           padding="same",
           activation='linear',
           name='conv2d_8',
           kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_8'))
model.add(LeakyReLU(alpha=0.01, name='leaky_re_lu_8'))
"""256 Layers"""

model.add(MaxPooling2D(pool_size=3, strides=(2, 2), name='max_pooling2d_3'))
#model.add(Dropout(0.25))

model.add(
    Conv2D(256, (3, 3),
           padding="same",
           activation='linear',
           kernel_initializer=Orthogonal(gain=1.0),
           name='conv2d_9',
           bias_initializer=Constant(value=0.05),
           kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_11'))
model.add(LeakyReLU(alpha=0.01, name='leaky_re_lu_12'))

model.add(
    Conv2D(256, (3, 3),
           padding="same",
           activation='linear',
           kernel_initializer=Orthogonal(gain=1.0),
           name='conv2d_10',
           bias_initializer=Constant(value=0.05),
           kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_12'))
Example #3

AUGMENTATIONS = strong_aug(p=0.9)

seed_everything()

model = Sequential()

# ,padding="same", strides=(2, 2)

model.add(Conv2D(32, (5, 5), padding="same", strides=(2, 2),
                 activation='linear', input_shape=(128, 128, 3),
                 data_format="channels_last",
                 kernel_initializer=Orthogonal(gain=1.0),
                 bias_initializer=Constant(value=0.05),
                 kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.01))

model.add(Conv2D(32, (3, 3), padding="same", activation='linear',
                 kernel_initializer=Orthogonal(gain=1.0),
                 bias_initializer=Constant(value=0.05),
                 kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.01))

model.add(FractionalPooling2D(pooling_ratio=(1, 1.8, 1.8, 1),
                              pseudo_random=True,
                              overlap=True))

model.add(Conv2D(64, (5, 5), padding="same", strides=(2, 2),
                 activation='linear',
                 kernel_initializer=Orthogonal(gain=1.0),
                 bias_initializer=Constant(value=0.05),
                 kernel_regularizer=regularizers.l2(0.0005)))
Example #4
def get_ohm_net(dim,
                conv_depth,
                hourglass_depth,
                num_output_classes=cg.num_classes,
                input_channels=1,
                train=True):

    ##
    ## Inputs
    ##
    gt_matrix = []
    gt_matrix += [Input((2, ), name='mat_translation')]
    gt_matrix += [Input((1, ), name='mat_rotation')]
    gt_matrix += [Input((1, ), name='mat_scaling')]

    model_inputs = []
    model_inputs += [Input((dim, dim, input_channels), name='image')]
    model_inputs += [Input((dim, dim, input_channels), name='image_cp')]
    model_inputs += gt_matrix

    train_outputs = []
    predict_outputs = []

    # image translation loss
    input_true_translated = STN(downsample_factor=1.0,
                                transform_type='translation',
                                name='true_inp_xy')([
                                    model_inputs[1], gt_matrix[0],
                                    gt_matrix[0], gt_matrix[0]
                                ])

    # image rotation loss
    input_true_rotated = STN(downsample_factor=1.0,
                             transform_type='rotation',
                             name='true_inp_rt')([
                                 model_inputs[1], gt_matrix[0], gt_matrix[1],
                                 gt_matrix[1]
                             ])

    # image scaling loss
    input_true_scaled = STN(downsample_factor=cg.downsampling_factor,
                            transform_type='uniform_scale',
                            name='true_inp_zm')([
                                model_inputs[1], gt_matrix[0], gt_matrix[1],
                                gt_matrix[2]
                            ])

    ##
    ## First UNet
    ##

    unet1_pool5, _, unet1_seg_pred = get_unet(dim, num_output_classes,
                                              conv_depth[0],
                                              1)(model_inputs[0])

    train_outputs += [unet1_seg_pred]
    predict_outputs += [unet1_seg_pred]
    ##
    ## STN
    ##

    pool5_flat = Flatten()(unet1_pool5)

    # Loc Networks.
    pool5_dense1 = Dense(384,
                         kernel_initializer=Orthogonal(gain=1.0),
                         kernel_regularizer=l2(weight_decay),
                         activation='relu',
                         name='loc')(pool5_flat)

    # translation matrix loss
    stn_translation = Dense(3,
                            kernel_initializer=Orthogonal(gain=1e-1),
                            kernel_regularizer=l2(weight_decay),
                            name='mat_xy')(pool5_dense1)

    # rotation matrix loss
    stn_rotation = Dense(1,
                         kernel_initializer=Orthogonal(gain=1e-1),
                         kernel_regularizer=l2(weight_decay),
                         name='mat_rt')(pool5_dense1)
    rotation_loss = dvpy.tf.wrapped_phase_difference_loss

    # scaling matrix loss
    stn_scaling = Dense(1,
                        kernel_initializer=Orthogonal(gain=1e-1),
                        bias_initializer=my_init,
                        kernel_regularizer=l2(weight_decay),
                        name='mat_zm')(pool5_dense1)

    train_outputs += [stn_translation]
    train_outputs += [stn_rotation]
    train_outputs += [stn_scaling]

    predict_outputs += [stn_translation]
    predict_outputs += [stn_rotation]
    predict_outputs += [stn_scaling]

    # image translation loss
    input_pred_translated = STN(downsample_factor=1.0,
                                transform_type='translation',
                                name='pred_inp_xy')([
                                    model_inputs[1], stn_translation,
                                    stn_translation, stn_translation
                                ])

    # image rotation loss
    input_pred_rotated = STN(downsample_factor=1.0,
                             transform_type='rotation',
                             name='pred_inp_rt')([
                                 model_inputs[1], stn_translation,
                                 stn_rotation, stn_rotation
                             ])

    # image scaling loss
    input_pred_scaled = STN(downsample_factor=cg.downsampling_factor,
                            transform_type='uniform_scale',
                            name='pred_inp_zm')([
                                model_inputs[1], stn_translation, stn_rotation,
                                stn_scaling
                            ])

    predict_outputs += [input_pred_scaled]
    current_input = input_pred_scaled

    if hourglass_depth > 0:

        model_inputs += [
            Input((dim, dim, num_output_classes), name='output_mask')
        ]

        # image scaling loss
        mask_scaled = STN(downsample_factor=cg.downsampling_factor,
                          transform_type='uniform_scale',
                          name='pred_mask_zm')([
                              model_inputs[-1], stn_translation, stn_rotation,
                              stn_scaling
                          ])

        predict_outputs += [mask_scaled]
        ##
        ## Define Ground Truth for All Subsequent UNets
        ##
        for i in range(2, hourglass_depth + 2):

            _, current_input, seg_pred = get_unet(
                dim // cg.downsampling_factor, num_output_classes,
                conv_depth[i - 1], i)(current_input)
            predict_outputs += [seg_pred]
            train_outputs += [
                Categorical_Crossentropy_Layer(name='seg%d_zm' %
                                               (i))([mask_scaled, seg_pred])
            ]
            train_outputs += [
                Categorical_Crossentropy_Acc_Layer(name='seg%d_zm_acc' % (i))(
                    [mask_scaled, seg_pred])
            ]

    ##
    ## Compile Model
    ##

    if train:
        train_outputs += [
            Mean_Square_Error_Layer(name='translation_mse')(
                [input_true_translated, input_pred_translated])
        ]
        train_outputs += [
            Mean_Square_Error_Layer(name='rotation_mse')(
                [input_true_rotated, input_pred_rotated])
        ]
        train_outputs += [
            Mean_Square_Error_Layer(name='scaling_mse')(
                [input_true_scaled, input_pred_scaled])
        ]

        model = Model(inputs=model_inputs, outputs=train_outputs)
        opt = Adam(lr=1e-3)

        dummy_losses = {
            'seg%d_zm' % (i): dummy_loss
            for i in range(2, hourglass_depth + 2)
        }
        dummy_acc = {
            'seg%d_zm_acc' % (i): dummy_loss
            for i in range(2, hourglass_depth + 2)
        }
        dummy_zooms = {
            'seg%d_zm' % (i): 1.0
            for i in range(2, hourglass_depth + 2)
        }

        losses = {
            'img_sg1': 'categorical_crossentropy',
            'mat_xy': 'mse',
            'mat_zm': 'mse',
            'mat_rt': rotation_loss,
            'translation_mse': dummy_loss,
            'rotation_mse': dummy_loss,
            'scaling_mse': dummy_loss,
        }
        losses.update(dummy_losses)  #???
        losses.update(dummy_acc)
        losses_weights = {
            'img_sg1': 100.0,
            'mat_xy': 100.0,
            'mat_rt': 50.0,
            'mat_zm': 100.0,
            'translation_mse': 0.1,
            'rotation_mse': 0.1,
            'scaling_mse': 0.1,
        }
        losses_weights.update(dummy_zooms)

        model.compile(optimizer=opt,
                      loss=losses,
                      metrics={'img_sg1': 'acc'},
                      loss_weights=losses_weights)

    else:

        predict_outputs += gt_matrix
        predict_outputs += [input_true_scaled]

        model = Model(inputs=model_inputs, outputs=predict_outputs)

    return model
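
# Usage sketch (not from the source): dim, conv_depth, and hourglass_depth
# are assumed values; the cg config object and the custom layers used above
# (STN, get_unet, the loss layers, ...) must be in scope as in the original
# module. conv_depth needs hourglass_depth + 1 entries.
ohm_model = get_ohm_net(dim=256, conv_depth=[8, 8], hourglass_depth=1,
                        train=True)
ohm_model.summary()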
Example #5
    def _get_model(input_dim,
                   output_dim,
                   one_hot,
                   filters=32,
                   max_filters=128,
                   subtract_embeddings=False,
                   dropout=False,
                   learn_rate=0.001,
                   optimizer="rmsprop",
                   kernel_size=3,
                   lr_decay=0):
        if optimizer == "rmsprop":
            optimizer = RMSprop(lr=learn_rate, decay=lr_decay)
        elif optimizer == "adam":
            optimizer = Adam(lr=learn_rate, decay=lr_decay)
        elif optimizer == "sgd":
            optimizer = SGD(lr=learn_rate, momentum=0.9, decay=lr_decay)
        else:
            raise ValueError("Invalid argument optimizer")

        initializer = Orthogonal(np.sqrt(2))

        model = Sequential()

        dropout = dropout if isinstance(dropout,
                                        float) else (0.5 if dropout else 0)

        if not subtract_embeddings:
            # Reshape to 2D, do 2D conv and reshape to 1D
            model.add(
                Reshape(input_shape=(input_dim, ),
                        target_shape=(2, input_dim // 2, 1)))

            model.add(
                Conv2D(filters=filters,
                       kernel_size=[2, 7],
                       strides=[1, 2],
                       padding="VALID",
                       activation="relu",
                       kernel_initializer=initializer))

            model.add(BatchNormalization())
        else:
            # Reshape to add single channel
            model.add(
                Reshape(input_shape=(input_dim, ),
                        target_shape=(input_dim, 1)))
            model.add(
                Conv1D(filters=filters,
                       kernel_size=7,
                       strides=2,
                       padding="VALID",
                       activation="relu",
                       kernel_initializer=initializer))
            model.add(BatchNormalization())

        model.add(Reshape((-1, filters)))

        if dropout:
            model.add(Dropout(dropout))

        # Half conv with kernel size 3 until not greater than 3
        while model.layers[-1].output_shape[1] > 3:
            filters = min(filters * 2, max_filters)

            model.add(
                Conv1D(filters=filters,
                       kernel_size=kernel_size,
                       strides=2,
                       padding="SAME",
                       activation="relu",
                       kernel_initializer=initializer))

            model.add(BatchNormalization())
            if dropout:
                model.add(Dropout(dropout))

        # Conv valid so output is 1 if necessary
        if model.layers[-1].output_shape[1] != 1:
            filters = min(filters * 2, max_filters)
            model.add(
                Conv1D(filters=filters,
                       kernel_size=(model.layers[-1].output_shape[1], ),
                       padding="VALID",
                       activation="relu",
                       kernel_initializer=initializer))
            model.add(BatchNormalization())
            if dropout:
                model.add(Dropout(dropout))

        # Dense sigmoid output
        model.add(Flatten())
        #model.add(Dropout(0.5))
        model.add(
            Dense(output_dim,
                  activation="softmax" if one_hot else "sigmoid",
                  kernel_initializer="orthogonal"))

        model.summary()

        all_metrics = [
            "categorical_accuracy" if one_hot else "binary_accuracy"
        ]
        for class_id in range(output_dim):
            all_metrics += make_metrics(class_id, one_hot)

        model.compile(optimizer=optimizer,
                      loss="categorical_crossentropy"
                      if one_hot else "binary_crossentropy",
                      metrics=all_metrics)

        return model
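
# A minimal, self-contained illustration (not from the source) of the
# Orthogonal initializer with gain sqrt(2), as used above: the kernel's
# columns come out orthogonal and scaled so that W^T W = 2 * I.
import numpy as np
from keras.initializers import Orthogonal
from keras.layers import Dense
from keras.models import Sequential

probe = Sequential([
    Dense(4, kernel_initializer=Orthogonal(gain=float(np.sqrt(2))),
          input_shape=(8,))
])
W = probe.layers[0].get_weights()[0]  # shape (8, 4)
print(np.round(W.T @ W, 3))           # approximately 2 * identity(4)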
Example #6
from data import _seed
import numpy as np
np.random.seed(_seed)
import sys
from keras.layers import Dense, merge, Dropout, RepeatVector, Embedding
from keras.layers import recurrent, Input, Activation
from keras.models import Model
from keras.initializers import RandomNormal, Orthogonal
from experiment import evaluate_model, handlesysargs
from model import model as modelwrapper

print("MTL model sharing everything but output, based on gru")

INITIALIZER = RandomNormal(mean=0.0, stddev=0.05, seed=_seed)
RINITIALIZER = Orthogonal(gain=1.0, seed=_seed)
RNN = recurrent.GRU
EMBED_HIDDEN_SIZE = 30  #50
SENT_HIDDEN_SIZE = 100
QUERY_HIDDEN_SIZE = 100


def compile_model(inputs, repeat):
    (vocab_size, story_maxlen, query_maxlen) = inputs[0]
    #(vocab_size2, story_maxlen2, query_maxlen2) = inputs[1]
    #mvocab_size = vocab_size1
    #if (vocab_size2 > mvocab_size):
    #    mvocab_size = vocab_size2

    ensinput = Input((story_maxlen, ))
    sentemb = Embedding(vocab_size,
                        EMBED_HIDDEN_SIZE,
Example #7
    def __init__(self, eps_std=0.05, seed=None):
        self.eps_std = eps_std
        self.seed = seed
        self.orthogonal = Orthogonal()
Example #8
    def generator_network(self, filters, name):
        def resblock(feature_in, filters, num):

            #init = RandomNormal(stddev=0.02)
            init = Orthogonal(gain=1)

            temp = ConvSN2D(filters, (3, 3),
                            strides=1,
                            padding='SAME',
                            name=('resblock_%d_CONV_1' % num),
                            kernel_initializer=init)(feature_in)
            #temp = BatchNormalization(axis=-1)(temp)
            temp = LeakyReLU(alpha=0.2)(temp)

            temp = ConvSN2D(filters, (3, 3),
                            strides=1,
                            padding='SAME',
                            name=('resblock_%d_CONV_2' % num),
                            kernel_initializer=init)(temp)
            #temp = BatchNormalization(axis=-1)(temp)
            temp = LeakyReLU(alpha=0.2)(temp)

            return Add()([temp, feature_in])

        #init = RandomNormal(stddev=0.02)
        init = Orthogonal(gain=1)

        image = Input(self.img_shape)
        y = Lambda(lambda x: 2.0 * x - 1.0, output_shape=lambda x: x)(image)

        b1_in = ConvSN2D(filters, (9, 9),
                         strides=1,
                         padding='SAME',
                         name='CONV_1',
                         activation='relu',
                         kernel_initializer=init)(y)
        b1_in = LeakyReLU(alpha=0.2)(b1_in)
        # residual blocks
        b1_out = resblock(b1_in, filters, 1)
        b2_out = resblock(b1_out, filters, 2)
        b3_out = resblock(b2_out, filters, 3)
        b4_out = resblock(b3_out, filters, 4)

        # conv. layers after residual blocks
        temp = ConvSN2D(filters, (3, 3),
                        strides=1,
                        padding='SAME',
                        name='CONV_2',
                        kernel_initializer=init)(b4_out)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        temp = ConvSN2D(filters, (3, 3),
                        strides=1,
                        padding='SAME',
                        name='CONV_3',
                        kernel_initializer=init)(temp)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        temp = ConvSN2D(filters, (3, 3),
                        strides=1,
                        padding='SAME',
                        name='CONV_4',
                        kernel_initializer=init)(temp)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        temp = Conv2D(3, (9, 9),
                      strides=1,
                      padding='SAME',
                      name='CONV_5',
                      kernel_initializer=init)(temp)
        temp = Activation('tanh')(temp)

        temp = Lambda(lambda x: 0.5 * x + 0.5, output_shape=lambda x: x)(temp)

        return Model(inputs=image, outputs=temp, name=name)
Example #9
    def discriminator_network(self, name, preprocess='gray'):
        # The main modification from the original approach is the use of the
        # InstanceNormalisation layer.

        #init = RandomNormal(stddev=0.02)
        init = Orthogonal(gain=1)

        image = Input(self.img_shape)

        if preprocess == 'gray':
            #convert to grayscale image
            print("Discriminator-texture")

            #output_shape=(image.shape[0], image.shape[1], 1)
            gray_layer = Conv2D(1, (1, 1),
                                strides=1,
                                padding="SAME",
                                use_bias=False,
                                name="Gray_layer")
            image_processed = gray_layer(image)
            gray_layer.set_weights([self.texture_weights])
            gray_layer.trainable = False

            #image_processed=Lambda(rgb2gray, output_shape = output_gray_shape)(image)
            #print(image_processed.shape)
            #image_processed = rgb_to_grayscale(image)

        elif preprocess == 'blur':
            print("Discriminator-color (blur)")

            g_layer = DepthwiseConv2D(self.kernel_size,
                                      use_bias=False,
                                      padding='same')
            image_processed = g_layer(image)

            g_layer.set_weights([self.blur_kernel_weights])
            g_layer.trainable = False

        else:
            print("Discriminator-color (none)")
            image_processed = image

        # conv layer 1
        d = Lambda(lambda x: 2.0 * x - 1.0,
                   output_shape=lambda x: x)(image_processed)
        temp = ConvSN2D(48, (11, 11),
                        strides=4,
                        padding='SAME',
                        name='CONV_1',
                        kernel_initializer=init)(d)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        # conv layer 2
        temp = ConvSN2D(128, (5, 5),
                        strides=2,
                        padding='SAME',
                        name='CONV_2',
                        kernel_initializer=init)(temp)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        # conv layer 3
        temp = ConvSN2D(192, (3, 3),
                        strides=1,
                        padding='SAME',
                        name='CONV_3',
                        kernel_initializer=init)(temp)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        # conv layer 4
        temp = ConvSN2D(192, (3, 3),
                        strides=1,
                        padding='SAME',
                        name='CONV_4',
                        kernel_initializer=init)(temp)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        # conv layer 5
        temp = ConvSN2D(128, (3, 3),
                        strides=2,
                        padding='SAME',
                        name='CONV_5',
                        kernel_initializer=init)(temp)
        #temp = BatchNormalization(axis=-1)(temp)
        temp = LeakyReLU(alpha=0.2)(temp)

        # FC layer 1
        fc_in = Flatten()(temp)

        fc_out = Dense(512)(fc_in)
        fc_out = LeakyReLU(alpha=0.2)(fc_out)

        # FC layer 2
        logits = Dense(1)(fc_out)
        #prob = Activation('sigmoid')(logits)
        #probability = sigmoid(logits)

        return Model(inputs=image, outputs=logits, name=name)
Example #10
#########

base = InceptionV3(weights = "imagenet", include_top = False, input_shape = (299, 299, 3))

#########

x = base.output

x = MaxPooling2D()(x)

x = Flatten()(x)

x = Dropout(rate = 0.5)(x)

x = Dense(128, activation = "relu", kernel_initializer = Orthogonal())(x)

x = Dropout(rate = 0.5)(x)

predictions = Dense(5, activation = "softmax", kernel_initializer = Orthogonal())(x)

model = Model(inputs = base.input, outputs = predictions)

#########
 
for layer in model.layers:
    layer.trainable = True

#########

model.compile(optimizer = "SGD", loss = "categorical_crossentropy", metrics = ["accuracy"])
base = InceptionResNetV2(weights="imagenet",
                         include_top=False,
                         input_shape=(299, 299, 3))

#########

x = base.output

x = MaxPooling2D()(x)

x = Flatten()(x)

x = Dropout(rate=0.5)(x)

x = Dense(128, activation="relu", kernel_initializer=Orthogonal())(x)

x = Dropout(rate=0.5)(x)

predictions = Dense(5, activation="softmax",
                    kernel_initializer=Orthogonal())(x)

model = Model(inputs=base.input, outputs=predictions)

#########

for layer in model.layers:
    layer.trainable = True

#########
Example #12
def dilated_module(input_layer):
    conv_size = (3, 3)
    n_filters = 32

    conv1 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(1, 1),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(input_layer)
    norm1 = BatchNormalization()(conv1)
    conv2 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(1, 1),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm1)
    norm2 = BatchNormalization()(conv2)
    conv3 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(2, 2),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm2)
    norm3 = BatchNormalization()(conv3)
    conv4 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(4, 4),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm3)
    norm4 = BatchNormalization()(conv4)
    conv5 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(8, 8),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm4)
    norm5 = BatchNormalization()(conv5)
    conv6 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(16, 16),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm5)
    norm6 = BatchNormalization()(conv6)
    conv7 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(32, 32),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm6)
    norm7 = BatchNormalization()(conv7)
    conv8 = Conv2D(n_filters,
                   conv_size,
                   activation='relu',
                   dilation_rate=(1, 1),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm7)
    norm8 = BatchNormalization()(conv8)
    conv9 = Conv2D(n_filters, (1, 1),
                   activation='relu',
                   dilation_rate=(1, 1),
                   kernel_initializer=Orthogonal(),
                   bias_initializer=Zeros())(norm8)
    norm9 = BatchNormalization()(conv9)
    drop = Dropout(0.5)(norm9)

    return drop
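
# Usage sketch (not from the source; input size and channel count are
# assumptions, and Input/Model are assumed imported like the layers above).
# The convolutions use the default 'valid' padding, so each dilated layer
# trims the borders and the input must be large enough (256 x 256 works).
inp = Input((256, 256, 1))
features = dilated_module(inp)
module_model = Model(inputs=inp, outputs=features)
module_model.summary()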
Example #13
def get_model(config: dict):
    """
    """
    cfg = ED(deepcopy(config))

    model = Sequential(name='TI_CNN')

    vgg_block_cfg = deepcopy(vgg_block_basic)

    for block_idx, (num_convs, filters) in enumerate(
            zip(vgg16.num_convs, vgg16.num_filters)):
        for idx in range(num_convs):
            if block_idx == idx == 0:
                model.add(
                    Conv1D(
                        input_shape=(cfg.input_len, 12),
                        filters=filters,
                        kernel_size=vgg_block_cfg.filter_length,
                        strides=vgg_block_cfg.subsample_length,
                        padding='same',
                        kernel_initializer=he_normal(SEED),
                    ))
            else:
                model.add(
                    Conv1D(
                        filters=filters,
                        kernel_size=vgg_block_cfg.filter_length,
                        strides=vgg_block_cfg.subsample_length,
                        padding='same',
                        kernel_initializer=he_normal(SEED),
                    ))
            model.add(BatchNormalization())
            model.add(ReLU())
        model.add(
            MaxPooling1D(
                pool_size=vgg_block_cfg.pool_size,
                strides=vgg_block_cfg.pool_size,
            ))

    if cfg.tranches_for_training:
        nb_classes = len(cfg.tranche_classes[cfg.tranches_for_training])
    else:
        nb_classes = len(cfg.classes)

    for units in [256, 64]:
        model.add(
            Bidirectional(
                LSTM(
                    units,
                    kernel_initializer=Orthogonal(seed=SEED),
                    return_sequences=True,
                )))
    model.add(
        Bidirectional(
            LSTM(
                nb_classes,
                kernel_initializer=Orthogonal(seed=SEED),
                return_sequences=False,
            )))

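    # The Bidirectional wrapper concatenates both directions, doubling the
    # feature width to 2 * nb_classes; the final Dense maps back to one
    # sigmoid score per class.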
    model.add(Dense(nb_classes, activation='sigmoid'))

    return model
Example #14
def getSimpleConvnetModel(d):
    n = d.num_blocks
    sf = d.start_filter
    dataset = d.dataset
    activation = d.act
    advanced_act = d.aact
    drop_prob = d.dropout
    if "mnist" in dataset:
        inputShape = (1, 28, 28) if K.image_dim_ordering() == "th" else (28,
                                                                         28, 1)
    else:
        inputShape = (3, 32, 32) if K.image_dim_ordering() == "th" else (32,
                                                                         32, 3)
    channelAxis = 1 if K.image_data_format() == 'channels_first' else -1
    filsize = (3, 3)
    convArgs = {
        "padding": "same",
        "use_bias": False,
        "kernel_regularizer": l2(0.0001),
    }
    bnArgs = {"axis": channelAxis, "momentum": 0.9, "epsilon": 1e-04}

    if d.model == "real":
        sf *= 2
        convArgs.update({"kernel_initializer": Orthogonal(float(np.sqrt(2)))})
    elif d.model == "complex":
        convArgs.update({
            "spectral_parametrization": d.spectral_param,
            "kernel_initializer": d.comp_init
        })

    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization

    batch_size = 256
    num_classes = 10
    epochs = 50

    # input image dimensions
    img_rows, img_cols = 28, 28

    I = Input(shape=inputShape)
    O = Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               kernel_initializer='he_normal',
               input_shape=inputShape)(I)
    O = MaxPooling2D(pool_size=(2, 2))(O)
    O = Dropout(0.25)(O)
    O = Conv2D(64, (3, 3), activation='relu')(O)
    O = MaxPooling2D(pool_size=(2, 2))(O)
    O = Dropout(0.25)(O)
    O = Conv2D(128, (3, 3), activation='relu')(O)
    O = Dropout(0.4)(O)
    O = Flatten()(O)
    O = Dense(128, activation='relu')(O)
    O = Dropout(0.3)(O)
    O = Dense(num_classes, activation='softmax')(O)

    # Return the model
    return Model(I, O)
Example #15
def getResnetModel(d):
    n = d.num_blocks
    sf = d.start_filter
    dataset = d.dataset
    activation = d.act
    advanced_act = d.aact
    drop_prob = d.dropout
    inputShape = ((3, 32, 32) if K.image_data_format() == "channels_first"
                  else (32, 32, 3))
    channelAxis = 1 if K.image_data_format() == 'channels_first' else -1
    filsize = (3, 3)
    convArgs = {
        "padding": "same",
        "use_bias": False,
        "kernel_regularizer": l2(0.0001),
    }
    bnArgs = {"axis": channelAxis, "momentum": 0.9, "epsilon": 1e-04}

    if d.model == "real":
        sf *= 2
        convArgs.update({"kernel_initializer": Orthogonal(float(np.sqrt(2)))})
    elif d.model == "complex":
        convArgs.update({
            "spectral_parametrization": d.spectral_param,
            "kernel_initializer": d.comp_init
        })

    #
    # Input Layer
    #

    I = Input(shape=inputShape)

    #
    # Stage 1
    #

    O = learnConcatRealImagBlock(I, (1, 1), (3, 3), 0, '0', convArgs, bnArgs,
                                 d)
    O = Concatenate(channelAxis)([I, O])
    if d.model == "real":
        O = Conv2D(sf, filsize, name='conv1', **convArgs)(O)
        O = BatchNormalization(name="bn_conv1_2a", **bnArgs)(O)
    else:
        O = ComplexConv2D(sf, filsize, name='conv1', **convArgs)(O)
        O = ComplexBN(name="bn_conv1_2a", **bnArgs)(O)
    O = Activation(activation)(O)

    #
    # Stage 2
    #

    for i in range(n):
        O = getResidualBlock(O, filsize, [sf, sf], 2, str(i), 'regular',
                             convArgs, bnArgs, d)
        if i == n // 2 and d.spectral_pool_scheme == "stagemiddle":
            O = applySpectralPooling(O, d)

    #
    # Stage 3
    #

    O = getResidualBlock(O, filsize, [sf, sf], 3, '0', 'projection', convArgs,
                         bnArgs, d)
    if d.spectral_pool_scheme == "nodownsample":
        O = applySpectralPooling(O, d)

    for i in range(n - 1):
        O = getResidualBlock(O, filsize, [sf * 2, sf * 2], 3, str(i + 1),
                             'regular', convArgs, bnArgs, d)
        if i == n // 2 and d.spectral_pool_scheme == "stagemiddle":
            O = applySpectralPooling(O, d)

    #
    # Stage 4
    #

    O = getResidualBlock(O, filsize, [sf * 2, sf * 2], 4, '0', 'projection',
                         convArgs, bnArgs, d)
    if d.spectral_pool_scheme == "nodownsample":
        O = applySpectralPooling(O, d)

    for i in range(n - 1):
        O = getResidualBlock(O, filsize, [sf * 4, sf * 4], 4, str(i + 1),
                             'regular', convArgs, bnArgs, d)
        if i == n // 2 and d.spectral_pool_scheme == "stagemiddle":
            O = applySpectralPooling(O, d)

    #
    # Pooling
    #

    if d.spectral_pool_scheme == "nodownsample":
        O = applySpectralPooling(O, d)
        O = AveragePooling2D(pool_size=(32, 32))(O)
    else:
        O = AveragePooling2D(pool_size=(8, 8))(O)

    #
    # Flatten
    #

    O = Flatten()(O)

    #
    # Dense
    #

    if dataset == 'cifar10':
        O = Dense(10, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    elif dataset == 'cifar100':
        O = Dense(100, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    elif dataset == 'svhn':
        O = Dense(10, activation='softmax', kernel_regularizer=l2(0.0001))(O)
    else:
        raise ValueError("Unknown dataset " + d.dataset)

    # Return the model
    return Model(I, O)
Example #16
    def conv_layer(filters, kernel_size, strides):
        return Conv2D(filters=filters,
                      kernel_size=kernel_size,
                      strides=(strides, strides),
                      activation='relu',
                      kernel_initializer=Orthogonal())
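
    # Usage sketch (hypothetical tensor `x`):
    # x = conv_layer(filters=32, kernel_size=(3, 3), strides=2)(x)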
Example #17
    def createModel(self):
        '''Creates the model architecture.
        Input: data input dimensions (architecture specifications such as
        activations, cost function, number of layers, and dropout could
        eventually be parsed from a config file).
        Output: Keras Model'''

        #seed = 1337

        #mod1      = Input((self.dpatch,self.dpatch,self.dpatch, self.num_channels))
        mod1 = Input((None, None, None,
                      self.num_channels))  # last channel is the individual TPM

        #############   Normal pathway   ##################

        # reduces 57 into 9 ( - 48)

        x1 = Cropping3D(cropping=((16, 16), (16, 16), (16, 16)),
                        input_shape=(None, None, None,
                                     self.num_channels))(mod1)

        # 25 --> 9  =  -16

        for feature in self.conv_features:
            x1 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=he_normal(),
                kernel_regularizer=regularizers.l2(self.L2))(x1)
            x1 = LeakyReLU()(x1)
            x1 = BatchNormalization()(x1)

        #############   Downsampled pathway   ##################
        #x2        = MaxPooling3D(pool_size=(self.d_factor,self.d_factor,self.d_factor), padding="same")(mod1)

        x2 = AveragePooling3D(pool_size=(self.d_factor, self.d_factor,
                                         self.d_factor),
                              padding="same")(mod1)

        # Downsampled by 1/3 to 19, then the convs reduce it to 3: -16

        for feature in self.conv_features:
            x2 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=he_normal(),
                kernel_regularizer=regularizers.l2(self.L2))(x2)
            x2 = LeakyReLU()(x2)
            x2 = BatchNormalization()(x2)

        x2 = UpSampling3D(size=(3, 3, 3))(x2)

        #############   Fully connected layers   ##################

        x = concatenate([x1, x2])

        x = Conv3D(
            filters=self.fc_features[0],
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)

        x = Conv3D(
            filters=self.fc_features[1],
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)

        #
        #        x        = Conv3D(filters = self.output_classes,
        #                           kernel_size = (1,1,1),
        #                           #kernel_initializer=he_normal(seed=seed),
        #                           kernel_initializer=Orthogonal(),
        #                           kernel_regularizer=regularizers.l2(self.L2))(x)
        #        #x        = BatchNormalization()(x)

        #        tpm = Input((None,None,None,6))

        #x4        = Cropping3D(cropping = ((24,24),(24,24),(24,24)), input_shape=(None, None, None, self.num_channels))(mod1)
        #x        = concatenate([x,tpm])#,x4])  #  MIXING ONLY CHANNELS + CHANNELS.

        # Skipping this bandfilter and going straight to the softmax makes everything pointless (no nonlinearity besides softmax), and pushes performance to the floor.
        #        x        = Conv3D(filters = self.fc_features[1],
        #                   kernel_size = (1,1,1),
        #                   kernel_initializer=Orthogonal(),
        #                   kernel_regularizer=regularizers.l2(self.L2))(x)
        #        x        = LeakyReLU()(x)
        #        x        = BatchNormalization()(x)
        #
        #        x        = Conv3D(filters = self.fc_features[1],
        #                   kernel_size = (1,1,1),
        #                   kernel_initializer=Orthogonal(),
        #                   kernel_regularizer=regularizers.l2(self.L2))(x)
        #        x        = LeakyReLU()(x)

        x = Conv3D(filters=self.output_classes,
                   kernel_size=(1, 1, 1),
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x = Activation(softmax)(x)

        model = Model(inputs=[mod1], outputs=x)
        #print_summary(model, positions=[.33, .6, .67,1])

        #rmsprop = RMSprop(lr=self.learning_rate, rho=0.9, epsilon=1e-8, decay=self.optimizer_decay)

        if self.loss_function == 'Multinomial':
            model.compile(loss='categorical_crossentropy',
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5,
                              dice_coef_multilabel6
                          ])
        elif self.loss_function == 'Dice2':
            model.compile(
                loss=Generalised_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=[dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Dice6':
            model.compile(loss=dice_coef_multilabel6,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5
                          ])
        elif self.loss_function == 'wDice6':
            model.compile(loss=w_dice_coef_multilabel6,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5
                          ])
        elif self.loss_function == 'wDice2':
            model.compile(
                loss=w_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=[dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Dice7':
            model.compile(loss=Generalised_dice_coef_multilabel7,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5,
                              dice_coef_multilabel6
                          ])
        return model


#dm = MultiPriors_noTPM_Model(7, 1, 0.001, [0], 0.01, 0, 'Dice7' )
#model = dm.createModel()
#model.summary()
#from keras.utils import plot_model
#plot_model(model, to_file='/home/hirsch/Documents/projects/brainSegmentation/DeepPriors' +'/multiscale_TPM_noTPM.png', show_shapes=True)
#
##
#X = np.random.randn(1,57,57,57,1)
#y = np.random.binomial(n=1, p=0.5,size=9**3*7).reshape(1,9,9,9,7)
#y.shape
#
#TPM = np.random.randn(1,9,9,9,6)
#
#yhat = model.predict([X,TPM])
#yhat.shape
#
#model.fit([X,TPM], y)
Example #18
    def fc_layer(units, activation_fn='relu'):
        return Dense(units=units,
                     activation=activation_fn,
                     kernel_initializer=Orthogonal())
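
    # Usage sketch (hypothetical tensor `x`):
    # x = fc_layer(units=128)(x)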
Example #19
    def createModel(self):
        '''Creates the model architecture.
        Input: data input dimensions (architecture specifications such as
        activations, cost function, number of layers, and dropout could
        eventually be parsed from a config file).
        Output: Keras Model'''

        #seed = 1337

        mod1 = Input(
            (self.dpatch, self.dpatch, self.dpatch, self.num_channels))

        #############   Normal pathway   ##################

        x1 = Cropping3D(cropping=((13, 13), (13, 13), (13, 13)),
                        input_shape=(self.dpatch, self.dpatch, self.dpatch,
                                     self.num_channels))(mod1)
        x1 = Conv3D(
            filters=self.conv_features[0],
            kernel_size=(3, 3, 3),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x1)
        x1 = BatchNormalization()(x1)
        #x1        = Activation('relu')(x1)
        x1 = PReLU()(x1)
        #x1        = BatchNormalization()(x1)

        for feature in self.conv_features[1:]:
            x1 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x1)
            x1 = BatchNormalization()(x1)
            #x1        = Activation('relu')(x1)
            x1 = PReLU()(x1)
            #x1        = BatchNormalization()(x1)

        #############   Downsampled pathway   ##################
        #x2        = MaxPooling3D(pool_size=(self.d_factor,self.d_factor,self.d_factor), padding="same")(mod1)

        x2 = AveragePooling3D(pool_size=(self.d_factor, self.d_factor,
                                         self.d_factor),
                              padding="same")(mod1)

        x3 = Conv3D(
            filters=5,
            kernel_size=(7, 7, 7),
            padding="same",
            strides=(3, 3, 3),
            kernel_initializer=Orthogonal(),
        )(mod1)
        x3 = BatchNormalization()(x3)
        x3 = PReLU()(x3)

        x2 = concatenate([x2, x3])

        x2 = Conv3D(
            filters=self.conv_features[0],
            kernel_size=(3, 3, 3),
            #kernel_initializer=he_normal(seed=seed),
            #dilation_rate = (17,17,17),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x2)
        x2 = BatchNormalization()(x2)
        #x2        = Activation('relu')(x2)
        x2 = PReLU()(x2)
        #x2        = BatchNormalization()(x2)

        for feature in (self.conv_features[1:]):
            x2 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x2)
            x2 = BatchNormalization()(x2)
            #x2        = Activation('relu')(x2)
            x2 = PReLU()(x2)
            #x2        = BatchNormalization()(x2)

        x2 = UpSampling3D(size=(9, 9, 9))(x2)

        #############   Fully connected layers   ##################

        x = concatenate([x1, x2])
        #x        = Reshape(target_shape = (1, 60))(x)   # do I need this?
        '''x        = Flatten()(x)
        x        = Dense(units = self.fc_features[0], activation = 'elu')(x)
        x        = Dropout(rate = 0.5)(x)
        x        = Dense(units = self.fc_features[1], activation = 'elu')(x)
        x        = Dropout(rate = 0.5)(x)    
        x        = Dense(units = self.fc_features[2], activation = 'softmax', name = 'softmax')(x)'''

        #   Fully convolutional variant

        #tpm = Input((9,9,9,6))

        #x        = Dropout(rate = self.dropout[0])(x)
        x = Conv3D(
            filters=self.fc_features[0],
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        x = BatchNormalization()(x)
        #x        = Activation('relu')(x)
        x = PReLU()(x)
        #x        = BatchNormalization()(x)
        #x        = Dropout(rate = self.dropout[0])(x)
        x = Conv3D(
            filters=self.fc_features[1],
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        x = BatchNormalization()(x)
        #x        = Activation('relu')(x)
        x = PReLU()(x)
        #x        = BatchNormalization()(x)
        #x        = Dropout(rate = self.dropout[1])(x)
        #x        = Flatten()(x)
        x = Dense(units=self.fc_features[2],
                  activation='softmax',
                  name='softmax')(x)

        model = Model(inputs=[mod1], outputs=x)
        #print_summary(model, positions=[.33, .6, .67,1])

        #rmsprop = RMSprop(lr=self.learning_rate, rho=0.9, epsilon=1e-8, decay=self.optimizer_decay)

        if self.loss_function == 'Multinomial':
            model.compile(loss='categorical_crossentropy',
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5
                          ])
        elif self.loss_function == 'Dice2':
            model.compile(
                loss=Generalised_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=[dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Dice6':
            model.compile(loss=dice_coef_multilabel6,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5
                          ])
        elif self.loss_function == 'wDice6':
            model.compile(loss=w_dice_coef_multilabel6,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5
                          ])
        elif self.loss_function == 'wDice2':
            model.compile(
                loss=w_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=[dice_coef_multilabel0, dice_coef_multilabel1])
        return model
Example #20
    def createModel(self):
        '''Creates the model architecture.
        Input: data input dimensions (architecture specifications such as
        activations, cost function, number of layers, and dropout could
        eventually be parsed from a config file).
        Output: Keras Model'''

        highRes = Input(
            (None, None, None, self.num_channels
             ))  # Dim = 25^3 (from a 57^3 cube, cropped 16 per side)
        lowRes = Input(
            (None, None, None, self.num_channels
             ))  # Dim = 19^3 (from a 57^3 cube downsampled by three)
        #############   Normal pathway   ##################

        # This input is now already cropped     = -32
        x1 = highRes

        # 25 --> 9  =  -16
        for kernels in self.conv_features:
            x1 = Conv3D(filters=kernels,
                        kernel_size=(3, 3, 3),
                        kernel_initializer=Orthogonal(),
                        kernel_regularizer=regularizers.l2(self.L2))(x1)
            x1 = LeakyReLU()(x1)
            x1 = BatchNormalization()(x1)

        #############   Downsampled pathway   ##################

        # This input is already downsampled       = /3
        x2 = lowRes

        # in total (x/3 - 16)*3  =  -66
        # -22
        for kernels in self.conv_features:
            x2 = Conv3D(filters=kernels,
                        kernel_size=(3, 3, 3),
                        kernel_initializer=Orthogonal(),
                        kernel_regularizer=regularizers.l2(self.L2))(x2)
            x2 = LeakyReLU()(x2)
            x2 = BatchNormalization()(x2)

        x2 = UpSampling3D(size=(3, 3, 3))(x2)

        #############   Fully connected layers   ##################

        tpm = Input((None, None, None, 6))

        x = concatenate([x1, x2, tpm])

        for feature in self.fc_features:
            x = Conv3D(filters=feature,
                       kernel_size=(1, 1, 1),
                       kernel_initializer=Orthogonal(),
                       kernel_regularizer=regularizers.l2(self.L2))(x)
            x = LeakyReLU()(x)
            x = BatchNormalization()(x)

        x = Conv3D(filters=self.output_classes,
                   kernel_size=(1, 1, 1),
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x = Activation(softmax)(x)

        model = Model(inputs=[highRes, lowRes, tpm], outputs=x)

        if self.loss_function == 'Multinomial':
            model.compile(loss='categorical_crossentropy',
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5,
                              dice_coef_multilabel6
                          ])
        elif self.loss_function == 'Dice2':
            model.compile(
                loss=Generalised_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=[dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Dice6':
            model.compile(loss=dice_coef_multilabel6,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5
                          ])
        elif self.loss_function == 'wDice6':
            model.compile(loss=w_dice_coef_multilabel6,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5
                          ])
        elif self.loss_function == 'wDice2':
            model.compile(
                loss=w_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=[dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Dice7':
            model.compile(loss=Generalised_dice_coef_multilabel7,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=[
                              dice_coef_multilabel0, dice_coef_multilabel1,
                              dice_coef_multilabel2, dice_coef_multilabel3,
                              dice_coef_multilabel4, dice_coef_multilabel5,
                              dice_coef_multilabel6
                          ])
        return model
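The shrinkage comments in this example ("25 --> 9 = -16" and the downsampled-pathway arithmetic) follow from 'valid' 3x3x3 convolutions, each trimming 2 voxels per spatial dimension. A quick check, assuming self.conv_features has eight entries as the comments imply:

n_convs = 8                                # assumed len(self.conv_features)
high_res_out = 25 - 2 * n_convs            # 25 -> 9
low_res_out = (57 // 3 - 2 * n_convs) * 3  # 19 -> 3, then x3 upsampling -> 9
assert high_res_out == low_res_out == 9    # both pathways align for concatenation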
Example #21
    def createModel(self):
    
        T1post = Input((None,None,None, 1),name = 'T1post_input')
       
        ########################  T1 post pathway #########################
            
        x = AveragePooling3D(pool_size=(1, 3, 3), name='T1post_Context')(T1post)    
        # (13, 25, 25)        
        for _ in range(6):    
            x        = Conv3D(filters = 30, 
                               kernel_size = (3,3,3), 
                               #kernel_initializer=he_normal(seed=seed),
                               kernel_initializer=Orthogonal(),
                               kernel_regularizer=regularizers.l2(self.L2))(x)
            x        = LeakyReLU()(x)                              
            x        = BatchNormalization()(x) 
 
        for _ in range(5):    
            x        = Conv3D(filters = 30, 
                               kernel_size = (1,3,3), 
                               #kernel_initializer=he_normal(seed=seed),
                               kernel_initializer=Orthogonal(),
                               kernel_regularizer=regularizers.l2(self.L2))(x)
            x        = LeakyReLU()(x)                              
            x        = BatchNormalization()(x)     
     
        x   =  UpSampling3D(size=(1,3,3))(x)
      

        ######################## FC Parts #############################
          
        for _ in self.fc_features[0:2]:  # two 1x1x1 conv blocks; filter count is fixed at 60
            x        = Conv3D(filters = 60, 
                               kernel_size = (1,1,1), 
                               #kernel_initializer=he_normal(seed=seed),
                               kernel_initializer=Orthogonal(),
                               kernel_regularizer=regularizers.l2(self.L2))(x)
            x        = LeakyReLU()(x)
            x        = BatchNormalization()(x)
        
           
        x        = Conv3D(filters = 100, 
                   kernel_size = (1,1,1), 
                   #kernel_initializer=he_normal(seed=seed),
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x        = LeakyReLU()(x)
        x        = BatchNormalization()(x)
        

        x        = Conv3D(filters = 2, 
                   kernel_size = (1,1,1), 
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x        = Activation('sigmoid')(x)
        
        model     = Model(inputs=[T1post], outputs=x)

        #model = multi_gpu_model(model, gpus=4)
    
        if self.loss_function == 'Dice':
            model.compile(loss=Generalised_dice_coef_multilabel2,
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Multinomial':
            model.compile(loss='binary_crossentropy',
                          optimizer=Adam(lr=self.learning_rate),
                          metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        return model
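The patch geometry of this pathway: after the (1, 3, 3) average pooling, six valid (3, 3, 3) convolutions and five valid (1, 3, 3) convolutions trim the volume, and the (1, 3, 3) upsampling restores the in-plane scale. Checking against the "(13, 25, 25)" comment:

depth_out = 13 - 2 * 6                  # kernel depth 3 in the first six convs -> 1
inplane_out = (25 - 2 * 6 - 2 * 5) * 3  # eleven convs of width 3, then x3 upsampling -> 9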
Example #22
 def lstm_layer(self):
     """Create a LSTM layer of a model."""
     if self.pooling:
         ret_seq = True
     else:
         ret_seq = False
     ker_in = glorot_uniform(seed=self.seed)
     rec_in = Orthogonal(seed=self.seed)
     if self.type_of_weights == "shared":
         if self.recurrent == "bilstm" or self.recurrent is None:
             out_a = Bidirectional(LSTM(self.hidden_dim,
                                        input_shape=(
                                            self.max_sequence_length,
                                            self.embedding_dim,
                                        ),
                                        kernel_initializer=ker_in,
                                        recurrent_initializer=rec_in,
                                        return_sequences=ret_seq),
                                   merge_mode='concat')
         elif self.recurrent == "lstm":
             out_a = LSTM(self.hidden_dim,
                          input_shape=(
                              self.max_sequence_length,
                              self.embedding_dim,
                          ),
                          kernel_initializer=ker_in,
                          recurrent_initializer=rec_in,
                          return_sequences=ret_seq)
         return out_a, out_a
     elif self.type_of_weights == "separate":
         if self.recurrent == "bilstm" or self.recurrent is None:
             out_a = Bidirectional(LSTM(self.hidden_dim,
                                        input_shape=(
                                            self.max_sequence_length,
                                            self.embedding_dim,
                                        ),
                                        kernel_initializer=ker_in,
                                        recurrent_initializer=rec_in,
                                        return_sequences=ret_seq),
                                   merge_mode='concat')
             out_b = Bidirectional(LSTM(self.hidden_dim,
                                        input_shape=(
                                            self.max_sequence_length,
                                            self.embedding_dim,
                                        ),
                                        kernel_initializer=ker_in,
                                        recurrent_initializer=rec_in,
                                        return_sequences=ret_seq),
                                   merge_mode='concat')
         elif self.recurrent == "lstm":
             out_a = LSTM(self.hidden_dim,
                          input_shape=(
                              self.max_sequence_length,
                              self.embedding_dim,
                          ),
                          kernel_initializer=ker_in,
                          recurrent_initializer=rec_in,
                          return_sequences=ret_seq)
             out_b = LSTM(self.hidden_dim,
                          input_shape=(
                              self.max_sequence_length,
                              self.embedding_dim,
                          ),
                          kernel_initializer=ker_in,
                          recurrent_initializer=rec_in,
                          return_sequences=ret_seq)
         return out_a, out_b
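A usage sketch: with type_of_weights == "shared" the same layer object is returned twice, so applying the pair to two inputs ties their weights (a Siamese encoder), while "separate" builds two independent recurrent branches. The names below are illustrative and assume this method's enclosing class:

# Inside the same class, hypothetical usage:
emb_a = Input((self.max_sequence_length, self.embedding_dim))
emb_b = Input((self.max_sequence_length, self.embedding_dim))
lstm_a, lstm_b = self.lstm_layer()
enc_a = lstm_a(emb_a)  # "shared": lstm_a is lstm_b, so weights are tied
enc_b = lstm_b(emb_b)  # "separate": two independently trained layers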
Example #23
  def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.compat.v1.name_scope('generator'):
      x = X
      L = self.layers
      n_filters = [  128,  256,  512, 512, 512, 512, 512, 512]
      n_blocks = [ 128, 64, 32, 16, 8]
      n_filtersizes = [65, 33, 17,  9,  9,  9,  9, 9, 9]
      downsampling_l = []

      print('building model...')

      def _make_normalizer(x_in, n_filters, n_block):
        """applies an lstm layer on top of x_in"""
        x_shape = tf.shape(input=x_in)

        n_steps = x_shape[1] // int(n_block)  # will be 32 at training

        # First, pool each length-n_block segment down to a single step:
        # input of (-1, 4096, 128) becomes (-1, 32, 128)
        # input of (-1, 512, 512) becomes (-1, 32, 512)

        x_in_down = (MaxPooling1D(pool_size=int(n_block), padding='valid'))(x_in)

        x_shape = tf.shape(input=x_in_down)

        x_rnn = LSTM(units=n_filters, return_sequences=True)(x_in_down)

        # output: (-1, n_steps, n_filters)
        return x_rnn

      def _apply_normalizer(x_in, x_norm, n_filters, n_block):
        x_shape = tf.shape(input=x_in)
        n_steps = x_shape[1] // int(n_block)  # will be 32 at training; must stay integer for the reshape below

        # reshape input into blocks
        x_in = tf.reshape(x_in, shape=(-1, n_steps, int(n_block), n_filters))
        x_norm = tf.reshape(x_norm, shape=(-1, n_steps, 1, n_filters))

        # multiply
        x_out = x_norm * x_in

        # return to original shape
        x_out = tf.reshape(x_out, shape=x_shape)

        return x_out
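      # Taken together, the two helpers implement multiplicative block-wise
      # gating: _make_normalizer pools each length-n_block segment to one
      # step and runs an LSTM over the resulting ~32 steps, and
      # _apply_normalizer broadcasts each per-segment activation back over
      # its segment, scaling x_in element-wise before the next layer.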


      # downsampling layers
      for l, nf, fs in zip(list(range(L)), n_filters, n_filtersizes):
        with tf.compat.v1.name_scope('downsc_conv%d' % l):
          x = (Conv1D(filters=nf, kernel_size=fs, dilation_rate = DRATE,
                  activation=None, padding='same', kernel_initializer=Orthogonal()))(x)
          x = (MaxPooling1D(pool_size=self.pool_size, padding='valid', strides=self.strides))(x)
          x = LeakyReLU(0.2)(x)

          # create and apply the normalizer
          nb = 128 // (2**l)

          params_before = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])
          x_norm = _make_normalizer(x, nf, nb)
          params_after = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])

          x = _apply_normalizer(x, x_norm, nf, nb)

          print('D-Block: ', x.get_shape())
          downsampling_l.append(x)

      # bottleneck layer
      with tf.compat.v1.name_scope('bottleneck_conv'):
          x = (Conv1D(filters=n_filters[-1], kernel_size=n_filtersizes[-1], dilation_rate = DRATE,
                  activation=None, padding='same', kernel_initializer=Orthogonal()))(x)
          x = (MaxPooling1D(pool_size=self.pool_size, padding='valid', strides=self.strides))(x)
          x = Dropout(rate=0.5)(x)
          x = LeakyReLU(0.2)(x)

          # create and apply the normalizer
          nb = 128 // (2**L)
          x_norm = _make_normalizer(x, n_filters[-1], nb)
          x = _apply_normalizer(x, x_norm, n_filters[-1], nb)

      # upsampling layers
      for l, nf, fs, l_in in reversed(list(zip(list(range(L)), n_filters, n_filtersizes, downsampling_l))):
        with tf.compat.v1.name_scope('upsc_conv%d' % l):
          # (-1, n/2, 2f)
          x = (Conv1D(filters=2*nf, kernel_size=fs, dilation_rate = DRATE,
                  activation=None, padding='same', kernel_initializer=Orthogonal()))(x)

          x = Dropout(rate=0.5)(x)
          x = Activation('relu')(x)
          # (-1, n, f)
          x = SubPixel1D(x, r=2)

          # create and apply the normalizer

          x_norm = _make_normalizer(x, nf, nb)
          x = _apply_normalizer(x, x_norm, nf, nb)
          # (-1, n, 2f)
          x = Concatenate()([x, l_in])
          print('U-Block: ', x.get_shape())

      # final conv layer
      with tf.compat.v1.name_scope('lastconv'):
        x = Conv1D(filters=2, kernel_size=9,
                activation=None, padding='same', kernel_initializer=RandomNormal(stddev=1e-3))(x)
        x = SubPixel1D(x, r=2)

      g = Add()([x, X])
    return g
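SubPixel1D is not defined in this snippet. A common TensorFlow implementation of one-dimensional subpixel (periodic) shuffling, which reshapes (batch, width, r * channels) into (batch, r * width, channels), is sketched below; this is an assumed helper, not taken from the original source:

import tensorflow as tf

def SubPixel1D(I, r):
    # Periodic shuffle: (batch, w, r*c) -> (batch, r*w, c).
    with tf.name_scope('subpixel'):
        X = tf.transpose(I, [2, 1, 0])           # (r*c, w, b)
        X = tf.batch_to_space(X, [r], [[0, 0]])  # (c, r*w, b)
        X = tf.transpose(X, [2, 1, 0])           # (b, r*w, c)
    return X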
Example #24
def build_model():
    input_1 = Input(batch_shape=(batch_size, num_channels, input_width,
                                 input_height))
    conv_1_1 = Conv2D(32,
                      kernel_size=(7, 7),
                      strides=(2, 2),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv1_1')(input_1)

    relu_1_1 = LeakyReLU(leakiness)(conv_1_1)
    pool_1_1 = MaxPool2D((3, 3),
                         strides=(2, 2),
                         data_format='channels_first',
                         batch_size=batch_size)(relu_1_1)

    conv_2_1 = Conv2D(32, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv2_1')(pool_1_1)
    relu_2_1 = LeakyReLU(leakiness)(conv_2_1)
    conv_2_2 = Conv2D(32, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv2_2')(relu_2_1)
    relu_2_2 = LeakyReLU(leakiness)(conv_2_2)
    pool_2_2 = MaxPool2D((3, 3),
                         strides=(2, 2),
                         data_format='channels_first',
                         batch_size=batch_size)(relu_2_2)

    conv_3_1 = Conv2D(64, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv3_1')(pool_2_2)
    relu_3_1 = LeakyReLU(leakiness)(conv_3_1)
    conv_3_2 = Conv2D(64, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv3_2')(relu_3_1)
    relu_3_2 = LeakyReLU(leakiness)(conv_3_2)
    pool_3_2 = MaxPool2D((3, 3),
                         strides=(2, 2),
                         data_format='channels_first',
                         batch_size=batch_size)(relu_3_2)

    conv_4_1 = Conv2D(128, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv4_1')(pool_3_2)
    relu_4_1 = LeakyReLU(leakiness)(conv_4_1)
    conv_4_2 = Conv2D(128, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv4_2')(relu_4_1)
    relu_4_2 = LeakyReLU(leakiness)(conv_4_2)
    conv_4_3 = Conv2D(128, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv4_3')(relu_4_2)
    relu_4_3 = LeakyReLU(leakiness)(conv_4_3)
    conv_4_4 = Conv2D(128, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv4_4')(relu_4_3)
    relu_4_4 = LeakyReLU(leakiness)(conv_4_4)
    pool_4_4 = MaxPool2D((3, 3),
                         strides=(2, 2),
                         data_format='channels_first',
                         batch_size=batch_size)(relu_4_4)

    conv_5_1 = Conv2D(256, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv5_1')(pool_4_4)
    relu_5_1 = LeakyReLU(leakiness)(conv_5_1)
    conv_5_2 = Conv2D(256, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv5_2')(relu_5_1)
    relu_5_2 = LeakyReLU(leakiness)(conv_5_2)
    conv_5_3 = Conv2D(256, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv5_3')(relu_5_2)
    relu_5_3 = LeakyReLU(leakiness)(conv_5_3)
    conv_5_4 = Conv2D(256, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='linear',
                      data_format='channels_first',
                      kernel_initializer=Orthogonal(1.0),
                      bias_initializer=Constant(0.1),
                      name='conv5_4')(relu_5_3)
    relu_5_4 = LeakyReLU(leakiness)(conv_5_4)
    pool_5_4 = MaxPool2D((3, 3),
                         strides=(2, 2),
                         data_format='channels_first',
                         batch_size=batch_size,
                         name='coarse_last_pool')(relu_5_4)

    dropout_1 = Dropout(0.5)(pool_5_4)
    flatten_1 = Flatten()(dropout_1)
    maxout_dense_0 = MaxoutDense(output_dim=512,
                                 dtype='float32',
                                 init='orthogonal',
                                 name='first_fc_0')(flatten_1)

    new_batch = batch_size // 2
    reshape_tensor = Lambda(reshape_batch,
                            arguments={'batch_size':
                                       new_batch})(maxout_dense_0)

    dropout_2 = Dropout(0.5)(reshape_tensor)
    maxout_dense_1 = MaxoutDense(output_dim=512,
                                 init='orthogonal',
                                 name='first_fc_1')(dropout_2)

    dropout_3 = Dropout(0.5)(maxout_dense_1)
    dense_1 = Dense(4,
                    kernel_initializer=Orthogonal(1.0),
                    bias_initializer=Constant(0.1),
                    name='last_dense')(dropout_3)

    reshape_tensor = Lambda(reshape_batch,
                            arguments={'batch_size': batch_size})(dense_1)
    predictions = Activation('softmax', name='last_out')(reshape_tensor)

    model = Model(inputs=input_1, outputs=predictions)
    return model
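Two helpers in this example are not shown: MaxoutDense is a legacy Keras 1 layer (removed in Keras 2, so this snippet needs either keras<2 or a custom maxout), and reshape_batch is only referenced through the Lambda layers. Given how it is called, halving the batch before the pairwise fully connected block and restoring it afterwards, a plausible minimal sketch (assumed, not from the original source) is:

import keras.backend as K

def reshape_batch(x, batch_size):
    # Regroup the batch so consecutive samples are concatenated along the
    # feature axis, e.g. (2n, 512) -> (n, 1024) when batch_size == n.
    return K.reshape(x, (batch_size, -1))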
Example #25
from keras import optimizers
from skimage import io
from keras.callbacks import CSVLogger, Callback, EarlyStopping
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.callbacks import TensorBoard

base = load_model('Xception_model(71).h5')

top_model = Sequential()
top_model.add(base)
top_model.add(MaxPooling2D(pool_size=(2, 2)))
top_model.add(Flatten())
top_model.add(Dropout(0.5))
top_model.add(Dense(128, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(3, activation='softmax', kernel_initializer=Orthogonal()))
top_model.summary()

top_model.compile(optimizer="SGD",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

top_model.load_weights('./Xception/Xception_decay.hdf5')

import cv2
import numpy as np
from sklearn import model_selection, metrics  # sklearn.cross_validation was removed; model_selection replaces it

test_folders = [
    './raw/test/cancer cell/', './raw/test/lymphocyte/',
    './raw/test/plasma cell/'
]


def model(input_shape):
    model = Sequential()
    model.add(Cropping2D(cropping=(2, 4), input_shape=input_shape))
    model.add(
        Conv2D(32, (5, 5),
               strides=(2, 2),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(32, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    #model.add(Conv2D(32, (3,3), strides=(1,1), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1), kernel_regularizer=regularizers.l2(0.0005)))
    #model.add(Conv2D(32, (3,3), strides=(2,2), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1), kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(64, (5, 5),
               strides=(2, 2),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1),
               kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))

    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Flatten())
    model.add(
        Dense(1024,
              kernel_initializer=Orthogonal(1.0),
              bias_initializer=Constant(0.1),
              kernel_regularizer=regularizers.l2(0.0005)))
    model.add(LeakyReLU(0.01))
    model.add(
        Lambda(lambda x: maxout(x, 512), lambda shp: maxout_shape(shp, 512)))
    model.add(
        Dense(1,
              kernel_initializer=Orthogonal(1.0),
              bias_initializer=Constant(0.1),
              kernel_regularizer=regularizers.l2(0.0005)))

    return model
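The maxout and maxout_shape helpers used in the Lambda layer above are not defined in the snippet. A common channel-group maxout (an assumed implementation, matching how the Lambda calls it: 1024 dense features reduced to 512 pairwise maxima):

import tensorflow as tf

def maxout(x, num_units):
    # Max over groups of channels: (batch, k*num_units) -> (batch, num_units).
    num_channels = x.get_shape().as_list()[-1]
    x = tf.reshape(x, [-1, num_units, num_channels // num_units])
    return tf.reduce_max(x, axis=-1)

def maxout_shape(input_shape, num_units):
    return input_shape[:-1] + (num_units,)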
    def createModel(self):
        '''Creates the model architecture.
        Input: data input dimensions and, possibly, architecture specifications parsed from a config file (activations, cost function, hyperparameters such as number of layers, dropout, ...).
        Output: a Keras Model.'''
    
        #seed = 1337
        #mod1 = Input(input_shape=(None,None,None, self.num_channels))
        
        mod1      = Input((self.dpatch[0],self.dpatch[1],self.dpatch[2], self.num_channels))
        
        #############   Downsampled pathway   ##################   
        
        #x2        = AveragePooling3D(pool_size=(d_factor[0],d_factor[1],d_factor[2]), padding="same")(mod1)
        
        x3        = Conv3D(filters = 30, 
                           kernel_size = (3,3,3), 
                           dilation_rate = (1,1,1),
                           padding = 'same',
                           kernel_initializer=Orthogonal(),
                           kernel_regularizer=regularizers.l2(self.L2))(mod1)
        x3        = BatchNormalization()(x3)
        #x3        = Activation('relu')(x3)
        x3        = LeakyReLU()(x3)
        
        x3        = Conv3D(filters = 30, 
                           kernel_size = (3,3,3), 
                           dilation_rate = (1,1,1),
                           padding = 'same',
                           kernel_initializer=Orthogonal(),
                           kernel_regularizer=regularizers.l2(self.L2))(x3)
        x3        = BatchNormalization()(x3)
        #x3        = Activation('relu')(x3)
        x3        = LeakyReLU()(x3)
        # -------------------   Dilation Pyramid
        
        x3        = Conv3D(filters = 40, 
                           kernel_size = (3,3,3), 
                           dilation_rate = (1,2,2),
                           padding = 'valid',
                           #kernel_initializer=he_normal(seed=seed),
                           kernel_initializer=Orthogonal(),
                           kernel_regularizer=regularizers.l2(self.L2))(x3)
        x3        = BatchNormalization()(x3)
        #x3        = Activation('relu')(x3)
        x3        = LeakyReLU()(x3)

        x3        = Conv3D(filters = 40, 
                           kernel_size = (3,3,3), 
                           dilation_rate = (1,4,4),
                           padding = 'valid',
                           #kernel_initializer=he_normal(seed=seed),
                           kernel_initializer=Orthogonal(),
                           kernel_regularizer=regularizers.l2(self.L2))(x3)
        x3        = BatchNormalization()(x3)
        #x3        = Activation('relu')(x3)
        x3        = LeakyReLU()(x3)
        
        x3        = Conv3D(filters = 50, 
                           kernel_size = (3,3,3), 
                           dilation_rate = (1,8,8),
                           padding = 'valid',
                           #kernel_initializer=he_normal(seed=seed),
                           kernel_initializer=Orthogonal(),
                           kernel_regularizer=regularizers.l2(self.L2))(x3)
        x3        = BatchNormalization()(x3)
        #x3        = Activation('relu')(x3)
        x3        = LeakyReLU()(x3)
        
        x3        = Conv3D(filters = 50, 
                           kernel_size = (3,3,3), 
                           dilation_rate = (1,16,16),
                           padding = 'valid',
                           #kernel_initializer=he_normal(seed=seed),
                           kernel_initializer=Orthogonal(),
                           kernel_regularizer=regularizers.l2(self.L2))(x3)
        x3        = BatchNormalization()(x3)
        #x3        = Activation('relu')(x3)
        x3        = LeakyReLU()(x3)
        
       
        
        #############   High res pathway   ##################  
        
        x1        = Cropping3D(cropping = ((0,0),(22,22),(22,22)), input_shape=(self.dpatch[0],self.dpatch[1],self.dpatch[2],  self.num_channels))(mod1)
        
        for feature in self.conv_features[0:8]:  
            x1        = Conv3D(filters = feature, 
                               kernel_size = (2,3,3), 
                               #kernel_initializer=he_normal(seed=seed),
                               kernel_initializer=Orthogonal(),
                               kernel_regularizer=regularizers.l2(self.L2))(x1)
            x1        = BatchNormalization()(x1)
            #x1        = Activation('relu')(x1)
            x1        = LeakyReLU()(x1)
        
        
        #############   Fully connected layers   ################## 
        
        x        = concatenate([x1,x3])
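        # The two branches align here by construction (assuming
        # conv_features[0:8] has eight entries): the dilation pyramid trims
        # 2*(2+4+8+16) = 60 in-plane voxels and 2*4 = 8 in depth, while the
        # high-res branch trims 2*22 = 44 in-plane via cropping plus
        # 2*8 = 16 via its valid (2,3,3) convolutions (and 1*8 = 8 in depth).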
        
        #   Fully convolutional variant
        
        for feature in (self.conv_features[10:12]):  
            x        = Conv3D(filters = feature, 
                               kernel_size = (3,3,3), 
                               #kernel_initializer=he_normal(seed=seed),
                               kernel_initializer=Orthogonal(),
                               kernel_regularizer=regularizers.l2(self.L2))(x)
            x        = BatchNormalization()(x)
            #x        = Activation('relu')(x)
            x        = LeakyReLU()(x)

        #x        = BatchNormalization()(x)
        #x        = Dropout(rate = self.dropout[0])(x)
        
        
        x        = Conv3D(filters = 100, 
                           kernel_size = (1,3,3), 
                           kernel_initializer=Orthogonal(),
                           kernel_regularizer=regularizers.l2(self.L2))(x)
        #x        = Activation('relu')(x)
        x        = LeakyReLU()(x)
        #coords_x = Input((1,9,9,1))  

        coords_y = Input((1,9,9,1))
        coords_z = Input((1,9,9,1))
        
        #x = concatenate([x, coords_y, coords_z])

   
        for fc_filters in self.fc_features:
            x        = Conv3D(filters = fc_filters,
                       kernel_size = (1,1,1),
                       #kernel_initializer=he_normal(seed=seed),
                       kernel_initializer=Orthogonal(),
                       kernel_regularizer=regularizers.l2(self.L2))(x)
            x        = BatchNormalization()(x)
            #x        = Activation('relu')(x)
            x        = LeakyReLU()(x)
 
        
        # Final softmax layer
        x        = Conv3D(filters = self.output_classes, 
                   kernel_size = (1,1,1), 
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x        = Activation(softmax)(x)
        
        model     = Model(inputs=[mod1,coords_y,coords_z], outputs=x)
        model.compile(loss=Generalised_dice_coef_multilabel2,
                      optimizer=Adam(lr=self.learning_rate),
                      metrics=[dice_coef_multilabel0, dice_coef_multilabel1])
        return model
Example #28
    def create_model(self, n_dim, r):
        # load inputs
        X, _, _ = self.inputs
        K.set_session(self.sess)

        with tf.compat.v1.name_scope('generator'):
            x = X
            L = self.layers
            # dim/layer: 4096, 2048, 1024, 512, 256, 128,  64,  32,
            n_filters = [128, 384, 512, 512, 512, 512, 512, 512]
            n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
            downsampling_l = []

            print('building model...')

            # downsampling layers
            for l, nf, fs in zip(list(range(L)), n_filters, n_filtersizes):
                with tf.compat.v1.name_scope('downsc_conv%d' % l):
                    x = (Conv1D(filters=nf,
                                kernel_size=fs,
                                activation=None,
                                padding='same',
                                kernel_initializer=Orthogonal(),
                                strides=2))(x)
                    # if l > 0: x = BatchNormalization(mode=2)(x)
                    x = LeakyReLU(0.2)(x)
                    print('D-Block: ', x.get_shape())
                    downsampling_l.append(x)

            # bottleneck layer
            with tf.compat.v1.name_scope('bottleneck_conv'):
                x = (Conv1D(filters=n_filters[-1],
                            kernel_size=n_filtersizes[-1],
                            activation=None,
                            padding='same',
                            kernel_initializer=Orthogonal(),
                            strides=2))(x)
                x = Dropout(rate=0.5)(x)
                x = LeakyReLU(0.2)(x)

            # upsampling layers
            for l, nf, fs, l_in in reversed(
                    list(
                        zip(list(range(L)), n_filters, n_filtersizes,
                            downsampling_l))):
                with tf.compat.v1.name_scope('upsc_conv%d' % l):
                    # (-1, n/2, 2f)
                    x = (Conv1D(filters=2 * nf,
                                kernel_size=fs,
                                activation=None,
                                padding='same',
                                kernel_initializer=Orthogonal()))(x)
                    x = Dropout(rate=0.5)(x)
                    x = Activation('relu')(x)
                    # (-1, n, f)
                    x = SubPixel1D(x, r=2)
                    # (-1, n, 2f)
                    x = K.concatenate(tensors=[x, l_in], axis=2)
                    print('U-Block: ', x.get_shape())

            # final conv layer
            with tf.compat.v1.name_scope('lastconv'):
                x = Conv1D(filters=2,
                           kernel_size=9,
                           activation=None,
                           padding='same',
                           kernel_initializer=RandomNormal(stddev=1e-3))(x)
                x = SubPixel1D(x, r=2)
                print(x.get_shape())

            g = Add()([x, X])

        return g
def model(input_shape):
    model = Sequential()
    model.add(Cropping2D(cropping=(2, 4), input_shape=input_shape))
    model.add(
        Conv2D(32, (7, 7),
               strides=(2, 2),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    #model.add(Conv2D(32, (3,3), strides=(1,1), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1)))
    #model.add(Conv2D(32, (3,3), strides=(1,1), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1)))
    #model.add(Conv2D(32, (3,3), strides=(2,2), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(32, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(32, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.5))
    model.add(GlobalMaxPooling2D())
    model.add(
        Dense(1,
              kernel_initializer=Orthogonal(1.0),
              bias_initializer=Constant(0.1),
              activation='sigmoid'))

    return model
Example #30
def model(input_shape):
    model = Sequential()
    model.add(Cropping2D(cropping=(2, 4), input_shape=input_shape))
    model.add(
        Conv2D(32, (7, 7),
               strides=(2, 2),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    #model.add(Conv2D(32, (3,3), strides=(1,1), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1)))
    #model.add(Conv2D(32, (3,3), strides=(1,1), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1)))
    #model.add(Conv2D(32, (3,3), strides=(2,2), padding='same', kernel_initializer=Orthogonal(1.0),
    #          bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(32, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(32, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               kernel_initializer=Orthogonal(1.0),
               bias_initializer=Constant(0.1)))
    model.add(LeakyReLU(0.5))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.5))
    model.add(GlobalMaxPooling2D())
    model.add(
        Dense(1,
              kernel_initializer=Orthogonal(1.0),
              bias_initializer=Constant(0.1)))
    model.add(Activation('sigmoid'))
    sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    return model
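A usage sketch for this builder; the input shape is illustrative (any channels-last image large enough to survive the (2, 4) crop and the five stride-2 poolings works):

net = model((66, 200, 3))  # hypothetical input shape
net.summary()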