def _create_model(self, input_shape, dropout=0.2, last_activation='sigmoid'):
        resnet_base = ResNet50(input_shape=input_shape, include_top=False)

        for l in resnet_base.layers:
            l.trainable = True
        conv1 = resnet_base.get_layer("activation_1").output
        conv2 = resnet_base.get_layer("activation_10").output
        conv3 = resnet_base.get_layer("activation_22").output
        conv4 = resnet_base.get_layer("activation_40").output
        conv5 = resnet_base.get_layer("activation_49").output

        up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
        conv6 = conv_block_simple(up6, 256, "conv6_1")
        conv6 = conv_block_simple(conv6, 256, "conv6_2")

        up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
        conv7 = conv_block_simple(up7, 192, "conv7_1")
        conv7 = conv_block_simple(conv7, 192, "conv7_2")

        up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
        conv8 = conv_block_simple(up8, 128, "conv8_1")
        conv8 = conv_block_simple(conv8, 128, "conv8_2")

        up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
        conv9 = conv_block_simple(up9, 64, "conv9_1")
        conv9 = conv_block_simple(conv9, 64, "conv9_2")

        vgg = VGG16(input_shape=input_shape,
                    input_tensor=resnet_base.input,
                    include_top=False)
        for l in vgg.layers:
            l.trainable = False
        vgg_first_conv = vgg.get_layer("block1_conv2").output
        up10 = concatenate(
            [UpSampling2D()(conv9), resnet_base.input, vgg_first_conv],
            axis=-1)
        conv10 = conv_block_simple(up10, 32, "conv10_1")
        conv10 = conv_block_simple(conv10, 32, "conv10_2")
        conv10 = SpatialDropout2D(dropout)(conv10)
        x = Conv2D(1, (1, 1), activation=last_activation,
                   name="prediction")(conv10)
        model = Model(resnet_base.input, x)
        return model
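
Note: `conv_block_simple` is not defined in any of these snippets. A minimal sketch consistent with how it is called here (input tensor, filter count, name prefix) is a Conv2D + BatchNormalization + ReLU block; treat the exact kernel size and initializer below as assumptions, not the original implementation.

from keras.layers import Conv2D, BatchNormalization, Activation

def conv_block_simple(prevlayer, filters, prefix, strides=(1, 1)):
    # Assumed implementation: 3x3 conv -> batch norm -> ReLU, with named layers.
    conv = Conv2D(filters, (3, 3), padding="same", kernel_initializer="he_normal",
                  strides=strides, name=prefix + "_conv")(prevlayer)
    conv = BatchNormalization(name=prefix + "_bn")(conv)
    conv = Activation('relu', name=prefix + "_activation")(conv)
    return conv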
Code example #2
def dsv(x, size, scale_factor, dropout=0.0, batch_norm=True):
    if K.image_dim_ordering() == 'th':
        axis = 1
    else:
        axis = 3
    conv = Conv2D(size, (3, 3), padding='same')(x)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    conv = Conv2D(size, (3, 3), padding='same')(conv)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = SpatialDropout2D(dropout)(conv)

    upsampler = UpSampling2DBilinear(stride=scale_factor)(conv)

    return upsampler
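
`UpSampling2DBilinear` is a custom layer that is not shown here. A plausible sketch, assuming a TensorFlow 1.x backend and channels-last data, wraps `tf.image.resize_bilinear` in a `Lambda`; the `stride` keyword matches the call above, but the body is an assumption.

import tensorflow as tf
from keras.layers import Lambda

def UpSampling2DBilinear(stride):
    def _resize(x):
        shape = tf.shape(x)
        # Scale both spatial dimensions by the integer factor `stride`
        # (assumes channels-last tensors).
        return tf.image.resize_bilinear(x, [shape[1] * stride, shape[2] * stride],
                                        align_corners=True)
    return Lambda(_resize)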
Code example #3
def double_conv_layer(x, size, dropout=0.0, batch_norm=True):
    if K.image_dim_ordering() == 'th':
        axis = 1
    else:
        axis = 3
    conv = Conv2D(
        size,
        (3, 3),
        padding='same',
    )(x)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    conv = Conv2D(size, (3, 3), padding='same')(conv)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = SpatialDropout2D(dropout)(conv)
    return conv
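
A hypothetical usage sketch: stacking `double_conv_layer` blocks with max pooling forms the encoder levels of a U-Net (the sizes and names below are illustrative, not from the original project).

from keras.layers import Input, MaxPooling2D

inputs = Input((256, 256, 3))
down1 = double_conv_layer(inputs, 32, dropout=0.1)   # 256x256x32
pool1 = MaxPooling2D(pool_size=(2, 2))(down1)        # 128x128x32
down2 = double_conv_layer(pool1, 64, dropout=0.1)    # 128x128x64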
Code example #4
def get_unet_resnet(input_shape):
    resnet_base = ResNet50(input_shape=input_shape, include_top=False)

    #     for idx,l in enumerate(resnet_base.layers):
    #         if idx <= 78 :
    #             l.trainable = False
    #         else:
    #             l.trainable = True
    for l in resnet_base.layers:
        l.trainable = True

    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = UpSampling2D()(conv9)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(resnet_base.input, x)
    return model
Code example #5
def bottleneck(inputData, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    internal = output // internal_scale
    encodeData = inputData
    # The first 1x1 projection is replaced with a 2x2, stride-2 convolution when downsampling
    input_stride = 2 if downsample else 1
    encodeData = Conv2D(internal, (input_stride, input_stride),
                        strides=(input_stride, input_stride), use_bias=False)(encodeData)
    encodeData = BatchNormalization(momentum=0.1)(encodeData)
    encodeData = PReLU(shared_axes=[1, 2])(encodeData)
    if not asymmetric and not dilated:
        encodeData = Conv2D(internal, (3, 3), padding='same')(encodeData)
    elif asymmetric:
        encodeData = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encodeData)
        encodeData = Conv2D(internal, (asymmetric, 1), padding='same')(encodeData)
    else:
        encodeData = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encodeData)

    encodeData = BatchNormalization(momentum=0.1)(encodeData)
    encodeData = PReLU(shared_axes=[1, 2])(encodeData)
    encodeData = Conv2D(output, (1, 1), use_bias=False)(encodeData)
    encodeData = BatchNormalization(momentum=0.1)(encodeData)
    encodeData = SpatialDropout2D(dropout_rate)(encodeData)

    # Shortcut branch
    prevData = inputData
    if downsample:
        prevData = MaxPooling2D()(prevData)

        # ZeroPadding2D only pads spatial axes, so swap the channel axis into the
        # width position, zero-pad the missing feature maps, then swap back.
        prevData = Permute((1, 3, 2))(prevData)
        pad_feature_maps = output - inputData.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        prevData = ZeroPadding2D(padding=(tb_pad, lr_pad))(prevData)
        prevData = Permute((1, 3, 2))(prevData)

    encodeData = add([encodeData, prevData])
    encodeData = PReLU(shared_axes=[1, 2])(encodeData)

    return encodeData
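
For context, this is the ENet-style bottleneck block; a hedged sketch of how such blocks are typically chained into an encoder stage (the filter count, repeat count, and the provenance of `x` are assumptions, not from the snippet):

# `x` is assumed to be the output of ENet's initial block.
x = bottleneck(x, 64, downsample=True, dropout_rate=0.01)  # downsampling bottleneck
for _ in range(4):
    x = bottleneck(x, 64, dropout_rate=0.01)               # regular bottlenecks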
Code example #6
def get_unet_inception_resnet_v2(input_shape, numberOfMaskChannels):
    if numberOfMaskChannels == 1:
        base_model = GetOrBuildModel("./encoder_models/inception_resnet_v2_model_untrained_1_channel_masks_notop.h5", input_shape)
    elif numberOfMaskChannels == 2:
        base_model = GetOrBuildModel("./encoder_models/inception_resnet_v2_model_untrained_2_channel_masks_notop.h5", input_shape)
    else:
        raise ValueError('numberOfMaskChannels must be 1 or 2')

    conv1 = base_model.get_layer('activation_3').output
    conv2 = base_model.get_layer('activation_5').output
    conv3 = base_model.get_layer('block35_10_ac').output
    conv4 = base_model.get_layer('block17_20_ac').output
    conv5 = base_model.get_layer('conv_7b_ac').output
    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 256, "conv7_1")
    conv7 = conv_block_simple(conv7, 256, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = concatenate([UpSampling2D()(conv9), base_model.input], axis=-1)
    conv10 = conv_block_simple(up10, 48, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.4)(conv10)
    if numberOfMaskChannels == 1:
        x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    elif numberOfMaskChannels == 2:
        x = Conv2D(2, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(base_model.input, x)
    return model
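
`GetOrBuildModel` is project-specific and not shown. A plausible sketch, assuming it caches an InceptionResNetV2 encoder on disk (the path handling and weight initialisation are assumptions):

import os
from keras.models import load_model
from keras.applications.inception_resnet_v2 import InceptionResNetV2

def GetOrBuildModel(path, input_shape):
    # Assumed behaviour: reuse the saved encoder if present, otherwise build and cache it.
    if os.path.exists(path):
        return load_model(path)
    model = InceptionResNetV2(include_top=False, weights=None, input_shape=input_shape)
    model.save(path)
    return model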
Code example #7
def get_simple_unet(input_shape):
    img_input = Input(input_shape)
    conv1 = conv_block_simple(img_input, 32, "conv1_1")
    conv1 = conv_block_simple(conv1, 32, "conv1_2")
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), padding="same",
                         name="pool1")(conv1)

    conv2 = conv_block_simple(pool1, 64, "conv2_1")
    conv2 = conv_block_simple(conv2, 64, "conv2_2")
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), padding="same",
                         name="pool2")(conv2)

    conv3 = conv_block_simple(pool2, 128, "conv3_1")
    conv3 = conv_block_simple(conv3, 128, "conv3_2")
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), padding="same",
                         name="pool3")(conv3)

    conv4 = conv_block_simple(pool3, 256, "conv4_1")
    conv4 = conv_block_simple(conv4, 256, "conv4_2")
    conv4 = conv_block_simple(conv4, 256, "conv4_3")

    up5 = concatenate([UpSampling2D()(conv4), conv3], axis=-1)
    conv5 = conv_block_simple(up5, 128, "conv5_1")
    conv5 = conv_block_simple(conv5, 128, "conv5_2")

    up6 = concatenate([UpSampling2D()(conv5), conv2], axis=-1)
    conv6 = conv_block_simple(up6, 64, "conv6_1")
    conv6 = conv_block_simple(conv6, 64, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv1], axis=-1)
    conv7 = conv_block_simple(up7, 32, "conv7_1")
    conv7 = conv_block_simple(conv7, 32, "conv7_2")

    conv7 = SpatialDropout2D(0.2)(conv7)

    prediction = Conv2D(1, (1, 1), activation="sigmoid",
                        name="prediction")(conv7)
    model = Model(img_input, prediction)
    return model
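
A hypothetical usage sketch for the model above (the optimizer and loss are illustrative):

model = get_simple_unet((256, 256, 3))
model.compile(optimizer='adam', loss='binary_crossentropy')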
Code example #8
def conv2d_basic(x,
                 filters,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 padding='same',
                 activation='selu',
                 dropout=0.2,
                 weight_decay=1e-4,
                 name=None):
    """Utility function to apply conv + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        kernel_size: kernel size as in `Conv2D`.
        strides: strides in `Conv2D`.
        padding: padding mode in `Conv2D`.
        activation: activation in `Conv2D`.
        use_bias: whether to use a bias in `Conv2D`.
        name: name of the ops; will become `name + '_ac'` for the activation
            and `name + '_bn'` for the batch norm layer.

    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    x = Conv2D(filters,
               kernel_size,
               strides=strides,
               dilation_rate=dilation_rate,
               padding=padding,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay),
               name=name)(x)
    if activation is not None:
        ac_name = None if name is None else name + '_ac'
        x = Activation(activation, name=ac_name)(x)
    x = SpatialDropout2D(dropout)(x)
    return x
Code example #9
File: unet.py (project: vkolagotla/robin)
def double_conv_layer(inputs: tf_tensor, filter: int) -> tf_tensor:
    """Create a double convolution layer with
    mentioned inputs and filters.

    Parameters
    ----------
    inputs: tf_tensor
        input keras layer
    filter: int
        number of filters for the `Conv2D` layers

    Returns
    -------
    tf_tensor
        tensor with mentioned number of filters

    Example
    -------
    robin.unet.double_conv_layer(Input, 32)

    """
    conv = Conv2D(
        filter,
        (3, 3),
        padding="same",
        kernel_initializer="he_normal")(inputs)

    conv = BatchNormalization(axis=3)(conv)
    conv = Activation("relu")(conv)
    conv = Conv2D(
        filter,
        (3, 3),
        padding="same",
        kernel_initializer="he_normal")(conv)
    conv = BatchNormalization(axis=3)(conv)
    conv = Activation("relu")(conv)
    conv = SpatialDropout2D(0.1)(conv)
    return conv
Code example #10
def build_model(input_shape):
    input_tensor = Input(shape=input_shape)
    resnet_base = ResNet50(input_tensor=input_tensor,
                           weights='imagenet',
                           include_top=False)
    #resnet_base.summary()

    for l in resnet_base.layers:
        l.trainable = True
    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = UpSampling2D()(conv9)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(4, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(inputs=[resnet_base.input], outputs=[x])
    return model
Code example #11
def build_discriminator(size):
    model = Sequential(name='discriminator')

    f = 64
    s = size

    while s > 7:
        if s == size:
            model.add(
                Conv2D(filters=f,
                       kernel_size=7,
                       strides=1,
                       padding='same',
                       input_shape=(size, size, 10)))
        else:
            model.add(
                Conv2D(filters=f, kernel_size=7, strides=1, padding='same'))

        model.add(BatchNormalization(momentum=0.8, axis=3))
        model.add(Activation('relu'))

        model.add(Conv2D(filters=f, kernel_size=3, strides=1, padding='same'))
        model.add(BatchNormalization(momentum=0.8, axis=3))
        model.add(Activation('relu'))

        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(SpatialDropout2D(0.2))

        f = f * 2
        s = s // 2

    model.add(GlobalAveragePooling2D())

    model.add(Dense(1, activation='sigmoid'))

    model.trainable = True
    return model
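
The loop halves `s` until it reaches 7, so `size` should be of the form 7 * 2**k. A hypothetical usage sketch (the optimizer and loss are illustrative):

discriminator = build_discriminator(112)  # 112 -> 56 -> 28 -> 14 -> 7
discriminator.compile(optimizer='adam', loss='binary_crossentropy')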
Code example #12
def get_model(shape, dropout=0.5, path=None):
    print('building neural network')

    model = Sequential()

    model.add(Convolution2D(512, 3, 3, border_mode='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(dropout))

    model.add(Flatten())
    # model.add(Dense(4096))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1))
    #model.add(Activation('linear'))

    return model
Code example #13
def model_init(input_shape, **kwargs):
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation, Flatten, Dropout
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers import GlobalAveragePooling2D

    assert (len(input_shape) == 3)

    try:
        from keras.layers.core import SpatialDropout2D
    except ImportError:
        from keras import __version__ as __kv__
        from warnings import warn
        warn('no SpatialDropout2D layer in keras version: %s' % __kv__)
        SpatialDropout2D = Dropout

    # need to set the input_shape to first layer for a new model
    model = Sequential()
    model.add(
        Convolution2D(64, (3, 3), padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, (3, 3)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    model.add(Convolution2D(64, (2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    model.add(Convolution2D(72, (2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(72, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.3))

    model.add(Convolution2D(96, (2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(96, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.4))

    model.add(GlobalAveragePooling2D())
    model.add(Dense(2048))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    return dict(model=model, lr_mult=1.0)
Code example #14
train_generator = generate(train_samples)
validation_generator = generate(validation_samples, train=False)

import keras
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.core import SpatialDropout2D
from keras.callbacks import ModelCheckpoint, EarlyStopping

model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
model.add(SpatialDropout2D(0.1))
model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
model.add(SpatialDropout2D(0.1))
model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
model.add(SpatialDropout2D(0.1))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.1))
model.add(Dense(50))
model.add(Dropout(0.1))
model.add(Dense(1))
print('training')
'''
Callbacks create useful checkpoints and stop training as close to the optimum as possible.
'''
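
The snippet is cut off here; a plausible continuation using the `ModelCheckpoint` and `EarlyStopping` imports above (the file name, patience, and epoch count are assumptions):

checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True)
early_stop = EarlyStopping(monitor='val_loss', patience=3)
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator,
                    samples_per_epoch=len(train_samples),
                    validation_data=validation_generator,
                    nb_val_samples=len(validation_samples),
                    nb_epoch=5,
                    callbacks=[checkpoint, early_stop])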
Code example #15
model.add(Cropping2D(cropping=((75, 20), (0, 0)), input_shape=(180, 320, 3)))

# Preprocess incoming data, centered around zero with small standard deviation
#model.add(Lambda(lambda x: (x / 255.0) - 0.5))

#Nvidia model
model.add(
    Convolution2D(24, (5, 5), activation="relu", name="conv_1",
                  strides=(2, 2)))
model.add(
    Convolution2D(36, (5, 5), activation="relu", name="conv_2",
                  strides=(2, 2)))
model.add(
    Convolution2D(48, (5, 5), activation="relu", name="conv_3",
                  strides=(2, 2)))
model.add(SpatialDropout2D(0.5))

model.add(
    Convolution2D(64, (3, 3), activation="relu", name="conv_4",
                  strides=(1, 1)))
model.add(
    Convolution2D(64, (3, 3), activation="relu", name="conv_5",
                  strides=(1, 1)))

model.add(Flatten())

model.add(Dense(1164))
model.add(Dropout(.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(50, activation='relu'))
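
The snippet is truncated here; a plausible completion, following the NVIDIA head used in the other examples in this collection:

model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')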
Code example #16
def run():

    driving_log = pd.read_csv(data_path,
                              names=('Center Image', 'Left Image',
                                     'Right Image', 'Steering Angle',
                                     'Throttle', 'Brake', 'Speed'))

    image_names_full = []
    y_data_full = []

    for index, row in driving_log.iterrows():
        center_img = row['Center Image']
        left_img = row['Left Image'].strip()
        right_img = row['Right Image'].strip()
        steering_angle = row['Steering Angle']

        image_names_full.append(center_img)
        y_data_full.append(steering_angle)

        left = steering_angle + angle_correction
        right = steering_angle - angle_correction

        image_names_full.append(left_img)
        y_data_full.append(left)

        image_names_full.append(right_img)
        y_data_full.append(right)
    image_names_full, y_data_full = np.array(image_names_full), np.array(
        y_data_full)

    print('CSV loaded')

    #split data
    X_train, X_val, y_train, y_val = train_test_split(image_names_full,
                                                      y_data_full,
                                                      test_size=0.2)

    #model
    model = Sequential()
    model.add(
        Cropping2D(cropping=((60, 20), (0, 0)), input_shape=(160, 320, 3)))
    model.add(Lambda(resize))
    model.add(BatchNormalization(axis=1))

    model.add(Convolution2D(24, 5, 5, border_mode='same', activation='elu'))
    model.add(MaxPooling2D(border_mode='same'))
    model.add(SpatialDropout2D(0.2))

    model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu'))
    model.add(MaxPooling2D(border_mode='same'))
    model.add(SpatialDropout2D(0.2))

    model.add(Convolution2D(48, 5, 5, border_mode='same', activation='elu'))
    model.add(MaxPooling2D(border_mode='same'))
    model.add(SpatialDropout2D(0.2))

    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='elu'))
    model.add(MaxPooling2D(border_mode='same'))
    model.add(SpatialDropout2D(0.2))

    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='elu'))
    model.add(MaxPooling2D(border_mode='same'))
    model.add(SpatialDropout2D(0.2))

    model.add(Flatten())

    # Fully connected layers
    model.add(Dense(100, activation='elu', W_regularizer=l2(1e-6)))
    model.add(Dense(50, activation='elu', W_regularizer=l2(1e-6)))
    model.add(Dense(10, activation='elu', W_regularizer=l2(1e-6)))
    model.add(Dense(1))

    #summary
    model.summary()

    #training
    print('Start training')
    model.compile(optimizer='adam', loss='mse')
    datagen = MyDataGenerator()
    history = model.fit_generator(datagen.flow(X_train,
                                               y_train,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               flip_prob=flip_prob),
                                  samples_per_epoch=len(y_train),
                                  nb_epoch=nb_epoch,
                                  validation_data=datagen.flow(
                                      X_val,
                                      y_val,
                                      batch_size=batch_size,
                                      shuffle=True),
                                  nb_val_samples=len(y_val))

    #save model
    print('Save model')
    with open('model.json', 'w') as f:
        json.dump(model.to_json(), f)
    model.save_weights('model.h5')
Code example #17
File: nvidia.py (project: sohailkhanmarwat/DLToolkit)
    def build_model(img_width, img_height, img_channels, num_classes):
        # Set the input shape
        if K.image_data_format() == "channels_first":
            input_shape = (img_channels, img_height, img_width)
        else:
            input_shape = (img_height, img_width, img_channels)

        # Create the model pipeline, including image preprocessing (avoids having to change drive_train.py)
        model = Sequential([

            # Resize and normalize the image
            Lambda(NVIDIA_NN.resize),
            Lambda(NVIDIA_NN.normalize),

            # Conv1
            Conv2D(24,
                   5,
                   5,
                   border_mode='valid',
                   activation='elu',
                   subsample=(2, 2),
                   init="he_normal"),
            SpatialDropout2D(0.2),

            # Conv2
            Conv2D(36,
                   5,
                   5,
                   border_mode='valid',
                   activation='elu',
                   subsample=(2, 2),
                   init="he_normal"),
            SpatialDropout2D(0.2),

            # Conv3
            Conv2D(48,
                   5,
                   5,
                   border_mode='valid',
                   activation='elu',
                   subsample=(2, 2),
                   init="he_normal"),
            SpatialDropout2D(0.2),

            # Conv4
            Conv2D(64,
                   3,
                   3,
                   border_mode='valid',
                   activation='elu',
                   init="he_normal"),
            SpatialDropout2D(0.2),

            # Conv5
            Conv2D(64,
                   3,
                   3,
                   border_mode='valid',
                   activation='elu',
                   init="he_normal"),
            SpatialDropout2D(0.2),

            # FC1
            Flatten(),
            Dense(100, activation='elu', init="he_normal"),
            Dropout(0.5),

            # FC2
            Dense(50, activation='elu', init="he_normal"),

            # FC3
            Dense(10, activation='elu', init="he_normal"),
            Dropout(0.5),

            # Final layer
            Dense(1)
        ])

        return model
Code example #18
def unet_model(trainable=True, embedding=False):
    print "creating model trainable=", trainable, " embedding", embedding
    inputs = Input((512, 512, 1))
    conv1 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(inputs)
    #conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(pool1)
    #conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(256,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(pool2)
    #conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(256,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(512,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(pool3)
    #conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(512,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(1024,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(pool4)
    #conv5 = Dropout(0.2)(conv5)

    if embedding:
        conv5 = Convolution2D(64,
                              1,
                              1,
                              name="embedding_64d",
                              activation='relu',
                              border_mode='same')(conv5)
        conv5 = Convolution2D(1024,
                              1,
                              1,
                              name="embedding_1024u",
                              activation='relu',
                              border_mode='same')(conv5)

    conv5 = Convolution2D(1024,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv5)

    bridge4 = SpatialDropout2D(0.2)(conv4)
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), bridge4], mode='concat')
    conv6 = Convolution2D(512,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(up6)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(512,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv6)

    bridge3 = SpatialDropout2D(0.2)(conv3)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), bridge3], mode='concat')
    conv7 = Convolution2D(256,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(up7)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(256,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv7)

    bridge2 = SpatialDropout2D(0.2)(conv2)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), bridge2], mode='concat')
    conv8 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(up8)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv8)

    bridge1 = SpatialDropout2D(0.2)(conv1)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), bridge1], mode='concat')
    conv9 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(up9)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          trainable=trainable)(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    predictions = Reshape(target_shape=(262144, 1))(conv10)

    model = Model(input=inputs, output=predictions)
    model.summary()
    #optimizer = SGD(lr=0.00001, momentum=0.985, decay=0.0, nesterov=True)
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=[
                      dice_coef, 'binary_accuracy', 'precision', 'recall',
                      'mean_squared_error'
                  ],
                  sample_weight_mode='temporal')

    return model
Code example #19
    assert len(input_shape) == 3

    try:
        from keras.layers.core import SpatialDropout2D
    except ImportError:
        from keras import __version__ as __kv__
        from warnings import warn
        warn('no SpatialDropout2D layer in keras version: %s' % __kv__)
        SpatialDropout2D = Dropout

    # need to set the input_shape to first layer for a new model
    model = Sequential()
    model.add(Convolution2D(16, (3, 3), padding='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    model.add(Convolution2D(24, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    model.add(Convolution2D(30, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    model.add(Convolution2D(36, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))
Code example #20
def bottleneck(inp,
               output,
               internal_scale=4,
               asymmetric=0,
               dilated=0,
               downsample=False,
               dropout_rate=0.1):
    internal = output // internal_scale
    encoder = inp
    # 1x1
    # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    input_stride = 2 if downsample else 1
    encoder = Conv2D(
        internal,
        (input_stride, input_stride),
        # padding='same',
        strides=(input_stride, input_stride),
        use_bias=False,
        kernel_initializer='he_normal')(encoder)
    # Batch normalization + PReLU
    # enet uses momentum of 0.1, keras default is 0.99
    encoder = BatchNormalization(momentum=0.1)(encoder)
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3),
                         padding='same',
                         kernel_initializer='he_normal')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric),
                         padding='same',
                         use_bias=False,
                         kernel_initializer='he_normal')(encoder)
        encoder = Conv2D(internal, (asymmetric, 1),
                         padding='same',
                         kernel_initializer='he_normal')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3),
                         dilation_rate=(dilated, dilated),
                         padding='same',
                         kernel_initializer='he_normal')(encoder)
    else:
        raise Exception('You shouldn\'t be here')

    # enet uses momentum of 0.1, keras default is 0.99
    encoder = BatchNormalization(momentum=0.1)(encoder)
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1),
                     use_bias=False,
                     kernel_initializer='he_normal')(encoder)

    # enet uses momentum of 0.1, keras default is 0.99
    encoder = BatchNormalization(momentum=0.1)(encoder)
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

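        # Permute so the channel axis sits on a spatial position: ZeroPadding2D
        # can then zero-pad the extra feature maps before permuting back.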
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    return encoder
Code example #21
def UNET(channels,
         classes,
         initial_features=32,
         num_layers=5,
         loss="binary_crossentropy",
         optimizer=Adam(),
         metrics=[jaccard_coef, dice_coef],
         batch_normalization=False,
         dropout_type=0,
         dropout_p=1.0):
    """
    This UNET implements the standard model
    :param classes: Number of classes used
    :param initial_features: Number of features used initially for the convolutions
    :param num_layers: Number of down and up sampling blocks
    :param loss: The type of loss used
    :param optimizer: The optimizer used for the gradient descent
    :param metrics: The metrics used to assess the performance
    :param channels: Number of channels of the input image
    :return: Returns a UNET model
    """
    # Initialize a list which will store some of the layers in the model
    layers = []
    layers.append(Input((None, None, channels)))
    # DOWNSAMPLING
    for i in range(num_layers):

        # LAYER BLOCK
        if i != (num_layers - 1):
            # Define two convolutions and a max pooling
            layers.append(
                Conv2D(initial_features * (2**i), (3, 3),
                       activation='relu',
                       padding='same')(layers[-1]))
            # Add batch normalization after convolution
            if batch_normalization:
                layers[-1] = BatchNormalization(axis=-1)(layers[-1])
            layers[-1] = Conv2D(initial_features * (2**i), (3, 3),
                                activation='relu',
                                padding='same')(layers[-1])
            # BATCH NORMALIZATION
            if batch_normalization:
                layers[-1] = BatchNormalization(axis=-1)(layers[-1])
            layers.append(MaxPooling2D(pool_size=(2, 2))(layers[-1]))

        # FINAL LAYER
        else:
            # Define just two convolutions for the final layer
            layers.append(
                Conv2D(initial_features * (2**i), (3, 3),
                       activation='relu',
                       padding='same')(layers[-1]))
            # BATCH NORMALIZATION
            if batch_normalization:
                layers[-1] = BatchNormalization(axis=-1)(layers[-1])
            layers[-1] = Conv2D(initial_features * (2**i), (3, 3),
                                activation='relu',
                                padding='same')(layers[-1])
            # BATCH NORMALIZATION
            if batch_normalization:
                layers[-1] = BatchNormalization(axis=-1)(layers[-1])
            # DROPOUT TYPE 1
            if dropout_type == 1:
                layers[-1] = SpatialDropout2D(dropout_p)(layers[-1])

    # UPSAMPLING
    for j in range(num_layers - 1):
        # Concatenate with the appropriate output of a previous layer and the upsample
        layers.append(
            concatenate([
                UpSampling2D(size=(2, 2))(layers[-1]), layers[2 * num_layers -
                                                              (2 * j) - 3]
            ],
                        axis=-1))
        # DROPOUT TYPE 2
        if dropout_type == 2:
            layers[-1] = SpatialDropout2D(dropout_p)(layers[-1])
        # Add two extra convolutions
        layers.append(
            Conv2D(initial_features * 2**(num_layers - 2 - j), (3, 3),
                   activation='relu',
                   padding='same')(layers[-1]))
        # BATCH NORMALIZATION
        if batch_normalization:
            layers[-1] = BatchNormalization(axis=-1)(layers[-1])
        layers[-1] = Conv2D(initial_features * 2**(num_layers - 2 - j), (3, 3),
                            activation='relu',
                            padding='same')(layers[-1])
        # BATCH NORMALIZATION
        if batch_normalization:
            layers[-1] = BatchNormalization(axis=-1)(layers[-1])

    # Add the final sigmoid output
    layers.append(
        Conv2D(classes - 1, (1, 1), activation='sigmoid')(layers[-1]))

    # Compile the model
    print(layers)
    model = Model(inputs=layers[0], outputs=layers[-1])
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
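
A hypothetical usage sketch (because the output layer has `classes - 1` filters, `classes=2` yields a single sigmoid mask channel; the hyperparameters below are illustrative):

model = UNET(channels=3, classes=2, batch_normalization=True,
             dropout_type=2, dropout_p=0.25)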
Code example #22
def nvidia_model():

    # Create the Sequential model
    model = Sequential()

    # Pixels to crop from the (top, bottom) and (left, right) of the image
    model.add(Cropping2D(cropping=((13, 0), (9, 0)),
                         input_shape=(3, 160, 320)))
    model.add(Lambda(resize))

    #model.add(Lambda(normalize), input_shape=(3, img_width, img_height))
    model.add(Lambda(normalize))

    model.add(
        Convolution2D(24,
                      5,
                      5,
                      activation='elu',
                      border_mode='valid',
                      subsample=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    model.add(
        Convolution2D(36,
                      5,
                      5,
                      activation='elu',
                      border_mode='valid',
                      subsample=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    model.add(
        Convolution2D(48,
                      5,
                      5,
                      activation='elu',
                      border_mode='valid',
                      subsample=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    model.add(
        Convolution2D(64,
                      3,
                      3,
                      activation='elu',
                      border_mode='valid',
                      subsample=(1, 1)))
    model.add(SpatialDropout2D(0.2))

    model.add(
        Convolution2D(64,
                      3,
                      3,
                      activation='elu',
                      border_mode='valid',
                      subsample=(1, 1)))

    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Activation('elu'))

    model.add(Dense(100, activation='elu'))
    model.add(Dropout(0.5))

    model.add(Dense(50, activation='elu'))
    model.add(Dropout(0.5))

    model.add(Dense(10, activation='elu'))
    model.add(Dropout(0.5))

    model.add(Dense(1, name='y_pred'))

    model.compile('adam', 'mean_squared_error')

    return model
Code example #23
def model_init(input_shape, **kwargs):
    from keras import backend as _backend
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation, Flatten, Dropout
    from keras.layers import SeparableConv2D as Convolution2D
    from keras.layers import MaxPooling2D, GlobalAveragePooling2D
    from keras.layers.normalization import BatchNormalization
    try:
        from keras.layers.core import SpatialDropout2D
    except ImportError:
        from keras import __version__ as __kv__
        from warnings import warn
        warn('no SpatialDropout2D layer in keras version: %s' % __kv__)
        SpatialDropout2D = Dropout

    assert len(input_shape) == 3

    model = Sequential()
    model.add(Convolution2D(64, (3, 3), padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    model.add(Convolution2D(128, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Convolution2D(128, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    model.add(Convolution2D(256, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Convolution2D(256, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.3))

    model.add(Convolution2D(384, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Convolution2D(384, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.4))

    model.add(Convolution2D(512, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Convolution2D(512, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.5))

    model.add(GlobalAveragePooling2D())
    model.add(Dense(2048))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    return dict(model=model, lr_mult=1.0)
Code example #24
######################
# Layer-2 convolution
######################
model.add(Convolution2D(12, 5, 5, border_mode='valid', activation=None))
model.add(LeakyReLU(alpha=0.15))

model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='valid'))

######################
# Layer-3 convolution
######################
model.add(Convolution2D(16, 3, 3, border_mode='valid', activation=None))
model.add(LeakyReLU(alpha=0.15))

# Add spatial dropout layer instead of max pooling to prevent overfitting
model.add(SpatialDropout2D(p=0.2))
#model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='valid'))

#################################################
# Use either globalpooling layer or flatten layer
#model.add(GlobalAveragePooling2D())
model.add(Flatten())

#########################
# Fully connected layers
#########################

model.add(Dense(1500))

model.add(Dense(300))
Code example #25
def LinkNet(input_shape=(256, 256, 3),
            classes=1,
            dropout=0.5,
            feature_scale=4,
            pretrained_weights=None,
            skipConnectionConv1=False):
    """
    Architecture is similar to linknet, but there are a lot of enhancements
     - initial block is changed to strides 1, 2*32 convs to better handle borders.
     - additional encoder block with filter size=512 and corresponding decoder block.
     - ability to use subpixel deconvolution ( not optimized may lead to artifacts)
     - ability to have skip connection from conv1
     - ability to use pretrained RGB weights for network with more channels, additional channels are initialised with zeros the same way as in Deep Image Matting paper

    Overall this approach helps to better deal with very small objects and boundaries between them.

    The network has around 20m parameters in default configuration, for a small dataset like Urban3D it is better to use dropout rate = 0.5
    As SpatialDropout2D (aka feature map dropout) is used instead of ordinary Dropout it improves semantic segmetation performance and helps to reduce overfitting

    See:
        LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation (https://arxiv.org/abs/1707.03718)
        Understanding Convolution for Semantic Segmentation https://arxiv.org/abs/1702.08502
        Deep Image Matting https://arxiv.org/abs/1703.03872
    """
    layers = [2, 2, 2, 2, 2]
    filters = [64, 128, 256, 512, 512]

    inputs = Input(shape=input_shape)
    if pretrained_weights:
        print("Using pretrained weights {}".format(pretrained_weights))
    if pretrained_weights and input_shape[-1] > 3:
        x = conv_bn_relu(inputs, 32, 3, stride=1, name="block1_conv1_changed")
    else:
        x = conv_bn_relu(inputs, 32, 3, stride=1, name="block1_conv1")
    x = conv_bn_relu(x, 32, 3, stride=1, name="block1_conv2")
    conv1 = x
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding="same",
                     name="block1_pool")(x)

    enc1 = encoder(x,
                   m=32,
                   n=filters[0],
                   blocks=layers[0],
                   stride=1,
                   name='encoder1')
    enc2 = encoder(enc1,
                   m=filters[0],
                   n=filters[1],
                   blocks=layers[1],
                   stride=2,
                   name='encoder2')
    enc3 = encoder(enc2,
                   m=filters[1],
                   n=filters[2],
                   blocks=layers[2],
                   stride=2,
                   name='encoder3')
    enc4 = encoder(enc3,
                   m=filters[2],
                   n=filters[3],
                   blocks=layers[3],
                   stride=2,
                   name='encoder4')
    enc5 = encoder(enc4,
                   m=filters[3],
                   n=filters[4],
                   blocks=layers[4],
                   stride=2,
                   name='encoder5')

    x = linknet_decoder(conv1, enc1, enc2, enc3, enc4, enc5, filters,
                        feature_scale)
    x = SpatialDropout2D(dropout)(x)
    x = Conv2D(filters=classes,
               kernel_size=(1, 1),
               padding="same",
               name="prediction")(x)
    x = Activation("sigmoid", name="mask")(x)

    model = Model(inputs=inputs, outputs=x)
    if pretrained_weights:
        print("Loading pretrained weights {}".format(pretrained_weights))
        model.load_weights(pretrained_weights, by_name=True)

    if pretrained_weights and input_shape[-1] > 3:
        conv1_weights = np.zeros((3, 3, input_shape[-1], 32), dtype="float32")
        three_channels_net = LinkNet(input_shape=(224, 224, 3))
        three_channels_net.load_weights(pretrained_weights, by_name=True)
        conv1_weights[:, :, :3, :] = three_channels_net.get_layer(
            "block1_conv1_conv").get_weights()[0][:, :, :, :]
        bias = three_channels_net.get_layer(
            "block1_conv1_conv").get_weights()[1]
        model.get_layer('block1_conv1_changed_conv').set_weights(
            (conv1_weights, bias))
        model.get_layer('block1_conv1_changed_conv').name = 'block1_conv1_conv'

    return model
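
`conv_bn_relu`, `encoder`, and `linknet_decoder` are project helpers that are not shown. A sketch of `conv_bn_relu` consistent with the "_conv" / "_bn" layer-name suffixes referenced by the weight-loading code above; the rest of the body is an assumption.

from keras.layers import Conv2D, BatchNormalization, Activation

def conv_bn_relu(x, filters, kernel_size, stride=1, name=None):
    # Assumed implementation: conv -> batch norm -> ReLU with suffixed names.
    x = Conv2D(filters, (kernel_size, kernel_size), strides=(stride, stride),
               padding="same", name=name + "_conv")(x)
    x = BatchNormalization(name=name + "_bn")(x)
    return Activation("relu", name=name + "_relu")(x)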
Code example #26
def create_convnet_nvidia():
    '''
    Create a convnet using the network architecture documented in NVIDIA's paper
    '''

    # Create the model pipeline, including image preprocessing (avoids having to change drive.py)
    model = Sequential([

        # Crop the area above the horizon, resize and normalize the image
        Cropping2D(cropping=((22, 0), (0, 0)), input_shape=(160, 320, 3)),
        Lambda(resize),
        Lambda(normalize),

        # Conv1
        Convolution2D(24,
                      5,
                      5,
                      border_mode='valid',
                      activation='elu',
                      subsample=(2, 2),
                      init="he_normal"),
        SpatialDropout2D(0.2),

        # Conv2
        Convolution2D(36,
                      5,
                      5,
                      border_mode='valid',
                      activation='elu',
                      subsample=(2, 2),
                      init="he_normal"),
        SpatialDropout2D(0.2),

        # Conv3
        Convolution2D(48,
                      5,
                      5,
                      border_mode='valid',
                      activation='elu',
                      subsample=(2, 2),
                      init="he_normal"),
        SpatialDropout2D(0.2),

        # Conv4
        Convolution2D(64,
                      3,
                      3,
                      border_mode='valid',
                      activation='elu',
                      init="he_normal"),
        SpatialDropout2D(0.2),

        # Conv5
        Convolution2D(64,
                      3,
                      3,
                      border_mode='valid',
                      activation='elu',
                      init="he_normal"),
        SpatialDropout2D(0.2),

        # FC1
        Flatten(),
        Dense(100, activation='elu', init="he_normal"),
        Dropout(0.5),

        # FC2
        Dense(50, activation='elu', init="he_normal"),

        # FC3
        Dense(10, activation='elu', init="he_normal"),
        Dropout(0.5),

        # Final layer
        Dense(1)
    ])

    model.summary()
    model.compile(optimizer=Adam(lr=INITIAL_LR), loss="mse")

    return model
Code example #27
def WideResidualNetwork(depth=28,
                        width=8,
                        dropout_rate=0.0,
                        include_top=True,
                        weights='cifar10',
                        input_tensor=None,
                        input_shape=None,
                        classes=10,
                        activation='softmax',
                        weight_decay=0.0):
    """Instantiate the Wide Residual Network architecture,
        optionally loading weights pre-trained
        on CIFAR-10. Note that when using TensorFlow,
        for best performance you should set
        `image_dim_ordering="tf"` in your Keras config
        at ~/.keras/keras.json.

        The model and the weights are compatible with both
        TensorFlow and Theano. The dimension ordering
        convention used by the model is the one
        specified in your Keras config file.

        # Arguments
            depth: number of layers in the wide residual network
            width: multiplier to the ResNet width (number of filters)
            dropout_rate: dropout rate
            include_top: whether to include the fully-connected
                layer at the top of the network.
            weights: one of `None` (random initialization) or
                "cifar10" (pre-training on CIFAR-10)..
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(32, 32, 3)` (with `tf` dim ordering)
                or `(3, 32, 32)` (with `th` dim ordering)).
                It should have exactly 3 inputs channels,
                and width and height should be no smaller than 8.
                E.g. `(200, 200, 3)` would be one valid value.
            classes: optional number of classes to classify images
                into, only to be specified if `include_top` is True, and
                if no `weights` argument is specified.

        # Returns
            A Keras model instance.
        """

    if weights not in {'cifar10', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `cifar10` '
                         '(pre-training on CIFAR-10).')

    if weights == 'cifar10' and include_top and classes != 10:
        raise ValueError('If using `weights` as CIFAR 10 with `include_top`'
                         ' as true, `classes` should be 10')

    if (depth - 4) % 6 != 0:
        raise ValueError('Depth of the network must be such that '
                         '(depth - 4) is divisible by 6.')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=8,
                                      data_format=K.image_dim_ordering(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    img_input2 = SpatialDropout2D(rate=0.05)(img_input)
    x = __create_wide_residual_network(classes, img_input2, include_top, depth,
                                       width, dropout_rate, activation,
                                       weight_decay)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='wide-resnet')

    # load weights
    if weights == 'cifar10':
        if (depth == 28) and (width == 8) and (dropout_rate == 0.0):
            # Default parameters match. Weights for this model exist:

            if K.image_dim_ordering() == 'th':
                if include_top:
                    h5_file = 'wide_resnet_28_8_th_dim_ordering_th_kernels.h5'
                    weights_path = get_file(h5_file,
                                            TH_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    h5_file = 'wide_resnet_28_8_th_dim_ordering_th_kernels_no_top.h5'
                    weights_path = get_file(h5_file,
                                            TH_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'tensorflow':
                    warnings.warn(
                        'You are using the TensorFlow backend, yet you '
                        'are using the Theano '
                        'image dimension ordering convention '
                        '(`image_dim_ordering="th"`). '
                        'For best performance, set '
                        '`image_dim_ordering="tf"` in '
                        'your Keras config '
                        'at ~/.keras/keras.json.')
                    convert_all_kernels_in_model(model)
            else:
                if include_top:
                    h5_file = 'wide_resnet_28_8_tf_dim_ordering_tf_kernels.h5'
                    weights_path = get_file(h5_file,
                                            TF_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    h5_file = 'wide_resnet_28_8_tf_dim_ordering_tf_kernels_no_top.h5'
                    weights_path = get_file(h5_file,
                                            TF_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'theano':
                    convert_all_kernels_in_model(model)

    return model
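
A hypothetical usage sketch: a randomly initialised WRN-28-8 for a non-CIFAR class count (the hyperparameters below are illustrative):

model = WideResidualNetwork(depth=28, width=8, weights=None,
                            input_shape=(32, 32, 3), classes=100)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])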
Code example #28
def model_init(input_shape, **kwargs):
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation, Flatten, Dropout
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization
    from keras.backend import set_image_data_format

    assert (len(input_shape) == 3 and input_shape[2] == 3)
    set_image_data_format('channels_last')

    try:
        from keras.layers.core import SpatialDropout2D
    except ImportError:
        from keras import __version__ as __kv__
        from warnings import warn
        warn('no SpatialDropout2D layer in keras version: %s' % __kv__)
        SpatialDropout2D = Dropout

    # need to set the input_shape to first layer for a new model
    model = Sequential()
    model.add(
        Convolution2D(32, (3, 3), padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    # 2
    model.add(Convolution2D(48, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    # 3
    model.add(Convolution2D(64, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    # 4
    model.add(Convolution2D(128, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    # 5
    model.add(Convolution2D(164, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.3))

    # 6
    model.add(Convolution2D(172, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.3))

    # 7
    model.add(Convolution2D(196, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.4))

    # 8
    model.add(Convolution2D(224, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.4))

    # 9
    model.add(Convolution2D(248, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.5))

    # 10
    model.add(Convolution2D(296, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.5))

    model.add(Flatten())
    model.add(Dense(2048))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    return dict(model=model, lr_mult=1.0)