def create_unet_vgg16(C=3):
    """Build a U-Net segmentation model whose encoder is VGG16.

    The contracting path reproduces VGG16's five conv blocks with the
    original VGG16 layer names, so the pretrained ImageNet "no-top" weights
    can be loaded into it. The expanding path mirrors the encoder with
    Conv2DTranspose upsampling plus skip connections into the matching
    encoder activations.

    Parameters:
        C - number of input channels (default 3; the pretrained VGG16
            weights expect a 3-channel input)

    Returns:
        An uncompiled Keras Model mapping (H, W, C) images to a
        single-channel sigmoid mask at the input resolution.
    """
    # Fully convolutional: spatial dimensions are left unspecified.
    inputs = Input(shape=(None, None, C))
    # Scale raw pixels to [0, 1].
    # NOTE(review): VGG16's ImageNet weights were trained with BGR
    # mean-subtraction preprocessing, not /255 scaling — confirm intended.
    s = Lambda(lambda x: x / 255.0)(inputs)

    # Block 1 down
    c1 = Conv2D(64, (3, 3),
                activation='relu',
                padding='same',
                name='block1_conv1')(s)
    c1 = Conv2D(64, (3, 3),
                activation='relu',
                padding='same',
                name='block1_conv2')(c1)
    p1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(c1)

    # Block 2 down
    c2 = Conv2D(128, (3, 3),
                activation='relu',
                padding='same',
                name='block2_conv1')(p1)
    c2 = Conv2D(128, (3, 3),
                activation='relu',
                padding='same',
                name='block2_conv2')(c2)
    p2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(c2)

    # Block 3 down
    c3 = Conv2D(256, (3, 3),
                activation='relu',
                padding='same',
                name='block3_conv1')(p2)
    c3 = Conv2D(256, (3, 3),
                activation='relu',
                padding='same',
                name='block3_conv2')(c3)
    c3 = Conv2D(256, (3, 3),
                activation='relu',
                padding='same',
                name='block3_conv3')(c3)
    p3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(c3)

    # Block 4 down
    c4 = Conv2D(512, (3, 3),
                activation='relu',
                padding='same',
                name='block4_conv1')(p3)
    c4 = Conv2D(512, (3, 3),
                activation='relu',
                padding='same',
                name='block4_conv2')(c4)
    c4 = Conv2D(512, (3, 3),
                activation='relu',
                padding='same',
                name='block4_conv3')(c4)
    p4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(c4)

    # Block 5 down
    c5 = Conv2D(512, (3, 3),
                activation='relu',
                padding='same',
                name='block5_conv1')(p4)
    c5 = Conv2D(512, (3, 3),
                activation='relu',
                padding='same',
                name='block5_conv2')(c5)
    c5 = Conv2D(512, (3, 3),
                activation='relu',
                padding='same',
                name='block5_conv3')(c5)
    p5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(c5)

    # Encoder-only model, built solely so that the pretrained weights can
    # be loaded by matching layer names; its layers are then reused (by
    # position) as skip connections for the decoder below.
    model_vgg = Model(inputs=[inputs], outputs=[p5])
    weights_path = get_file(
        'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
        WEIGHTS_PATH_NO_TOP,  # URL constant defined elsewhere in this module
        cache_subdir='models')
    model_vgg.load_weights(weights_path)

    # Bottom of the U block
    # model_vgg.layers[-1] is block5_pool (p5).
    c6 = Conv2D(512, (3, 3), activation='relu',
                padding='same')(model_vgg.layers[-1].output)
    c6 = Conv2D(512, (3, 3), activation='relu', padding='same')(c6)

    # Block 5 up
    # NOTE: the skips below index layers by position from the end —
    # fragile if any layer is added/removed above.
    u6 = Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same')(c6)
    u6 = concatenate([u6, model_vgg.layers[-2].output])  # -2 == block5_conv3
    c7 = Conv2D(512, (3, 3), activation='relu', padding='same')(u6)
    c7 = Conv2D(512, (3, 3), activation='relu', padding='same')(c7)
    c7 = Conv2D(512, (3, 3), activation='relu', padding='same')(c7)

    # Block 4 up
    u7 = Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same')(c7)
    u7 = concatenate([u7, model_vgg.layers[-6].output])  # -6 == block4_conv3
    c8 = Conv2D(512, (3, 3), activation='relu', padding='same')(u7)
    c8 = Conv2D(512, (3, 3), activation='relu', padding='same')(c8)
    c8 = Conv2D(512, (3, 3), activation='relu', padding='same')(c8)

    # Block 3 up
    u8 = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same')(c8)
    u8 = concatenate([u8, model_vgg.layers[-10].output])  # -10 == block3_conv3
    c9 = Conv2D(256, (3, 3), activation='relu', padding='same')(u8)
    c9 = Conv2D(256, (3, 3), activation='relu', padding='same')(c9)
    c9 = Conv2D(256, (3, 3), activation='relu', padding='same')(c9)

    # Block 2 up
    u9 = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same')(c9)
    u9 = concatenate([u9, model_vgg.layers[-14].output])  # -14 == block2_conv2
    c10 = Conv2D(128, (3, 3), activation='relu', padding='same')(u9)
    c10 = Conv2D(128, (3, 3), activation='relu', padding='same')(c10)

    # Block 1 up
    u10 = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same')(c10)
    u10 = concatenate([u10, model_vgg.layers[-17].output])  # -17 == block1_conv2
    c11 = Conv2D(64, (3, 3), activation='relu', padding='same')(u10)
    c11 = Conv2D(64, (3, 3), activation='relu', padding='same')(c11)

    # Single-channel sigmoid mask head.
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c11)

    model = Model(inputs=[inputs], outputs=[outputs])
    return model
def conv_4_fc_3_more_filters(dropout=None):

    """This network has four convolution layers and three fully connected layers and a lot more
    filters than the conv_4_fc_3 model.

    Parameters:
        dropout - optional list of dropout rates; only the first two values
            are used (after fully connected layers fc5 and fc6). A single
            value is applied to both layers; None or an empty list disables
            dropout (rate 0.0).

    Returns:
        A model"""

    # Normalise the dropout argument. Using None as the default avoids the
    # shared-mutable-default pitfall, and the truthiness test replaces the
    # original `dropout == None or len(dropout) == 0` comparison.
    if not dropout:
        dropout = [0.0, 0.0]
    elif len(dropout) == 1:
        # Duplicate the single rate for both dropout layers (copy, so the
        # caller's list is never aliased).
        dropout = list(dropout) * 2

    # this hack gets the current function name and sets it to the name of the model
    model = Sequential(name=traceback.extract_stack(None, 2)[-1][2])

    # crop top 56 rows and bottom 24 rows from the images
    model.add(Cropping2D(cropping=((56, 24), (0, 0)), input_shape=(160, 320, 3), name='pp_crop'))

    # mean center the pixels
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, name='pp_center'))

    # layer 1: convolution + max pooling. Input 80x320x3. Output 40x160x32
    model.add(Convolution2D(32, 5, 5, border_mode='same', name='conv1'))
    model.add(MaxPooling2D((2, 2), name='pool1'))
    model.add(Activation('relu', name='act1'))

    # layer 2: convolution + max pooling. Input 40x160x32. Output 20x80x64
    model.add(Convolution2D(64, 5, 5, border_mode='same', name='conv2'))
    model.add(MaxPooling2D((2, 2), name='pool2'))
    model.add(Activation('relu', name='act2'))

    # layer 3: convolution + max pooling. Input 20x80x64. Output 10x40x128
    model.add(Convolution2D(128, 3, 3, border_mode='same', name='conv3'))
    model.add(MaxPooling2D((2, 2), name='pool3'))
    model.add(Activation('relu', name='act3'))

    # layer 4: convolution + max pooling. Input 10x40x128. Output 5x20x128
    model.add(Convolution2D(128, 3, 3, border_mode='same', name='conv4'))
    model.add(MaxPooling2D((2, 2), name='pool4'))
    model.add(Activation('relu', name='act4'))

    # flatten: Input 5x20x128. Output 12800
    model.add(Flatten(name='flat'))

    # layer 5: fully connected + dropout. Input 12800. Output 556
    model.add(Dense(556, name='fc5'))
    model.add(Dropout(dropout[0], name='drop5'))
    model.add(Activation('relu', name='act5'))

    # layer 6: fully connected + dropout. Input 556. Output 24
    model.add(Dense(24, name='fc6'))
    model.add(Dropout(dropout[1], name='drop6'))
    model.add(Activation('relu', name='act6'))

    # layer 7: fully connected. Input 24. Output 1.
    model.add(Dense(1, name='out'))

    return model
Beispiel #3
0

# Build U-Net model
# NOTE(review): IMG_HEIGHT / IMG_WIDTH / IMG_CHANNELS and the Keras layer
# classes are expected to be defined/imported elsewhere in this script.
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
# Scale raw pixels to [0, 1] inside the graph.
s = Lambda(lambda x: x / 255)(inputs)

# Encoder level 1: two 3x3 ELU convolutions (16 filters) with light dropout,
# then 2x2 max pooling to halve the spatial resolution.
c1 = Conv2D(16, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(s)
c1 = Dropout(0.1)(c1)
c1 = Conv2D(16, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)

# Encoder level 2: same pattern with 32 filters.
c2 = Conv2D(32, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(p1)
c2 = Dropout(0.1)(c2)
c2 = Conv2D(32, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)

c3 = Conv2D(64, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
### 1 CNN part
# Normalize pixels from [0, 255] to [-1, 1].
# NOTE(review): `model` and `imshape` are defined elsewhere in this script.
model.add(Lambda(lambda x: x / 127.5 - 1, input_shape=imshape))


def converter(im):
    """Map an RGB image tensor to single-channel grayscale."""
    # Import locally so the TF dependency stays inside the function that the
    # Lambda layer wraps.
    from tensorflow import image as tf_image
    return tf_image.rgb_to_grayscale(im)


# Collapse RGB to grayscale; imshape2 is the resulting single-channel shape.
model.add(Lambda(converter, output_shape=imshape2))
# Crop 70 rows off the top and 25 off the bottom — presumably removing sky
# and vehicle hood for a driving task; confirm against the data source.
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
# Conv stage 1: 5x5 conv -> batch norm -> 2x2 max pool -> dropout.
model.add(
    Conv2D(filters=16, kernel_size=(5, 5), activation='relu', padding='same'))
#model.add(Conv2D(filters=16,kernel_size=(3,3),activation='relu',padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(Dropout(0.5))
# Conv stage 2: 32 filters, same pattern.
model.add(
    Conv2D(filters=32, kernel_size=(5, 5), activation='relu', padding='same'))
#model.add(Conv2D(filters=32,kernel_size=(3,3),activation='relu',padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(Dropout(0.5))
# Conv stage 3: 64 filters, 3x3 kernels.
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(Dropout(0.5))
# Conv stage 4: a second 64-filter 3x3 conv (fragment continues elsewhere).
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
def densenet161_model(img_rows,
                      img_cols,
                      color_type=1,
                      nb_dense_block=4,
                      growth_rate=48,
                      nb_filter=96,
                      reduction=0.5,
                      dropout_rate=0.0,
                      weight_decay=1e-4,
                      num_classes=None):
    '''
    DenseNet 161 Model for Keras
    Model Schema is based on 
    https://github.com/flyyufelix/DenseNet-Keras
    ImageNet Pretrained Weights 
    Theano: https://drive.google.com/open?id=0Byy2AcGyEVxfVnlCMlBGTDR3RGs
    TensorFlow: https://drive.google.com/open?id=0Byy2AcGyEVxfUDZwVjU2cFNidTA
    # Arguments
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters
        reduction: reduction factor of transition blocks.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        classes: optional number of classes to classify images
        weights_path: path to pre-trained weights
    # Returns
        A Keras model instance (sigmoid head with num_classes outputs).
    '''
    # Epsilon used by every BatchNormalization layer in this port.
    eps = 1.1e-5

    # compute compression factor
    compression = 1.0 - reduction

    # Handle Dimension Ordering for different backends
    # NOTE(review): concat_axis is a module-level global — presumably read by
    # the dense_block/transition_block helpers defined elsewhere; verify.
    # Also img_rows/img_cols/color_type are accepted but ignored: the input
    # is hard-coded to 224x224x3 (or 3x224x224) below.
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(224, 224, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    # nb_filter is re-pinned to the DenseNet-161 value regardless of the
    # argument passed in.
    nb_filter = 96
    nb_layers = [6, 12, 36, 24]  # For DenseNet-161

    # Initial convolution
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(nb_filter,
                      7,
                      7,
                      subsample=(2, 2),
                      name='conv1',
                      bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)

        # Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # Final dense block has no trailing transition block.
    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           name='conv' + str(final_stage) + '_blk_bn')(x)
    x = Scale(axis=concat_axis,
              name='conv' + str(final_stage) + '_blk_scale')(x)
    x = Activation('relu', name='relu' + str(final_stage) + '_blk')(x)

    # Original 1000-way ImageNet classification head. It exists only so the
    # pretrained weights (including fc6) can be loaded by layer name below.
    x_fc = GlobalAveragePooling2D(name='pool' + str(final_stage))(x)
    x_fc = Dense(1000, name='fc6')(x_fc)
    x_fc = Activation('softmax', name='prob')(x_fc)

    model = Model(img_input, x_fc, name='densenet')
    # densenet161_weights is a path constant defined elsewhere in this module.
    weights_path = densenet161_weights

    #    if K.image_dim_ordering() == 'th':
    #      # Use pre-trained weights for Theano backend
    #      weights_path = 'imagenet_models/densenet161_weights_th.h5'
    #    else:
    #      # Use pre-trained weights for Tensorflow backend
    #      weights_path = 'imagenet_models/densenet161_weights_tf.h5'

    model.load_weights(weights_path, by_name=True)

    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    # New head: num_classes sigmoid outputs, branching off the same backbone
    # tensor `x` (the loaded backbone weights are shared with the new model).
    x_newfc = GlobalAveragePooling2D(name='pool' + str(final_stage))(x)
    x_newfc = Dense(num_classes, name='fc6')(x_newfc)
    x_newfc = Activation('sigmoid', name='prob')(x_newfc)

    model = Model(img_input, x_newfc)

    # Learning rate is changed to 0.001
    #    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    #    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    return model
Beispiel #6
0
def fcn_8s(num_classes, init_lr, input_shape, vgg_weight_path=None):
    """FCN-8s style semantic segmentation model on a VGG16-like backbone.

    Parameters:
        num_classes - number of output classes (softmax over channels)
        init_lr - initial learning rate for the Adam optimizer
        input_shape - input tensor shape, e.g. (H, W, 3); must have static
            spatial dimensions (the Lambda upsampling below reads x.shape)
        vgg_weight_path - optional path to VGG16 weights, loaded by layer
            name into the blockN_convM layers

    Returns:
        A compiled Keras Model (categorical cross-entropy loss, dice_coef
        metric — dice_coef is defined elsewhere in this module).
    """
    img_input = Input(input_shape)

    # Block 1
    x = Conv2D(64, (3, 3), padding='same', name='block1_conv1')(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(64, (3, 3), padding='same', name='block1_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D()(x)

    # Block 2
    x = Conv2D(128, (3, 3), padding='same', name='block2_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(128, (3, 3), padding='same', name='block2_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D()(x)

    # Block 3
    x = Conv2D(256, (3, 3), padding='same', name='block3_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), padding='same', name='block3_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), padding='same', name='block3_conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Kept for the FCN skip fusion below (1/8 resolution).
    block_3_out = MaxPooling2D()(x)

    # Block 4
    x = Conv2D(512, (3, 3), padding='same', name='block4_conv1')(block_3_out)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block4_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block4_conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Kept for the FCN skip fusion below (1/16 resolution).
    block_4_out = MaxPooling2D()(x)

    # Block 5
    x = Conv2D(512, (3, 3), padding='same', name='block5_conv1')(block_4_out)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block5_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block5_conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D()(x)

    # Load pretrained weights.
    # Only the named blockN_convM layers match VGG16; BatchNorm layers stay
    # freshly initialized.
    if vgg_weight_path is not None:
        vgg16 = Model(img_input, x)
        vgg16.load_weights(vgg_weight_path, by_name=True)

    # Convolutinalized fully connected layer.
    x = Conv2D(4096, (7, 7), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Classifying layers.
    # NOTE(review): linear 1x1 conv followed by BN+ReLU before the softmax —
    # unusual for FCN score layers, but kept as written.
    x = Conv2D(num_classes, (1, 1), strides=(1, 1), activation='linear')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Project the skip tensors to num_classes channels so they can be added.
    block_3_out = Conv2D(num_classes, (1, 1), strides=(1, 1), activation='linear')(block_3_out)
    block_3_out = BatchNormalization()(block_3_out)
    block_3_out = Activation('relu')(block_3_out)

    block_4_out = Conv2D(num_classes, (1, 1), strides=(1, 1), activation='linear')(block_4_out)
    block_4_out = BatchNormalization()(block_4_out)
    block_4_out = Activation('relu')(block_4_out)

    # FCN-8s fusion: upsample x2, add 1/16 skip; upsample x2, add 1/8 skip;
    # final x8 upsample back to input resolution.
    x = Lambda(lambda x: tf.image.resize_images(x, (x.shape[1] * 2, x.shape[2] * 2)))(x)
    x = Add()([x, block_4_out])
    x = Lambda(lambda x: tf.image.resize_images(x, (x.shape[1] * 2, x.shape[2] * 2)))(x)
    x = Add()([x, block_3_out])
    x = Lambda(lambda x: tf.image.resize_images(x, (x.shape[1] * 8, x.shape[2] * 8)))(x)

    x = Activation('softmax')(x)
    model = Model(img_input, x)
    model.compile(optimizer=Adam(lr=init_lr, decay=5e-4),
                  loss='categorical_crossentropy',
                  metrics=[dice_coef])

    return model
Beispiel #7
0
def resnet_v2_stem(input):
    '''Stem shared by pure Inception-v4 and Inception-ResNet-v2 networks
    (the input section of those architectures).

    Expects a 299 x 299 x 3 tensor (TensorFlow dimension ordering) and
    returns a 35 x 35 x 384 feature map.
    '''

    def conv(tensor, filters, size, **kwargs):
        # Every stem convolution shares the same L2 penalty and ReLU.
        return Conv2D(filters, size,
                      kernel_regularizer=l2(0.0002),
                      activation="relu",
                      **kwargs)(tensor)

    net = conv(input, 32, (3, 3), strides=(2, 2))  # 149 * 149 * 32
    net = conv(net, 32, (3, 3))                    # 147 * 147 * 32
    net = conv(net, 64, (3, 3), padding="same")    # 147 * 147 * 64

    # First reduction: max-pool branch alongside a strided conv branch.
    pool_branch = MaxPooling2D((3, 3), strides=(2, 2))(net)
    conv_branch = conv(net, 96, (3, 3), strides=(2, 2))
    net = concatenate([pool_branch, conv_branch], axis=3)  # 73 * 73 * 160

    # Two parallel towers: a short 1x1 -> 3x3 path and a factorized
    # 1x1 -> 7x1 -> 1x7 -> 3x3 path.
    short_tower = conv(net, 64, (1, 1), padding="same")
    short_tower = conv(short_tower, 96, (3, 3))

    long_tower = conv(net, 64, (1, 1), padding="same")
    long_tower = conv(long_tower, 64, (7, 1), padding="same")
    long_tower = conv(long_tower, 64, (1, 7), padding="same")
    long_tower = conv(long_tower, 96, (3, 3), padding="valid")

    net = concatenate([short_tower, long_tower], axis=3)  # 71 * 71 * 192

    # Final reduction: strided conv alongside a max-pool.
    conv_branch = conv(net, 192, (3, 3), strides=(2, 2))
    pool_branch = MaxPooling2D((3, 3), strides=(2, 2))(net)
    net = concatenate([conv_branch, pool_branch], axis=3)  # 35 * 35 * 384

    net = BatchNormalization(axis=3)(net)
    net = Activation("relu")(net)

    return net
def get_2stack_unet_nonorm(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS):
    """Create the 2-stack U-Net model: two U-Nets chained back to back,
    with no input-normalization layer.

    Returns a Model mapping (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS) inputs to
    the single-channel output of the second U-Net.
    """

    def double_conv(tensor, filters, drop):
        # Two 3x3 ELU convolutions with dropout sandwiched in between.
        t = Conv2D(filters, (3, 3), activation='elu',
                   kernel_initializer='he_normal', padding='same')(tensor)
        t = Dropout(drop)(t)
        return Conv2D(filters, (3, 3), activation='elu',
                      kernel_initializer='he_normal', padding='same')(t)

    def up_block(tensor, skip, filters, drop, out_filters=None):
        # Upsample, merge with the encoder skip, then conv -> dropout -> conv
        # (the last conv optionally with a different filter count).
        t = Conv2DTranspose(filters, (2, 2), strides=(2, 2),
                            padding='same')(tensor)
        t = concatenate([t, skip])
        t = Conv2D(filters, (3, 3), activation='elu',
                   kernel_initializer='he_normal', padding='same')(t)
        t = Dropout(drop)(t)
        last = filters if out_filters is None else out_filters
        return Conv2D(last, (3, 3), activation='elu',
                      kernel_initializer='he_normal', padding='same')(t)

    def unet(tensor, out_filters):
        # Contracting path; keep each level's activation for the skips.
        skips = []
        t = tensor
        for filters, drop in ((16, 0.1), (32, 0.1), (64, 0.2), (128, 0.2)):
            t = double_conv(t, filters, drop)
            skips.append(t)
            t = MaxPooling2D((2, 2))(t)

        # Bottleneck.
        t = double_conv(t, 256, 0.3)

        # Expanding path, consuming the skips deepest-first.
        for filters, drop in ((128, 0.2), (64, 0.2), (32, 0.1)):
            t = up_block(t, skips.pop(), filters, drop)
        return up_block(t, skips.pop(), 16, 0.1, out_filters=out_filters)

    inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))

    # U-Net 1 feeds its 16-channel output straight into U-Net 2, whose
    # final convolution produces the single output channel.
    first_pass = unet(inputs, 16)
    outputs = unet(first_pass, 1)

    model = Model(inputs=[inputs], outputs=[outputs])
    return model
model = Sequential()

# Imitating the network of NVidia :-D

# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
# Crop image to only see section with road
model.add(Cropping2D(cropping=((70, 25), (0, 0))))  # sizes from tutorial video
# Convolution 5x5 Layers
# NOTE(review): Conv2D(24, 5, 5, subsample=...) is Keras-1 positional style;
# under Keras 2 this would be Conv2D(24, (5, 5), strides=(2, 2)). Verify the
# installed Keras version accepts it.
model.add(Conv2D(24, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Conv2D(36, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Conv2D(48, 5, 5, subsample=(2, 2), activation='relu'))
# Convolution 3x3 Layers
model.add(Conv2D(64, 3, 3, activation='relu'))
model.add(Conv2D(64, 3, 3, activation='relu'))
# NOTE(review): dim_ordering="th" makes this pool assume channels-first, but
# the input above is channels-last (160, 320, 3) — confirm this is intended.
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
model.add(Dropout(keep_prob))  # keep_prob is defined elsewhere in the script
model.add(Flatten())
# Full-Connected Layers
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))  # single regression output

# Mean-squared-error regression with Adam.
model.compile(loss='mse', optimizer='adam')

###################################################
############# Get Data and Start Computing ########
###################################################

# compile and train the model using the generator function
def faceRecoModel(input_shape):
    """
    Implementation of the Inception model used for FaceNet
    
    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras producing L2-normalized 128-d
    embeddings
    """

    # Input tensor for the network.
    inputs = Input(input_shape)

    # Stem: zero-pad, 7x7/2 conv, batch norm, ReLU, then a 3x3/2 max pool.
    net = ZeroPadding2D((3, 3))(inputs)
    net = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(net)
    net = BatchNormalization(axis=1, name='bn1')(net)
    net = Activation('relu')(net)
    net = ZeroPadding2D((1, 1))(net)
    net = MaxPooling2D((3, 3), strides=2)(net)

    # 1x1 bottleneck convolution.
    net = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(net)
    net = BatchNormalization(axis=1, epsilon=0.00001, name='bn2')(net)
    net = Activation('relu')(net)
    net = ZeroPadding2D((1, 1))(net)

    # 3x3 convolution followed by another 3x3/2 max pool.
    net = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(net)
    net = BatchNormalization(axis=1, epsilon=0.00001, name='bn3')(net)
    net = Activation('relu')(net)
    net = ZeroPadding2D((1, 1))(net)
    net = MaxPooling2D(pool_size=3, strides=2)(net)

    # Inception stages 1 (a/b/c), 2 (a/b) and 3 (a/b), applied in order.
    for block in (inception_block_1a, inception_block_1b, inception_block_1c,
                  inception_block_2a, inception_block_2b,
                  inception_block_3a, inception_block_3b):
        net = block(net)

    # Head: average-pool, flatten, project to a 128-d embedding.
    net = AveragePooling2D(pool_size=(3, 3),
                           strides=(1, 1),
                           data_format='channels_first')(net)
    net = Flatten()(net)
    net = Dense(128, name='dense_layer')(net)

    # L2-normalize the embedding vector.
    net = Lambda(lambda x: K.l2_normalize(x, axis=1))(net)

    return Model(inputs=inputs, outputs=net, name='FaceRecoModel')
Beispiel #11
0
# Load the image data --- (※1)
# NOTE(review): unpacking four arrays from one .npy implies the file holds a
# length-4 object array — confirm it was saved that way.
X_train, X_test, y_train, y_test = np.load('newobj.npy')

# Normalize the loaded data to [0, 1)
X_train = X_train.astype('float') / 256
X_test = X_test.astype('float') / 256

print('X_train shape:', X_train.shape)

# Build the CNN model
model = Sequential()
model.add(
    Convolution2D(32, 3, 3, border_mode='same', input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())  # --- (※3)
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))

# nb_classes is defined elsewhere in this script.
model.add(Dense(nb_classes))  # 4 classes
def inception_block_1a(X):
    """
    Inception block 3a: four parallel branches (3x3, 5x5, pool, 1x1)
    concatenated along the channel axis (channels-first layout).
    """

    def bn_relu(tensor, name):
        # Shared batch-norm + ReLU tail used by every branch.
        t = BatchNormalization(axis=1, epsilon=0.00001, name=name)(tensor)
        return Activation('relu')(t)

    # 1x1 bottleneck followed by a padded 3x3 convolution.
    branch_3x3 = Conv2D(96, (1, 1),
                        data_format='channels_first',
                        name='inception_3a_3x3_conv1')(X)
    branch_3x3 = bn_relu(branch_3x3, 'inception_3a_3x3_bn1')
    branch_3x3 = ZeroPadding2D(padding=(1, 1),
                               data_format='channels_first')(branch_3x3)
    branch_3x3 = Conv2D(128, (3, 3),
                        data_format='channels_first',
                        name='inception_3a_3x3_conv2')(branch_3x3)
    branch_3x3 = bn_relu(branch_3x3, 'inception_3a_3x3_bn2')

    # 1x1 bottleneck followed by a padded 5x5 convolution.
    branch_5x5 = Conv2D(16, (1, 1),
                        data_format='channels_first',
                        name='inception_3a_5x5_conv1')(X)
    branch_5x5 = bn_relu(branch_5x5, 'inception_3a_5x5_bn1')
    branch_5x5 = ZeroPadding2D(padding=(2, 2),
                               data_format='channels_first')(branch_5x5)
    branch_5x5 = Conv2D(32, (5, 5),
                        data_format='channels_first',
                        name='inception_3a_5x5_conv2')(branch_5x5)
    branch_5x5 = bn_relu(branch_5x5, 'inception_3a_5x5_bn2')

    # Max-pool branch projected with a 1x1 conv, then padded asymmetrically
    # so its spatial size matches the other branches.
    branch_pool = MaxPooling2D(pool_size=3, strides=2,
                               data_format='channels_first')(X)
    branch_pool = Conv2D(32, (1, 1),
                         data_format='channels_first',
                         name='inception_3a_pool_conv')(branch_pool)
    branch_pool = bn_relu(branch_pool, 'inception_3a_pool_bn')
    branch_pool = ZeroPadding2D(padding=((3, 4), (3, 4)),
                                data_format='channels_first')(branch_pool)

    # Plain 1x1 branch.
    branch_1x1 = Conv2D(64, (1, 1),
                        data_format='channels_first',
                        name='inception_3a_1x1_conv')(X)
    branch_1x1 = bn_relu(branch_1x1, 'inception_3a_1x1_bn')

    # CONCAT along the channel axis.
    return concatenate([branch_3x3, branch_5x5, branch_pool, branch_1x1],
                       axis=1)
Beispiel #13
0
def get_unet_mod(input_img, n_filters = 16, dropout = 0.1, batchnorm = True):
    """Define a residual U-Net with an ASPP bottleneck.

    Every stage first projects its input with a plain 3x3 Conv2D
    ("skip"), runs the projection through conv2d_block, and adds the
    projection back as a residual connection.

    Args:
        input_img: input Keras tensor (e.g. Input((256, 256, 3))).
        n_filters: base filter count; doubled at each contracting step.
        dropout: dropout rate applied after each pooling / concatenation.
        batchnorm: forwarded to conv2d_block.

    Returns:
        A keras Model mapping input_img to a 1-channel sigmoid mask.

    Note:
        The original version assigned the residual additions of the last
        three decoder stages to `c6` and never used them, so those adds
        were dead code; c7/c8/c9 now keep their residual connections,
        matching the pattern of every other stage.
    """
    # Contracting Path
    skip = Conv2D(filters = n_filters * 1, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(input_img)
    c1 = conv2d_block(skip, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
    c1 = layers.add([c1, skip])
    p1 = MaxPooling2D((2, 2))(c1)
    p1 = Dropout(dropout)(p1)

    skip = Conv2D(filters = n_filters * 2, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(p1)
    c2 = conv2d_block(skip, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
    c2 = layers.add([c2, skip])
    p2 = MaxPooling2D((2, 2))(c2)
    p2 = Dropout(dropout)(p2)

    skip = Conv2D(filters = n_filters * 4, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(p2)
    c3 = conv2d_block(skip, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
    c3 = layers.add([c3, skip])
    p3 = MaxPooling2D((2, 2))(c3)
    p3 = Dropout(dropout)(p3)

    skip = Conv2D(filters = n_filters * 8, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(p3)
    c4 = conv2d_block(skip, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
    c4 = layers.add([c4, skip])
    p4 = MaxPooling2D((2, 2))(c4)
    p4 = Dropout(dropout)(p4)

    # Bottleneck: conv block followed by Atrous Spatial Pyramid Pooling.
    skip = Conv2D(filters = n_filters * 16, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(p4)
    c5 = conv2d_block(skip, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm)
    c5 = aspp_block(c5,num_filters=256,rate_scale=1,output_stride=16,input_shape=(256,256,3))

    # Expansive Path
    u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(dropout)(u6)
    skip = Conv2D(filters = n_filters * 8, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(u6)
    c6 = conv2d_block(skip, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
    c6 = layers.add([c6, skip])

    u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(dropout)(u7)
    skip = Conv2D(filters = n_filters * 4, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(u7)
    c7 = conv2d_block(skip, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
    # BUG FIX: was `c6 = layers.add([c7, skip])` -- the residual was discarded.
    c7 = layers.add([c7, skip])

    u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(dropout)(u8)
    skip = Conv2D(filters = n_filters * 2, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(u8)
    c8 = conv2d_block(skip, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
    # BUG FIX: was `c6 = layers.add([c8, skip])` -- the residual was discarded.
    c8 = layers.add([c8, skip])

    u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
    u9 = concatenate([u9, c1])
    u9 = Dropout(dropout)(u9)
    skip = Conv2D(filters = n_filters * 1, kernel_size = (3, 3),\
              kernel_initializer = 'he_normal', padding = 'same')(u9)
    c9 = conv2d_block(skip, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
    # BUG FIX: was `c6 = layers.add([c9, skip])` -- the residual was discarded.
    c9 = layers.add([c9, skip])

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
Beispiel #14
0
def train(features_dir, top_model_filename, labels, test_indexes, batch_size, sample_size,
        learning_rate, momentum, epochs, kfolds, training_indexes_filename,
        verbose=False, num_classes=3):
    """Fine-tune a re-created VGG16 block 5 plus a previously trained top model.

    Rebuilds block 5 of VGG16 from scratch, initializes it with ImageNet
    weights, stacks a saved top model on it, then fits the combined net
    with SGD over k folds of pre-extracted features, saving a timestamped
    snapshot to the "models" directory after every fold.

    Parameters
    ----------
    features_dir : directory of pre-extracted feature examples, consumed by
        FeatureExampleGenerator (project helper; exact format not visible here).
    top_model_filename : path to the saved Keras top model (.h5) to stack on block 5.
    labels : integer class labels; converted to one-hot below.
    test_indexes : example indexes excluded from training sampling.
    batch_size, sample_size, learning_rate, momentum, epochs, kfolds :
        standard training hyperparameters.
    training_indexes_filename : optional text file with one training index
        per line; when provided, random sampling is skipped.
    verbose : print progress messages when True.
    num_classes : number of classes for the one-hot label encoding.
    """

    if verbose:
        print("Creating block 5 of VGG16..", end="")

    # Replication of block 5 of VGG16.  This is the layer that we're going
    # to retrain to become more attuned to daytime imagery.
    model = Sequential()
    model.add(Conv2D(
        filters=512,
        kernel_size=(3, 3),
        activation='relu',
        padding='same',
        name='block5_conv1',
        # Hard-coded input size.  This is the output size of `block4_pool` when
        # the input images are 400x400.
        input_shape=(25, 25, 512)
    ))
    model.add(Conv2D(
        filters=512,
        kernel_size=(3, 3),
        activation='relu',
        padding='same',
        name='block5_conv2'
    ))
    model.add(Conv2D(
        filters=512,
        kernel_size=(3, 3),
        activation='relu',
        padding='same',
        name='block5_conv3'
    ))
    model.add(MaxPooling2D(
        pool_size=(2, 2),
        strides=(2, 2),
        name='block5_pool'
    ))

    if verbose:
        print("done.")

    # Initialize the layers of block 5 with the VGG16 ImageNet weights.
    # Note: we should load weights *before* adding the top of the model,
    # as we might clobber some of the previously trained weights in the
    # top if we load weights after the top has been added.
    # by_name=True matches our block5_* layer names against the VGG16 file.
    if verbose:
        print("Loading ImageNet weights into block 5...", end="")
    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
        BLOCK5_WEIGHTS, cache_subdir='models')
    model.load_weights(weights_path, by_name=True)
    if verbose:
        print("done.")

    # Load the previously trained top model, and add it to the top of the net.
    if verbose:
        print("Loading the top of the model from %s..." % (top_model_filename,), end="")
    top_model = load_model(top_model_filename)
    model.add(top_model)
    if verbose:
        print("done.")

    if verbose:
        print("Compiling model...", end="")
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        # Note: this learning rate should be pretty low (e.g., 1e-4, as
        # recommended in the referenced blog post, to keep previously-
        # learned features in tact.  Reference:
        # https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
        # NOTE(review): `lr` is the legacy Keras argument name; newer
        # releases spell it `learning_rate` -- confirm installed version.
        optimizer=SGD(lr=learning_rate, momentum=momentum),
        metrics=['accuracy'],
    )
    if verbose:
        print("done.")

    # Sample for training indexes, or load from file
    if training_indexes_filename is not None:
        sampled_examples = []
        with open(training_indexes_filename) as training_indexes_file:
            for line in training_indexes_file:
                sampled_examples.append(int(line.strip()))
    else:
        sampled_examples = get_training_examples(
            features_dir, labels, test_indexes, sample_size)

    # Divide the sampled training data into folds
    folds = get_folds(sampled_examples, kfolds)

    # Convert labels to one-hot array for use in training.
    label_array = keras.utils.to_categorical(labels, num_classes)

    # Here, we fit the neural network for each fold
    # NOTE(review): the same model instance is fitted across all folds, so
    # weights carry over from one fold to the next; each fold's snapshot
    # is saved below rather than training from scratch per fold.
    for i, fold in enumerate(folds, start=1):

        training_examples = fold["training"]
        validation_examples = fold["validation"]

        if verbose:
            print("Training on fold %d of %d" % (i, len(folds)))
            print("Training set size: %d" % (len(training_examples)))
            print("Validation set size: %d" % (len(validation_examples)))

        # Do the actual fitting here
        model.fit_generator(
            FeatureExampleGenerator(training_examples, features_dir, label_array, batch_size),
            steps_per_epoch=math.ceil(float(len(training_examples)) / batch_size),
            epochs=epochs,
            verbose=(1 if verbose else 0),
            validation_data=FeatureExampleGenerator(validation_examples, features_dir, label_array, batch_size),
            validation_steps=math.ceil(float(len(validation_examples)) / batch_size),
        )
        # Persist a timestamped snapshot after every fold.
        if not os.path.exists("models"):
            os.makedirs("models")
        model.save(os.path.join(
            "models", "tuned-" + strftime("%Y%m%d-%H%M%S", gmtime()) + ".h5"))
def create_unet_twoOutputs(C=3):
    """Build a two-headed U-Net with a shared encoder.

    One encoder (8 -> 128 filters, four 2x2 poolings) feeds two
    independent decoders: one predicting cell contours, one predicting
    cell labels.  Input is any-size C-channel imagery in [0, 255];
    pixels are rescaled to [0, 1] inside the graph.

    Returns a keras Model with outputs [labels, contours], each a
    1-channel sigmoid map.
    """
    inputs = Input(shape=(None, None, C))
    scaled = Lambda(lambda x: x / 255.0)(inputs)

    def double_conv(tensor, filters):
        # Two stacked 3x3 relu convolutions with 'same' padding.
        tensor = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
        return Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)

    # Shared contracting path.
    enc1 = double_conv(scaled, 8)
    down = MaxPooling2D((2, 2))(enc1)
    enc2 = double_conv(down, 16)
    down = MaxPooling2D((2, 2))(enc2)
    enc3 = double_conv(down, 32)
    down = MaxPooling2D((2, 2))(enc3)
    enc4 = double_conv(down, 64)
    down = MaxPooling2D((2, 2))(enc4)
    bottleneck = double_conv(down, 128)

    def decoder(head_name):
        # Expanding path with skip connections back to the shared encoder,
        # ending in a named 1x1 sigmoid head.
        x = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same')(bottleneck)
        x = double_conv(concatenate([x, enc4]), 64)
        x = Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same')(x)
        x = double_conv(concatenate([x, enc3]), 32)
        x = Conv2DTranspose(16, (3, 3), strides=(2, 2), padding='same')(x)
        x = double_conv(concatenate([x, enc2]), 16)
        x = Conv2DTranspose(8, (3, 3), strides=(2, 2), padding='same')(x)
        x = double_conv(concatenate([x, enc1], axis=3), 8)
        return Conv2D(1, (1, 1), activation='sigmoid', name=head_name)(x)

    contours = decoder('contours')
    labels = decoder('labels')

    model = Model(inputs=[inputs], outputs=[labels, contours])
    return model
Beispiel #16
0
def build_UNet(unit_size=None, final_max_pooling=None):
    """Build a dropout-regularized U-Net ending in a 1-channel sigmoid head.

    NOTE(review): this function depends on names it neither defines nor
    receives -- `inputs` (the Input tensor), `dropout`, and
    DEFAULT_ACTIVATION must exist at module level when it runs; confirm
    against the module this was taken from.  `final_max_pooling` is
    accepted but never used, and the default unit_size=None would fail at
    `unit_size * 2`, so callers are expected to pass an integer.
    """
    # Rescale pixels from [0, 255] to [0, 1].
    s = Lambda(lambda x: x / 255)(inputs)

    # Contracting path: unit_size -> unit_size * 16 filters, four poolings.
    c1 = Conv2D(unit_size, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(s)
    c1 = Dropout(dropout)(c1)
    c1 = Conv2D(unit_size, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c1)
    p1 = MaxPooling2D((2, 2))(c1)

    c2 = Conv2D(unit_size * 2, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(p1)
    c2 = Dropout(dropout)(c2)
    c2 = Conv2D(unit_size * 2, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c2)
    p2 = MaxPooling2D((2, 2))(c2)

    c3 = Conv2D(unit_size * 4, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(p2)
    c3 = Dropout(dropout)(c3)
    c3 = Conv2D(unit_size * 4, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c3)
    p3 = MaxPooling2D((2, 2))(c3)

    c4 = Conv2D(unit_size * 8, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(p3)
    c4 = Dropout(dropout)(c4)
    c4 = Conv2D(unit_size * 8, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c4)
    p4 = MaxPooling2D((2, 2))(c4)

    # Bottleneck: three convolutions (one more than the other stages).
    c5 = Conv2D(unit_size * 16, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(p4)
    c5 = Dropout(dropout)(c5)
    c5 = Conv2D(unit_size * 16, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c5)
    c5 = Dropout(dropout)(c5)
    c5 = Conv2D(unit_size * 16, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c5)

    # Expansive path: transposed convs with skip connections to c4..c1.
    u6 = Conv2DTranspose(unit_size * 8, (2, 2), strides=(2, 2),
                         padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(unit_size * 8, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(u6)
    c6 = Dropout(dropout)(c6)
    c6 = Conv2D(unit_size * 8, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c6)

    u7 = Conv2DTranspose(unit_size * 4, (2, 2), strides=(2, 2),
                         padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(unit_size * 4, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(u7)
    c7 = Dropout(dropout)(c7)
    c7 = Conv2D(unit_size * 4, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c7)

    u8 = Conv2DTranspose(unit_size * 2, (2, 2), strides=(2, 2),
                         padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = Conv2D(unit_size * 2, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(u8)
    c8 = Dropout(dropout)(c8)
    c8 = Conv2D(unit_size * 2, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c8)

    u9 = Conv2DTranspose(unit_size, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(unit_size, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(u9)
    c9 = Dropout(dropout)(c9)
    c9 = Conv2D(unit_size, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same')(c9)
    # Single-channel sigmoid segmentation head.
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[inputs], outputs=[outputs])

    return model
Beispiel #17
0
    image = np.fliplr(image)
    measurement = -measurement
    images.append(image)
    measurements.append(measurement)

# `images` and `measurements` are assembled earlier in this script
# (including the horizontally flipped copies built in the loop above).
X_train = np.array(images)
Y_train = np.array(measurements)

model = Sequential()
# Normalize pixels to [-0.5, 0.5]; camera frames are 160x320 RGB.
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3)))

# Crop 70 rows of sky from the top and 25 rows of hood from the bottom.
model.add(Cropping2D(cropping=((70,25),(0,0))))
# Five Convolutional Layer
# NOTE(review): Convolution2D(n, 5, 5, border_mode=...) is the Keras 1
# signature; Keras 2 spells this Conv2D(n, (5, 5), padding=...).
model.add(Convolution2D(24, 5, 5, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(36, 5, 5, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(48, 5, 5, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
          
# Five Fully-Connected Layer
Beispiel #18
0
def DenseNet(nb_dense_block=4,
             growth_rate=32,
             nb_filter=64,
             reduction=0.0,
             dropout_rate=0.0,
             weight_decay=1e-4,
             classes=1000,
             weights_path=None):
    '''Instantiate the DenseNet architecture,
        # Arguments
            nb_dense_block: number of dense blocks to add to end
            growth_rate: number of filters to add per dense block
            nb_filter: initial number of filters (NOTE: overridden to 64 below)
            reduction: reduction factor of transition blocks.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            classes: optional number of classes to classify images
            weights_path: path to pre-trained weights
        # Returns
            A Keras model instance.

        NOTE(review): written against the Keras 1 API
        (K.image_dim_ordering, Convolution2D with subsample/bias) and a
        custom `Scale` layer defined elsewhere in this module.  After
        optionally loading weights, a second 6-way head is stacked on
        top of the 1000-way softmax -- see note at the bottom.
    '''
    eps = 1.1e-5

    # compute compression factor
    compression = 1.0 - reduction

    # Handle Dimension Ordering for different backends
    # The global is shared with the dense_block/transition_block helpers.
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(224, 224, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    # NOTE(review): this clobbers the nb_filter argument unconditionally.
    nb_filter = 64
    nb_layers = [6, 12, 32, 32]  # For DenseNet-169

    # Initial convolution: 7x7/2 conv, BN, Scale, relu, 3x3/2 max-pool.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(nb_filter,
                      7,
                      7,
                      subsample=(2, 2),
                      name='conv1',
                      bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)

        # Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # Final dense block has no transition block after it.
    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           name='conv' + str(final_stage) + '_blk_bn')(x)
    x = Scale(axis=concat_axis,
              name='conv' + str(final_stage) + '_blk_scale')(x)
    x = Activation('relu', name='relu' + str(final_stage) + '_blk')(x)
    x = GlobalAveragePooling2D(name='pool' + str(final_stage))(x)

    x = Dense(classes, name='fc6')(x)
    x = Activation('softmax', name='prob')(x)

    model = Model(img_input, x, name='densenet')

    if weights_path is not None:
        model.load_weights(weights_path)

    # NOTE(review): a 6-way Dense + softmax is chained onto the 1000-way
    # softmax output 'prob'; stacking Dense after a softmax is unusual --
    # confirm this fine-tuning head is intended.
    x = model.output
    x = Dense(6, name='fc61')(x)
    x = Activation('softmax', name='prob1')(x)
    model = Model(img_input, x, name='densenet1')

    return model
Beispiel #19
0
#    imgs.append(image_flipped)
#    measurements.append(measurement_flipped)
#
#print(len(imgs))
#print(len(measurements))
#
#X_train = np.array(imgs)
#y_train = np.array(measurements)

model = Sequential()

model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=[160, 320, 3]))
model.add(Lambda(lambda x: x / 255. - 0.5))
model.add(Conv2D(filters=6, kernel_size=(5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))

model.add(Conv2D(filters=16, kernel_size=(5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))

model.add(Dropout(0.5))

model.add(Conv2D(filters=32, kernel_size=(3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))

model.add(Flatten())
model.add(Dense(120))
model.add(Activation('relu'))
Beispiel #20
0
def build_model(input_layer, start_neurons, DropoutRatio=0.5):
    """Build a residual U-Net graph for 101x101 inputs.

    Encoder halves the spatial size four times (101 -> 50 -> 25 -> 12 -> 6);
    the decoder mirrors it, mixing padding="same" and padding="valid"
    transposed convolutions so the odd sizes (25, 101) are recovered
    exactly.  Returns the sigmoid output tensor (not a Model).

    Args:
        input_layer: input Keras tensor, 101x101 spatial size.
        start_neurons: base filter count, doubled at each down-step.
        DropoutRatio: dropout rate (halved for the first encoder stage).
    """
    def res_stage(tensor, filters):
        # 3x3 linear projection followed by two residual blocks
        # (second one with its activation flag set).
        tensor = Conv2D(filters, (3, 3), activation=None,
                        padding="same")(tensor)
        tensor = residual_block(tensor, filters)
        return residual_block(tensor, filters, True)

    # Encoder -------------------------------------------------------
    # 101 -> 50
    enc1 = res_stage(input_layer, start_neurons * 1)
    down1 = MaxPooling2D((2, 2))(enc1)
    down1 = Dropout(DropoutRatio / 2)(down1)

    # 50 -> 25
    enc2 = res_stage(down1, start_neurons * 2)
    down2 = MaxPooling2D((2, 2))(enc2)
    down2 = Dropout(DropoutRatio)(down2)

    # 25 -> 12
    enc3 = res_stage(down2, start_neurons * 4)
    down3 = MaxPooling2D((2, 2))(enc3)
    down3 = Dropout(DropoutRatio)(down3)

    # 12 -> 6
    enc4 = res_stage(down3, start_neurons * 8)
    down4 = MaxPooling2D((2, 2))(enc4)
    down4 = Dropout(DropoutRatio)(down4)

    # Middle
    middle = res_stage(down4, start_neurons * 16)

    # Decoder -------------------------------------------------------
    # 6 -> 12
    up4 = Conv2DTranspose(start_neurons * 8, (3, 3),
                          strides=(2, 2),
                          padding="same")(middle)
    dec4 = concatenate([up4, enc4])
    dec4 = Dropout(DropoutRatio)(dec4)
    dec4 = res_stage(dec4, start_neurons * 8)

    # 12 -> 25: padding="valid" so the odd size 25 is reached, not 24.
    up3 = Conv2DTranspose(start_neurons * 4, (3, 3),
                          strides=(2, 2),
                          padding="valid")(dec4)
    dec3 = concatenate([up3, enc3])
    dec3 = Dropout(DropoutRatio)(dec3)
    dec3 = res_stage(dec3, start_neurons * 4)

    # 25 -> 50
    up2 = Conv2DTranspose(start_neurons * 2, (3, 3),
                          strides=(2, 2),
                          padding="same")(dec3)
    dec2 = concatenate([up2, enc2])
    dec2 = Dropout(DropoutRatio)(dec2)
    dec2 = res_stage(dec2, start_neurons * 2)

    # 50 -> 101: padding="valid" again recovers the odd input size.
    up1 = Conv2DTranspose(start_neurons * 1, (3, 3),
                          strides=(2, 2),
                          padding="valid")(dec2)
    dec1 = concatenate([up1, enc1])
    dec1 = Dropout(DropoutRatio)(dec1)
    dec1 = res_stage(dec1, start_neurons * 1)

    # Pre-activation logits kept separate so losses like Lovasz can use them.
    logits = Conv2D(1, (1, 1), padding="same",
                    activation=None)(dec1)
    output_layer = Activation('sigmoid')(logits)

    return output_layer
Beispiel #21
0
    X_train = np.array(images)
    y_train = np.array(angles)
    y_train = to_categorical(y_train, num_classes=NUM_CLASSES)

    # define model here
    # input -> crop image -> normalize -> 2 Conv layers ->
    # -> 2 fully connected layers -> output softmax to 3 classes
    model = Sequential()
    model.add(
        Cropping2D(cropping=((70, 50), (0, 0)), input_shape=(160, 320, 1)))
    model.add(Lambda(lambda x: x / 255. - 0.5))  #normalize

    model.add(Conv2D(32, (3, 3),
                     activation='relu'))  #input shape here is (40,320,1)
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.5))

    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))

    model.add(Dense(NUM_CLASSES, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1e-4,
                       subsample_initial_block=False,
                       activation='softmax'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be a -1, positive integer or a list.
                If -1, calculates nb_layer_per_block from the depth of the network.
                If positive integer, a set number of layers per dense block.
                If list, nb_layer is used as provided. Note that list size must
                be exactly nb_dense_block (the assert below enforces this).
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Set to True to subsample the initial convolution and
                add a MaxPool2D before the dense blocks are added.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    # Channel axis differs by backend image format.
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        # Last entry is the final block; earlier entries get transition blocks.
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            # Derive a uniform per-block layer count from the total depth.
            assert (
                depth - 4
            ) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)
            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    # compute initial nb_filter if -1, else accept users initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    # ImageNet-style 7x7/2 stem when subsampling, else a plain 3x3 conv.
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)

    x = Conv2D(nb_filter,
               initial_kernel,
               kernel_initializer='he_normal',
               padding='same',
               strides=initial_strides,
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x,
                                 final_nb_layer,
                                 nb_filter,
                                 growth_rate,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
Beispiel #23
0
import os
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
from utils import LRN2D
import utils

myInput = Input(shape=(96, 96, 3))

x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
x = Activation('relu')(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
x = Lambda(LRN2D, name='lrn_1')(x)
x = Conv2D(64, (1, 1), name='conv2')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
x = Activation('relu')(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = Conv2D(192, (3, 3), name='conv3')(x)
x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
x = Activation('relu')(x)
x = Lambda(LRN2D, name='lrn_2')(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)

# Inception3a
inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
inception_3a_3x3 = BatchNormalization(
Beispiel #24
0
def loadModel(
        url='https://drive.google.com/uc?id=1LSe1YCV1x-BfNnfb7DFZTNpv_Q9jITxn'
):
    """Build the OpenFace-style Inception face-embedding network and load its
    pretrained weights.

    The architecture (96x96x3 input, stem convs + inception 3a/3b/3c/4a/4e/
    5a/5b blocks, 128-d L2-normalized embedding output) must match the
    pretrained weight file exactly, so every layer name below is significant.

    Parameters:
        url: Google Drive URL of ``openface_weights.h5``; downloaded with
            ``gdown`` on first use if the file is not already on disk.

    Returns:
        A compiled-free ``keras.Model`` mapping a (96, 96, 3) image to a
        128-dimensional unit-norm embedding, with pretrained weights loaded.
    """
    myInput = Input(shape=(96, 96, 3))

    # Stem: 7x7/2 conv, max-pool, local response normalization, then
    # 1x1 -> 3x3 convs (Inception-v1 style stem).
    x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_1')(x)
    x = Conv2D(64, (1, 1), name='conv2')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(192, (3, 3), name='conv3')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
    x = Activation('relu')(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75),
               name='lrn_2')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)

    # Inception3a: 3x3 branch, 5x5 branch, max-pool branch, 1x1 branch.
    inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
    inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
    inception_3a_3x3 = Conv2D(128, (3, 3),
                              name='inception_3a_3x3_conv2')(inception_3a_3x3)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)

    inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
    inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
    inception_3a_5x5 = Conv2D(32, (5, 5),
                              name='inception_3a_5x5_conv2')(inception_3a_5x5)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)

    inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    inception_3a_pool = Conv2D(
        32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
    inception_3a_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3a_pool_bn')(inception_3a_pool)
    inception_3a_pool = Activation('relu')(inception_3a_pool)
    # Asymmetric padding restores the spatial size so branches can concat.
    inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3,
                                                        4)))(inception_3a_pool)

    inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
    inception_3a_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
    inception_3a_1x1 = Activation('relu')(inception_3a_1x1)

    inception_3a = concatenate([
        inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1
    ],
                               axis=3)

    # Inception3b: like 3a, but the pooling branch is an L2 pooling,
    # implemented as sqrt(9 * avg_pool(x^2)) over a 3x3 window.
    inception_3b_3x3 = Conv2D(96, (1, 1),
                              name='inception_3b_3x3_conv1')(inception_3a)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
    inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
    inception_3b_3x3 = Conv2D(128, (3, 3),
                              name='inception_3b_3x3_conv2')(inception_3b_3x3)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)

    inception_3b_5x5 = Conv2D(32, (1, 1),
                              name='inception_3b_5x5_conv1')(inception_3a)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
    inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
    inception_3b_5x5 = Conv2D(64, (5, 5),
                              name='inception_3b_5x5_conv2')(inception_3b_5x5)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)

    inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
    inception_3b_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_3b_pool)
    # Multiply by the window size (3*3=9) so avg of squares becomes a sum.
    inception_3b_pool = Lambda(lambda x: x * 9,
                               name='mult9_3b')(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_3b')(inception_3b_pool)
    inception_3b_pool = Conv2D(
        64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
    inception_3b_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3b_pool_bn')(inception_3b_pool)
    inception_3b_pool = Activation('relu')(inception_3b_pool)
    inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

    inception_3b_1x1 = Conv2D(64, (1, 1),
                              name='inception_3b_1x1_conv')(inception_3a)
    inception_3b_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
    inception_3b_1x1 = Activation('relu')(inception_3b_1x1)

    inception_3b = concatenate([
        inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1
    ],
                               axis=3)

    # Inception3c: a stride-2 "reduction" block (no 1x1 branch).
    inception_3c_3x3 = Conv2D(128, (1, 1),
                              strides=(1, 1),
                              name='inception_3c_3x3_conv1')(inception_3b)
    inception_3c_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3c_3x3_bn1')(inception_3c_3x3)
    inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
    inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
    inception_3c_3x3 = Conv2D(256, (3, 3),
                              strides=(2, 2),
                              name='inception_3c_3x3_conv' +
                              '2')(inception_3c_3x3)
    inception_3c_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_3c_3x3_bn' +
                                          '2')(inception_3c_3x3)
    inception_3c_3x3 = Activation('relu')(inception_3c_3x3)

    inception_3c_5x5 = Conv2D(32, (1, 1),
                              strides=(1, 1),
                              name='inception_3c_5x5_conv1')(inception_3b)
    inception_3c_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3c_5x5_bn1')(inception_3c_5x5)
    inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
    inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
    inception_3c_5x5 = Conv2D(64, (5, 5),
                              strides=(2, 2),
                              name='inception_3c_5x5_conv' +
                              '2')(inception_3c_5x5)
    inception_3c_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_3c_5x5_bn' +
                                          '2')(inception_3c_5x5)
    inception_3c_5x5 = Activation('relu')(inception_3c_5x5)

    inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
    inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_3c_pool)

    inception_3c = concatenate(
        [inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)

    # Inception4a
    inception_4a_3x3 = Conv2D(96, (1, 1),
                              strides=(1, 1),
                              name='inception_4a_3x3_conv' + '1')(inception_3c)
    inception_4a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_3x3_bn' +
                                          '1')(inception_4a_3x3)
    inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
    inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
    inception_4a_3x3 = Conv2D(192, (3, 3),
                              strides=(1, 1),
                              name='inception_4a_3x3_conv' +
                              '2')(inception_4a_3x3)
    inception_4a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_3x3_bn' +
                                          '2')(inception_4a_3x3)
    inception_4a_3x3 = Activation('relu')(inception_4a_3x3)

    inception_4a_5x5 = Conv2D(32, (1, 1),
                              strides=(1, 1),
                              name='inception_4a_5x5_conv1')(inception_3c)
    inception_4a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_4a_5x5_bn1')(inception_4a_5x5)
    inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
    inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
    inception_4a_5x5 = Conv2D(64, (5, 5),
                              strides=(1, 1),
                              name='inception_4a_5x5_conv' +
                              '2')(inception_4a_5x5)
    inception_4a_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_5x5_bn' +
                                          '2')(inception_4a_5x5)
    inception_4a_5x5 = Activation('relu')(inception_4a_5x5)

    # L2-pooling branch, same sqrt(9 * avg_pool(x^2)) trick as in 3b.
    inception_4a_pool = Lambda(lambda x: x**2, name='power2_4a')(inception_3c)
    inception_4a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: x * 9,
                               name='mult9_4a')(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_4a')(inception_4a_pool)

    inception_4a_pool = Conv2D(128, (1, 1),
                               strides=(1, 1),
                               name='inception_4a_pool_conv' +
                               '')(inception_4a_pool)
    inception_4a_pool = BatchNormalization(axis=3,
                                           epsilon=0.00001,
                                           name='inception_4a_pool_bn' +
                                           '')(inception_4a_pool)
    inception_4a_pool = Activation('relu')(inception_4a_pool)
    inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)

    inception_4a_1x1 = Conv2D(256, (1, 1),
                              strides=(1, 1),
                              name='inception_4a_1x1_conv' + '')(inception_3c)
    inception_4a_1x1 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_1x1_bn' +
                                          '')(inception_4a_1x1)
    inception_4a_1x1 = Activation('relu')(inception_4a_1x1)

    inception_4a = concatenate([
        inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1
    ],
                               axis=3)

    # Inception4e: stride-2 reduction block.
    inception_4e_3x3 = Conv2D(160, (1, 1),
                              strides=(1, 1),
                              name='inception_4e_3x3_conv' + '1')(inception_4a)
    inception_4e_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_3x3_bn' +
                                          '1')(inception_4e_3x3)
    inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
    inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
    inception_4e_3x3 = Conv2D(256, (3, 3),
                              strides=(2, 2),
                              name='inception_4e_3x3_conv' +
                              '2')(inception_4e_3x3)
    inception_4e_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_3x3_bn' +
                                          '2')(inception_4e_3x3)
    inception_4e_3x3 = Activation('relu')(inception_4e_3x3)

    inception_4e_5x5 = Conv2D(64, (1, 1),
                              strides=(1, 1),
                              name='inception_4e_5x5_conv' + '1')(inception_4a)
    inception_4e_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_5x5_bn' +
                                          '1')(inception_4e_5x5)
    inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
    inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
    inception_4e_5x5 = Conv2D(128, (5, 5),
                              strides=(2, 2),
                              name='inception_4e_5x5_conv' +
                              '2')(inception_4e_5x5)
    inception_4e_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_5x5_bn' +
                                          '2')(inception_4e_5x5)
    inception_4e_5x5 = Activation('relu')(inception_4e_5x5)

    inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
    inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_4e_pool)

    inception_4e = concatenate(
        [inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)

    # Inception5a
    inception_5a_3x3 = Conv2D(96, (1, 1),
                              strides=(1, 1),
                              name='inception_5a_3x3_conv' + '1')(inception_4e)
    inception_5a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5a_3x3_bn' +
                                          '1')(inception_5a_3x3)
    inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
    inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
    inception_5a_3x3 = Conv2D(384, (3, 3),
                              strides=(1, 1),
                              name='inception_5a_3x3_conv' +
                              '2')(inception_5a_3x3)
    inception_5a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5a_3x3_bn' +
                                          '2')(inception_5a_3x3)
    inception_5a_3x3 = Activation('relu')(inception_5a_3x3)

    inception_5a_pool = Lambda(lambda x: x**2, name='power2_5a')(inception_4e)
    inception_5a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: x * 9,
                               name='mult9_5a')(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_5a')(inception_5a_pool)

    inception_5a_pool = Conv2D(96, (1, 1),
                               strides=(1, 1),
                               name='inception_5a_pool_conv' +
                               '')(inception_5a_pool)
    inception_5a_pool = BatchNormalization(axis=3,
                                           epsilon=0.00001,
                                           name='inception_5a_pool_bn' +
                                           '')(inception_5a_pool)
    inception_5a_pool = Activation('relu')(inception_5a_pool)
    inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)

    inception_5a_1x1 = Conv2D(256, (1, 1),
                              strides=(1, 1),
                              name='inception_5a_1x1_conv' + '')(inception_4e)
    inception_5a_1x1 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5a_1x1_bn' +
                                          '')(inception_5a_1x1)
    inception_5a_1x1 = Activation('relu')(inception_5a_1x1)

    inception_5a = concatenate(
        [inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)

    # Inception5b
    inception_5b_3x3 = Conv2D(96, (1, 1),
                              strides=(1, 1),
                              name='inception_5b_3x3_conv' + '1')(inception_5a)
    inception_5b_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5b_3x3_bn' +
                                          '1')(inception_5b_3x3)
    inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
    inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
    inception_5b_3x3 = Conv2D(384, (3, 3),
                              strides=(1, 1),
                              name='inception_5b_3x3_conv' +
                              '2')(inception_5b_3x3)
    inception_5b_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5b_3x3_bn' +
                                          '2')(inception_5b_3x3)
    inception_5b_3x3 = Activation('relu')(inception_5b_3x3)

    inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)

    inception_5b_pool = Conv2D(96, (1, 1),
                               strides=(1, 1),
                               name='inception_5b_pool_conv' +
                               '')(inception_5b_pool)
    inception_5b_pool = BatchNormalization(axis=3,
                                           epsilon=0.00001,
                                           name='inception_5b_pool_bn' +
                                           '')(inception_5b_pool)
    inception_5b_pool = Activation('relu')(inception_5b_pool)

    inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)

    inception_5b_1x1 = Conv2D(256, (1, 1),
                              strides=(1, 1),
                              name='inception_5b_1x1_conv' + '')(inception_5a)
    inception_5b_1x1 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5b_1x1_bn' +
                                          '')(inception_5b_1x1)
    inception_5b_1x1 = Activation('relu')(inception_5b_1x1)

    inception_5b = concatenate(
        [inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)

    # Head: average pool, flatten, 128-d projection, L2 normalization so
    # embeddings can be compared with plain Euclidean/cosine distance.
    av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
    reshape_layer = Flatten()(av_pool)
    dense_layer = Dense(128, name='dense_layer')(reshape_layer)
    norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1),
                        name='norm_layer')(dense_layer)

    # Final Model
    model = Model(inputs=[myInput], outputs=norm_layer)

    # -----------------------------------
    # Weight loading: download openface_weights.h5 on first use, then load.
    # NOTE(review): 'home' is a hard-coded Windows path inherited from the
    # original author's machine; consider str(Path.home()) instead.
    # home = str(Path.home())
    home = 'D:/newlandface'
    weights_path = os.path.join(home, 'weights', 'openface_weights.h5')

    if not os.path.isfile(weights_path):
        print("openface_weights.h5 will be downloaded...")
        # Ensure the target directory exists before gdown tries to write.
        os.makedirs(os.path.dirname(weights_path), exist_ok=True)
        gdown.download(url, weights_path, quiet=False)

    model.load_weights(weights_path)

    return model
Beispiel #25
0
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1e-4,
                       subsample_initial_block=False,
                       activation='sigmoid'):
    """Build a DenseNet graph on top of ``img_input`` and return its output.

    Parameters:
        nb_classes: size of the final Dense layer (only used when
            ``include_top`` is True).
        img_input: Keras tensor the network is built on.
        include_top: append the classification Dense layer when True.
        depth: total depth; used to derive layers per block when
            ``nb_layers_per_block == -1`` (must satisfy depth = 3N + 4).
        nb_dense_block: number of dense blocks (transitions go between them).
        growth_rate: feature maps added by each conv layer in a dense block.
        nb_filter: initial filter count; any value <= 0 means 2 * growth_rate.
        nb_layers_per_block: int for all blocks, or list/tuple of length
            ``nb_dense_block`` giving each block's layer count.
        bottleneck: use 1x1 bottleneck convs inside dense blocks.
        reduction: compression at transitions, in (0, 1]; 0 disables it.
        dropout_rate: optional dropout inside dense blocks.
        weight_decay: L2 regularization factor for conv kernels.
        subsample_initial_block: ImageNet-style stem (7x7/2 conv + max-pool)
            instead of a plain 3x3 conv.
        activation: activation of the optional top Dense layer.

    Returns:
        The output Keras tensor (pooled features, or class scores when
        ``include_top`` is True).
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if reduction != 0.0:
        assert 0.0 < reduction <= 1.0, 'reduction value must lie between 0.0 and 1.0'

    # Determine the number of conv layers in each dense block.
    if isinstance(nb_layers_per_block, (list, tuple)):
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            # Derive a uniform per-block count from the total depth.
            assert (
                depth - 4
            ) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)

            # Bottleneck blocks use two convs per layer, so halve the count.
            if bottleneck:
                count = count // 2

            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    # Compute initial nb_filter if <= 0, else accept the user's value.
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate

    # Compression factor applied at each transition block.
    compression = 1.0 - reduction

    # Initial convolution: larger strided stem for ImageNet-style inputs.
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)

    x = Conv2D(nb_filter,
               initial_kernel,
               kernel_initializer='he_normal',
               padding='same',
               strides=initial_strides,
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Dense blocks, each followed by a compressing transition block.
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense block does not have a transition block.
    x, nb_filter = __dense_block(x,
                                 final_nb_layer,
                                 nb_filter,
                                 growth_rate,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
def end_to_end_nvidia(dropout=None):
    """This model attempts to mimic the model by NVIDIA in their paper End to
    End Learning for Self-Driving Cars:

    https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf

    Parameters:
        dropout - list of 0-3 dropout values for the 3 fully connected
            layers; missing values are filled in (empty/None -> all 0.0,
            one value -> repeated, two values -> second value repeated).

    Returns:
        A model based on the End to End NVIDIA model"""

    # Normalize the dropout list to exactly 3 values without mutating a
    # caller-supplied list (the old `dropout=[]` default was the classic
    # mutable-default-argument pitfall).
    if not dropout:
        dropout = [0.0, 0.0, 0.0]
    elif len(dropout) == 1:
        dropout = list(dropout) * 3
    elif len(dropout) == 2:
        dropout = list(dropout) + [dropout[1]]

    # this hack gets the current function name and sets it to the name of the model
    model = Sequential(name=traceback.extract_stack(None, 2)[-1][2])

    # crop top 56 rows and bottom 24 rows from the images
    model.add(Cropping2D(cropping=((56, 24), (0, 0)), input_shape=(160, 320, 3), name='pp_crop'))

    # mean center the pixels
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, name='pp_center'))

    # layer 1: convolution. Input 40x160x3. Output 36x156x24
    model.add(Convolution2D(24, 5, 5, border_mode='valid', name='conv1'))
    model.add(MaxPooling2D((2, 2), border_mode='valid', name='pool1'))
    model.add(Activation('relu', name='act1'))

    # layer 2: convolution + max pooling. Input 36x156x24. Output 16x76x36
    model.add(Convolution2D(36, 5, 5, border_mode='valid', name='conv2'))
    model.add(MaxPooling2D((2, 2), border_mode='valid', name='pool2'))
    model.add(Activation('relu', name='act2'))

    # layer 3: convolution + max pooling. Input 16x76x36. Output 6x36x48
    model.add(Convolution2D(48, 5, 5, border_mode='valid', name='conv3'))
    model.add(MaxPooling2D((2, 2), border_mode='valid', name='pool3'))
    model.add(Activation('relu', name='act3'))

    # layer 4: convolution. Input 6x36x48. Output 4x34x64
    model.add(Convolution2D(64, 3, 3, border_mode='valid', name='conv4'))
    model.add(Activation('relu', name='act4'))

    # layer 5: convolution. Input 4x34x64. Output 1x16x64
    model.add(Convolution2D(64, 3, 3, border_mode='valid', name='conv5'))
    model.add(MaxPooling2D((2, 2), border_mode='valid', name='pool5'))
    model.add(Activation('relu', name='act5'))

    # flatten: Input 1x16x64. Output 1024
    model.add(Flatten(name='flat'))

    # layer 6: fully connected + dropout. Input 1024. Output 100
    model.add(Dense(100, name='fc6'))
    model.add(Dropout(dropout[0], name='drop6'))
    model.add(Activation('relu', name='act6'))

    # layer 7: fully connected + dropout. Input 100. Output 50
    model.add(Dense(50, name='fc7'))
    model.add(Dropout(dropout[1], name='drop7'))
    model.add(Activation('relu', name='act7'))

    # layer 8: fully connected + dropout. Input 50. Output 10
    model.add(Dense(10, name='fc8'))
    model.add(Dropout(dropout[2], name='drop8'))
    model.add(Activation('relu', name='act8'))

    # layer 9: fully connected. Input 10. Output 1.
    model.add(Dense(1, name='out'))

    return model