def keras_model():
    """Build the NVIDIA end-to-end steering CNN for 64x64 RGB input.

    Architecture: pixel normalization, top-crop, three stride-2 5x5
    convolutions, two 3x3 convolutions (with dropout between them), then
    an ELU-activated fully connected funnel down to a single steering
    output.  Uses the Keras 1.x layer API (``subsample`` == strides,
    ``border_mode`` == padding).

    TODO(review): the original signature carried a commented-out parameter
    list (X_train, y_train, model_save_name, epochs, learning_rate) that
    was never implemented; training is expected to happen at the caller.

    Returns:
        keras.models.Sequential: the assembled, uncompiled model.
    """
    # Function-local imports keep heavyweight Keras loading out of
    # module import time (matches the original's style).
    from keras.models import Sequential
    from keras.layers import Flatten, Dense, Lambda, Dropout, Cropping2D
    from keras.layers.convolutional import Convolution2D
    from keras.layers.advanced_activations import ELU

    model = Sequential()

    # Nvidia model architecture.
    # Normalize pixels to [-0.5, 0.5].
    model.add(Lambda(lambda x: x / 255 - 0.5, input_shape=(64, 64, 3)))
    # Drop the top 10 rows (horizon/sky carries no steering signal).
    model.add(Cropping2D(cropping=((10, 0), (0, 0))))
    # Three stride-2 5x5 convolutions with increasing depth.
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      activation='relu',
                      subsample=(2, 2),
                      border_mode='same'))
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      activation='relu',
                      subsample=(2, 2),
                      border_mode='same'))
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      activation='relu',
                      subsample=(2, 2),
                      border_mode='same'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Flatten())
    # ELU-activated dense funnel: 1164 -> 100 -> 50 -> 10 -> 1.
    for units in (1164, 100, 50, 10):
        model.add(Dense(units))
        model.add(ELU())
    model.add(Dense(1))
    return model
Ejemplo n.º 2
0
    augmented_images.append(image)
    augmented_measurements.append(measurement)
    augmented_images.append(cv2.flip(image,1))
    augmented_measurements.append(measurement*-1.0)
    
# cv2.imwrite('1.jpg', cv2.cvtColor(augmented_images[0], cv2.COLOR_RGB2BGR))

X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPool2D, Cropping2D, Dropout

# NVIDIA-style pipeline: normalize, crop, three stride-2 5x5 convs,
# two 3x3 convs, then a dropout-regularized dense funnel down to a
# single steering prediction.
model = Sequential()
model.add(Lambda(lambda img: img / 255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
for depth in (24, 36, 48):
    model.add(Conv2D(depth, (5, 5), strides=(2, 2), activation='relu'))
for depth in (64, 64):
    model.add(Conv2D(depth, (3, 3), activation='relu'))
model.add(Flatten())
for units in (100, 50, 10):
    model.add(Dense(units))
    model.add(Dropout(0.5))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
            X_train = np.array(images)
            y_train = np.array(angles)
            yield shuffle(X_train, y_train)


batch_size = 32
# Training batches come from a Python generator; validation uses a
# double-sized batch since no gradient computation happens there.
train_generator = generator(train_samples,
                            train_steerings,
                            batch_size=batch_size)
validation_generator = generator(validation_samples,
                                 valid_steerings,
                                 batch_size=batch_size * 2)

# Functional-API graph: raw simulator frames are 160x320 RGB.
inputs = Input(shape=(160, 320, 3))

# Trim 60 rows from the top (sky) and 24 from the bottom (car hood).
x = Cropping2D(cropping=((60, 24), (0, 0)))(inputs)
# x = Lambda(lambda img: ktf.image.resize_images(img, (38, 160)))(x)
x = BatchNormalization()(x)
x = MaxPooling2D(pool_size=(2, 2))(x)

# Two LeNet-style stages: valid-padded 5x5 conv, pool, batch norm.
x = Conv2D(6, (5, 5), padding='valid', activation='relu')(x)
x = MaxPooling2D()(x)
x = BatchNormalization()(x)

x = Conv2D(24, (5, 5), padding='valid', activation='relu')(x)
x = MaxPooling2D()(x)
x = BatchNormalization()(x)

x = Flatten()(x)

# First fully connected layer (the rest of the graph continues below).
x = Dense(units=200)(x)
Ejemplo n.º 4
0
print("The number of samples in the sample set: " + str(len(samples)))

# split the sample data into training (80%) and validation (20%) sets
train_samples, validation_samples = split_train_validation_samples(samples, .2)

# setup the generator functions
train_generator = generator(train_samples, batch_size=BATCH_SIZE)
validation_generator = generator(validation_samples, batch_size=BATCH_SIZE)

# define the input shape (simulator frames: 160x320 RGB)
input_shape = (160, 320, 3)

# define the model: NVIDIA architecture using the Keras 1.x API
# (Convolution2D with subsample == strides), dropout after every
# learned layer to fight overfitting
model = Sequential()
# crop 60 rows of sky and 25 rows of hood before normalizing
model.add(Cropping2D(cropping=((60,25), (0,0)), input_shape=input_shape, name='cropping_layer'))
# scale pixels to [-0.5, 0.5]
model.add(Lambda(lambda x: x / 255.0 - 0.5, name='normalization_layer'))
model.add(Convolution2D(24,5,5, subsample=(2,2), activation="relu", name='conv_layer1'))
model.add(Dropout(0.2))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation="relu", name='conv_layer2'))
model.add(Dropout(0.2))
model.add(Convolution2D(48,5,5, subsample=(2,2), activation="relu", name='conv_layer3'))
model.add(Dropout(0.2))
model.add(Convolution2D(64,3,3, activation="relu", name='conv_layer4'))
model.add(Dropout(0.2))
model.add(Convolution2D(64,3,3, activation="relu", name='conv_layer5'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(100, name='fullyconnected_layer1'))
model.add(Dropout(0.2))
# (model definition continues beyond this excerpt)
model.add(Dense(50, name='fullyconnected_layer2'))
Ejemplo n.º 5
0
    def architecture(self):
        """Assemble a VGG-16 + FCN-8s style network topped with a CRF-RNN
        layer and a small dense classification head.

        Returns:
            Model: Keras functional model named 'CRFVGG' mapping
            ``img_input`` to a single sigmoid prediction.
        """
        # Pull CRF hyperparameters out of the project config.
        config = get_config()
        config = config["train"]["optimizers"]

        # NOTE(review): 'weight' presumably means 'width'; 'channels' is unused.
        channels, height, weight = 3, 500, 500

        # Input
        # NOTE(review): this local input_shape is never used — the Input
        # layer below reads self.input_shape instead; confirm the two agree.
        input_shape = (height, weight, 3)
        img_input = Input(shape=self.input_shape)
        #img_input = Cropping2D((3,3))(img_input)

        # Add plenty of zero padding
        x = ZeroPadding2D(padding=(218, 218))(img_input)

        # VGG-16 convolution block 1
        x = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv1_1')(x)
        x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

        # VGG-16 convolution block 2
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)

        # VGG-16 convolution block 3 (output kept for the FCN skip connection)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
        pool3 = x

        # VGG-16 convolution block 4 (output kept for the FCN skip connection)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
        pool4 = x

        # VGG-16 convolution block 5
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)

        # Fully-connected layers converted to convolution layers
        x = Conv2D(128, (7, 7), activation='relu', padding='valid', name='fc6')(x)
        x = Dropout(0.5)(x)
        x = Conv2D(128, (1, 1), activation='relu', padding='valid', name='fc7')(x)
        x = Dropout(0.5)(x)
        x = Conv2D(21, (1, 1), padding='valid', name='score-fr')(x)

        # Deconvolution: 2x upsampling of the coarse score map
        score2 = Conv2DTranspose(1, (4, 4), strides=2, name='score2')(x)

        # Skip connections from pool4 (cropped to match score2's spatial size)
        score_pool4 = Conv2D(1, (1, 1), name='score-pool4')(pool4)
        score_pool4c = Cropping2D((5, 5))(score_pool4)
        score_fused = Add()([score2, score_pool4c])
        score4 = Conv2DTranspose(1, (4, 4), strides=2, name='score4', use_bias=False)(score_fused)

        # Skip connections from pool3 (crop, then pad 1px to re-align with score4)
        score_pool3 = Conv2D(1, (1, 1), name='score-pool3')(pool3)
        score_pool3c = Cropping2D((9, 9))(score_pool3)
        score_pool3c = ZeroPadding2D(padding=((1,0), (1,0)))(score_pool3c)


        # Fuse things together
        score_final = Add()([score4, score_pool3c])

        # Final up-sampling and cropping
        # NOTE(review): the two successive crops (56 then 4 per side) are
        # tuned to the fixed 500x500 input / padding arithmetic above;
        # changing input size requires re-deriving them.
        upsample = Conv2DTranspose(1, (4, 4), strides=4, name='upsample', use_bias=False)(score_final)
        upscore = Cropping2D(((56, 56), (56, 56)))(upsample)
        upscore = Cropping2D(((4, 4), (4, 4)))(upscore)

        # CRF-as-RNN refinement over the 64x64 score map, conditioned on the image.
        output = CrfRnnLayer(image_dims=(64, 64),
                            num_classes=1,
                            theta_alpha= config["crf_theta_alpha"], #3
                            theta_beta= config["crf_theta_beta"], #3
                            theta_gamma= config["crf_theta_gamma"], #3
                            num_iterations= config["crf_num_iterations"],
                            name='crfrnn')([upscore, img_input])


        # Residual-style fuse of raw and CRF-refined scores, then classify.
        classi = Add()([upscore, output])
        k = Flatten()(classi)

        k = Dense(128, activation='relu')(k)
        k = Dropout(.5)(k)
        k = Dense(256, activation='relu')(k)
        predictions = Dense(1, activation='sigmoid')(k)

        # Build the model
        model = Model(img_input, predictions, name='CRFVGG')

        return model
Ejemplo n.º 6
0
def get_do_unet(img_shape=None, weights_file=None, custom_load_func=False):
    """Build a dropout-regularized U-Net segmenter (Keras 1.x API).

    Encoder: five conv blocks (two convs + dropout each) with 2x2 pooling
    between them.  Decoder: four upsample-crop-concatenate-conv blocks
    mirroring the encoder, ending in a 1x1 sigmoid convolution.

    Args:
        img_shape: input image shape tuple, e.g. (rows, cols, channels).
        weights_file: optional path to weights; loaded directly, or layer
            by layer when ``custom_load_func`` is True.
        custom_load_func: copy conv-layer weights from a model built with
            the standard shape instead of calling ``load_weights``.

    Returns:
        Model: compiled with Adam(1e-5) and the dice-coefficient loss.
    """
    dim_ordering = 'tf'
    inputs = Input(shape=img_shape)
    concat_axis = -1
    ### the size of the convolutional kernels is defined per block below
    # ---- Encoder block 1: two 5x5 convs (64 filters) + dropout ----
    conv1 = Convolution2D(64,
                          5,
                          5,
                          border_mode='same',
                          dim_ordering=dim_ordering,
                          name='conv1_1')(inputs)
    ac = Activation('relu')(conv1)
    do = Dropout(0.2)(ac)
    conv1 = Convolution2D(64,
                          5,
                          5,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do)
    ac = Activation('relu')(conv1)
    do = Dropout(0.2)(ac)
    pool1 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(do)
    # ---- Encoder block 2: 3x3 convs, 96 filters ----
    conv2 = Convolution2D(96,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool1)
    ac2 = Activation('relu')(conv2)
    do2 = Dropout(0.2)(ac2)
    conv2 = Convolution2D(96,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do2)
    ac2 = Activation('relu')(conv2)
    do2 = Dropout(0.2)(ac2)
    pool2 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(do2)

    # ---- Encoder block 3: 3x3 convs, 128 filters ----
    conv3 = Convolution2D(128,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool2)
    ac3 = Activation('relu')(conv3)
    do3 = Dropout(0.2)(ac3)
    conv3 = Convolution2D(128,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do3)
    ac3 = Activation('relu')(conv3)
    do3 = Dropout(0.2)(ac3)
    pool3 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(do3)

    # ---- Encoder block 4: 256 filters ----
    conv4 = Convolution2D(256,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool3)
    ac4 = Activation('relu')(conv4)
    do4 = Dropout(0.2)(ac4)
    # NOTE(review): 4x4 kernel breaks the 3x3 pattern of the other
    # blocks — possibly intentional, confirm with the author.
    conv4 = Convolution2D(256,
                          4,
                          4,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do4)
    ac4 = Activation('relu')(conv4)
    do4 = Dropout(0.2)(ac4)
    pool4 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(do4)

    # ---- Bottleneck: 512 filters, no pooling afterwards ----
    conv5 = Convolution2D(512,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool4)
    ac5 = Activation('relu')(conv5)
    do5 = Dropout(0.2)(ac5)
    conv5 = Convolution2D(512,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do5)
    ac5 = Activation('relu')(conv5)
    do5 = Dropout(0.2)(ac5)

    # ---- Decoder: upsample, crop the encoder feature map to match,
    # concatenate (skip connection), then two convs + dropout ----
    up_conv5 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(do5)
    ch, cw = get_crop_shape(conv4, up_conv5)
    crop_conv4 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv4)
    up6 = concatenate(
        [up_conv5, crop_conv4],
        axis=concat_axis)  # Amalie changed it from merge to concatenate
    conv6 = Convolution2D(256,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(up6)
    ac6 = Activation('relu')(conv6)
    do6 = Dropout(0.2)(ac6)
    conv6 = Convolution2D(256,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do6)
    ac6 = Activation('relu')(conv6)
    do6 = Dropout(0.2)(ac6)

    up_conv6 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(do6)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv3)
    up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = Convolution2D(128,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(up7)
    ac7 = Activation('relu')(conv7)
    do7 = Dropout(0.2)(ac7)
    conv7 = Convolution2D(128,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do7)
    ac7 = Activation('relu')(conv7)
    do7 = Dropout(0.2)(ac7)

    up_conv7 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(do7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv2)
    up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = Convolution2D(96,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(up8)
    ac8 = Activation('relu')(conv8)
    do8 = Dropout(0.2)(ac8)
    conv8 = Convolution2D(96,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do8)
    ac8 = Activation('relu')(conv8)
    do8 = Dropout(0.2)(ac8)

    up_conv8 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(do8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv1)
    up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = Convolution2D(64,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(up9)
    ac9 = Activation('relu')(conv9)
    do9 = Dropout(0.2)(ac9)
    conv9 = Convolution2D(64,
                          3,
                          3,
                          border_mode='same',
                          dim_ordering=dim_ordering)(do9)
    ac9 = Activation('relu')(conv9)
    do9 = Dropout(0.2)(ac9)

    # Pad back to the input's spatial size, then 1x1 sigmoid conv -> mask.
    ch, cw = get_crop_shape(inputs, do9)
    conv9 = ZeroPadding2D(padding=(ch, cw), dim_ordering=dim_ordering)(conv9)
    conv10 = Convolution2D(1,
                           1,
                           1,
                           activation='sigmoid',
                           dim_ordering=dim_ordering)(conv9)
    model = Model(input=inputs, output=conv10)

    # Optional weight initialization from disk.
    if not weights_file == None:
        j = 0
        i = 0
        oldlayers = []
        if custom_load_func:
            #pass # TODO
            # Build a reference model at the standard shape and copy conv
            # weights across by position.
            # NOTE(review): rows_standard/cols_standard/get_unet come from
            # elsewhere in the module; the inner loop re-appends every old
            # conv layer on each outer iteration, so `oldlayers` holds
            # repeated copies — indexing by `j` still lines up because the
            # order is preserved, but this is wasteful. `i` is never read.
            img_shape_old = (rows_standard, cols_standard, 3)
            model_old = get_unet(img_shape_old,
                                 weights_file,
                                 custom_load_func=False)
            for layer in model.layers:

                if layer.name.startswith('conv'):

                    for layer_old in model_old.layers:
                        if layer_old.name.startswith('conv'):
                            oldlayers.append(layer_old)
                        i += 1
                    old_weight = oldlayers[j].get_weights()
                    layer.set_weights(old_weight)

                    j += 1
        else:
            model.load_weights(weights_file)

    model.compile(optimizer=Adam(lr=(1e-5)),
                  loss=dice_coef_loss,
                  metrics=[dice_coef_for_training])

    return model
Ejemplo n.º 7
0
def get_crfrnn_model_def():
    """ Returns the Keras CRF-RNN model definition (FCN-8s on VGG-16).

    Currently, only 500 x 500 images are supported. However, one can get this to
    work with different image sizes by adjusting the parameters of the Cropping2D layers
    below.
    """

    # NOTE(review): 'weight' presumably means 'width'.
    channels, height, weight = 1, 500, 500

    # Input
    input_shape = (height, weight, channels)
    img_input = Input(shape=input_shape)

    # Add plenty of zero padding (FCN convention: lets fc6's 7x7 valid
    # convolution see context at the image borders)
    x = ZeroPadding2D(padding=(100, 100))(img_input)

    # VGG-16 convolution block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='valid',
               name='conv1_1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    # VGG-16 convolution block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='conv2_1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)

    # VGG-16 convolution block 3 (kept for the 8x-stride skip connection)
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='conv3_1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='conv3_2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
    pool3 = x

    # VGG-16 convolution block 4 (kept for the 16x-stride skip connection)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv4_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv4_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
    pool4 = x

    # VGG-16 convolution block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)

    # Fully-connected layers converted to convolution layers
    x = Conv2D(4096, (7, 7), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.5)(x)
    # Per-class score map (2 classes)
    x = Conv2D(2, (1, 1), padding='valid', name='score-fr')(x)

    # Deconvolution: 2x upsampling of the coarse scores
    score2 = Conv2DTranspose(2, (4, 4), strides=2, name='score2')(x)

    # Skip connections from pool4 (cropped to match score2)
    score_pool4 = Conv2D(2, (1, 1), name='score-pool4')(pool4)
    score_pool4c = Cropping2D((5, 5))(score_pool4)
    score_fused = Add()([score2, score_pool4c])
    score4 = Conv2DTranspose(2, (4, 4),
                             strides=2,
                             name='score4',
                             use_bias=False)(score_fused)

    # Skip connections from pool3 (cropped to match score4)
    score_pool3 = Conv2D(2, (1, 1), name='score-pool3')(pool3)
    score_pool3c = Cropping2D((9, 9))(score_pool3)

    # Fuse things together
    score_final = Add()([score4, score_pool3c])

    # Final up-sampling (8x) and cropping back to the 500x500 input size.
    # NOTE(review): the (31, 37) crop offsets are derived from the fixed
    # 500x500 input and the padding arithmetic above.
    upsample = Conv2DTranspose(2, (16, 16),
                               strides=8,
                               name='upsample',
                               use_bias=False)(score_final)
    upscore = Cropping2D(((31, 37), (31, 37)))(upsample)

    # CRF-as-RNN refinement conditioned on the raw input image.
    output = CrfRnnLayer(image_dims=(height, weight),
                         num_classes=2,
                         theta_alpha=160.,
                         theta_beta=3.,
                         theta_gamma=3.,
                         num_iterations=10,
                         name='crfrnn')([upscore, img_input])

    # Build the model
    model = Model(img_input, output, name='crfrnn_net')
    #model = Model(img_input, upscore, name='crfrnn_net')

    return model
Ejemplo n.º 8
0
def NVIDIA_CNN(learning_rate):
    """Assemble and compile the NVIDIA steering-prediction CNN.

    Crops and normalizes 160x320 RGB frames, runs five named convolution
    stages, then a plain dense funnel to one steering output.  Compiled
    with MSE loss and the Nadam optimizer.

    Args:
        learning_rate: learning rate passed to Nadam.

    Returns:
        Sequential: the compiled model (summary printed as a side effect).
    """
    # (filters, kernel, strides, layer name) for the five conv stages.
    conv_specs = (
        (24, (5, 5), (2, 2), 'CONV1'),
        (36, (5, 5), (2, 2), 'CONV2'),
        (48, (5, 5), (2, 2), 'CONV3'),
        (64, (3, 3), (1, 1), 'CONV4'),
        (64, (3, 3), (1, 1), 'CONV5'),
    )

    model = Sequential()
    # Drop 70 rows of sky and 25 rows of hood before anything else.
    model.add(
        Cropping2D(cropping=((70, 25), (0, 0)), input_shape=(160, 320, 3)))
    # Scale pixels to [-1, 1].
    model.add(Lambda(lambda px: px / 127.5 - 1.))
    # Convolutional stages.
    for n_filters, kernel, strides, layer_name in conv_specs:
        model.add(
            Conv2D(n_filters,
                   kernel_size=kernel,
                   strides=strides,
                   activation='relu',
                   name=layer_name))
    # Flatten, then the fully connected funnel and the steering output.
    model.add(Flatten())
    for units in (100, 50, 10):
        model.add(Dense(units))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer=Nadam(lr=learning_rate))

    model.summary()

    return model
Ejemplo n.º 9
0
def main():
    """Load and augment driving-log images, then train a steering model.

    Reads CSVs from ./training_data/<track>/driving_log.csv, augments the
    center/left/right camera images, converts them to YUV, builds one of
    three architectures (basic / LeNet / NVIDIA PilotNet), trains with
    MSE + Adam, and saves the result as 'model2.h5'.
    """
    # load the images for processing

    print("LOADING IMAGES...")

    # base folder where training data is located
    base_dir = "./training_data/"

    # sub folders to use for training
    tracks_to_process = ["Track1_4", "Track1_5R"]  #, "Track2"]

    images = []
    measurements = []

    for track_number in tracks_to_process:

        csv_filename = base_dir + track_number + '/driving_log.csv'

        lines = read_lines_from_csv(csv_filename)

        # drop near-duplicate consecutive samples (straight-line driving bias)
        print(len(lines))
        lines = trim_repeated_lines(lines, 3)
        print(len(lines))

        # flag so we only plot the very first processed image
        drawn = 0

        for line in lines:
            for idx in range(
                    0, 3
            ):  # loop through the center, left and right images on each line of the file
                # paths in the log are Windows-style; keep only the filename
                source_path = line[idx]
                filename = source_path.split('\\')[-1]
                current_path = base_dir + track_number + '/IMG/' + filename

                image = cv2.imread(current_path)

                if not image is None:

                    # drive.py feeds the model RGB frames, but cv2.imread
                    # returns BGR — so convert BGR -> RGB here to match
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

                    if drawn == 0:
                        plt.imshow(image)
                        plt.show()

                    # add random jitter in y axis to some images (10%)
                    if np.random.random() > .9:
                        image = rand_jitter(image)

                    # Add random brightness change to some images (40%)
                    if np.random.random() > .6:
                        image = Augment_Brightness(image)

                    # Add random shadow effect to some images (20%)
                    if np.random.random() > .8:
                        image = Add_Shadow(image)

                    # plot first image so we can see all going ok.
                    if drawn == 0:
                        imgplot = plt.imshow(image)
                        plt.show()

                        drawn = 1

                    # now we are done editing the image convert to YUV as this is what we will train the model with
                    yuv = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)

                    # add the image to the list of images to use
                    images.append(yuv)

                    # read measurement for this picture (steering angle, column 3)
                    measurement = float(line[3])

                    steering_adjustment = 0.2  # constant we add / subtract for the left and right camera images
                    if idx == 1:  # left image
                        measurement += steering_adjustment
                    elif idx == 2:  # right image
                        measurement -= steering_adjustment

                    # add the measurement to the list for processing
                    measurements.append(measurement)

        # run through all images and generate a new image flipped in y,
        # add flipped steering commands for those too! We are trying to remove bias
        # for the mostly left turn track 1
        add_flipped_images(images, measurements)

    images, measurements = shuffle(images, measurements)

    X_train = np.array(images)
    y_train = np.array(measurements)

    print(X_train.shape)

    print("IMAGES ARE LOADED... STARTING TRAINING...")

    # Training parameters
    batch_size = 256
    epochs = 15
    learning_rate = 0.0005

    early_stop = 0  # not using early stopping (set > 0 to enable)

    # Build the model
    model = Sequential()

    # add cropping layer (60 rows of sky, 20 rows of hood)
    model.add(
        Cropping2D(cropping=((60, 20), (0, 0)), input_shape=(160, 320, 3)))

    # normalize the data (scale to [0, 1])
    model.add(Lambda(lambda x: x / 255.0))

    # use PilotNet
    model_to_use = 3

    if model_to_use == 1:
        print("using basic model")
        model = basic_model(model)

    elif model_to_use == 2:
        print("using LeNet")
        model = LeNet_model(model)

    elif model_to_use == 3:
        print("using NVIDIA PilotNet")
        model = NVIDIA_PilotNet_model(model)

    # add the output layer on end (the steering angle)
    model.add(Dense(1))

    # setup learning rate with Adam Optimizer
    adam = Adam(lr=learning_rate)

    # configure early stopping so we are not waiting longer than needed and also reduces overfitting

    early_stopping = EarlyStopping(monitor='val_loss', patience=2)

    model.compile(loss='mse', optimizer=adam)

    # 80/20 train/validation split either way; callbacks only when enabled
    if early_stop > 0:
        model.fit(X_train,
                  y_train,
                  validation_split=0.2,
                  shuffle=True,
                  batch_size=batch_size,
                  epochs=epochs,
                  callbacks=[early_stopping])
    else:
        model.fit(X_train,
                  y_train,
                  validation_split=0.2,
                  shuffle=True,
                  batch_size=batch_size,
                  epochs=epochs)

    model.save('model2.h5')  # save the model out

    print("Training complete!")
Ejemplo n.º 10
0
def get_model():
    """Build and compile a stride-2 convolutional steering regressor.

    Uses the Keras 1.x API (Convolution2D with subsample/border_mode).
    Input is a 160x320 RGB frame; output is one steering value.

    Returns:
        Sequential: model compiled with MSE loss and the Adam optimizer.
    """
    model = Sequential()
    # Cropping layer: remove 50 rows of sky and 20 rows of hood.
    model.add(Cropping2D(cropping=((50, 20), (0, 0)),
                         input_shape=(160, 320, 3)))
    # Normalizing layer: center pixel values around zero.
    model.add(Lambda(lambda px: px / 255 - 0.5))

    # Every conv stage is stride-2 and valid-padded: (filters, kernel size).
    for depth, ksize in ((24, 5), (36, 5), (48, 5), (64, 3), (64, 3)):
        model.add(
            Convolution2D(depth,
                          ksize,
                          ksize,
                          border_mode='valid',
                          subsample=(2, 2),
                          activation='relu'))

    model.add(Flatten())

    # Dense funnel; dropout after each of the first three layers.
    for units in (64, 32, 16):
        model.add(Dense(units))
        model.add(Dropout(0.5))
    model.add(Dense(8))

    model.add(Dense(1))

    model.compile(loss='mse', optimizer='adam')

    return model
Ejemplo n.º 11
0
def main():
    """Load the driving log, build the NVIDIA steering model and train it.

    Reads ./data/driving_log.csv, derives three (image_path, angle) samples
    per row (center/left/right cameras with a +/-0.3 correction), trains via
    `fit_generator` (legacy Keras 1 argument names) and saves model.h5.
    """
    samples = []
    with open('./data/driving_log.csv') as csvfile:
        for row in csv.reader(csvfile):
            angle = float(row[3])
            # Steering correction applied to the side cameras (tunable).
            offset = 0.3
            # Columns 0-2 hold center/left/right image paths.
            samples.extend([
                (row[0], angle),           # center camera
                (row[1], angle + offset),  # left camera, steer right
                (row[2], angle - offset),  # right camera, steer left
            ])

    # 80/20 train/validation split.
    train_samples, validation_samples = train_test_split(samples,
                                                         test_size=0.2)

    # Batched generators so the full image set never sits in memory.
    train_generator = generator(train_samples, batch_size=32)
    validation_generator = generator(validation_samples, batch_size=32)

    # Raw camera frame geometry.
    height, width, channels = 160, 320, 3

    model = Sequential()
    # Normalize pixels to [-0.5, 0.5] inside the model.
    model.add(
        Lambda(lambda img: (img / 255.0) - 0.5,
               input_shape=(height, width, channels),
               output_shape=(height, width, channels)))
    # Drop sky (70 rows) and hood (25 rows): 160x320 -> 65x320.
    model.add(
        Cropping2D(cropping=((70, 25), (0, 0)),
                   input_shape=(height, width, channels)))

    # Three strided 5x5 ReLU convolutions (legacy Keras 1 call style).
    for depth in (24, 36, 48):
        model.add(
            Convolution2D(depth,
                          5,
                          5,
                          subsample=(2, 2),
                          border_mode="valid",
                          activation="relu"))
    # Two unstrided 3x3 ReLU convolutions.
    for _ in range(2):
        model.add(
            Convolution2D(64, 3, 3, border_mode="valid", activation="relu"))

    model.add(Flatten())
    # Fully connected head with dropout on the two widest layers.
    for units in (100, 50):
        model.add(Dense(units))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('relu'))
    # Single regression output: the steering angle.
    model.add(Dense(1))

    model.compile(loss='mse', optimizer='adam')

    # Legacy fit_generator keywords: samples_per_epoch / nb_val_samples
    # count samples (not batches) in Keras 1.
    history_object = model.fit_generator(
        train_generator,
        samples_per_epoch=len(train_samples),
        validation_data=validation_generator,
        nb_val_samples=len(validation_samples),
        nb_epoch=5,
        verbose=1)

    # Show which metrics were recorded (loss / val_loss).
    print(history_object.history.keys())

    model.save('model.h5')
Ejemplo n.º 12
0
            yield shuffle(X_train, y_train)


# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)

## 2. Model (data preprocessing incorporated into model)
# NOTE(review): this snippet is truncated after the third conv layer;
# the flatten/dense head and compile call are not visible here.

model = Sequential()

# Crop 70 pixels from the top of the image and 25 from the bottom
model.add(
    Cropping2D(
        cropping=((70, 25), (0, 0)),
        dim_ordering='tf',  # default channels-last ordering (legacy Keras 1 kwarg)
        input_shape=(160, 320, 3)))

# Normalise the data to [-0.5, 0.5]
model.add(Lambda(lambda x: (x / 255.0) - 0.5))

# Conv layer 1 — 16 filters, 8x8 kernel, stride 4 (legacy positional args)
model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
model.add(ELU())

# Conv layer 2 — 32 filters, 5x5 kernel, stride 2
model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(ELU())

# Conv layer 3 — 64 filters, 5x5 kernel, stride 2
model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
Ejemplo n.º 13
0
# Convert the collected frames and steering angles into numpy arrays
# suitable for model.fit (car_images / steering_angles come from above).
X_train = np.array(car_images)
y_train = np.array(steering_angles)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers import Convolution2D
from keras.layers.pooling import MaxPooling2D

# NVIDIA end-to-end steering architecture: normalize, crop, five conv
# layers, then a shrinking fully connected head down to one angle.
model = Sequential()
# Normalize pixels to [-0.5, 0.5].
model.add(Lambda(lambda frame: frame / 255.0 - 0.5,
                 input_shape=(160, 320, 3)))
# Remove sky (70 rows) and hood (25 rows) that carry no road information.
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
# Three strided 5x5 convolutions (legacy positional kernel args).
for depth in (24, 36, 48):
    model.add(Convolution2D(depth, 5, 5, subsample=(2, 2),
                            activation="relu"))
# Two unstrided 3x3 convolutions.
for _ in range(2):
    model.add(Convolution2D(64, 3, 3, activation="relu"))
model.add(Flatten())
# Dense head: 100 -> 50 -> 10 -> 1 (steering angle regression).
for units in (100, 50, 10, 1):
    model.add(Dense(units))

model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=2)

model.save('model.h5')
Ejemplo n.º 14
0
# Hyper-parameter placeholders; -1 / -1.0 / None mean "not yet configured"
# and are expected to be filled in before training.
HP_DICT = {
    'epochs': -1,
    'stop_patience': -1,
    'batchsize': -1,
    'start_lr': -1.0,
    'optimizer': None,
    'static_cam_angle_adjust': -1.0,
    'validation_split': -1.0,
    'freeze_feature_layers': False
}
# Layer stack (NVIDIA-style) to be assembled into a model elsewhere.
ARCH_LAYERS = [
    # Input shape: (160,320)
    # Normalize YUV
    Lambda(lambda x: x / 255 - 0.5),
    # Crop top 50 pixels, bottom 20 pixels
    Cropping2D(cropping=((50, 20), (0, 0))),
    # Input shape: (90, 320)
    Conv2D(24, 5, strides=(2, 2), padding="valid", activation='relu'),  # SAME
    Conv2D(36, 5, strides=(2, 2), padding="valid", activation='relu'),  # SAME
    Conv2D(48, 5, strides=(2, 2), padding="valid", activation='relu'),  # SAME
    Conv2D(64, 3, padding="valid", activation='relu'),
    Conv2D(64, 3, padding="valid", activation='relu'),
    Conv2D(64, 3, padding="valid", activation='relu'),
    Flatten(),
    # NOTE(review): Concatenate() with a single upstream tensor will fail
    # when this stack is wired sequentially — it needs a list of inputs.
    # Possibly left over from a multi-branch variant; confirm before use.
    Concatenate(),
    Dense(200, activation='relu'),
    Dense(64, activation='relu'),
    # tanh bounds the steering output to [-1, 1].
    Dense(1, activation='tanh')
]

# Expected camera frame geometry (rows, cols, channels).
IMAGE_SHAPE = (160, 320, 3)
Ejemplo n.º 15
0
def InceptionResnetV2(X_input):
    """Build an Inception-ResNet-v2 trunk ending in an L2-normalized
    128-d embedding (functional API; relies on the external helpers
    `conv2d_bn` and `inception_resnet_block`).

    The inline shape comments track the spatial size assuming a
    299x299x3 working resolution after the initial crop.
    """
    # Crop 11 px from each border; per the comment this yields 299x299x3
    # before the stem (so X_input is presumably 321x321x3 — confirm).
    # 299 X 299 X 3  ->   # 149 X 149 X 32
    X = Cropping2D(cropping=((11, 11), (11, 11)))(X_input)
    X = conv2d_bn(X, 32, 3, strides=2, padding='valid')
    # 149 X 149 X 32   ->  # 147 x 147 X 32
    X = conv2d_bn(X, 32, 3, padding='valid')
    # 147 x 147 X 32   ->    # 147 X 147 X 64
    X = conv2d_bn(X, 64, 3)
    # 147 X 147 X 64   ->    # 73 X 73 X 64
    X = MaxPooling2D(3, strides=2)(X)
    # 73 X 73 X 64    ->    # 73 X 73 X 80
    X = conv2d_bn(X, 80, 1, padding='valid')
    # 73 X 73 X 80    ->    # 71 X 71 X 192
    X = conv2d_bn(X, 192, 3, padding='valid')
    # 71 X 71 X 192  ->  # 35 X 35 X 192
    X = MaxPooling2D(3, strides=2)(X)

    # Mixed 5b (Inception-A style stem mixer): four parallel branches
    # concatenated on the channel axis.
    # 35 X 35 X 192 -> 35 X 35 X 96
    branch_0 = conv2d_bn(X, 96, 1)

    # 35 X 35 X 192 -> 35 X 35 X 64
    branch_1 = conv2d_bn(X, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)

    # 35 X 35 X 192 -> 35 X 35 X 96
    branch_2 = conv2d_bn(X, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)

    # 35 X 35 X 192 -> 35 X 35 X 64
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(X)
    branch_pool = conv2d_bn(branch_pool, 64, 1)

    branches = [branch_0, branch_1, branch_2, branch_pool]
    X = Concatenate(axis=3, name='mixed_5b')(branches)  # 35 X 35 X 320

    # 10x block35 (residual Inception-A blocks at scale 0.17)
    for block_idx in range(1, 11):
        X = inception_resnet_block(X,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(X, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(X, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(X)
    branches = [branch_0, branch_1, branch_pool]
    X = Concatenate(axis=3, name='mixed_6a')(branches)

    # 20x block17 (residual Inception-B blocks at scale 0.1)
    for block_idx in range(1, 21):
        X = inception_resnet_block(X,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(X, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(X, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(X, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(X)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    X = Concatenate(axis=3, name='mixed_7a')(branches)

    # 9x block8 (residual Inception-C blocks at scale 0.2)
    for block_idx in range(1, 10):
        X = inception_resnet_block(X,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)

    # Final block8 applied without activation and at full scale.
    X = inception_resnet_block(X,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)

    # Final convolution block: 8 x 8 x 1536
    X = conv2d_bn(X, 1536, 1, name='conv_7b')

    # Global average pool (kernel = full spatial extent), then embedding.
    X = AveragePooling2D(K.int_shape(X)[1:3], strides=1, padding='valid')(X)
    X = Flatten()(X)
    # NOTE(review): Keras Dropout takes the *drop* rate, so 0.8 discards
    # 80% of activations; the reference model used keep_prob=0.8 — confirm.
    X = Dropout(0.8)(X)
    X = Dense(128,
              kernel_initializer=TruncatedNormal(stddev=0.1),
              kernel_regularizer=l2(0.0001),
              name='embeddings')(X)
    # Unit-normalize the embedding (common for triplet/contrastive losses).
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)
    return X
Ejemplo n.º 16
0
def get_unet(img_shape=None):
    """Build and compile a U-Net style segmentation model (legacy Keras 1
    API: `Convolution2D` positional kernel args, `border_mode`,
    `dim_ordering`, and the functional `merge`).

    Encoder: 4 down-sampling stages (64, 96, 128, 256 filters) plus a
    416-filter bottleneck. Decoder: 4 up-sampling stages with skip
    connections; the external `get_crop_shape` helper computes the crop /
    pad needed to align encoder and decoder feature maps when the input
    size is not a multiple of 16. Output: single-channel sigmoid map,
    trained with a Dice loss.
    """

    dim_ordering = 'tf'  # channels-last

    inputs = Input(shape=img_shape)
    concat_axis = -1  # channel axis for skip-connection concatenation

    # --- Encoder stage 1: five 3x3 convs at 64 filters ---
    conv1 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering,
                          name='conv1_1')(inputs)
    conv1 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv1)
    conv1 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv1)
    conv1 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv1)
    conv1 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv1)
    # --- Encoder stage 2: 96 filters ---
    conv2 = Convolution2D(96,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool1)
    conv2 = Convolution2D(96,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv2)

    # --- Encoder stage 3: 128 filters ---
    conv3 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool2)
    conv3 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv3)

    # --- Encoder stage 4: 256 filters ---
    conv4 = Convolution2D(256,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool3)
    # NOTE(review): 4x4 kernel breaks the 3x3 pattern used everywhere
    # else — possibly intentional, possibly a typo; confirm upstream.
    conv4 = Convolution2D(256,
                          4,
                          4,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2), dim_ordering=dim_ordering)(conv4)

    # --- Bottleneck: 416 filters ---
    conv5 = Convolution2D(416,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(pool4)
    conv5 = Convolution2D(416,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv5)

    # --- Decoder stage 1: upsample, crop conv4 to match, concat skip ---
    up_conv5 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv5)
    ch, cw = get_crop_shape(conv4, up_conv5)
    crop_conv4 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv4)
    up6 = merge([up_conv5, crop_conv4], mode='concat', concat_axis=concat_axis)
    conv6 = Convolution2D(256,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(up6)
    conv6 = Convolution2D(256,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv6)

    # --- Decoder stage 2: skip from conv3 ---
    up_conv6 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv6)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv3)
    up7 = merge([up_conv6, crop_conv3], mode='concat', concat_axis=concat_axis)
    conv7 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(up7)
    conv7 = Convolution2D(128,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv7)

    # --- Decoder stage 3: skip from conv2 ---
    up_conv7 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv2)
    up8 = merge([up_conv7, crop_conv2], mode='concat', concat_axis=concat_axis)
    conv8 = Convolution2D(96,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(up8)
    conv8 = Convolution2D(96,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv8)

    # --- Decoder stage 4: skip from conv1 ---
    up_conv8 = UpSampling2D(size=(2, 2), dim_ordering=dim_ordering)(conv8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw),
                            dim_ordering=dim_ordering)(conv1)
    up9 = merge([up_conv8, crop_conv1], mode='concat', concat_axis=concat_axis)
    conv9 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(up9)
    conv9 = Convolution2D(64,
                          3,
                          3,
                          activation='relu',
                          border_mode='same',
                          dim_ordering=dim_ordering)(conv9)

    # Zero-pad back to the exact input resolution, then 1x1 sigmoid head.
    ch, cw = get_crop_shape(inputs, conv9)
    conv9 = ZeroPadding2D(padding=(ch, cw), dim_ordering=dim_ordering)(conv9)
    conv10 = Convolution2D(1,
                           1,
                           1,
                           activation='sigmoid',
                           dim_ordering=dim_ordering)(conv9)
    model = Model(input=inputs, output=conv10)
    # Dice loss / metric come from external helpers; lr = 2e-4.
    model.compile(optimizer=Adam(lr=(1e-4) * 2),
                  loss=dice_coef_loss,
                  metrics=[dice_coef_for_training])

    return model
Ejemplo n.º 17
0
def get_test_model_full():
    """Returns a maximally complex test model,
    using all supported layer types with different parameter combination.

    Builds a multi-input / multi-output functional model, compiles it and
    fits one step on random dummy data so weights are exercised end to end.
    """
    input_shapes = [
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (27, 29, 1),
        (17, 1),
        (17, 4),
        (2, 3),
    ]
    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    # 1D layer coverage on the two rank-2 inputs.
    for inp in inputs[6:8]:
        for padding in ['valid', 'same']:
            for s in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv1D(out_channels,
                                   s,
                                   padding=padding,
                                   dilation_rate=d)(inp))
        for padding_size in range(0, 5):
            outputs.append(ZeroPadding1D(padding_size)(inp))
        for crop_left in range(0, 2):
            for crop_right in range(0, 2):
                outputs.append(Cropping1D((crop_left, crop_right))(inp))
        for upsampling_factor in range(1, 5):
            outputs.append(UpSampling1D(upsampling_factor)(inp))
        for padding in ['valid', 'same']:
            for pool_factor in range(1, 6):
                for s in range(1, 4):
                    outputs.append(
                        MaxPooling1D(pool_factor, strides=s,
                                     padding=padding)(inp))
                    outputs.append(
                        AveragePooling1D(pool_factor,
                                         strides=s,
                                         padding=padding)(inp))
        outputs.append(GlobalMaxPooling1D()(inp))
        outputs.append(GlobalAveragePooling1D()(inp))

    # 2D layer coverage on the two largest image inputs.
    for inp in [inputs[0], inputs[5]]:
        for padding in ['valid', 'same']:
            for h in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   padding=padding,
                                   dilation_rate=(d, 1))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            padding=padding,
                                            dilation_rate=(d, 1))(inp))
                    for sy in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   strides=(1, sy),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            strides=(sy, sy),
                                            padding=padding)(inp))
                for sy in range(1, 4):
                    outputs.append(
                        DepthwiseConv2D((h, 1),
                                        strides=(sy, sy),
                                        padding=padding)(inp))
                    outputs.append(
                        MaxPooling2D((h, 1), strides=(1, sy),
                                     padding=padding)(inp))
            for w in range(1, 6):
                for out_channels in [1, 2]:
                    # NOTE(review): `sy` leaks from the h-loop above and is
                    # always 3 here, so only d == 1 is exercised; the h-loop
                    # uses a plain range(1, 4) — confirm intent upstream.
                    # Kept as-is to preserve the generated architecture.
                    for d in range(1, 4) if sy == 1 else [1]:
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   padding=padding,
                                   dilation_rate=(1, d))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            padding=padding,
                                            dilation_rate=(1, d))(inp))
                    for sx in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   strides=(sx, 1),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            strides=(sx, sx),
                                            padding=padding)(inp))
                for sx in range(1, 4):
                    # NOTE(review): strides=(sy, sy) also uses the stale `sy`
                    # (likely meant sx) — kept for behavior preservation.
                    outputs.append(
                        DepthwiseConv2D((1, w),
                                        strides=(sy, sy),
                                        padding=padding)(inp))
                    outputs.append(
                        MaxPooling2D((1, w), strides=(1, sx),
                                     padding=padding)(inp))
    outputs.append(ZeroPadding2D(2)(inputs[0]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[0]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[0]))
    outputs.append(Cropping2D(2)(inputs[0]))
    outputs.append(Cropping2D((2, 3))(inputs[0]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[0]))
    for y in range(1, 3):
        for x in range(1, 3):
            outputs.append(UpSampling2D(size=(y, x))(inputs[0]))
    outputs.append(GlobalAveragePooling2D()(inputs[0]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(AveragePooling2D((2, 2))(inputs[0]))
    outputs.append(MaxPooling2D((2, 2))(inputs[0]))
    outputs.append(UpSampling2D((2, 2))(inputs[0]))
    outputs.append(Dropout(0.5)(inputs[0]))

    # BUG FIX: Concatenate is a layer — it must be instantiated and then
    # *called* on the tensor list (cf. the correct usages just below).
    # The original passed the list as the `axis` argument and appended the
    # uncalled layer object, which breaks Model construction.
    outputs.append(Concatenate()([inputs[0], inputs[0]]))

    # same as axis=-1
    outputs.append(Concatenate()([inputs[1], inputs[2]]))
    outputs.append(Concatenate(axis=3)([inputs[1], inputs[2]]))
    # axis=0 does not make sense, since dimension 0 is the batch dimension
    outputs.append(Concatenate(axis=1)([inputs[1], inputs[2]]))
    outputs.append(Concatenate(axis=2)([inputs[1], inputs[2]]))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))

    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[0]))
    # NOTE(review): DepthwiseConv2D's first positional arg is kernel_size,
    # the second is strides — so this is kernel 2, strides (3, 3); confirm
    # that is what the test intends.
    outputs.append(DepthwiseConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(DepthwiseConv2D(2, (3, 3), use_bias=False)(inputs[0]))

    outputs.append(Dense(2, use_bias=True)(inputs[3]))
    outputs.append(Dense(2, use_bias=False)(inputs[3]))

    # Shared (weight-tied) layers applied to several tensors.
    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[1]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[2]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[2]))  # (1, 8, 8)
    # BUG FIX: same missing call as above — instantiate, then apply.
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    # BUG FIX: same missing call as above.
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    # Element-wise merge layers.
    outputs.append(keras.layers.Add()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Subtract()([inputs[4], inputs[8]]))
    outputs.append(keras.layers.Multiply()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Average()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Maximum()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(Concatenate()([inputs[4], inputs[8], inputs[8]]))

    # Nested functional sub-model.
    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    # Nested Sequential sub-model.
    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    # Activation layer coverage (including a shared instance).
    outputs = outputs + [
        Activation('tanh')(inputs[3]),
        Activation('hard_sigmoid')(inputs[3]),
        Activation('selu')(inputs[3]),
        Activation('sigmoid')(inputs[3]),
        Activation('softplus')(inputs[3]),
        Activation('softmax')(inputs[3]),
        Activation('relu')(inputs[3]),
        LeakyReLU()(inputs[3]),
        ELU()(inputs[3]),
        PReLU()(inputs[2]),
        PReLU()(inputs[3]),
        PReLU()(inputs[4]),
        shared_activation(inputs[3]),
        inputs[4],
        inputs[1],
        x,
        shared_activation(x),
    ]

    print('Model has {} outputs.'.format(len(outputs)))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_full')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data so weights are exercised end to end
    training_data_size = 1
    batch_size = 1
    epochs = 10
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size)
    return model
    measurements.append(measurement)

# Collected frames / steering measurements into training arrays
# (`images` and `measurements` are built earlier in the original script).
X_train = np.array(images)
y_train = np.array(measurements)
print(X_train)
print(y_train)

from keras.models import Sequential, Model
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D

# NVIDIA-style steering regression network for 360x540 RGB frames.
model = Sequential()
# NOTE(review): scales pixels to [0, 1] only — no -0.5 mean-centring,
# unlike the sibling snippets in this file; confirm this is intentional.
model.add(Lambda(lambda x: x / 255.0,
                 input_shape=(360, 540, 3)))  # TensorFlow-backend expression
model.add(Cropping2D(cropping=((70, 25), (0, 0))))  # crop 70 top / 25 bottom rows
# Three strided 5x5 convs, then two 3x3 convs (Keras 2 tuple kernels).
model.add(Conv2D(24, (5, 5), activation="relu", strides=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(36, (5, 5), activation="relu", strides=(2, 2)))
model.add(Conv2D(48, (5, 5), activation="relu", strides=(2, 2)))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Flatten())
# Dense head down to a single steering value.
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
model.fit(X_train,
          y_train,
            y_train = np.array(measurements)
            yield sklearn.utils.shuffle(X_train, y_train)


batch_size = 32
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Dropout
from keras.layers import Conv2D, MaxPooling2D, Cropping2D

# LeNet-style steering model: normalize, crop, two conv+pool stages,
# then a 120 -> 84 -> 1 dense head.
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((50, 20), (0, 0))))
# NOTE(review): positional `Conv2D(6, 5, 5)` is the legacy Keras 1 call
# style (nb_filter, nb_row, nb_col); Keras 2 accepts it only through its
# legacy-interface shim (with a warning) as a 5x5 kernel — confirm the
# installed Keras version handles this.
model.add(Conv2D(6, 5, 5, activation="elu"))
model.add(MaxPooling2D())
model.add(Conv2D(6, 5, 5, activation="elu"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')

# NOTE(review): presumably 2 (flip augmentation) x 3 (cameras) samples
# per CSV row — verify against the generator implementation.
samples = 2 * 3 * len(train_samples)

history_object = model.fit_generator(train_generator, \
            samples_per_epoch= (samples//batch_size)*batch_size, \
            validation_data=validation_generator, \
Ejemplo n.º 20
0
def BlockModel(input_shape,filt_num=16,numBlocks=3):
    """Build a U-Net-like encoder/decoder from multi-scale convolution blocks.

    Each contracting block runs parallel 1x1, 3x3 and stacked-3x3
    convolutions, concatenates them, fuses with a 1x1 convolution and
    downsamples with a strided 4x4 convolution; expanding blocks mirror
    this, upsampling with transposed convolutions and concatenating the
    matching contracting activation as a skip connection.

    input_shape: batch-style shape; only input_shape[1:] (H, W, C) is used.
    filt_num:    base filter count; block r uses filt_num * r filters.
    numBlocks:   number of contracting/expanding block pairs.

    Returns a keras Model producing a single-channel sigmoid map at the
    (cropped-then-re-padded) input size.

    Raises ValueError when the input is too small for numBlocks halvings.

    NOTE(review): depends on module-level names defined elsewhere in the
    file: Input, Conv2D, Conv2DTranspose, ELU, concatenate, Cropping2D,
    ZeroPadding2D, Model, np, and the kernel initializer `init` — confirm
    they are in scope before use.
    """
    lay_input = Input(shape=(input_shape[1:]),name='input_layer')

    # Crop so the spatial dims become divisible by 2**numBlocks (the
    # remainder plus a 2-pixel margin is removed, restored by padding at
    # the end of the network).
    mod = np.mod(input_shape[1:3],2**numBlocks)
    padamt = mod+2
    # Estimate the smallest feature-map size after all downsamplings and
    # refuse configurations that would shrink it below 4 pixels.
    startsize = np.max(input_shape[1:3]-padamt)
    minsize = (startsize-np.sum(2**np.arange(1,numBlocks+1)))/2**numBlocks
    if minsize<4:
        raise ValueError('Too small of input for this many blocks. Use fewer blocks or larger input')

    crop = Cropping2D(cropping=((0,padamt[0]), (0,padamt[1])), data_format=None)(lay_input)

    # contracting block 1
    rr = 1
    lay_conv1 = Conv2D(filt_num*rr, (1, 1),padding='same',kernel_initializer=init,name='Conv1_{}'.format(rr))(crop)
    lay_conv3 = Conv2D(filt_num*rr, (3, 3),padding='same',kernel_initializer=init,name='Conv3_{}'.format(rr))(crop)
    lay_conv51 = Conv2D(filt_num*rr, (3, 3),padding='same',kernel_initializer=init,name='Conv51_{}'.format(rr))(crop)
    lay_conv52 = Conv2D(filt_num*rr, (3, 3),padding='same',kernel_initializer=init,name='Conv52_{}'.format(rr))(lay_conv51)
    lay_merge = concatenate([lay_conv1,lay_conv3,lay_conv52],name='merge_{}'.format(rr))
    # 1x1 convolution fuses the concatenated multi-scale features.
    lay_conv_all = Conv2D(filt_num*rr,(1,1),padding='valid',kernel_initializer=init,name='ConvAll_{}'.format(rr))(lay_merge)
#    bn = BatchNormalization()(lay_conv_all)
    lay_act = ELU(name='elu{}_1'.format(rr))(lay_conv_all)
    # Strided 4x4 convolution halves the spatial resolution.
    lay_stride = Conv2D(filt_num*rr,(4,4),padding='valid',strides=(2,2),kernel_initializer=init,name='ConvStride_{}'.format(rr))(lay_act)
    lay_act = ELU(name='elu{}_2'.format(rr))(lay_stride)
    act_list = [lay_act]  # per-block activations, reused as decoder skips

    # contracting blocks 2-n
    for rr in range(2,numBlocks+1):
        lay_conv1 = Conv2D(filt_num*rr, (1, 1),padding='same',kernel_initializer=init,name='Conv1_{}'.format(rr))(lay_act)
        lay_conv3 = Conv2D(filt_num*rr, (3, 3),padding='same',kernel_initializer=init,name='Conv3_{}'.format(rr))(lay_act)
        lay_conv51 = Conv2D(filt_num*rr, (3, 3),padding='same',kernel_initializer=init,name='Conv51_{}'.format(rr))(lay_act)
        lay_conv52 = Conv2D(filt_num*rr, (3, 3),padding='same',kernel_initializer=init,name='Conv52_{}'.format(rr))(lay_conv51)
        lay_merge = concatenate([lay_conv1,lay_conv3,lay_conv52],name='merge_{}'.format(rr))
        lay_conv_all = Conv2D(filt_num*rr,(1,1),padding='valid',kernel_initializer=init,name='ConvAll_{}'.format(rr))(lay_merge)
#        bn = BatchNormalization()(lay_conv_all)
        lay_act = ELU(name='elu_{}'.format(rr))(lay_conv_all)
        lay_stride = Conv2D(filt_num*rr,(4,4),padding='valid',kernel_initializer=init,strides=(2,2),name='ConvStride_{}'.format(rr))(lay_act)
        lay_act = ELU(name='elu{}_2'.format(rr))(lay_stride)
        act_list.append(lay_act)

    # expanding block n (deepest level, no skip connection)
    dd=numBlocks
    lay_deconv1 = Conv2D(filt_num*dd,(1,1),padding='same',kernel_initializer=init,name='DeConv1_{}'.format(dd))(lay_act)
    lay_deconv3 = Conv2D(filt_num*dd,(3,3),padding='same',kernel_initializer=init,name='DeConv3_{}'.format(dd))(lay_act)
    lay_deconv51 = Conv2D(filt_num*dd, (3,3),padding='same',kernel_initializer=init,name='DeConv51_{}'.format(dd))(lay_act)
    lay_deconv52 = Conv2D(filt_num*dd, (3,3),padding='same',kernel_initializer=init,name='DeConv52_{}'.format(dd))(lay_deconv51)
    lay_merge = concatenate([lay_deconv1,lay_deconv3,lay_deconv52],name='merge_d{}'.format(dd))
    lay_deconv_all = Conv2D(filt_num*dd,(1,1),padding='valid',kernel_initializer=init,name='DeConvAll_{}'.format(dd))(lay_merge)
#    bn = BatchNormalization()(lay_deconv_all)
    lay_act = ELU(name='elu_d{}'.format(dd))(lay_deconv_all)
    # Transposed convolution doubles the spatial resolution.
    lay_stride = Conv2DTranspose(filt_num*dd,(4,4),strides=(2,2),kernel_initializer=init,name='DeConvStride_{}'.format(dd))(lay_act)
    lay_act = ELU(name='elu_d{}_2'.format(dd))(lay_stride)

    # expanding blocks n-1 .. 1, each concatenating the matching encoder
    # activation before the multi-scale convolutions
    expnums = list(range(1,numBlocks))
    expnums.reverse()
    for dd in expnums:
        lay_skip = concatenate([act_list[dd-1],lay_act],name='skip_connect_{}'.format(dd))
        lay_deconv1 = Conv2D(filt_num*dd,(1,1),padding='same',kernel_initializer=init,name='DeConv1_{}'.format(dd))(lay_skip)
        lay_deconv3 = Conv2D(filt_num*dd,(3,3),padding='same',kernel_initializer=init,name='DeConv3_{}'.format(dd))(lay_skip)
        lay_deconv51 = Conv2D(filt_num*dd, (3, 3),padding='same',kernel_initializer=init,name='DeConv51_{}'.format(dd))(lay_skip)
        lay_deconv52 = Conv2D(filt_num*dd, (3, 3),padding='same',kernel_initializer=init,name='DeConv52_{}'.format(dd))(lay_deconv51)
        lay_merge = concatenate([lay_deconv1,lay_deconv3,lay_deconv52],name='merge_d{}'.format(dd))
        lay_deconv_all = Conv2D(filt_num*dd,(1,1),padding='valid',kernel_initializer=init,name='DeConvAll_{}'.format(dd))(lay_merge)
#        bn = BatchNormalization()(lay_deconv_all)
        lay_act = ELU(name='elu_d{}'.format(dd))(lay_deconv_all)
        lay_stride = Conv2DTranspose(filt_num*dd,(4,4),strides=(2,2),kernel_initializer=init,name='DeConvStride_{}'.format(dd))(lay_act)
        lay_act = ELU(name='elu_d{}_2'.format(dd))(lay_stride)

    # Restore the pixels removed by the initial crop, then clean up.
    lay_pad = ZeroPadding2D(padding=((0,padamt[0]), (0,padamt[1])), data_format=None)(lay_act)
    lay_cleanup = Conv2D(filt_num,(3,3),padding='same',kernel_initializer=init,name='CleanUp_1')(lay_pad)
    lay_cleanup = Conv2D(filt_num,(3,3),padding='same',kernel_initializer=init,name='CleanUp_2')(lay_cleanup)
    # output: single-channel sigmoid segmentation map
    lay_out = Conv2D(1,(1,1), activation='sigmoid',kernel_initializer=init,name='output_layer')(lay_cleanup)

    return Model(lay_input,lay_out)
Ejemplo n.º 21
0
    # NOTE(review): these four lines are the tail of an augmentation loop
    # whose header lies outside this fragment; each image is also flipped
    # horizontally with its steering measurement negated.
    augmentated_images.append(image)
    augmentated_measurements.append(measurement)
    augmentated_images.append(cv2.flip(image, 1))
    augmentated_measurements.append(measurement * -1)

X_train = np.array(augmentated_images)
y_train = np.array(augmentated_measurements)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers import Conv2D
from keras.layers.pooling import MaxPooling2D

# Nvidia's CNN architecture
model = Sequential()
# Crop sky (top 70 rows) and hood (bottom 25 rows) before normalizing.
model.add(Cropping2D(cropping=((70, 25), (0, 0)),
                     input_shape=X_train[0].shape))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))  # normalize and mean-center
model.add(Dropout(0.2))
model.add(Conv2D(24, (5, 5), activation="relu", padding='same'))
model.add(MaxPooling2D())
model.add(Conv2D(36, (5, 5), activation="relu", padding='same'))
model.add(MaxPooling2D())
model.add(Conv2D(48, (5, 5), activation="relu", padding='same'))
model.add(MaxPooling2D())
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation="relu", padding='same'))
model.add(MaxPooling2D())
model.add(Conv2D(64, (3, 3), activation="relu", padding='same'))
model.add(MaxPooling2D())
model.add(Flatten())
# NOTE(review): fragment truncated here — the rest of the dense head is
# missing from this chunk.
model.add(Dense(1164))
def unet(input_shape=(240, 240, 2), bn=True, do=0, ki="he_normal", lr=0.001):
    '''
    Build a 2D U-Net for segmenting 2-channel <t1, flair> slices.

    input_shape: (H, W, C) of the input; C is 2 for <t1, flair>
    bn: if use batchnorm layer
    do: dropout prob (0 disables dropout)
    ki: kernel initializer (glorot_uniform, he_normal, ...)
    lr: learning rate of Adam

    Returns a compiled Model with a single-channel sigmoid output, dice
    loss and dice/precision/recall metrics. Relies on helpers defined
    elsewhere in this file: get_crop_shape, get_pad_shape, dice_coef_loss,
    dice_coef, precision_xue, recall_xue.
    '''
    concat_axis = -1  #the last axis (channel axis)

    inputs = Input(input_shape)  # channels is 2: <t1, flair>

    # --- encoder level 1: two 5x5 convs, optional BN/dropout ---
    conv1 = Conv2D(64, (5, 5),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(inputs)
    conv1 = BatchNormalization()(conv1) if bn else conv1
    conv1 = Dropout(do)(conv1) if do else conv1
    conv1 = Conv2D(64, (5, 5),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv1)
    conv1 = BatchNormalization()(conv1) if bn else conv1

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    # --- encoder level 2 ---
    conv2 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool1)
    conv2 = BatchNormalization()(conv2) if bn else conv2
    conv2 = Dropout(do)(conv2) if do else conv2
    conv2 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv2)
    conv2 = BatchNormalization()(conv2) if bn else conv2

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    # --- encoder level 3 ---
    conv3 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool2)
    conv3 = BatchNormalization()(conv3) if bn else conv3
    conv3 = Dropout(do)(conv3) if do else conv3
    conv3 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv3)
    conv3 = BatchNormalization()(conv3) if bn else conv3

    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    # --- encoder level 4 ---
    conv4 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool3)
    conv4 = BatchNormalization()(conv4) if bn else conv4
    conv4 = Dropout(do)(conv4) if do else conv4
    conv4 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv4)
    conv4 = BatchNormalization()(conv4) if bn else conv4

    # --- bottleneck: a dilated conv enlarges the receptive field in place
    # of a fifth pooled level (the original pool4/conv5/upconv5 + skip
    # concatenation was replaced by this single dilated convolution). ---
    conv4 = Conv2D(512, (3, 3),
                   dilation_rate=2,
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv4)
    cat6 = conv4

    # --- decoder level 6 ---
    conv6 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(cat6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    conv6 = Dropout(do)(conv6) if do else conv6
    conv6 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    upconv6 = Conv2DTranspose(128, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv6)

    # Crop the encoder skip so spatial dims match, then concatenate.
    ch, cw = get_crop_shape(conv3, upconv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw))(conv3)
    up7 = concatenate([upconv6, crop_conv3], axis=concat_axis)

    # --- decoder level 7 ---
    conv7 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    conv7 = Dropout(do)(conv7) if do else conv7
    # Fixed: this was the only convolution missing kernel_initializer=ki,
    # silently falling back to the default glorot_uniform.
    conv7 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    upconv7 = Conv2DTranspose(96, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv7)

    ch, cw = get_crop_shape(conv2, upconv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw))(conv2)
    up8 = concatenate([upconv7, crop_conv2], axis=concat_axis)

    # --- decoder level 8 ---
    conv8 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    conv8 = Dropout(do)(conv8) if do else conv8
    conv8 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    upconv8 = Conv2DTranspose(64, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv8)

    ch, cw = get_crop_shape(conv1, upconv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw))(conv1)
    up9 = concatenate([upconv8, crop_conv1], axis=concat_axis)

    # --- decoder level 9 ---
    conv9 = Conv2D(64, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    conv9 = Conv2D(64, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    # Pad back up to the original input size before the final 1x1 conv.
    ch, cw = get_pad_shape(conv9, conv1)
    pad_conv9 = ZeroPadding2D(padding=(ch, cw))(conv9)
    conv9 = Conv2D(1, (1, 1),
                   padding="same",
                   activation="sigmoid",
                   kernel_initializer=ki)(pad_conv9)  #change to sigmoid

    model = Model(inputs=inputs, outputs=conv9)

    # Adam with default betas; lr is the only tuned knob here.
    optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(optimizer=optimizer,
                  loss=dice_coef_loss,
                  metrics=[dice_coef, precision_xue, recall_xue])

    return model
Ejemplo n.º 23
0
            # NOTE(review): these three lines are the tail of a batch
            # generator whose definition lies outside this fragment.
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)

# Set our batch size
batch_size=32

# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)

print("Creating sequential model")
model = Sequential()

# Add some cropping which cuts upper and lower part of the image
model.add(Cropping2D(cropping=((50,27), (0,0)), input_shape=(160,320,3)))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))

# Subsampling here is modified compared to NVIDIA network as there wouldn't be enough pixels
# NOTE(review): subsample= is the Keras-1 keyword for strides; mixing it
# with Keras-2 tuple kernel sizes relies on Keras' legacy-interface
# support — confirm against the Keras version in use.
model.add(Conv2D(24,(5,5),subsample=(1,2),activation="relu"))
model.add(Conv2D(36,(5,5),subsample=(2,2),activation="relu"))
model.add(Conv2D(48,(5,5),subsample=(2,2),activation="relu"))

# Dropout to help overfitting
model.add(Dropout(0.5))
model.add(Conv2D(64,(5,5),subsample=(1,1),activation="relu"))
model.add(Conv2D(64,(5,5),subsample=(1,1),activation="relu"))
model.add(Flatten())
model.add(Dense(100))

# NOTE(review): fragment truncated here — the dropout referred to below
# and the rest of the dense head are missing from this chunk.
# Another dropout to help overfitting
Ejemplo n.º 24
0
def _adjust_block(p, ip, filters, weights_decay=5e-5, block_id=None):
    '''
    # Functions: Adjusts the input 'p' to match the shape of the 'ip' tensor
    # Arguments:
        p: input tensor (may be None, in which case ip is used as-is)
        ip: input tensor to be matched
        filters: number of output filters
        weights_decay: l2 regularization factor
        block_id: string used to name the adjustment layers
    # Returns: the adjusted tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2
    with K.name_scope('adjust_block'):
        if p is None:  # fixed: identity check instead of '== None' (PEP 8)
            p = ip

        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            # Spatial mismatch: halve p's resolution with two offset
            # stride-2 average pools (factorized reduction), then merge.
            with K.name_scope('adjust_reduction_block_%s' % block_id):
                p = Activation('relu', name='adjust_relu_1_%s' % block_id)(p)

                p1 = AveragePooling2D(
                    (1, 1),
                    strides=2,
                    padding='valid',
                    name='adjust_avg_pool_1_%s' % block_id)(p)
                p1 = Conv2D(filters // 2, (1, 1),
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=l2(weights_decay),
                            kernel_initializer='he_normal',
                            name='adjust_conv_1_%s' % block_id)(p1)

                # Second path is shifted by one pixel before pooling.
                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D(
                    (1, 1),
                    strides=2,
                    padding='valid',
                    name='adjust_avg_pool_2_%s' % block_id)(p2)
                p2 = Conv2D(filters // 2, (1, 1),
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=l2(weights_decay),
                            kernel_initializer='he_normal',
                            name='adjust_conv_2_%s' % block_id)(p2)
                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim,
                                       momentum=BN_DECAY,
                                       epsilon=BN_EPS,
                                       name='adjust_bn_%s' % block_id)(p)
        elif p._keras_shape[channel_dim] != filters:
            # Channel mismatch only: project with a 1x1 convolution.
            with K.name_scope('adjust_projection_block_%s' % block_id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1),
                           strides=1,
                           padding='same',
                           use_bias=False,
                           kernel_regularizer=l2(weights_decay),
                           kernel_initializer='he_normal',
                           name='adjust_conv_projection_%s' % block_id)(p)
                p = BatchNormalization(axis=channel_dim,
                                       momentum=BN_DECAY,
                                       epsilon=BN_EPS,
                                       name='adjust_bn_%s' % block_id)(p)
    return p
Ejemplo n.º 25
0
def commaAiModelPrime(time_len=1):
    """
    Build an enhanced comma.ai autonomous-car steering model.

    Compared to the base architecture, dropout is replaced by L2 weight
    regularization and three extra convolution layers are stacked on top.
    Returns a compiled Sequential model (Adam, MSE loss).
    """
    model = Sequential()
    # Normalize pixels to [-0.5, 0.5] and crop sky/hood rows.
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((50, 20), (0, 0))))

    # Strided convolutions (Keras-1 API: subsample/border_mode/W_regularizer).
    for depth, size, stride in ((16, 8, (4, 4)), (32, 5, (2, 2)),
                                (48, 5, (2, 2))):
        model.add(
            Convolution2D(depth,
                          size,
                          size,
                          subsample=stride,
                          border_mode="same",
                          W_regularizer=l2(0.001)))
        model.add(ELU())

    # Two unstrided 3x3 convolutions with valid padding (depth 64 each).
    for _ in range(2):
        model.add(
            Convolution2D(64, 3, 3, border_mode='valid',
                          W_regularizer=l2(0.001)))
        model.add(ELU())

    model.add(Flatten())

    # Fully-connected head; every layer L2-regularized and ELU-activated.
    for width in (100, 50, 10):
        model.add(Dense(width, W_regularizer=l2(0.001)))
        model.add(ELU())

    # Single regression output (steering angle).
    model.add(Dense(1))

    model.compile(optimizer=Adam(lr=1e-4), loss='mse')

    return model
    def get_unet_preenc(
        self,
        k=10,
        lr=1e-4,
        f_out=2,
    ):
        """Build a U-Net decoder on top of the (pre-trained) encoder.

        :param k: base number of filters per decoder convolution
        :param lr: learning rate handed to compile_segm
        :param f_out: number of output classes (softmax channels)
        :return: compiled keras Model from the encoder input to the
            softmax segmentation output
        :raises ValueError: if self.depth is not 1 or 2 (no crop widths
            are known for other depths)
        """

        from keras.layers import Conv2D, Concatenate, Cropping2D, Conv2DTranspose, BatchNormalization
        from methods.examples import compile_segm

        model_encoder = self.get_encoder(k)

        b_double = False  # if True, decoder widths would double per depth
        padding = 'valid'

        l = model_encoder.output

        # Widths to crop from the encoder skip connections so their spatial
        # size matches the 'valid'-padded decoder feature maps.
        if self.depth == 2:
            list_w_crop = [12, 4]
        elif self.depth == 1:
            list_w_crop = [4]
        else:
            # Fixed: previously fell through for other depths and crashed
            # later with NameError on list_w_crop.
            raise ValueError(
                f'Unsupported depth: {self.depth}; expected 1 or 2')

        for i_d in range(self.depth)[::-1]:
            f = 2**i_d * k if b_double else k
            l = Conv2D(f, (3, 3),
                       activation='elu',
                       padding=padding,
                       name=f'dec{i_d+1}')(l)

            if self.batch_norm:
                l = BatchNormalization(name=f'batchnorm_dec{i_d+1}')(l)

            # Learned upsampling (transposed conv); the dead UpSampling2D
            # alternative that was guarded by `if 0:` has been removed.
            l = Conv2DTranspose(f, (2, 2), strides=(2, 2))(l)
            if self.batch_norm:
                l = BatchNormalization(name=f'batchnorm_up{i_d}')(l)

            # Combine with the cropped encoder activation (skip connection).
            l_left_crop = Cropping2D(list_w_crop[i_d], name=f'crop_enc{i_d}')(
                model_encoder.get_layer(f'enc{i_d}').output)
            l = Concatenate(name=f'conc_dec{i_d}')([l, l_left_crop])

        l = Conv2D(k, (3, 3),
                   activation='elu',
                   padding=padding,
                   name=f'dec{0}')(l)
        if self.batch_norm:
            l = BatchNormalization(name=f'batchnorm_dec{0}')(l)
        # Per-pixel class probabilities.
        decoder_outputs = Conv2D(f_out, (1, 1),
                                 activation='softmax',
                                 padding=padding)(l)

        model_pretrained_unet = Model(model_encoder.input, decoder_outputs)
        compile_segm(model_pretrained_unet, lr=lr)

        model_pretrained_unet.summary()

        return model_pretrained_unet
def main(_):
    """Train an NVIDIA-style steering model and save it as model.h5.

    Reads driving_log.csv under FLAGS.data_path, splits the samples 80/20
    into train/validation, trains via generators and writes the model plus
    an architecture plot back into the data directory.
    """
    print("Data Path", FLAGS.data_path)
    print("Number of epochs", FLAGS.epochs)
    print("Batch size", FLAGS.batch)

    data_path = FLAGS.data_path
    image_path = data_path + 'IMG/'
    log_file = 'driving_log.csv'

    # Loading data
    samples = []
    with open(data_path + log_file, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            samples.append(row)

    # Split data into train and validation subsets
    train_samples, validation_samples = train_test_split(samples,
                                                         test_size=0.2)
    # Multiplying by 3 because of usage left/right images
    train_samples_size = 3 * len(train_samples)
    validation_samples_size = 3 * len(validation_samples)

    # Generate data sets
    train = generator(train_samples, image_path, FLAGS.batch)
    validation = generator(validation_samples, image_path, FLAGS.batch)

    # Model
    model = Sequential()
    # Preprocessing - normalization
    model.add(Lambda(lambda x: (x / 255.0) - .5, input_shape=(160, 320, 3)))
    # Cropping not usable area like sky, trees, etc.
    model.add(
        Cropping2D(cropping=((75, 10), (0, 0)), input_shape=(160, 320, 3)))
    # NVIDIA CNN, recommended in lesson
    model.add(Convolution2D(24, (5, 5), strides=(2, 2), activation="relu"))
    # Possible Dropout to avoid over fitting
    #model.add(Dropout(rate=0.75))
    model.add(Convolution2D(36, (5, 5), strides=(2, 2), activation="relu"))
    # Possible Dropout to avoid over fitting
    #model.add(Dropout(rate=0.5))
    model.add(Convolution2D(48, (5, 5), strides=(2, 2), activation="relu"))
    # Possible Dropout to avoid over fitting
    model.add(Dropout(rate=0.5))
    model.add(Convolution2D(64, (3, 3), activation="relu"))
    # Possible Dropout to avoid over fitting
    #model.add(Dropout(rate=0.75))
    model.add(Convolution2D(64, (3, 3), activation="relu"))
    # Possible Dropout to avoid over fitting
    model.add(Dropout(rate=0.5))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dense(50))
    model.add(Dense(10))
    model.add(Dense(1))

    # Used default Adam Optimizer
    model.compile(loss='mse', optimizer='adam')
    # Steps are counted in batches since the interface change in Keras 2.0.6
    steps = int(train_samples_size / FLAGS.batch)
    # Fixed: validation_steps must also be a batch count; passing the raw
    # sample count made validation run FLAGS.batch times too many steps.
    validation_steps = int(validation_samples_size / FLAGS.batch)
    # Training...
    model.fit_generator(train,
                        steps_per_epoch=steps,
                        validation_data=validation,
                        validation_steps=validation_steps,
                        epochs=FLAGS.epochs)
    #Storing the model in a file
    model.save(data_path + 'model.h5')
    #Plot model architecture
    from keras.utils import plot_model
    plot_model(model, to_file='model.png', show_shapes=True)
Ejemplo n.º 28
0
def imagePreprocessing(model):
    """Prepend normalization and cropping layers to *model*.

    Adds a Lambda layer that scales 160x320 RGB pixels to [-0.5, 0.5],
    then a Cropping2D layer removing the top 70 and bottom 25 rows,
    which carry no steering-relevant information.
    """
    normalize = Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3))
    crop = Cropping2D(cropping=((70,25),(0,0)))
    for layer in (normalize, crop):
        model.add(layer)
Ejemplo n.º 29
0
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)

ch, row, col = 3, 160, 320  # Trimmed image format

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Activation

from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D

# NVIDIA-style convolution stack with explicit Activation layers and
# overlapping (stride-1) max-pooling between the strided convolutions.
activation_relu = 'relu'
model = Sequential()
# model.add(Flatten(input_shape=(160,320,3)))
# Crop sky (top 50 rows) and hood (bottom 20 rows), then normalize.
model.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(row, col, ch)))
model.add(Lambda(lambda x: x / 255.0 - 0.5))  # pixel_mean_centered

model.add(Conv2D(24, (5, 5), strides=(2, 2), padding='valid'))
model.add(Activation(activation_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Conv2D(36, (5, 5), strides=(2, 2), padding='valid'))
model.add(Activation(activation_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))

model.add(Conv2D(48, (5, 5), strides=(2, 2), padding='valid'))
model.add(Activation(activation_relu))

model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='valid'))
model.add(Activation(activation_relu))
# NOTE(review): fragment truncated here — the rest of the network and the
# training call are missing from this chunk.
Ejemplo n.º 30
0
def get_model():
    """Build and compile an NVIDIA-inspired steering-regression CNN.

    Input: 160x320x3 frames, normalized then cropped to 80x320x3.
    Output: one continuous value (steering angle); Adam optimizer, MSE loss.

    NOTE(review): the Conv2D calls use the legacy Keras-1 call style
    (Conv2D(n, k, k, subsample=..., border_mode=...)); under Keras 2 this
    relies on the legacy-interface conversion — confirm the Keras version.
    The original per-layer "Output: HxWxC" comments did not match the
    actual layer shapes and have been removed.
    """
    model = Sequential()

    # Normalization and cropping of the input image
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
    model.add(
        Cropping2D(cropping=((50, 30), (0, 0)), input_shape=(160, 320, 3)))

    # layer 1 - strided 5x5 convolution with 'relu' activation
    # NOTE(review): input_shape here is redundant — only the first layer of
    # a Sequential model uses it; Keras ignores it on later layers.
    model.add(
        Conv2D(24,
               5,
               5,
               input_shape=(80, 320, 3),
               subsample=(2, 2),
               border_mode="valid"))
    #Layer Activation
    model.add(Activation('relu'))

    # layer 2 - strided 5x5 convolution with 'relu' activation & maxPooling
    model.add(Conv2D(36, 5, 5, subsample=(2, 2), border_mode="valid"))
    #Layer Activation
    model.add(Activation('relu'))
    #Apply MaxPooling
    model.add(MaxPooling2D((2, 2)))
    model.add(BatchNormalization())

    # layer 3 - 3x3 convolution with dropout and 'relu' activation
    model.add(Conv2D(48, 3, 3, border_mode="valid"))
    #Layer Activation
    model.add(Activation('relu'))
    model.add(Dropout(.5))
    model.add(BatchNormalization())

    # layer 4 - 3x3 convolution with 'relu' activation
    model.add(Conv2D(64, 3, 3, border_mode="valid"))
    #Layer Activation
    model.add(Activation('relu'))
    model.add(BatchNormalization())

    # Flatten the output
    model.add(Flatten())

    ## layer 5 - fully connected layer with dropout and 'relu' activation
    model.add(Dense(1164))
    #Layer Activation
    model.add(Activation('relu'))
    #Apply Dropout
    model.add(Dropout(0.5))
    model.add(BatchNormalization())

    ## layer 6 - fully connected layer with 'relu' activation
    model.add(Dense(100))
    #Layer Activation
    model.add(Activation('relu'))
    #Apply Dropout
    #model.add(Dropout(0.5))
    model.add(BatchNormalization())

    ## layer 7 - fully connected layer with 'relu' activation
    model.add(Dense(50))
    model.add(Activation('relu'))
    #Apply Dropout
    # model.add(Dropout(0.5))
    model.add(BatchNormalization())

    # Final layer with single regression output
    model.add(Dense(1))

    #Apply Adam-Optimizer and MSE loss function
    model.compile(optimizer="adam", loss="mse")

    return model