Example #1
    images.append(right_img)
    angles.append(right_angle)
    pbar.update(1)

pbar.close()    
X_train = np.array(images)
y_train = np.array(angles)
X_train, y_train = shuffle(X_train, y_train)
print(X_train.shape, y_train.shape)


# Model architecture

nvidia = Sequential()
nvidia.add(Lambda(lambda x: x/255. - 0.5, input_shape=(80, 80, 3)))
nvidia.add(Cropping2D(cropping=((35, 13), (0, 0))))
nvidia.add(Convolution2D(24, 3, 3, subsample=(2, 2), activation='relu'))
nvidia.add(Convolution2D(36, 3, 3, subsample=(2, 2), activation='relu'))
nvidia.add(Convolution2D(48, 3, 3, activation='relu'))
nvidia.add(Convolution2D(64, 3, 3, activation='relu'))
nvidia.add(Convolution2D(64, 3, 3, activation='relu'))
nvidia.add(Dropout(0.5))
nvidia.add(Flatten())
nvidia.add(Dense(100))
nvidia.add(Dense(50))
nvidia.add(Dense(10))
nvidia.add(Dense(1))


# Training method
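The snippet stops at this header, so the actual training call is missing. A minimal sketch of what typically follows, assuming the same Keras 1 API as the layers above (epoch count and validation split are placeholders, not from the source):

nvidia.compile(loss='mse', optimizer='adam')
# hold out 20% of the already-shuffled data for validation
nvidia.fit(X_train, y_train, validation_split=0.2, nb_epoch=5)
nvidia.save('model.h5')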
Example #2
            X_train = np.array(all_images)
            # Convert the image from BGR to RGB format using [...,::-1]
            #X_train = X_train[...,::-1]
            y_train = np.array(steering_measurements)
            yield sklearn.utils.shuffle(X_train, y_train)
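Only the tail of the `generator` body survives above. A plausible full definition consistent with that tail (the CSV column layout and the image-loading step are assumptions, not from the source):

import cv2
import numpy as np
import sklearn.utils

def generator(lines, batch_size=32):
    num_lines = len(lines)
    while True:  # loop forever so Keras can draw batches indefinitely
        lines = sklearn.utils.shuffle(lines)
        for offset in range(0, num_lines, batch_size):
            batch = lines[offset:offset + batch_size]
            all_images, steering_measurements = [], []
            for line in batch:
                all_images.append(cv2.imread(line[0]))        # assumed: image path in column 0
                steering_measurements.append(float(line[3]))  # assumed: steering angle in column 3
            X_train = np.array(all_images)
            y_train = np.array(steering_measurements)
            yield sklearn.utils.shuffle(X_train, y_train)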


# Create the training and validation generators
train_generator = generator(train_lines, batch_size=1)
validation_generator = generator(validation_lines, batch_size=1)

# Define the model
model = Sequential()
# Preprocess the data
model.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)))
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(90, 320, 3)))

model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu'))
model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.25))
model.add(Dense(50))
model.add(Dropout(0.25))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
Example #3
from keras.layers.core import Flatten, Dense, Lambda, Dropout
from keras.layers import Cropping2D
from keras.layers.convolutional import Convolution2D
#from keras.layers.pooling import MaxPooling2D

#use keras for CNN
model = Sequential()

# Lambda layer for input normalization
model.add(
    Lambda(lambda x: (x / 127.5) - 1.0,
           input_shape=(160, 320, 3),
           output_shape=(160, 320, 3)))

# Crop the image to throw out useless information
model.add(Cropping2D(cropping=((70, 25), (0, 0))))

# Multiple convolution layers, following the NVIDIA self-driving architecture
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation="relu"))
model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation="relu"))
model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation="relu"))
model.add(Convolution2D(64, 3, 3, activation="relu"))
#model.add(MaxPooling2D())
model.add(Convolution2D(64, 3, 3, activation="relu"))
#model.add(MaxPooling2D())

model.add(Flatten())
model.add(Dropout(0.25))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
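The listing is cut off after Dense(10); the usual ending for this kind of steering-regression model is a single output unit plus an MSE/Adam compile step (a sketch, not part of the original snippet):

model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')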
Example #4
            yield sklearn.utils.shuffle(X_train, y_train)


# Generators
crop_top, crop_bottom = 40, 20
n_channels, n_rows, n_cols = 3, 160 - crop_top - crop_bottom, 320  # trimmed image size
train_generator = generator(train_samples, batch_size)
validation_generator = generator(validation_samples, batch_size)

# Define model
model = Sequential()

# Preprocess model
model.add(
    Cropping2D(cropping=((crop_top, crop_bottom), (0, 0)),
               input_shape=(160, 320, 3)))
model.add(
    Lambda(lambda x: (x / 255.0) - 0.5,
           input_shape=(n_rows, n_cols, n_channels),
           output_shape=(n_rows, n_cols, n_channels)))

# Architecture
model.add(Conv2D(filters=24, kernel_size=5, strides=(2, 2), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(filters=36, kernel_size=5, strides=(2, 2), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(filters=48, kernel_size=5, strides=(2, 2), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1), activation='relu'))
model.add(Flatten())  # 512
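The example stops at Flatten(); assuming it finishes with the usual NVIDIA-style dense head, the missing layers would look roughly like this (a sketch, not from the source):

model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')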
Example #5
def model_train(data, filepath, learning_rate, epochs, batch_size=32):

    # train validation split
    train_samples, validation_samples = train_test_split(data, test_size=0.2)

    # generator runs for training and validation data
    train_generator = generator(train_samples, batch_size)
    validation_generator = generator(validation_samples, batch_size)

    #nvidia architecture
    # https://arxiv.org/pdf/1604.07316v1.pdf
    model = Sequential()
    model.add(
        Cropping2D(cropping=((60, 20), (0, 0)), input_shape=(160, 320, 3)))
    # model.add(Lambda(lambda x: x / 255.0))
    # model.add(Lambda(lambda x: x[:,:,::2,:]))
    model.add(BatchNormalization())

    model.add(
        Conv2D(filters=24,
               kernel_size=5,
               strides=2,
               padding='valid',
               activation='relu'))
    model.add(
        Conv2D(filters=36,
               kernel_size=5,
               strides=2,
               padding='valid',
               activation='relu'))
    model.add(
        Conv2D(filters=48,
               kernel_size=5,
               strides=2,
               padding='valid',
               activation='relu'))
    model.add(
        Conv2D(filters=64,
               kernel_size=3,
               strides=2,
               padding='valid',
               activation='relu'))
    model.add(
        Conv2D(filters=64,
               kernel_size=3,
               strides=1,
               padding='valid',
               activation='relu'))

    model.add(Flatten())

    model.add(Dense(1164, activation='relu'))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(1, activation='linear'))

    print(model.summary())

    # checkpoint the best model so that later, overfitted epochs cannot overwrite it
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    # early stopping: end training when validation loss stops improving
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=2,
                               verbose=0,
                               mode='auto')
    #Default parameters follow those provided in the original paper.
    opt = Adam(lr=learning_rate)

    model.compile(loss='mse', optimizer=opt, metrics=['mse'])

    # run the keras model with the train & validation generators
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=len(train_samples) // batch_size,
        validation_data=validation_generator,
        validation_steps=len(validation_samples) // batch_size,
        epochs=epochs,
        callbacks=[early_stop, checkpoint],
        verbose=1)

    return history
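A hypothetical call to this function, assuming `data` holds the parsed driving-log rows (the file name and hyperparameters are placeholders):

history = model_train(data,
                      filepath='model.h5',
                      learning_rate=1e-3,
                      epochs=10,
                      batch_size=32)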
Example #6
def get_crfrnn_model_def(nb_classes=21):
    """ Returns Keras CRF-RNN model definition.
    Currently, only 500 x 500 images are supported. However, one can get this to
    work with different image sizes by adjusting the parameters of the Cropping2D layers
    below.
    """
    channels, height, width = 3, 500, 500

    # Input
    input_shape = (height, width, 3)
    img_input = Input(shape=input_shape)

    # Add plenty of zero padding
    x = ZeroPadding2D(padding=(100, 100))(img_input)

    # VGG-16 convolution block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='valid',
               name='conv1_1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    # VGG-16 convolution block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='conv2_1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)

    # VGG-16 convolution block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='conv3_1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='conv3_2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
    pool3 = x

    # VGG-16 convolution block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv4_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv4_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
    pool4 = x

    # VGG-16 convolution block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)

    # Fully-connected layers converted to convolution layers
    x = Conv2D(4096, (7, 7), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(nb_classes, (1, 1), padding='valid', name='score-fr')(x)

    # Deconvolution
    score2 = Conv2DTranspose(nb_classes, (4, 4), strides=2, name='score2')(x)

    # Skip connections from pool4
    score_pool4 = Conv2D(nb_classes, (1, 1), name='score-pool4')(pool4)
    score_pool4c = Cropping2D((5, 5))(score_pool4)
    score_fused = Add()([score2, score_pool4c])
    score4 = Conv2DTranspose(nb_classes, (4, 4),
                             strides=2,
                             name='score4',
                             use_bias=False)(score_fused)

    # Skip connections from pool3
    score_pool3 = Conv2D(nb_classes, (1, 1), name='score-pool3')(pool3)
    score_pool3c = Cropping2D((9, 9))(score_pool3)

    # Fuse things together
    score_final = Add()([score4, score_pool3c])

    # Final up-sampling and cropping
    upsample = Conv2DTranspose(nb_classes, (16, 16),
                               strides=8,
                               name='upsample',
                               use_bias=False)(score_final)
    upscore = Cropping2D(((31, 37), (31, 37)))(upsample)

    output = CrfRnnLayer(image_dims=(height, width),
                         num_classes=nb_classes,
                         theta_alpha=160.,
                         theta_beta=3.,
                         theta_gamma=3.,
                         num_iterations=10,
                         name='crfrnn')([upscore, img_input])

    # Build the model
    model = Model(img_input, output, name='crfrnn_net')

    return model
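A hedged usage sketch for this builder; the weight file name and the shape of `img_data` are assumptions following the crfasrnn_keras project layout:

model = get_crfrnn_model_def(nb_classes=21)
model.load_weights('crfrnn_keras_model.h5')
# img_data is assumed to be a preprocessed batch of shape (1, 500, 500, 3)
probs = model.predict(img_data)[0]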
Example #7
log_dir = os.path.join('logs', experiment_name)
if not os.path.isdir(log_dir):
    os.makedirs(log_dir)

if False:  # debug preview of augmented, cropped frames (disabled)
    for e in range(1):
        imgs, y = batch_gen.custom_next()
        for j, img in enumerate(imgs):
            img = img[60: img.shape[0]-20]
            #cv2.imwrite('img_aug' + str(j) + '.png', img)
            cv2.imshow('win', img.astype(np.uint8))
            cv2.waitKey(2000)

in_layer = Input(shape=img.shape)
in_layer = Cropping2D(cropping=((crop_top, crop_bottom), (0, 0)))(in_layer)
in_layer = Lambda(lambda in_img: ((in_img - 128.) / 128.))(in_layer)

filter_multiplicator = 1
x = nvidia_net(nb_classes=1, filter_multiplicator=filter_multiplicator, input_shape=None, dropout=0.2, input_tensor=in_layer)
# x = squeeze_net(nb_classes=1, input_shape=None, dropout=0.2, input_tensor=in_layer)
# x = mobile_net(nb_classes=1, filter_multiplicator=filter_multiplicator, input_shape=None, dropout=0.2, input_tensor=in_layer)
x.summary()

x.compile(optimizer=Adam(), loss='mse', metrics=['mae'])

experiment_name = experiment_name + \
                  '[' + str(filter_multiplicator)+']'+\
                  '[BS'+str(batch_size)+']'

callbacks = [TensorBoard(log_dir),
Example #8
def lr(job_id,
       data_dir,
       j='all',
       weighted=True,
       num_epochs=10,
       pretrained=False,
       best_job_id=None,
       test_mode=False,
       data_dir_test=None,
       l96_var='XY',
       num_layers=1):

    time_start = time.time()
    np.random.seed(11)

    model = Sequential()

    if l96_var == 'Y':
        left_crop = 4
        right_crop = 0
    elif l96_var == 'Z':
        left_crop = 0
        right_crop = 16

    model.add(InputLayer(input_shape=(20, 20, 1)))
    if l96_var != 'XY':
        model.add(Cropping2D(cropping=((0, 0), (left_crop, right_crop))))

    model.add(Flatten())
    model.add(Dense(3))

    if not weighted:
        model.compile(loss='mse', optimizer='adam', metrics=[r2])
        loss_weights = 'none'
    else:
        loss_weights = 1 / 0.1 if j == 0 else 1 / 5
        model.compile(loss='mse',
                      optimizer='adam',
                      metrics=[r2],
                      loss_weights=[loss_weights])

    #load data
    train_generator, val_generator, test_generator = get_data_generators(
        data_dir, j=j)

    N = data_dir[data_dir.index('_N') + 2:-3]
    #callbacks
    path_log = 'experiments\\results_lr_' + str(num_layers) + '\\N' + str(
        N) + '_lr_' + str(num_layers) + '_checkpoints_' + str(
            test_mode) + '_' + str(l96_var) + '_'
    if not os.path.exists(path_log[:25]):
        os.makedirs(path_log[:25])
    checkpoints = ModelCheckpoint(path_log +
                                  'weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                  monitor='val_loss',
                                  verbose=0,
                                  save_best_only=True,
                                  save_weights_only=True)
    early_stopping = my_callback()  # EarlyStopping(monitor='val_r2', patience=3, baseline=0.95)
    csv_logger = CSVLogger(path_log + 'training_lr_' + str(num_layers) +
                           '.log',
                           separator=',',
                           append=True)

    if pretrained:
        path_weights = 'experiments\\results_lr\\N' + str(N) + '_lr_' + str(
            num_layers) + '_checkpoints_' + str(
                best_job_id
            ) + '.hdf5'  #+str(data_dir[17:-1])+str(best_job_id)+'\\'
        #        weights_id = [f for f in os.listdir(path_weights) if f.endswith('.hdf5')]
        #        weights_id = str(weights_id[0])
        model.load_weights(path_weights)

    if j != 'all':
        model.add(Dense(1))
    if not pretrained:
        model.fit_generator(
            generator=train_generator,
            steps_per_epoch=train_generator.n // train_generator.batch_size,
            validation_data=val_generator,
            validation_steps=val_generator.n // val_generator.batch_size,
            epochs=num_epochs,
            callbacks=[checkpoints, early_stopping, csv_logger])

    #evaluate
    print('\nevaluating the train generator')
    loss_train, metric_train = model.evaluate_generator(
        generator=train_generator,
        steps=train_generator.n // train_generator.batch_size)

    if test_mode:
        _, _, test_generator = get_data_generators(data_dir_test, j=j)
    print('\nevaluating the test generator')
    loss_test, metric_test = model.evaluate_generator(
        generator=test_generator,
        steps=test_generator.n // test_generator.batch_size)

    #get r2 values
    print('r2_train = ' + str(metric_train) + '\nr2_test = ' +
          str(metric_test))

    #keep track of time
    time_exp = time.time() - time_start

    model_out = {
        'time_exp': time_exp / 60,
        'loss_train': loss_train,
        'loss_test': loss_test,
        'r2_train': metric_train,
        'r2_test': metric_test
    }

    model_params = {
        'model_name': 'lr_' + str(num_layers),
        'job_id': job_id,
        'pretrained': pretrained,
        'best_job_id': best_job_id,
        'data_dir': data_dir[17:],
        'test_mode': test_mode,
        'data_dir_test': data_dir_test[22:],
        #'N'         : N,
        #'input_shape': x_train.shape,
        'target': j,
        'weighted': weighted,
        'num_epochs': num_epochs,
        'l96_var': l96_var
    }

    return model, model_out, model_params
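The `r2` metric passed to model.compile is not defined in this excerpt; a common Keras backend implementation (an assumption, not the author's code):

from keras import backend as K

def r2(y_true, y_pred):
    # coefficient of determination: 1 - SS_res / SS_tot
    ss_res = K.sum(K.square(y_true - y_pred))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / (ss_tot + K.epsilon())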
Example #9
            images.append(
                cv2.flip(
                    readImage(base_data_path + line[headers.index('center')]),
                    1))
            steering.append(-steering_value)


#read all the data from all the directories
[readFile(x) for x in base_data_path]
#convert the data and the labels to numpy arrays
X_train = np.array(images)
y_train = np.array(steering)

#Nvidia model here we go
model = Sequential()
model.add(Cropping2D(cropping=((30, 9), (0, 0)), input_shape=(64, 64, 3)))
model.add(Lambda(lambda x: x / 255.0 - 0.5))
model.add(Convolution2D(24, kernel_size=(5, 5), strides=2, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(36, kernel_size=(5, 5), strides=2, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(48, kernel_size=(5, 5), strides=2, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(64, kernel_size=(3, 3), strides=1, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(64, kernel_size=(3, 3), strides=1, padding="valid"))
model.add(BatchNormalization())
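readImage, used in the flipping augmentation above, is not shown. A plausible helper, assuming OpenCV loading plus a BGR-to-RGB conversion (hypothetical, not from the source):

import cv2

def readImage(path):
    # cv2.imread returns BGR; convert so training matches the RGB frames seen at drive time
    return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)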
Example #10
    black_channel = tf.where(is_black, true_matrix, false_matrix) - 0.5

    #colors_match = tf.logical_or(is_black, tf.logical_or(brown_matches, tf.logical_or(yellow_matches, red_matches)))

    #return tf.where(colors_match, true_matrix, false_matrix) - 0.5

    with_extra_channels = tf.concat([normalized, s_channel, yellow_channel, red_channel, brown_channel, black_channel], 3)

    return with_extra_channels

inputs = Input(shape=(160, 320, 3))
preprocessed = Lambda(preprocess, input_shape=(160, 320, 3))(inputs)
y_crop_top_pixels = 100
y_crop_bottom_pixels = 30
x_crop_pixels = 90
cropped = Cropping2D(cropping=((y_crop_top_pixels, y_crop_bottom_pixels), (x_crop_pixels, x_crop_pixels)))(preprocessed)
def stretch_y(x):
    import tensorflow as tf
    return tf.image.resize_images(x, (140, 224))
#cropped = Lambda(stretch_y)(cropped)

#preprocessed = Lambda(preprocess)(inputs)

#resized = ZeroPadding2D(padding=((224 - 160 + y_crop_top_pixels + y_crop_bottom_pixels) // 2, 0))(cropped)

"""
base_model = InceptionV3(input_tensor=preprocessed, weights='imagenet', include_top=False)

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='elu')(x)
Example #11
# Save a visualization of the dataset (a random sample of images)
augment.save_dataset_visual(train_lines, output_dir)
print("Saved sample training set visualization")

# Save a visualization of sample data augmentation
augment.save_augmentation_visual(train_lines, output_dir)
print("Saves data augmentation example")

# Create a very basic model to output steering angle
model = Sequential()
# Normalize the data and mean shift to zero
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
# Crop top and bottom of image
top_crop_px = 70
bot_crop_px = 20
model.add(Cropping2D(cropping=((top_crop_px, bot_crop_px), (0, 0))))
# Add a single convolutional layer with max pooling and dropout
drop_rate = 0.2
pool_size = 2
num_filters = 20
filter_size = 5
model.add(Conv2D(num_filters, filter_size, activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
# Fully Connected layers
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
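A hedged training step to pair with this compile call, assuming arrays X_train/y_train were built from train_lines earlier (the names and epoch count are placeholders):

model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=5)
model.save('model.h5')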
Example #12
def get_test_model_full():
    """Returns a maximally complex test model,
    using all supported layer types with different parameter combination.
    """
    input_shapes = [
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (27, 29, 1),
        (17, 1),
        (17, 4),
        (2, 3),
        (2, 3, 4, 5),
        (2, 3, 4, 5, 6),
        (2, 3, 4, 5, 6),
        (7, 8, 9, 10),
        (7, 8, 9, 10),
        (11, 12, 13),
        (11, 12, 13),
        (14, 15),
        (14, 15),
        (16, ),
        (16, ),
    ]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Flatten()(inputs[4]))
    outputs.append(Flatten()(inputs[5]))
    outputs.append(Flatten()(inputs[9]))
    outputs.append(Flatten()(inputs[10]))

    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[10], inputs[11]]))

    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[12], inputs[13]]))

    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))

    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))

    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))

    for inp in inputs[6:8]:
        for padding in ['valid', 'same', 'causal']:
            for s in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv1D(out_channels,
                                   s,
                                   padding=padding,
                                   dilation_rate=d)(inp))
        for padding_size in range(0, 5):
            outputs.append(ZeroPadding1D(padding_size)(inp))
        for crop_left in range(0, 2):
            for crop_right in range(0, 2):
                outputs.append(Cropping1D((crop_left, crop_right))(inp))
        for upsampling_factor in range(1, 5):
            outputs.append(UpSampling1D(upsampling_factor)(inp))
        for padding in ['valid', 'same']:
            for pool_factor in range(1, 6):
                for s in range(1, 4):
                    outputs.append(
                        MaxPooling1D(pool_factor, strides=s,
                                     padding=padding)(inp))
                    outputs.append(
                        AveragePooling1D(pool_factor,
                                         strides=s,
                                         padding=padding)(inp))
        outputs.append(GlobalMaxPooling1D()(inp))
        outputs.append(GlobalAveragePooling1D()(inp))

    for inp in [inputs[0], inputs[5]]:
        for padding in ['valid', 'same']:
            for h in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   padding=padding,
                                   dilation_rate=(d, 1))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            padding=padding,
                                            dilation_rate=(d, 1))(inp))
                    for sy in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   strides=(1, sy),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            strides=(sy, sy),
                                            padding=padding)(inp))
                for sy in range(1, 4):
                    outputs.append(
                        DepthwiseConv2D((h, 1),
                                        strides=(sy, sy),
                                        padding=padding)(inp))
                    outputs.append(
                        MaxPooling2D((h, 1), strides=(1, sy),
                                     padding=padding)(inp))
            for w in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4) if sy == 1 else [1]:
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   padding=padding,
                                   dilation_rate=(1, d))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            padding=padding,
                                            dilation_rate=(1, d))(inp))
                    for sx in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   strides=(sx, 1),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            strides=(sx, sx),
                                            padding=padding)(inp))
                for sx in range(1, 4):
                    outputs.append(
                        DepthwiseConv2D((1, w),
                                        strides=(sy, sy),
                                        padding=padding)(inp))
                    outputs.append(
                        MaxPooling2D((1, w), strides=(1, sx),
                                     padding=padding)(inp))
    outputs.append(ZeroPadding2D(2)(inputs[0]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[0]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[0]))
    outputs.append(Cropping2D(2)(inputs[0]))
    outputs.append(Cropping2D((2, 3))(inputs[0]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[0]))
    for y in range(1, 3):
        for x in range(1, 3):
            outputs.append(UpSampling2D(size=(y, x))(inputs[0]))
    outputs.append(GlobalAveragePooling2D()(inputs[0]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(AveragePooling2D((2, 2))(inputs[0]))
    outputs.append(MaxPooling2D((2, 2))(inputs[0]))
    outputs.append(UpSampling2D((2, 2))(inputs[0]))
    outputs.append(Dropout(0.5)(inputs[0]))

    # same as axis=-1
    outputs.append(Concatenate()([inputs[1], inputs[2]]))
    outputs.append(Concatenate(axis=3)([inputs[1], inputs[2]]))
    # axis=0 does not make sense, since dimension 0 is the batch dimension
    outputs.append(Concatenate(axis=1)([inputs[1], inputs[2]]))
    outputs.append(Concatenate(axis=2)([inputs[1], inputs[2]]))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))

    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[0]))
    # the first argument of DepthwiseConv2D is kernel_size (it has no filters parameter)
    outputs.append(DepthwiseConv2D((3, 3), use_bias=True)(inputs[0]))
    outputs.append(DepthwiseConv2D((3, 3), use_bias=False)(inputs[0]))

    outputs.append(Dense(2, use_bias=True)(inputs[3]))
    outputs.append(Dense(2, use_bias=False)(inputs[3]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[1]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[2]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[2]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(keras.layers.Add()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Subtract()([inputs[4], inputs[8]]))
    outputs.append(keras.layers.Multiply()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Average()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Maximum()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(Concatenate()([inputs[4], inputs[8], inputs[8]]))

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[3]),
        Activation('hard_sigmoid')(inputs[3]),
        Activation('selu')(inputs[3]),
        Activation('sigmoid')(inputs[3]),
        Activation('softplus')(inputs[3]),
        Activation('softmax')(inputs[3]),
        Activation('relu')(inputs[3]),
        LeakyReLU()(inputs[3]),
        ELU()(inputs[3]),
        PReLU()(inputs[2]),
        PReLU()(inputs[3]),
        PReLU()(inputs[4]),
        shared_activation(inputs[3]),
        Activation('linear')(inputs[4]),
        Activation('linear')(inputs[1]),
        x,
        shared_activation(x),
    ]

    print('Model has {} outputs.'.format(len(outputs)))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_full')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    batch_size = 1
    epochs = 10
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size)
    return model
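generate_input_data and generate_output_data are not part of this excerpt; plausible helpers that produce random dummy tensors shaped to match the model (an assumption based on how they are called above):

import numpy as np

def generate_input_data(n, input_shapes):
    # one random array per model input
    return [np.random.rand(n, *shape) for shape in input_shapes]

def generate_output_data(n, initial_outputs):
    # random targets shaped exactly like the model's predicted outputs
    return [np.random.rand(*out.shape) for out in initial_outputs]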
Example #13
    images = []
    measurements = []

    batch_size = 64

    # compile and train the model using the generator function
    train_generator = generator(train_samples, batch_size=batch_size)
    validation_generator = generator(validation_samples, batch_size=batch_size)

    from keras.models import Sequential
    from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout
    #from keras.layers.pooling import MaxPool2D
    model = Sequential()
    model.add(
        Cropping2D(cropping=((70, 25), (0, 0)), input_shape=(160, 320, 3)))
    model.add(Lambda(lambda x: x / 255.0 - 0.5))
    model.add(Dropout(0.2))
    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100))
    model.add(Dropout(0.5))
    model.add(Dense(50))
    model.add(Dense(10))
    model.add(Dense(1))
Example #14
#model.add(Activation("relu"))

#model.add(Conv2DTranspose(2,kernel_size=(2,2)))
#model.add(Activation("relu"))
#model.add(Conv2DTranspose(2,kernel_size=(2,2)))
#model.add(Activation("relu"))
#model.add(Conv2DTranspose(2,kernel_size=(8,8)))
#model.add(Activation("relu"))

# maybe add some deconvolution (Conv2DTranspose in Keras terms) layers in between the upsampling
model.add(UpSampling2D(2))
model.add(UpSampling2D(2))
model.add(UpSampling2D(8))

# cropping necessary to fit the input image's size to the target truth map
model.add(Cropping2D((2, 8)))

#model.add(UpSampling2D(size=(2,2)))
#model.add(Activation("relu"))
#model.add(UpSampling2D(size=(8,8)))

# addition layer 1
# deconvolution (2x)
# addition layer 2
# deconvolution (8x)
# cropping (8x)

#model.add(Cropping2D(cropping=((0, 0), (0, 0))))
#model.add(Activation("softmax"))
#model.add(Conv2DTranspose(2,kernel_size=(2,2)))
#model.add(Conv2DTranspose(2,kernel_size=(2,2)))
Example #15
def create_model(input_shape=(240, 240, 2), bn=True, do=0, ki="he_normal"):
    '''
    bn: whether to use batch-norm layers
    do: dropout probability (0 disables dropout)
    ki: kernel initializer (glorot_uniform, he_normal, ...)
    '''
    concat_axis = -1  # the last axis (channel axis)

    inputs = Input(input_shape)  # channels is 2: <t1, flair>

    conv1 = Conv2D(64, (5, 5),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(inputs)
    conv1 = BatchNormalization()(conv1) if bn else conv1
    conv1 = Dropout(do)(conv1) if do else conv1
    conv1 = Conv2D(64, (5, 5),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv1)
    conv1 = BatchNormalization()(conv1) if bn else conv1

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool1)
    conv2 = BatchNormalization()(conv2) if bn else conv2
    conv2 = Dropout(do)(conv2) if do else conv2
    conv2 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv2)
    conv2 = BatchNormalization()(conv2) if bn else conv2

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool2)
    conv3 = BatchNormalization()(conv3) if bn else conv3
    conv3 = Dropout(do)(conv3) if do else conv3
    conv3 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv3)
    conv3 = BatchNormalization()(conv3) if bn else conv3

    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool3)
    conv4 = BatchNormalization()(conv4) if bn else conv4
    conv4 = Dropout(do)(conv4) if do else conv4
    conv4 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv4)
    conv4 = BatchNormalization()(conv4) if bn else conv4

    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(512, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool4)
    conv5 = BatchNormalization()(conv5) if bn else conv5
    conv5 = Dropout(do)(conv5) if do else conv5
    conv5 = Conv2D(512, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv5)
    conv5 = BatchNormalization()(conv5) if bn else conv5
    upconv5 = Conv2DTranspose(256, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv5)

    ch, cw = get_crop_shape(conv4, upconv5)
    crop_conv4 = Cropping2D(cropping=(ch, cw))(conv4)
    cat6 = concatenate([upconv5, crop_conv4], axis=concat_axis)

    conv6 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(cat6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    conv6 = Dropout(do)(conv6) if do else conv6
    conv6 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    upconv6 = Conv2DTranspose(128, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv6)

    ch, cw = get_crop_shape(conv3, upconv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw))(conv3)
    up7 = concatenate([upconv6, crop_conv3], axis=concat_axis)

    conv7 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    conv7 = Dropout(do)(conv7) if do else conv7
    conv7 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    upconv7 = Conv2DTranspose(96, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv7)

    ch, cw = get_crop_shape(conv2, upconv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw))(conv2)
    up8 = concatenate([upconv7, crop_conv2], axis=concat_axis)

    conv8 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    conv8 = Dropout(do)(conv8) if do else conv8
    conv8 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    upconv8 = Conv2DTranspose(64, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv8)

    ch, cw = get_crop_shape(conv1, upconv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw))(conv1)
    up9 = concatenate([upconv8, crop_conv1], axis=concat_axis)

    conv9 = Conv2D(64, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    conv9 = Conv2D(64, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    ch, cw = get_pad_shape(conv9, conv1)
    pad_conv9 = ZeroPadding2D(padding=(ch, cw))(conv9)
    conv9 = Conv2D(1, (1, 1),
                   padding="same",
                   activation="sigmoid",
                   kernel_initializer=ki)(pad_conv9)

    model = Model(inputs=inputs, outputs=conv9)
    return model
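get_crop_shape and get_pad_shape are utilities not shown in this example. A common implementation computes the symmetric height/width offsets between two tensors for Cropping2D and ZeroPadding2D (a sketch, not the original helpers):

def get_crop_shape(target, refer):
    # how much to crop from `target` so its (H, W) matches `refer`
    ch = int(target.shape[1]) - int(refer.shape[1])
    cw = int(target.shape[2]) - int(refer.shape[2])
    return (ch // 2, ch - ch // 2), (cw // 2, cw - cw // 2)

def get_pad_shape(target, refer):
    # how much to pad onto `target` so its (H, W) matches `refer`
    ch = int(refer.shape[1]) - int(target.shape[1])
    cw = int(refer.shape[2]) - int(target.shape[2])
    return (ch // 2, ch - ch // 2), (cw // 2, cw - cw // 2)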
Example #16
# next I am going to build the most basic network possible just to make sure everything is working
# this single output node will predict my steering angle, which makes this
# a regression network, so I don't have to apply an activation function

from keras.models import Sequential
from keras.layers import Flatten, Dense, Activation, Lambda, Cropping2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D

# Building Net Architecture based on Nvidia Self Driving Car Neural Network
model = Sequential()
# Layer 1: Normalization
# Data preprocessing to normalize input images
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(180, 320, 3)))
# Cropping2D layer used to remove top 40 pixels, bottom 30 pixels of image
model.add(Cropping2D(cropping=((40, 30), (0, 0))))
# Layer 2: Convolutional. 24 filters, 5x5 kernel, 2x2 stride, relu activation function
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu"))
# Layer 3: Convolutional. 36 filters
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))
# Layer 4: Convolutional. 48 filters
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))
# Layer 5: Convolutional. 64 filters
model.add(Conv2D(64, (5, 5), activation="relu"))
# Layer 6: Convolutional. 64 filters
model.add(Conv2D(64, (5, 5), activation="relu"))
### Flatten output into a vector
model.add(Flatten())
# Layer 7: Fully Connected
model.add(Dense(100))
# Layer 8: Fully Connected
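The listing stops here; finishing the NVIDIA-style head would look like this (a sketch, not in the original):

model.add(Dense(50))
# Layer 9: Fully Connected
model.add(Dense(10))
# Output layer: single steering-angle prediction
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')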
Example #17
def FCN_8_helper():
    model = Sequential()
    model.add(Permute((1, 2, 3), input_shape=(512, 512, 3)))

    for l in Convblock(64, 1, 2):
        model.add(l)

    for l in Convblock(128, 2, 2):
        model.add(l)

    for l in Convblock(256, 3, 3):
        model.add(l)

    for l in Convblock(512, 4, 3):
        model.add(l)

    for l in Convblock(512, 5, 3):
        model.add(l)

    model.add(
        Convolution2D(4096,
                      kernel_size=(7, 7),
                      padding="same",
                      activation="relu",
                      name="fc6"))

    #Replacing fully connected layers of VGG Net using convolutions
    model.add(
        Convolution2D(4096,
                      kernel_size=(1, 1),
                      padding="same",
                      activation="relu",
                      name="fc7"))

    # Gives the classifications scores for each of the 21 classes including background
    model.add(
        Convolution2D(21,
                      kernel_size=(1, 1),
                      padding="same",
                      activation="relu",
                      name="score_fr"))

    Conv_size = model.layers[-1].output_shape[2]  # 16 if the image size is 512
    #print(Conv_size)

    model.add(
        Deconvolution2D(21,
                        kernel_size=(4, 4),
                        strides=(2, 2),
                        padding="valid",
                        activation=None,
                        name="score2"))

    # O = ((I - K + 2*P) / stride) + 1
    # O = output dimension after convolution
    # I = input dimension
    # K = kernel size
    # P = padding

    # for the transposed convolution: I = (O - 1) * stride + K
    Deconv_size = model.layers[-1].output_shape[2]  # 34 if image size is 512x512

    #print(Deconv_size)
    # Extra = 2 if image size is 512x512
    Extra = (Deconv_size - 2 * Conv_size)

    #print(Extra)

    #Cropping to get correct size
    model.add(Cropping2D(cropping=((0, Extra), (0, Extra))))

    return model
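Convblock is defined elsewhere in this project; a plausible version, inferred from the Convblock(filters, block_no, n_convs) calls above and the VGG naming convention (an assumption; MaxPooling2D would also need to be imported):

def Convblock(channel_dimension, block_no, no_of_convs):
    layers = []
    for i in range(no_of_convs):
        layers.append(
            Convolution2D(channel_dimension,
                          kernel_size=(3, 3),
                          padding="same",
                          activation="relu",
                          name="conv{}_{}".format(block_no, i + 1)))
    # halve the spatial resolution after each block, as in VGG-16
    layers.append(
        MaxPooling2D(pool_size=(2, 2),
                     strides=(2, 2),
                     name="pool{}".format(block_no)))
    return layers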
Example #18
## Importing keras libraries for model architecture

from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.layers import Lambda, Cropping2D
from keras import optimizers, Input

##########################################################################
#### Model Architecture

inp = Input(shape=(160, 320, 3))  # Input layer
x = Cropping2D(cropping=((50, 20), (0, 0)))(inp)  # crop the irrelevant portion of the image
x = Lambda(lambda x: (x / 255.0) - 0.5)(x)  # Images normalization

## 1st convolutional block of layers
x = Convolution2D(10, (5, 5), activation='relu',
                  kernel_initializer='normal')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Convolution2D(5, (1, 1), activation='relu', kernel_initializer='normal')(x)

## 2nd convolutional block of layers
x = Convolution2D(30, (5, 5), activation='relu',
                  kernel_initializer='normal')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Convolution2D(15, (1, 1), activation='relu',
                  kernel_initializer='normal')(x)
Example #19
def main():
    samples = []
    # If we keep different types of training data in different folders,
    # grab all of it and put it into one list
    dirs = [
        dI for dI in os.listdir('./drive_data/')
        if os.path.isdir(os.path.join('./drive_data', dI))
    ]
    print("Number of Training Directories: ", len(dirs))
    for l in dirs:
        path = 'drive_data/' + l + '/driving_log.csv'
        print(path)
        with open(path) as csvfile:
            reader = csv.reader(csvfile)
            count = 0
            for line in reader:
                # samples.append(line)
                if line != ' ':
                    if abs(float(line[3])) < 0.85:
                        count += 1
                        # thin out near-zero steering angles to balance the data
                        if abs(float(line[3])) < 0.0005:
                            if count == 55:
                                samples.append(line)
                                count = 0
                        else:
                            if count % 2 == 0:
                                samples.append(line)

    print("Total Samples: ", len(samples))

    samplearray = np.array(samples)[:, 3].astype(float)
    print(samplearray)
    unique, counts = np.unique(samplearray, return_counts=True)
    n_classes = len(unique)
    print("Number of classes: ", n_classes)

    fig, ax = plt.subplots()
    n, bins, patches = ax.hist(samplearray,
                               n_classes,
                               normed=1,
                               edgecolor='black')
    ax.set_xlabel('Angles')
    ax.set_ylabel('Density')
    ax.set_title('Histogram of sample data')
    fig.tight_layout()
    plt.show()

    # Split the data into training and validation samples
    samples = shuffle(samples)
    train_samples, validation_samples = train_test_split(samples,
                                                         test_size=0.2)
    correction = 0.2

    def getImage(sample, item=0):
        source_path = sample[item]
        filename = source_path.split('/')[-1]
        if source_path.split('/')[0].replace(' ', '') == 'IMG':
            path = 'udacitydata'
        else:
            path = source_path.split('/')[-3]
        filepath = 'drive_data/' + path + '/IMG/' + filename
        img = cv2.imread(filepath, 1)
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # imgplot = plt.imshow(rgb_img)
        # plt.show()
        return rgb_img

    # Our generator that grabs the images and steering angles and batching the set
    def generator(samples, batch_size=32):
        num_samples = len(samples)
        while 1:  # Loop forever so the generator never terminates
            samples = shuffle(samples)
            for offset in range(0, num_samples, batch_size):
                batch_samples = samples[offset:offset + batch_size]
                images = []
                angles = []
                for sample in batch_samples:
                    # create adjusted steering measurements for the side camera images
                    correction = 0.1  # this is a parameter to tune
                    steering_center = float(sample[3])
                    steering_left = steering_center + correction
                    steering_right = steering_center - correction

                    # read in images from center, left and right cameras
                    # Grab the filenames for each camera

                    img_center = getImage(sample, 0)
                    img_left = getImage(sample, 1)
                    img_right = getImage(sample, 2)

                    # add images and angles to data set
                    images.extend([img_center, img_left, img_right])
                    angles.extend(
                        [steering_center, steering_left, steering_right])

                    # add flipped image
                    images.append(cv2.flip(img_center, 1))
                    angles.append(steering_center * -1.0)

                X_train = np.array(images)
                y_train = np.array(angles)
                yield shuffle(X_train, y_train)

    #
    train_generator = generator(train_samples, batch_size=32)
    validation_generator = generator(validation_samples, batch_size=32)

    def resize_function(image):
        from keras.backend import tf as ktf
        # resized = ktf.image.resize_images(image, (35, 160))
        resized = image / 255.0 - 0.5
        return resized

    # If we load an existing model, reload it and use existing weights to continue training.
    print("file: ", args.weights)
    if args.weights != ' ':
        print("Continuing training from model.h5...")
        model = load_model(args.weights)
        adam = Adam(lr=0.00001)
        model.compile(loss='mse', optimizer=adam)
    else:
        print("Training from scratch...")
        model = Sequential()
        # Preprocess incoming data, centered around zero with small standard deviation
        model.add(
            Cropping2D(cropping=((60, 20), (5, 5)), input_shape=(160, 320, 3)))
        model.add(Lambda(resize_function))
        # Network based on the NVIDIA Model
        model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Dropout(0.5))
        model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(64, 3, 3, activation='relu'))
        model.add(Convolution2D(64, 3, 3, activation='relu'))
        model.add(Flatten())
        model.add(Dense(100))
        model.add(Dropout(0.5))
        model.add(Dense(50))
        model.add(Dense(10))
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='adam')

    print(model.summary())
    history_object = model.fit_generator(
        train_generator,
        samples_per_epoch=len(train_samples) * 4,
        validation_data=validation_generator,
        nb_val_samples=len(validation_samples),
        nb_epoch=args.epochs,
        verbose=1)
    model.save('model.h5')

    ### print the keys contained in the history object
    plt.ioff()
    print(history_object.history.keys())
    plt.plot(history_object.history['loss'])
    plt.plot(history_object.history['val_loss'])
    plt.title('model mean squared error loss')
    plt.ylabel('mean squared error loss')
    plt.xlabel('epoch')
    plt.legend(['training set', 'validation set'], loc='upper right')
    plt.show()
Example #20
X_valid_gen, y_valid_gen = image_generator(X_validation, y_validation)
assert len(X_train_gen) == len(y_train_gen), \
    "ERROR: training example count mismatch: {} vs {}".format(
        len(X_train_gen), len(y_train_gen))
assert len(X_valid_gen) == len(y_valid_gen), \
    "ERROR: validation example count mismatch: {} vs {}".format(
        len(X_valid_gen), len(y_valid_gen))
print("Total training examples: {}\nTotal validation examples: {}".format(
    len(X_train_gen), len(X_valid_gen)))
show_steering(y_train_gen, y_valid_gen)

# Model
model = Sequential()
model.add(
    Cropping2D(cropping=((75, 25), (0, 0)),
               input_shape=(X_train_gen[0].shape[0], X_train_gen[0].shape[1],
                            X_train_gen[0].shape[2]),
               data_format="channels_last"))
model.add(
    Lambda(resize_img, input_shape=(160, 320, 3), output_shape=(60, 120, 3)))

model.add(Conv2D(3, (1, 1), padding='same'))
model.add(ELU())
model.add(BatchNormalization())

model.add(Conv2D(16, (5, 5), strides=(2, 2), padding='same'))
model.add(ELU())
model.add(BatchNormalization())

model.add(Conv2D(32, (5, 5), strides=(2, 2), padding='same'))
model.add(ELU())
model.add(BatchNormalization())
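resize_img, used in the Lambda near the top of this model, is not shown. A typical implementation downscales the cropped frame inside the graph to the (60, 120) output_shape declared above (an assumption, not the author's code):

def resize_img(x):
    # import inside the function so the Lambda stays serializable when the model is saved
    import tensorflow as tf
    return tf.image.resize_images(x, (60, 120))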
Example #21

train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
import matplotlib.pyplot as plt
from keras import backend as K
# build the model with Keras
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0,
                 input_shape=(160, 320, 3)))  #normalize use lambda
model.add(Cropping2D(cropping=((70, 25), (0, 0))))  #Cropping the picture
model.add(Convolution2D(24, 5, 5, subsample=(2, 2),
                        activation="relu"))  #Conv1 layer
model.add(Convolution2D(36, 5, 5, subsample=(2, 2),
                        activation="relu"))  #Conv2 layer
model.add(Convolution2D(48, 5, 5, subsample=(2, 2),
                        activation="relu"))  #Conv3 layer
model.add(Convolution2D(64, 3, 3, activation="relu"))  #Conv4
model.add(Convolution2D(64, 3, 3, activation="relu"))  #Conv5
model.add(Flatten())  #Flatten layer
model.add(Dense(100))  #Full connect layer
model.add(Dense(50))  #Full connect layer
model.add(Dense(10))  #Full connect layer
model.add(Dense(1))  #Full connect layer

model.compile(loss='mse', optimizer='adam')
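A hedged fit call for this model, matching the Keras 1 style generator API used above (epoch count is a placeholder):

model.fit_generator(train_generator,
                    samples_per_epoch=len(train_samples),
                    validation_data=validation_generator,
                    nb_val_samples=len(validation_samples),
                    nb_epoch=5)
model.save('model.h5')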
Example #22
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
    '''
    Adjusts the input `p` to match the shape of the input `ip`,
    or for situations where the number of output filters needs to
    be changed
    # Arguments:
        p: input tensor which needs to be modified
        ip: input tensor whose shape needs to be matched
        filters: number of output filters to be matched
        weight_decay: l2 regularization weight
        id: string id
    # Returns:
        an adjusted Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2

    with K.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            with K.name_scope('adjust_reduction_block_%s' % id):
                p = Activation('relu', name='adjust_relu_1_%s' % id)(p)

                p1 = AveragePooling2D((1, 1),
                                      strides=(2, 2),
                                      padding='valid',
                                      name='adjust_avg_pool_1_%s' % id)(p)
                p1 = Conv2D(filters // 2, (1, 1),
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id,
                            kernel_initializer='he_normal')(p1)

                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1),
                                      strides=(2, 2),
                                      padding='valid',
                                      name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1),
                            padding='same',
                            use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id,
                            kernel_initializer='he_normal')(p2)

                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim,
                                       momentum=_BN_DECAY,
                                       epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)

        elif p._keras_shape[channel_dim] != filters:
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1),
                           strides=(1, 1),
                           padding='same',
                           name='adjust_conv_projection_%s' % id,
                           use_bias=False,
                           kernel_regularizer=l2(weight_decay),
                           kernel_initializer='he_normal')(p)
                p = BatchNormalization(axis=channel_dim,
                                       momentum=_BN_DECAY,
                                       epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
    return p
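
The `p2` branch above uses a ZeroPadding2D/Cropping2D pair to shift the feature map one pixel toward the bottom-right before the strided 1x1 average pool, so `p1` and `p2` subsample complementary pixel grids. A minimal shape-only sketch of the trick (the 8x8 input size is an arbitrary assumption):

from keras.layers import Input, ZeroPadding2D, Cropping2D, AveragePooling2D
from keras.models import Model

inp = Input(shape=(8, 8, 1))
x = ZeroPadding2D(padding=((0, 1), (0, 1)))(inp)  # pad bottom/right: 8x8 -> 9x9
x = Cropping2D(cropping=((1, 0), (1, 0)))(x)      # crop top/left: back to 8x8, shifted one pixel
x = AveragePooling2D((1, 1), strides=(2, 2))(x)   # subsample the shifted grid: 8x8 -> 4x4
print(Model(inp, x).output_shape)                 # (None, 4, 4, 1)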
Example No. 23
    def generator(self):

        if self.G:
            return self.G
        ''''''
        #Style FC: the style mapping network (a stack of fully connected layers)
        # inp_s = Input(shape = [im_size, im_size, 3])                        # noise
        vgg = VGG16(include_top=False, input_shape=(256, 256, 3))  #(inp_s)
        vgg_output = vgg.get_layer('block5_pool').output
        vgg.trainable = False

        flat = Flatten()(vgg_output)
        ''''''

        sty = Dense(512, kernel_initializer='he_normal')(flat)
        sty = LeakyReLU(0.1)(sty)
        sty = Dense(512, kernel_initializer='he_normal')(sty)
        sty = LeakyReLU(0.1)(sty)
        sty = Dense(512, kernel_initializer='he_normal')(sty)
        sty = LeakyReLU(0.1)(sty)
        sty = Dense(512, kernel_initializer='he_normal')(sty)
        sty = LeakyReLU(0.1)(sty)
        sty = Dense(512, kernel_initializer='he_normal')(sty)
        sty = LeakyReLU(0.1)(sty)
        sty = Dense(512, kernel_initializer='he_normal')(sty)
        sty = LeakyReLU(0.1)(sty)
        sty = Dense(512, kernel_initializer='he_normal')(sty)
        sty = LeakyReLU(0.1)(sty)
        sty = Dense(512, kernel_initializer='he_normal')(sty)
        sty = LeakyReLU(0.1)(sty)

        #Get the noise image and crop for each size
        inp_n = Input(shape=[im_size, im_size, 1])  # noiseImage
        noi = [Activation('linear')(inp_n)]
        curr_size = im_size
        while curr_size > 4:
            curr_size = int(curr_size / 2)
            noi.append(Cropping2D(int(curr_size / 2))(noi[-1]))

        #Here do the actual generation work
        inp = Input(shape=[1])
        x = Dense(4 * 4 * 512, kernel_initializer='he_normal')(inp)
        x = Reshape([4, 4, 512])(x)
        x = g_block(x, sty, noi[-1], 512, u=False)

        if (im_size >= 1024):
            x = g_block(x, sty, noi[7], 512)  # Size / 128
        if (im_size >= 512):
            x = g_block(x, sty, noi[6], 384)  # Size / 64
        if (im_size >= 256):
            x = g_block(x, sty, noi[5], 256)  # Size / 32
        if (im_size >= 128):
            x = g_block(x, sty, noi[4], 192)  # Size / 16
        if (im_size >= 64):
            x = g_block(x, sty, noi[3], 128)  # Size / 8

        x = g_block(x, sty, noi[2], 64)  # Size / 4
        x = g_block(x, sty, noi[1], 32)  # Size / 2
        x = g_block(x, sty, noi[0], 16)  # Size

        x = Conv2D(filters=3,
                   kernel_size=1,
                   padding='same',
                   activation='sigmoid')(x)

        self.G = Model(inputs=[vgg.input, inp_n, inp], outputs=x)

        return self.G
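
The noise pyramid in the generator above works because `Cropping2D(c)` with a single integer center-crops `c` pixels from every side, so each loop iteration halves the spatial size. A standalone shape check (assuming `im_size = 256`, chosen arbitrarily):

from keras import backend as K
from keras.layers import Input, Cropping2D, Activation

im_size = 256  # assumption for this sketch
inp_n = Input(shape=[im_size, im_size, 1])
noi = [Activation('linear')(inp_n)]
curr_size = im_size
while curr_size > 4:
    curr_size = int(curr_size / 2)
    # Cropping2D(c) center-crops c pixels from every side, halving the size
    noi.append(Cropping2D(int(curr_size / 2))(noi[-1]))

print([K.int_shape(n)[1] for n in noi])  # [256, 128, 64, 32, 16, 8, 4]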
Example No. 24
                         steps_per_epoch=5,
                         epochs=EPOCHS,
                         validation_data=valid_generate,
                         validation_steps=100,
                         callbacks=[checkpoint],
                         max_q_size=1,
                         pickle_safe=True)
    model_save(model_, MODEL_NAME)
# Create new model if flag is set
else:
    ##################### Build Model Architecture ###########################
    # Crop 50 pixels from the top of the image and 20 from the bottom
    Model_ = Sequential()
    Model_.add(
        Cropping2D(data_format="channels_last",
                   cropping=((50, 20), (0, 0)),
                   input_shape=(160, 320, 3)))
    Model_.add(Lambda(resize_image))
    Model_.add(Lambda(lambda x: x / 255 - 0.5))

    ################## Block-1 ########################
    Model_.add(
        Conv2D(24, (5, 5),
               kernel_initializer='he_normal',
               strides=(2, 2),
               padding='valid'))
    Model_.add(BatchNormalization())
    Model_.add(ELU())
    ################# Block-2 ########################
    Model_.add(
        Conv2D(36, (5, 5),
Example No. 25
# Set our batch size
batch_size = 32

# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)

#Nvidia CNN Model
model = Sequential()

# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))

# trim image to only see section with road
model.add(Cropping2D(cropping=((50, 20), (0, 0))))

# Layer 1: convolution, 24 filters, 5x5 kernel, 2x2 stride
model.add(Conv2D(24, (5, 5), strides=(2, 2)))
model.add(Activation('elu'))

# Layer 2: convolution, 36 filters, 5x5 kernel, 2x2 stride
model.add(Conv2D(36, (5, 5), strides=(2, 2)))
model.add(Activation('elu'))

# Layer 3: convolution, 48 filters, 5x5 kernel, 2x2 stride
model.add(Conv2D(48, (5, 5), strides=(2, 2)))
model.add(Activation('elu'))

# Layer 4: convolution, 64 filters, 3x3 kernel, 1x1 stride
model.add(Conv2D(64, (3, 3)))
Example No. 26
def LenetTest():
    """The LeNet model from the class videos; no longer used after testing."""
    images = []
    measurements = []
    path = './data/IMG/'
    lines = []
    with open(path + '/driving_log.csv') as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            lines.append(line)
    for line in lines:
        image_center = cv2.imread(path + line[0].split('/')[-1])
        image_left = cv2.imread(path + line[1].split('/')[-1])
        image_right = cv2.imread(path + line[2].split('/')[-1])
        images.append(image_center)
        images.append(image_left)
        images.append(image_right)
        measurement_center = float(line[3])
        correction = 0.2
        measurement_left = measurement_center + correction
        measurement_right = measurement_center - correction
        measurements.append(measurement_center)
        measurements.append(measurement_left)
        measurements.append(measurement_right)
    #Flipping Images And Steering Measurements
    augmented_images, augmented_measurements = [], []
    for image, measurement in zip(images, measurements):
        augmented_images.append(image)
        augmented_images.append(cv2.flip(image, 1))
        augmented_measurements.append(measurement)
        augmented_measurements.append(measurement * (-1.0))
    # X_train = np.array(images)
    # y_train = np.array(measurements)
    X_train = np.array(augmented_images)
    y_train = np.array(augmented_measurements)
    print(X_train.shape)
    print(y_train.shape)
    #train the data
    # model = Sequential()
    # model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3))) #normalizing the data and mean centering the data
    # model.add(Flatten())
    # model.add(Dense(1))
    model = Sequential()
    model.add(Lambda(
        lambda x: (x / 255.0) - 0.5,
        input_shape=(160, 320,
                     3)))  #normalizing the data and mean centering the data
    model.add(Cropping2D(
        cropping=((70, 25), (0,
                             0))))  #remove the top 70 pixels and the bottom 25
    model.add(Convolution2D(6, 5, 5, activation="relu"))
    model.add(MaxPooling2D())
    model.add(Convolution2D(6, 5, 5, activation="relu"))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(120))
    model.add(Dense(84))
    model.add(Dense(1))

    model.compile(loss='mse', optimizer='adam')
    model.fit(X_train, y_train, validation_split=0.2, shuffle=True,
              nb_epoch=2)  #default is 10 epochs
    model.save('model.h5')
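
A hedged inference sketch for the model saved above (the frame file name is a placeholder; the model expects raw 160x320x3 frames, since normalization and cropping happen inside the network):

from keras.models import load_model
import cv2

model = load_model('model.h5')
frame = cv2.imread('sample_frame.jpg')            # assumed shape (160, 320, 3)
steering = model.predict(frame[None, ...])[0, 0]  # add batch dim, take the scalar
print(steering)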
Example No. 27
steering_angles_left_flip, steering_angles_right_flip), axis=0)
#shuffle the combined data after concatenation
dataset_inputs, dataset_labels = shuffle(dataset_inputs, dataset_labels)
print("total number of inputs and labels is:", dataset_inputs.shape,
      dataset_labels.shape)
#crop values for the input images, keeping the same suggested ratio from the lesson
crop_t = int((50 / 160) * image_size[0])
crop_b = int((20 / 160) * image_size[0])  #both crops are vertical, so scale by the image height
#define the architecture of the model.
#similar to the Nvidia model in that it is Normalisation followed by convolutions
#followed by flat layers and a single output node
model = Sequential()
#normalisation to a mean of 0 by dividing with pixel value 255 and subtracting 0.5
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_size_input))
#crop the input of this layer by crop_t from the top and crop_b from the bottom
model.add(Cropping2D(cropping=((crop_t, crop_b), (0, 0))))
#convolution layer with input shape 60,160,3
#Traffic Sign Classifier model adapted to generally flow like the Nvidia model
#normalisation>convolutions>flat>single output
#convolutional layer, 32 filters with a 3x3 kernel, border mode set to valid
model.add(Convolution2D(32, 3, 3, border_mode='valid'))
#ELU activation
model.add(ELU())
#maxpool layer with 2x2 filter
model.add(MaxPooling2D(pool_size=(2, 2)))
#convolutional layer, 32 filters with a 3x3 kernel
model.add(Convolution2D(32, 3, 3))
#ELU activation
model.add(ELU())
#maxpool layer with 2x2 filter
model.add(MaxPooling2D(pool_size=(2, 2)))
Example No. 28
def get_unet(img_shape=None, first5=True):
    inputs = Input(shape=img_shape)
    concat_axis = -1

    if first5: filters = 5
    else: filters = 3
    conv1 = conv_bn_relu(64, filters, inputs)  ## conv + batch normalization + relu
    conv1 = conv_bn_relu(64, filters, conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_bn_relu(96, 3, pool1)
    conv2 = conv_bn_relu(96, 3, conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = conv_bn_relu(128, 3, pool2)
    conv3 = conv_bn_relu(128, 3, conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = conv_bn_relu(256, 3, pool3)
    conv4 = conv_bn_relu(256, 4, conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = conv_bn_relu(512, 3, pool4)
    conv5 = conv_bn_relu(512, 3, conv5)

    up_conv5 = UpSampling2D(size=(2, 2))(conv5)
    ch, cw = get_crop_shape(conv4, up_conv5)
    crop_conv4 = Cropping2D(cropping=(ch, cw))(conv4)
    up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)
    conv6 = conv_bn_relu(256, 3, up6)
    conv6 = conv_bn_relu(256, 3, conv6)

    up_conv6 = UpSampling2D(size=(2, 2))(conv6)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw))(conv3)
    up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = conv_bn_relu(128, 3, up7)
    conv7 = conv_bn_relu(128, 3, conv7)

    up_conv7 = UpSampling2D(size=(2, 2))(conv7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw))(conv2)
    up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = conv_bn_relu(96, 3, up8)
    conv8 = conv_bn_relu(96, 3, conv8)

    up_conv8 = UpSampling2D(size=(2, 2))(conv8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw))(conv1)
    up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = conv_bn_relu(64, 3, up9)
    conv9 = conv_bn_relu(64, 3, conv9)

    ch, cw = get_crop_shape(inputs, conv9)
    conv9 = ZeroPadding2D(padding=(ch, cw))(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid',
                    padding='same')(conv9)  # optionally add kernel_initializer='he_normal'
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=(2e-4)),
                  loss=dice_coef_loss)  # Dice loss for the segmentation mask

    return model
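
`get_crop_shape` is called throughout the function above but not included in this excerpt. A minimal sketch under the assumed semantics (how much to crop the larger `target` tensor so its spatial dims match `refer`, returned as `((top, bottom), (left, right))`):

from keras import backend as K

def get_crop_shape(target, refer):
    # Assumed helper: spatial difference between `target` (larger) and `refer`,
    # split so the two sides differ by at most one pixel.
    ch = K.int_shape(target)[1] - K.int_shape(refer)[1]  # height difference
    cw = K.int_shape(target)[2] - K.int_shape(refer)[2]  # width difference
    assert ch >= 0 and cw >= 0
    return (ch // 2, ch - ch // 2), (cw // 2, cw - cw // 2)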
Example No. 29
def get_decoder_try(embedding,
                    skip_connections,
                    n_filters=16,
                    dropout=0.5,
                    batchnorm=True,
                    channels_out=3,
                    out_clf=5):
    # unpack skip connections with shapes (size, channels): (16, 128), (32, 128), (64, 128), (128, 64), (256, 32), (512, 16)
    c5, c4, c3, c2, c1, c0, in_enc = skip_connections[0], skip_connections[1], skip_connections[2], skip_connections[3], \
                                     skip_connections[4], skip_connections[5], skip_connections[-1]

    # reshape to image
    x = layers.Reshape((8, 8, 32))(embedding)
    print("Decoder input shape:", embedding.shape, "reshaped:", x.shape)

    # extra expansive path
    x = Conv2DTranspose(n_filters * 8, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        name='u4')(x)  # 16
    x = concatenate([x, c5])
    x = Dropout(dropout)(x)
    x = conv2d_block(x,
                     n_filters=n_filters * 8,
                     kernel_size=3,
                     batchnorm=batchnorm,
                     name='c4')

    x = Conv2DTranspose(n_filters * 8, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        name='u5')(x)  # 32
    x = concatenate([x, c4])
    x = Dropout(dropout)(x)
    x = conv2d_block(x,
                     n_filters=n_filters * 8,
                     kernel_size=3,
                     batchnorm=batchnorm,
                     name='c5')

    # expansive path:
    u6 = Conv2DTranspose(n_filters * 8, (3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='u6')(x)  # 64
    u6 = concatenate([u6, c3])
    u6 = Dropout(dropout)(u6)
    c6 = conv2d_block(u6,
                      n_filters=n_filters * 8,
                      kernel_size=3,
                      batchnorm=batchnorm,
                      name='c6')

    u7 = Conv2DTranspose(n_filters * 4, (3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='u7')(c6)  # 128
    u7 = concatenate([u7, c2])
    u7 = Dropout(dropout)(u7)
    c7 = conv2d_block(u7,
                      n_filters=n_filters * 4,
                      kernel_size=3,
                      batchnorm=batchnorm,
                      name='c7')

    u8 = Conv2DTranspose(n_filters * 2, (3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='u8')(c7)  # 256
    u8 = concatenate([u8, c1])
    u8 = Dropout(dropout)(u8)
    c8 = conv2d_block(u8,
                      n_filters=n_filters * 2,
                      kernel_size=3,
                      batchnorm=batchnorm,
                      name='c8')

    u9 = Conv2DTranspose(n_filters * 1, (3, 3),
                         strides=(2, 2),
                         padding='same',
                         name='u9')(c8)  # 512
    u9 = concatenate([u9, c0])
    u9 = Dropout(dropout)(u9)
    c9 = conv2d_block(u9,
                      n_filters=n_filters * 1,
                      kernel_size=3,
                      batchnorm=batchnorm,
                      name='c9')

    # crop to original size
    c9 = Cropping2D(cropping=((8, 9), (38, 38)))(c9)
    c9 = concatenate([c9, in_enc], name='c9_concat')  # new concat with input

    # classification layer
    clf = conv2d_block(c9,
                       n_filters=out_clf,
                       kernel_size=3,
                       batchnorm=batchnorm,
                       name='clf_layer')
    c9 = concatenate([c9, clf])

    # output regression layer
    outputs = conv2d_block(c9,
                           n_filters=channels_out,
                           kernel_size=3,
                           batchnorm=batchnorm,
                           name='out_dec')

    return Model(inputs=[embedding, c5, c4, c3, c2, c1, c0, in_enc],
                 outputs=[outputs, clf],
                 name='decoder')
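
`conv2d_block` is referenced throughout the decoder but not defined in this excerpt; a hedged sketch of the usual helper it names (two 2D convolutions with optional batch normalization and ReLU), matching the call signature used above:

from keras import layers

def conv2d_block(x, n_filters=16, kernel_size=3, batchnorm=True, name=None):
    # Assumed implementation: two conv layers, each optionally followed by
    # batch normalization, then a ReLU activation.
    for i in (1, 2):
        x = layers.Conv2D(n_filters, (kernel_size, kernel_size),
                          padding='same', kernel_initializer='he_normal',
                          name=None if name is None else '%s_conv%d' % (name, i))(x)
        if batchnorm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
    return x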
Example No. 30
def UpSampling2D_BN_Act(kSize, crop, outPad, concateInp, inp):
    # Upsample, then crop/zero-pad so the spatial dims line up with the
    # skip connection before concatenating along the channel axis.
    C = UpSampling2D(size=kSize)(inp)
    C_Crop = Cropping2D(cropping=crop)(C)
    C_Zpad = ZeroPadding2D(padding=outPad)(C_Crop)
    C_Con = concatenate([C_Zpad, concateInp], axis=-1)
    return C_Con
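
A hedged usage sketch for the helper above: upsampling a 7x7 decoder tensor to 14x14 and zero-padding it to match a 15x15 skip connection (all shapes here are arbitrary assumptions):

from keras.layers import Input

dec = Input(shape=(7, 7, 64))     # decoder feature map
skip = Input(shape=(15, 15, 32))  # encoder skip connection
out = UpSampling2D_BN_Act(kSize=(2, 2),
                          crop=((0, 0), (0, 0)),    # nothing to trim in this case
                          outPad=((0, 1), (0, 1)),  # pad 14x14 up to 15x15
                          concateInp=skip,
                          inp=dec)
# out has shape (None, 15, 15, 96): 64 + 32 channels after concatenation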