Example #1
0
# memory
use_generators = False  # if True, feed the model through Python generators instead of in-memory arrays

# Hyperparameters
validation_size = 0.2   # ratio of images used as validation set
correction = 0.3        # correction applied to the left and right camera images
batch_size = 32
dropout = 0.2           # probability of dropping out a unit
epochs = 5

# Data loader, used to load all the sample images and labels (steering angles)
data_loader = DataLoader()

if use_generators:
    X_train, X_valid, y_train, y_valid = data_loader.load_samples_v1(validation_size, correction)
    training_generator = data_loader.generator(X_train, y_train, batch_size=batch_size)
    validation_generator = data_loader.generator(X_valid, y_valid, batch_size=batch_size)
    # Fixed typo in the user-visible message ("Trainig" -> "Training")
    print("Training samples:", len(X_train))
    print("Validation samples:", len(X_valid))
else:
    X_train, y_train = data_loader.load_samples_v2(correction)

# Creates the model
model = Sequential()
# Normalization layer: scale pixel values from [0, 255] to [-1, 1]
model.add(Lambda(lambda x: x / 127.5 - 1, input_shape=(65, 320, 3)))
model.add(Conv2D(24, (5, 5), activation="relu", strides=(2, 2)))  # 5x5 conv + ReLU. Outputs 24 feature maps of 31x158
model.add(Dropout(dropout))
model.add(Conv2D(36, (5, 5), activation="relu", strides=(2, 2)))  # 5x5 conv + ReLU. Outputs 36 feature maps of 14x77
model.add(Dropout(dropout))
model.add(Conv2D(48, (5, 5), activation="relu", strides=(2, 2)))  # 5x5 conv + ReLU. Outputs 48 feature maps of 5x37
model.add(Dropout(dropout))
Example #2
0
def main(_):
    """Train the steering-angle model end to end.

    Loads the dataset, splits it, builds the model from the image processor's
    output shape, trains with generators, and saves the model plus diagnostic
    plots. The unused positional argument is presumably the leftover argv
    passed by the flags framework entry point (tf.app.run-style) — confirm.
    """
    print('Configuration = (PP: {}, R: {}, C: {}, B: {}, RT: {})'.format(
        FLAGS.preprocess, 
        FLAGS.regenerate, 
        FLAGS.clahe,
        FLAGS.blur,
        FLAGS.random_transform
    ))

    data_loader = DataLoader(TRAIN_FILE, LOG_FILE, IMG_DIR, 
                             angle_correction = FLAGS.angle_correction,
                             mirror_min_angle = FLAGS.mirror_min_angle,
                             normalize_factor = FLAGS.normalize_factor,
                             normalize_bins = FLAGS.normalize_bins)

    # Optionally regenerates the cached/augmented dataset from the raw log
    images, measurements = data_loader.load_dataset(regenerate = FLAGS.regenerate)

    print('Total samples: {}'.format(images.shape[0]))

    # Split in training and validation
    X_train, X_valid, Y_train, Y_valid = data_loader.split_train_test(images, measurements)

    print('Training samples: {}'.format(X_train.shape[0]))
    print('Validation samples: {}'.format(X_valid.shape[0]))

    # Column 0 of the labels appears to be the steering angle — TODO confirm
    plots.plot_distribution(Y_train[:,0], 'Training set distribution', save_path = os.path.join('images', 'train_distribution'))
    plots.plot_distribution(Y_valid[:,0], 'Validation set distribution', save_path = os.path.join('images', 'valid_distribution'))

    # Random transforms (augmentation) are applied to the training set only
    train_generator = data_loader.generator(X_train, Y_train, FLAGS.batch_size, 
                                            preprocess = FLAGS.preprocess, 
                                            random_transform = FLAGS.random_transform)
    valid_generator = data_loader.generator(X_valid, Y_valid, FLAGS.batch_size, 
                                            preprocess = FLAGS.preprocess, 
                                            random_transform = False)

    # The image processor gives us the input shape for the model (e.g. after cropping and resizing)
    model = build_model(ip.output_shape())

    # summary() prints the layer table itself and returns None; wrapping it in
    # print() emitted a spurious trailing "None" line.
    model.summary()

    model.compile(optimizer = Adam(lr = FLAGS.learning_rate), loss = FLAGS.loss)

    # Timestamp shared by the log dir and the saved model name
    date_time_str = time.strftime('%Y%m%d-%H%M%S')

    callbacks = [
        # To be used with tensorboard, creates the logs for the losses in the logs dir
        TensorBoard(log_dir = os.path.join(LOGS_DIR, date_time_str), 
                    histogram_freq = 0,
                    write_graph = False,
                    write_images = False),
        # Early stopping guard: stop after 3 epochs without val_loss improvement
        EarlyStopping(monitor='val_loss', 
                    patience = 3,
                    verbose = 0, 
                    mode = 'min')
    ]
    
    model_name = 'model_{}'.format(date_time_str)

    print('Training {} on {} samples (EP: {}, BS: {}, LR: {}, DO: {}, BN: {}, A: {}, L: {})...'.format(
        model_name, X_train.shape[0], FLAGS.epochs, FLAGS.batch_size, FLAGS.learning_rate, FLAGS.dropout, 
        '{}'.format(FLAGS.batch_norm if FLAGS.batch_norm > 0 else 'OFF'),
        FLAGS.activation, FLAGS.loss
    ))
    
    # Train the model
    # NOTE(review): nb_epoch/samples_per_epoch/nb_val_samples are the Keras 1
    # fit_generator kwargs; Keras 2 renamed them to epochs/steps_per_epoch/
    # validation_steps (with per-batch semantics). Left unchanged — confirm
    # the installed Keras version before migrating.
    history = model.fit_generator(train_generator, 
                                  nb_epoch = FLAGS.epochs,
                                  samples_per_epoch = X_train.shape[0],
                                  validation_data = valid_generator,
                                  nb_val_samples = X_valid.shape[0],
                                  callbacks = callbacks)

    model.save(os.path.join(MODELS_DIR, model_name + '.h5'))

    plots.plot_history(model_name, history)