Example #1
def go(model_name, epochs=50, batch_size=128, inputs='./log/*.jpg', limit=None, aug_mult=1, aug_perc=0.0, resume=False):

    print('working on model', model_name)

    '''
    modify config.json to select the model to train.
    '''
    if conf.model_selection == "nvidia_transposed_inputs":
        model = models.get_nvidia_model()
    elif conf.model_selection == "nvidia_standard_inputs":
        model = models.get_nvidia_model2()
    elif conf.model_selection == "simple":
        model = models.get_simple_model()
    else:
        model = models.get_nvidia_model()

    if resume:
        print("resuming training of", model_name)
        model = keras.models.load_model(model_name)
        model.ch_order = 'channel_last'

    transposeImages = (model.ch_order == 'channel_first')
    
    callbacks = [
        #keras.callbacks.EarlyStopping(monitor='val_loss', patience=conf.training_patience, verbose=0),
        keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=0),
    ]


    #Train on session images
    train_generator, validation_generator, n_train, n_val = make_generators(inputs, limit=limit, batch_size=batch_size, aug_perc=aug_perc, transposeImages=transposeImages)

    history = model.fit_generator(train_generator, 
        samples_per_epoch = n_train,
        validation_data = validation_generator,
        nb_val_samples = n_val,
        nb_epoch=epochs,
        verbose=1,
        callbacks=callbacks)
    
    try:
        if do_plot:
            # summarize history for loss
            plt.plot(history.history['loss'])
            plt.plot(history.history['val_loss'])
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train', 'test'], loc='upper left')
            plt.savefig('loss.png')
    except Exception as e:
        print("problems with loss graph:", e)
Example #2
def go(model_name, epochs=50, inputs='./log/*.jpg', limit=None):

    print('working on model', model_name)
    '''
    modify config.json to select the model to train.
    '''
    model = models.get_nvidia_model(conf.num_outputs)
    '''
    display layer summary and weights info
    '''
    #models.show_model_summary(model)

    callbacks = [
        keras.callbacks.EarlyStopping(monitor='val_loss',
                                      patience=conf.training_patience,
                                      verbose=0),
        keras.callbacks.ModelCheckpoint(model_name,
                                        monitor='val_loss',
                                        save_best_only=True,
                                        verbose=0),
    ]

    batch_size = conf.training_batch_size

    #Train on session images
    train_generator, validation_generator, n_train, n_val = make_generators(
        inputs, limit=limit, batch_size=batch_size)

    if n_train == 0:
        print('no training data found')
        return

    steps_per_epoch = n_train // batch_size
    validation_steps = n_val // batch_size

    print("steps_per_epoch", steps_per_epoch, "validation_steps",
          validation_steps)

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps,
                                  epochs=epochs,
                                  verbose=1,
                                  callbacks=callbacks)

    try:
        if do_plot:
            # summarize history for loss
            plt.plot(history.history['loss'])
            plt.plot(history.history['val_loss'])
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train', 'test'], loc='upper left')
            plt.savefig(model_name + 'loss.png')
            plt.show()
    except Exception as e:
        print("problems with loss graph:", e)
Example #3
def go(model_name, epochs=50, inputs='./log/*.jpg', limit=None, aug_mult=1, aug_perc=0.0):

    print('working on model', model_name)

    '''
    modify config.json to select the model to train.
    '''
    if conf.model_selection == "nvidia_transposed_inputs":
        model = models.get_nvidia_model()
    elif conf.model_selection == "nvidia_standard_inputs":
        model = models.get_nvidia_model_sw()
    else:
        model = models.get_nvidia_model()

    transposeImages = (model.ch_order == 'channel_first')
    
    callbacks = [
        keras.callbacks.EarlyStopping(monitor='val_loss', patience=conf.training_patience, verbose=0),
        keras.callbacks.ModelCheckpoint(model_name + "_best", monitor='val_loss', save_best_only=True, verbose=0),
    ]
    
    batch_size = conf.training_batch_size

    #Train on session images
    X, Y = load_dataset(inputs, limit=limit, transposeImages=transposeImages, augmentMult=aug_mult, aug_perc=aug_perc)
    history = model.fit(X,Y, nb_epoch=epochs, batch_size=batch_size, validation_split=conf.training_validation_split, callbacks=callbacks)

    model.save(model_name)

    
    try:
        if do_plot:
            # summarize history for loss
            plt.plot(history.history['loss'])
            plt.plot(history.history['val_loss'])
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train', 'test'], loc='upper left')
            plt.savefig('loss.png')
    except Exception as e:
        print("problems with loss graph:", e)
Example #4
def go(model_name, epochs=50, inputs="./log/*.jpg", limit=None):

    print("working on model", model_name)
    """
    modify config.json to select the model to train.
    """
    model = models.get_nvidia_model(conf.num_outputs)
    """
    display layer summary and weights info
    """
    # models.show_model_summary(model)

    callbacks = [
        keras.callbacks.EarlyStopping(monitor="val_loss",
                                      patience=conf.training_patience,
                                      verbose=0),
        keras.callbacks.ModelCheckpoint(model_name,
                                        monitor="val_loss",
                                        save_best_only=True,
                                        verbose=0),
    ]

    batch_size = conf.training_batch_size

    # Train on session images
    train_generator, validation_generator, n_train, n_val = make_generators(
        inputs, limit=limit, batch_size=batch_size)

    if n_train == 0:
        print("no training data found")
        return

    steps_per_epoch = n_train // batch_size
    validation_steps = n_val // batch_size

    print("steps_per_epoch", steps_per_epoch, "validation_steps",
          validation_steps)

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=steps_per_epoch,
        validation_data=validation_generator,
        validation_steps=validation_steps,
        epochs=epochs,
        verbose=1,
        callbacks=callbacks,
    )

    try:
        if do_plot:
            # summarize history for loss
            plt.plot(history.history["loss"])
            plt.plot(history.history["val_loss"])
            plt.title("model loss")
            plt.ylabel("loss")
            plt.xlabel("epoch")
            plt.legend(["train", "test"], loc="upper left")
            plt.savefig(model_name + "loss.png")
            plt.show()
    except:  # noqa: E722, B001
        print("problems with loss graph")
                    type=str,
                    default='model.h5',
                    help='Name for the output model H5 file.')
args = parser.parse_args()

driving_log_path = os.path.join(args.basedir, args.datadir, driving_log_fname)
driving_data_dir = os.path.join(args.basedir, args.datadir,
                                driving_data_subdir)
max_augmentations = args.augmentations

print("driving_log_path = '%s'" % driving_log_path)
print("driving_data_dir = '%s'" % driving_data_dir)
print("max_augmentations = %s" % max_augmentations)

# Load the Keras model and print its summary
model = get_nvidia_model()
model.summary()  # summary() prints the layer table itself; wrapping it in print() would also print "None"

# Read in the drive log csv
lines = []
with open(driving_log_path) as csvfile:
    reader = csv.reader(csvfile)
    header_line = next(reader)
    for line in reader:
        lines.append(line)

# Load the training data and augment it (using the left, center, and right cameras and mirroring images horizontally)
images = []
measurements = []
for j, line in enumerate(lines):
    correction = 0.2  # parameter to offset the steering for left/right camera
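
The listing breaks off inside the augmentation loop. A hedged sketch of how such a loop is commonly completed in behavioral-cloning pipelines follows; the CSV column layout, the use of OpenCV for image I/O, and the helper name are all assumptions, not taken from the original:

import os
import cv2  # assumption: OpenCV handles image loading in this pipeline


def augment_line(line, data_dir, correction=0.2):
    """Hypothetical helper: expand one CSV row into (image, steering) pairs."""
    # Assumed CSV layout: [center_path, left_path, right_path, steering, ...]
    steering = float(line[3])
    angles = (steering, steering + correction, steering - correction)
    samples = []
    for path, angle in zip(line[:3], angles):
        image = cv2.imread(os.path.join(data_dir, os.path.basename(path.strip())))
        if image is None:
            continue
        samples.append((image, angle))
        # Mirror the frame horizontally and negate the steering angle.
        samples.append((cv2.flip(image, 1), -angle))
    return samples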