Example #1
    if summary:
        model.summary()

    return model


if __name__ == '__main__':

    # split udacity csv data into training and validation
    train_data, val_data = split_train_val(csv_driving_data='data/driving_log.csv')

    # get network model and compile it (default Adam opt)
    nvidia_net = get_nvidia_model(summary=True)
    nvidia_net.compile(optimizer='adam', loss='mse')

    # json dump of model architecture
    with open('logs/model.json', 'w') as f:
        f.write(nvidia_net.to_json())

    # define callbacks to save history and weights
    checkpointer = ModelCheckpoint('checkpoints/weights.{epoch:02d}-{val_loss:.3f}.hdf5')
    logger = CSVLogger(filename='logs/history.csv')

    # start the training
    nvidia_net.fit_generator(generator=generate_data_batch(train_data, augment_data=True, bias=CONFIG['bias']),
                             samples_per_epoch=300*CONFIG['batchsize'],
                             nb_epoch=50,
                             validation_data=generate_data_batch(val_data, augment_data=False, bias=1.0),
                             nb_val_samples=100*CONFIG['batchsize'],
                             callbacks=[checkpointer, logger])
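This snippet targets the Keras 1.x generator API: samples_per_epoch and nb_val_samples count individual samples, and the epoch count is spelled nb_epoch. In Keras 2 / tf.keras these arguments were renamed and re-scoped to count batches, and model.fit now accepts generators directly. A minimal sketch of the equivalent training call, assuming this project's generate_data_batch, get_nvidia_model, split_train_val and CONFIG are importable (they are project code, not part of Keras):

# Hypothetical port of the training call above to tf.keras 2.x.
# generate_data_batch, get_nvidia_model, split_train_val and CONFIG are
# assumed to come from this project's own modules.
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint

train_data, val_data = split_train_val(csv_driving_data='data/driving_log.csv')
nvidia_net = get_nvidia_model(summary=True)
nvidia_net.compile(optimizer='adam', loss='mse')

# samples_per_epoch=300*batchsize (samples) becomes steps_per_epoch=300 (batches);
# nb_val_samples=100*batchsize becomes validation_steps=100; nb_epoch becomes epochs.
nvidia_net.fit(generate_data_batch(train_data, augment_data=True, bias=CONFIG['bias']),
               steps_per_epoch=300,
               epochs=50,
               validation_data=generate_data_batch(val_data, augment_data=False, bias=1.0),
               validation_steps=100,
               callbacks=[ModelCheckpoint('checkpoints/weights.{epoch:02d}-{val_loss:.3f}.hdf5'),
                          CSVLogger(filename='logs/history.csv')])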
Example #3
    x = Dropout(0.2)(x)
    x = Conv2D(48, (5, 5), strides=(2, 2))(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Conv2D(64, (3, 3))(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Conv2D(64, (3, 3))(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Flatten()(x)

    # fully-connected head regressing a single steering value
    x = Dense(100)(x)
    x = ELU()(x)
    x = Dense(10)(x)
    x = ELU()(x)
    out = Dense(1)(x)

    # build and compile the model (MSE loss, Adam optimizer)
    model = Model(inputs=input_frame, outputs=out)
    model.compile(optimizer='adam', loss='mse')
    model.summary()
    return model


if __name__ == '__main__':
    # split the driving log into training and validation sets
    train, test = split_train_val('/Users/dwang/self-driving-car/project_3_behavioral_cloning/data/driving_log.csv')

    # build the model and train it on augmented batches from the generator
    model = get_model()
    model.fit_generator(generator=generate_data_batch(train, augment_data=True, bias=BIAS),
                        steps_per_epoch=BATCH_SIZE,
                        epochs=50,
                        validation_data=generate_data_batch(test, augment_data=False, bias=1.0),
                        validation_steps=BATCH_SIZE*100)
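Note that get_model() is shown only from its 48-filter convolution onward; input_frame and the earlier layers sit in the truncated part above. Purely as an illustration of what that missing prefix plausibly looks like, given that the visible layers (48, 64, 64 convolutions, then Dense 100/10/1) match the NVIDIA PilotNet layout, a hypothetical reconstruction follows; the input shape and the normalization are assumptions, not taken from the source:

# Hypothetical prefix of get_model(); everything here, including the
# (66, 200, 3) input shape and the Lambda normalization, is an assumption.
from keras.layers import Input, Lambda, Conv2D, ELU, Dropout

def get_model():
    input_frame = Input(shape=(66, 200, 3))                      # assumed PilotNet input size
    x = Lambda(lambda frame: frame / 127.5 - 1.0)(input_frame)   # assumed [-1, 1] scaling
    x = Conv2D(24, (5, 5), strides=(2, 2))(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Conv2D(36, (5, 5), strides=(2, 2))(x)
    x = ELU()(x)
    # ...continues with the Dropout / Conv2D(48, (5, 5), ...) lines shown above.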