x = Dense(10, init=init)(x)
    x = ELU()(x)
    out = Dense(1, init=init)(x)

    model = Model(input=input_frame, output=out)

    if summary:
        model.summary()

    return model


if __name__ == '__main__':

    # split udacity csv data into training and validation
    train_data, val_data = split_train_val(csv_driving_data='data/driving_log.csv')

    # get network model and compile it (default Adam opt)
    nvidia_net = get_nvidia_model(summary=True)
    nvidia_net.compile(optimizer='adam', loss='mse')

    # json dump of model architecture
    with open('logs/model.json', 'w') as f:
        f.write(nvidia_net.to_json())

    # define callbacks to save history and weights
    checkpointer = ModelCheckpoint('checkpoints/weights.{epoch:02d}-{val_loss:.3f}.hdf5')
    logger = CSVLogger(filename='logs/history.csv')

    # start the training
    nvidia_net.fit_generator(generator=generate_data_batch(train_data, augment_data=True, bias=CONFIG['bias']),
# --- Example #2 (snippet separator; original vote count: 0) ---
    # Load data
    df_vehicles1 = load_data('object-detection-crowdai', 'labels.csv', verbose=False)
    df_vehicles2 = load_data('object-dataset', 'labels.csv', verbose=False)

    # Concatenate

    df_vehicles = pd.concat([df_vehicles1,df_vehicles2]).reset_index()
    df_vehicles = df_vehicles.drop('index', 1)
    df_vehicles.columns =['File_Path','Frame','Label','ymin','xmin','ymax','xmax']

    print(len(df_vehicles))

    # split udacity csv data into training and validation

    train_data, val_data = split_train_val(df_vehicles)

    ### Generator



    model = get_unet()
    model.summary()
    if config.mode == 'train':
        # training_gen = generate_data_batch(train_data, df_vehicles, config.batch_size)
        training_gen = generate_train_batch(train_data, config.batch_size)

        eval_gen = generate_train_batch(val_data, config.batch_size)

        smooth = 1.
# --- Example #3 (snippet separator; original vote count: 0) ---
    x = Dense(10, init=init)(x)
    x = ELU()(x)
    out = Dense(1, init=init)(x)

    model = Model(input=input_frame, output=out)

    if summary:
        model.summary()

    return model


if __name__ == '__main__':

    # split udacity csv data into training and validation
    train_data, val_data = split_train_val(csv_driving_data='data/driving_log.csv')

    # get network model and compile it (default Adam opt)
    nvidia_net = get_nvidia_model(summary=True)
    nvidia_net.compile(optimizer='adam', loss='mse')

    # json dump of model architecture
    with open('logs/model.json', 'w') as f:
        f.write(nvidia_net.to_json())

    # define callbacks to save history and weights
    checkpointer = ModelCheckpoint('checkpoints/weights.{epoch:02d}-{val_loss:.3f}.hdf5')
    logger = CSVLogger(filename='logs/history.csv')

    # start the training
    nvidia_net.fit_generator(generator=generate_data_batch(train_data, augment_data=True, bias=CONFIG['bias']),
# --- Example #4 (snippet separator; original vote count: 0) ---
    x = Dropout(0.2)(x)
    x = Conv2D(48, (5, 5), strides=(2, 2))(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Conv2D(64, (3, 3))(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Conv2D(64, (3, 3))(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Flatten()(x)
    x = Dense(100)(x)
    x = ELU()(x)
    x = Dense(10)(x)
    x = ELU()(x)
    out = Dense(1)(x)
    model = Model(inputs=input_frame, outputs=out)
    model.compile(optimizer='adam', loss='mse')
    model.summary()
    return model


if __name__ == '__main__':
    # Split the Udacity driving-log CSV into training and validation sets.
    # NOTE(review): the path is an absolute, user-specific location — it will
    # only work on the original author's machine; consider making it a CLI arg.
    train, test = split_train_val('/Users/dwang/self-driving-car/project_3_behavioral_cloning/data/driving_log.csv')

    # Build the (already compiled, per the builder above) steering model.
    model = get_model()

    # Train from a Python generator: augmented batches for training,
    # non-augmented (bias=1.0, i.e. keep every frame) batches for validation.
    # NOTE(review): steps_per_epoch is set to BATCH_SIZE and validation_steps
    # to BATCH_SIZE*100 — these parameters count *batches per epoch*, not
    # samples, so using the batch size here looks like a units mix-up; verify
    # against the intended epoch length.
    model.fit_generator(generator=generate_data_batch(train, augment_data=True, bias=BIAS),
                        steps_per_epoch=BATCH_SIZE,
                        epochs=50,
                        validation_data=generate_data_batch(test, augment_data=False, bias=1.0),
                        validation_steps=BATCH_SIZE*100)