Example 1
# import data_generator
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.optimizers import Adam
from tensorflow.python.client import device_lib

import dataset_preparator
import generator
from model import nvidia_model

# List the devices (CPU/GPU) that TensorFlow can see.
print(device_lib.list_local_devices())

print("Creating the model.")
# Create Instance of Custom Nvidia Model
model = model.nvidia_model((160, 320, 3), ((60, 20), (0, 0)))

print("Creating the optimizer.")
# define training optimizer
adam = Adam(lr=5e-4)
optimizer = adam

print("Compiling the model.")
# Compiling model
model.compile(optimizer, loss="mse")

# Print Model Summary.
model.summary()
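
# For reference, a minimal sketch of what the nvidia_model factory might build,
# following NVIDIA's end-to-end driving architecture (an assumption for
# illustration; the real model.py may differ).
from keras.models import Sequential
from keras.layers import Cropping2D, Lambda, Conv2D, Flatten, Dense

def nvidia_model_sketch(input_shape, cropping):
    net = Sequential()
    net.add(Cropping2D(cropping=cropping, input_shape=input_shape))  # drop sky and hood pixels
    net.add(Lambda(lambda x: x / 255.0 - 0.5))                       # normalize to [-0.5, 0.5]
    net.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
    net.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
    net.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
    net.add(Conv2D(64, (3, 3), activation='relu'))
    net.add(Conv2D(64, (3, 3), activation='relu'))
    net.add(Flatten())
    net.add(Dense(100, activation='relu'))
    net.add(Dense(50, activation='relu'))
    net.add(Dense(10, activation='relu'))
    net.add(Dense(1))  # single output: the steering angle
    return net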
# Collect the driving logs from the three recording sessions.
dataset_paths = [
    r'./training_data/Forward_track/driving_log.csv',
    r'./training_data/Backward_track/driving_log.csv',
    r'./training_data/Sides_recovery/driving_log.csv',
]
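
The three driving logs can then be read and concatenated into a single table. A minimal sketch, assuming the Udacity-simulator CSV layout (center, left, right, steering, throttle, brake, speed) with no header row; the actual dataset_preparator module may handle this differently:

import pandas as pd

CSV_COLUMNS = ['center', 'left', 'right', 'steering', 'throttle', 'brake', 'speed']

# Read every driving_log.csv and stack the rows into one DataFrame.
logs = [pd.read_csv(path, names=CSV_COLUMNS) for path in dataset_paths]
record_df = pd.concat(logs, ignore_index=True)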
Example 2

import tensorflow as tf
import pandas as pd
from tqdm import tqdm

from model import nvidia_model
from opticalHelpers import opticalFlowDenseDim3
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Activation, Dropout, Flatten, Dense, Lambda
from keras.layers import ELU
from keras.optimizers import Adam

# Workaround for a control-flow incompatibility between old Keras and TensorFlow releases.
tf.python.control_flow_ops = tf
# Input dimensions expected by the network.
N_img_height = 66
N_img_width = 220
N_img_channels = 3

# WEIGHTS is a path to a pretrained weights file, defined elsewhere in the script.
model = nvidia_model()
model.load_weights(WEIGHTS)

COUNT = 0
# DRIVE_TEST_CSV_PATH is defined elsewhere in the script.
data = pd.read_csv(DRIVE_TEST_CSV_PATH)
# Iterate over interior rows so every frame has both a previous and a next neighbour.
for idx in tqdm(range(1, len(data) - 1)):
    row_now = data.iloc[[idx]].reset_index()
    row_prev = data.iloc[[idx - 1]].reset_index()
    row_next = data.iloc[[idx + 1]].reset_index()

    # Read the three timestamps to establish frame order (previous -> current -> next).

    time_now = row_now['time'].values[0]
    time_prev = row_prev['time'].values[0]
    time_next = row_next['time'].values[0]
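
The excerpt stops before the flow computation itself. As a rough sketch of what a helper like opticalFlowDenseDim3 might do (an assumption; the real opticalHelpers module may differ), dense optical flow between two consecutive frames can be computed with OpenCV's Farneback method and packed into a 3-channel image:

import cv2
import numpy as np

def optical_flow_dense_dim3(image_prev, image_now):
    """Return a 3-channel visualisation of dense optical flow between two RGB frames."""
    gray_prev = cv2.cvtColor(image_prev, cv2.COLOR_RGB2GRAY)
    gray_now = cv2.cvtColor(image_now, cv2.COLOR_RGB2GRAY)

    # Farneback dense optical flow: one (dx, dy) displacement vector per pixel.
    flow = cv2.calcOpticalFlowFarneback(gray_prev, gray_now, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)

    # Encode direction as hue and magnitude as value, then convert back to RGB.
    magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros_like(image_prev)
    hsv[..., 0] = angle * 180 / np.pi / 2
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)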
Example 3
            yield X_train, y_train
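
# Only the final yield of batch_generator survives in the excerpt above. Below is
# a minimal sketch of such a generator (an assumption; the original implementation
# may differ), reading a 'center' image path and 'steering' label from each row.
import cv2
import numpy as np
from sklearn.utils import shuffle

def batch_generator_sketch(samples, batchsize=32):
    """Yield (images, steering angles) batches indefinitely, reshuffling each pass."""
    while True:
        samples = shuffle(samples)
        for offset in range(0, len(samples), batchsize):
            batch = samples.iloc[offset:offset + batchsize]
            images, angles = [], []
            for _, row in batch.iterrows():
                image = cv2.cvtColor(cv2.imread(row['center']), cv2.COLOR_BGR2RGB)
                images.append(image)
                angles.append(float(row['steering']))
            X_train = np.array(images)
            y_train = np.array(angles)
            yield X_train, y_train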

# Imports required by the code below (presumably declared at the top of the original script).
from math import ceil
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
import model

# Split the records into training (80%) and validation (20%) sets using
# sklearn's train_test_split.
train_samples, valid_samples = train_test_split(record_df, test_size=0.2)

# create the generators for training and validation  
train_generator = batch_generator(train_samples)
valid_generator = batch_generator(valid_samples)

# Optimizer: Adam, chosen for its adaptive per-parameter learning rates.
optimizer = Adam(lr=1e-4, decay=0.0)

# Batch size and number of training epochs.
batchsize = 32
epochs = 60

# Build the model; the nvidia_model factory takes the optimizer for compilation.
nvidia_model = model.nvidia_model(optimizer)

# Callbacks: early stopping on stalled validation loss, plus checkpointing
# of the best model seen so far.
callback_es = EarlyStopping(monitor='val_loss', mode='auto', verbose=1, patience=6)
callback_cp = ModelCheckpoint('best_model.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto')

# Start training.
net_history = nvidia_model.fit_generator(train_generator,
                    steps_per_epoch=ceil(len(train_samples) / batchsize),
                    validation_data=valid_generator,
                    validation_steps=ceil(len(valid_samples) / batchsize),
                    epochs=epochs, callbacks=[callback_es, callback_cp])
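
After training, the per-epoch losses stored in the Keras History object can be plotted to inspect convergence. A short sketch (matplotlib import assumed, as it is not in the excerpt):

import matplotlib.pyplot as plt

# Plot training vs. validation loss per epoch from the History object.
plt.plot(net_history.history['loss'], label='training loss')
plt.plot(net_history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('MSE loss')
plt.legend()
plt.show()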