# NOTE(review): this line is a whitespace-mangled training-script fragment (its
# newlines were stripped, collapsing the whole script body onto one line), and it
# is TRUNCATED mid-`try`: the matching `except` clause is missing from this chunk.
# It cannot be reformatted into valid Python without inventing the lost tail, so
# it is left byte-identical. Issues visible in the text, to fix once the full
# source is recovered:
#   - the banner prints 'ALPA' (typo for 'ALPHA');
#   - everything from '# Saves the model...' onward is dead in this flattened
#     form, because the '#' comments out the rest of the physical line.
args = parser.parse_args() if not os.path.exists(args.output): os.makedirs(args.output) print('-------------') print('BATCH : {}'.format(args.batch)) print('EPOCH : {}'.format(args.epoch)) print('ALPA : {}'.format(args.alpha)) print('DROPOUT : {}'.format(args.dropout)) print('Load Weights?: {}'.format(args.weights)) print('Dataset : {}'.format(args.dataset)) print('OUTPUT : {}'.format(args.output)) print('-------------') df_train, df_val = getDataFromFolder(args.dataset, args.output) print('TRAIN:', len(df_train)) print('VALIDATION:', len(df_val)) model = NvidiaModel(args.alpha, args.dropout) print(model.summary()) # Saves the model... with open(os.path.join(args.output, 'model.json'), 'w') as f: f.write(model.to_json()) try: if args.weights: print('Loading weights from file ...') model.load_weights(args.weights)
# Training entry point (script body, reflowed from a newline-stripped paste):
# report the CLI configuration, create the output directory, split the dataset
# into train/validation frames, build the Nvidia model, persist its
# architecture as JSON, and optionally warm-start from saved weights.
print('-------------')
print('BATCH: {}'.format(args.batch))
print('EPOCH: {}'.format(args.epoch))
print('ALPHA: {}'.format(args.alpha))  # fixed typo: banner previously read 'ALPA'
print('DROPOUT: {}'.format(args.dropout))
# fixed: this printed args.loadWeights, but the parser/loading code below uses
# args.weights — the old attribute would raise AttributeError at runtime.
print('Load Weights?: {}'.format(args.weights))
print('Dataset: {}'.format(args.dataset))
print('Model: {}'.format(args.output))
print('-------------')

# Create the output directory on first run.
if not os.path.exists(args.output):
    os.makedirs(args.output)

# Split the data into training and validation frames.
df_train, df_val = getDataFromFolder(args.dataset)
print('TRAIN:', len(df_train))
print('VALIDATION:', len(df_val))

model = NvidiaModel(args.alpha, args.dropout)

# Persist the model architecture as JSON next to the weights output.
with open(os.path.join(args.output, 'model.json'), 'w') as f:
    f.write(model.to_json())

# Optionally warm-start from a previous checkpoint; a missing weights file is
# not fatal — training simply starts from scratch.
try:
    if args.weights:
        print('Loading weights from file ...')
        model.load_weights(args.weights)
except IOError:
    print("No model found")
# LSTM training entry point (script body, reflowed from a newline-stripped
# paste): report the CLI configuration, load the balanced un-split dataset,
# precompute CNN features, split 70/30 into train/validation, and build the
# LSTM model.
if not os.path.exists(args.output):
    os.makedirs(args.output)

print('-------------')
print('BATCH : {}'.format(args.batch))
print('EPOCH : {}'.format(args.epoch))
print('ALPHA : {}'.format(args.alpha))  # fixed typo: banner previously read 'ALPA'
print('DROPOUT : {}'.format(args.dropout))
print('Load Weights?: {}'.format(args.weights))
print('Dataset : {}'.format(args.dataset))
print('OUTPUT : {}'.format(args.output))
print('-------------')

# Load the whole dataset as one frame (balanced, not shuffled, not pre-split)
# and precompute the CNN features the LSTM consumes.
df = getDataFromFolder(args.dataset, args.output, randomize=False, balance=True, split=False)
extractCNNFeatures(df, args.weights)

# 70/30 train/validation split; randomize=False above means this split is
# positional (presumably to keep frames in sequence order — confirm).
train_size = int(0.7 * len(df))
df_train = df.iloc[:train_size]
df_val = df.iloc[train_size:]
print('TRAIN:', len(df_train))
print('VALIDATION:', len(df_val))

model = Lstm(args.batch, args.seq, CNN_INPUT_SIZE, args.dropout)
print(model.summary())

# Saves the model...
# NOTE(review): this line is a whitespace-mangled training-script fragment
# (newlines stripped) and is TRUNCATED at both ends: it opens with a bare
# `os.makedirs(args.output)` (the `if not os.path.exists(...)` guard appears to
# have been cut off at the chunk boundary) and ends mid-`if` inside a `try`
# whose body and `except` are missing. It cannot be reformatted into valid
# Python without inventing the lost pieces, so it is left byte-identical.
# Issues visible in the text, to fix once the full source is recovered:
#   - the banner prints 'ALPA' (typo for 'ALPHA');
#   - getDataFromFolder is called twice: `df` is assigned from the first call
#     and then never used — df_train/df_val come from a second call; one of
#     the two calls is redundant;
#   - everything from '# TODO: abstract method to normalize speed.' onward is
#     dead in this flattened form ('#' comments out the rest of the line).
os.makedirs(args.output) print('-------------') print('BATCH : {}'.format(args.batch)) print('EPOCH : {}'.format(args.epoch)) print('ALPA : {}'.format(args.alpha)) print('DROPOUT : {}'.format(args.dropout)) print('Load Weights?: {}'.format(args.weights)) print('Dataset : {}'.format(args.dataset)) print('OUTPUT : {}'.format(args.output)) print('-------------') # TODO: abstract method to normalize speed. df = getDataFromFolder(args.dataset, args.output, randomize=False, split=True, normalize=True)[0] df_train, df_val = getDataFromFolder(args.dataset, args.output) print('TRAIN:', len(df_train)) print('VALIDATION:', len(df_val)) model = NvidiaModel(args.dropout) print(model.summary()) # Saves the model... with open(os.path.join(args.output, 'model.json'), 'w') as f: f.write(model.to_json()) try: if args.weights: