def gen(hwm, host, port):
    """Stream (features, label) training pairs from the data server.

    The label sequence is reduced to its final time step. When a batch
    carries a singleton temporal axis (no temporal context), the feature
    tensor is likewise reduced to its last frame.
    """
    source = client_generator(hwm=hwm, host=host, port=port)
    for features, labels, _extra in source:
        # Only the last step of the label sequence is the training target.
        labels = labels[:, -1]
        if features.shape[1] == 1:  # no temporal context
            features = features[:, -1]
        yield features, labels
def gen(hwm, host, port):
    """Yield (X, Y) training pairs streamed from the data server.

    Parameters
    ----------
    hwm : int
        High-water mark (queue depth) forwarded to ``client_generator``.
    host : str
        Data-server host.
    port : int
        Data-server port.

    Yields
    ------
    (X, Y) where Y is the last step of the label sequence and X is
    reduced to its last frame when the batch has no temporal context.
    """
    # Removed a block of commented-out resize/normalization experiments
    # (cv2.resize to 40x40, swapaxes, ad-hoc scaling) — dead code.
    for tup in client_generator(hwm=hwm, host=host, port=port):
        X, Y, _ = tup
        # Only the final step of the label sequence is used as the target.
        Y = Y[:, -1]
        if X.shape[1] == 1:  # no temporal context
            X = X[:, -1]
        yield X, Y
def gen(hwm, host, port):
    """Yield ([Image, Speed], Steer) batches streamed from the data server.

    Parameters
    ----------
    hwm : int
        High-water mark (queue depth) forwarded to ``client_generator``.
    host : str
        Data-server host.
    port : int
        Data-server port.

    Yields
    ------
    ([Image, Speed], Steer) — model inputs and steering target.
    """
    for tup in client_generator(hwm=hwm, host=host, port=port):
        # The full tuple is unpacked to match the server's record layout;
        # Gas, Gear and Brake are currently unused by the model inputs.
        Image, Steer, Speed, Gas, Gear, Brake = tup
        if Image.shape[1] == 1:  # no temporal context: drop singleton axis
            Image = Image[:, -1]
        # Both branches of the original code yielded the identical value,
        # so the duplicated yield (and the dead re-slicing of Gas/Gear/
        # Brake into never-used locals) has been collapsed to one path.
        yield [Image, Speed], Steer
# Create the TF session and initialize all graph variables.
sess = tf.InteractiveSession(config=tfconfig)
tf.global_variables_initializer().run()

# saver — keep up to 40 checkpoints; optionally warm-start from a
# pretrained model when a path is configured.
saver = tf.train.Saver(max_to_keep=40)
if config.pretrained_model_path is not None:
    saver.restore(sess, config.pretrained_model_path)
    print("Loaded the pretrained model: %s" % (config.pretrained_model_path))

# Summary writer for TensorBoard logging.
summary_writer = tf.summary.FileWriter(args.log_path, graph=tf.get_default_graph())

# Train over the dataset — streaming train/validation batch generators
# served over ZMQ-style sockets on localhost.
data_train = client_generator(hwm=20, host="localhost", port=args.port)
data_val = client_generator(hwm=20, host="localhost", port=args.val_port)

curr_loss = 0
for i in range(config.epochsize):
    feats_batch, angle_batch, speed_batch = next(data_train)
    # Preprocessing — presumably converts raw frames/angles into model
    # features plus curvature and angle targets; TODO confirm against
    # pre_processor.process.
    feats, curvatures, angles = pre_processor.process(
        sess, feats_batch, angle_batch, speed_batch)
    # Training — fit either curvature or raw steering angle as the target.
    if config.use_curvature:
        feed_dict = {VA_model.features: feats, VA_model.y: curvatures}
    else:
        feed_dict = {VA_model.features: feats, VA_model.y: angles}
    # NOTE(review): the training step that consumes feed_dict continues
    # beyond this chunk.
# Build the GAN/autoencoder graph pieces and restore pretrained weights.
g_train, d_train, sampler, saver, loader, [G, E, T] = get_model(
    sess=sess, name=args.name, batch_size=args.batch, gpu=args.gpu)
print("loading weights...")
# NOTE(review): these path strings contain no "{}" placeholder, so
# .format(args.name) is a no-op — the literal path is always used.
# Confirm whether a per-experiment path was intended.
G.load_weights("./outputs/results_autoencoder/G_weights.keras".format(
    args.name))
E.load_weights("./outputs/results_autoencoder/E_weights.keras".format(
    args.name))
checkpoint_dir = './outputs/results_' + args.name
T.load_weights(checkpoint_dir + "/T_weights.keras")

# Output directory for rendered video frames.
if not os.path.exists("./video_" + args.name):
    os.makedirs("./video_" + args.name)

# get data — pull one batch from the data server.
data = client_generator(hwm=20, host="localhost", port=5557)
X = next(data)[0]  # [:, ::2]
sh = X.shape
# Flatten batch/time, resize each CHW frame to 160x80, scale to [-1, 1].
# Assumes incoming frames are 3x160x320 (C, H, W) — TODO confirm.
X = X.reshape((-1, 3, 160, 320))
X = np.asarray(
    [cv2.resize(x.transpose(1, 2, 0), (160, 80)) for x in X])
X = X / 127.5 - 1.
x = X.reshape((sh[0], args.time, 80, 160, 3))

# estimate frames — symbolic encoder/generator input/output tensors.
z_dim = 512
I = E.input
E_out = E(I)
O = G.input
G_out = G(O)
print "Sampling..."  # Python 2 print statement — this file targets Python 2
def gen(hwm, host, port):
    """Yield cleaned-up samples streamed from the data server."""
    stream = client_generator(hwm=hwm, host=host, port=port)
    for sample in stream:
        yield cleanup(sample)
os.makedirs(samples_path)
# NOTE(review): this removes the log directory only when it does NOT
# exist — i.e. a no-op. The condition looks inverted; confirm the intent
# was to clear an existing log directory before training.
if not os.path.exists("./outputs/logs_" + args.name):
    os.system("rm -rf ./outputs/logs_" + args.name)

#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
#with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
with tf.Session() as sess:
    # Build the train/test functions and checkpoint load/save helpers.
    ftrain, ftest, loader, saver, extras = get_model(sess=sess, name=args.name,
                                                     batch_size=args.batch,
                                                     time_len=args.time)

    # start from checkpoint
    if args.loadweights:
        loader()

    # Stream training batches from the data server; no validation or test
    # data is wired in here (ftest/validation_data/test_data are None).
    train_gen = client_generator(hwm=20, host=args.host, port=args.port)
    train_model(args.name, ftrain, train_gen, samples_per_epoch=args.epochsize,
                ftest=None, validation_data=None, test_data=None,
                nb_val_samples=100, nb_epoch=args.epoch, verbose=1,
                saver=saver, gif=args.gif)
with tf.Session() as sess:
    K.set_session(sess)  # share the TF session with Keras
    # Build the GAN/autoencoder graph pieces and restore pretrained weights.
    g_train, d_train, sampler, saver, loader, [G, E, T] = get_model(sess=sess,
        name=args.name, batch_size=args.batch, gpu=args.gpu)
    print("loading weights...")
    # NOTE(review): these path strings contain no "{}" placeholder, so
    # .format(args.name) is a no-op — the literal path is always used.
    # Confirm whether a per-experiment path was intended.
    G.load_weights("./outputs/results_autoencoder/G_weights.keras".format(args.name))
    E.load_weights("./outputs/results_autoencoder/E_weights.keras".format(args.name))
    checkpoint_dir = './outputs/results_' + args.name
    T.load_weights(checkpoint_dir+"/T_weights.keras")

    # Output directory for rendered video frames.
    if not os.path.exists("./video_"+args.name):
        os.makedirs("./video_"+args.name)

    # get data — pull one batch from the data server.
    data = client_generator(hwm=20, host="localhost", port=5557)
    X = next(data)[0]  # [:, ::2]
    sh = X.shape
    # Flatten batch/time, resize each CHW frame to 160x80, scale to [-1, 1].
    # Assumes incoming frames are 3x160x320 (C, H, W) — TODO confirm.
    X = X.reshape((-1, 3, 160, 320))
    X = np.asarray([cv2.resize(x.transpose(1, 2, 0), (160, 80)) for x in X])
    X = X/127.5 - 1.
    x = X.reshape((sh[0], args.time, 80, 160, 3))

    # estimate frames — symbolic encoder/generator input/output tensors.
    z_dim = 512
    I = E.input
    E_out = E(I)
    O = G.input
    G_out = G(O)
    print "Sampling..."  # Python 2 print statement — file targets Python 2
    # Sampling loop — body continues beyond this chunk.
    for i in tqdm(range(128)):