def load_imgs():
    """Load frames and steering angles from every dataset video into the
    module-level ``imgs`` / ``wheels`` containers, keyed by purpose.

    Bug fix: the original appended a frame only when ``img.any()`` was
    truthy (skipping all-zero/blank frames) but extended ``wheels`` with
    *every* steering label, so one skipped frame misaligned the data and
    tripped the trailing length assert. Frames and labels are now paired:
    a skipped frame drops its label too.
    """
    global imgs
    global wheels
    for p in purposes:
        for epoch_id in epochs[p]:
            print('processing and loading "{}" datasets {} into memory, current num of imgs is {}...'.format(
                p, epoch_id, len(imgs[p])))
            vid_path = data_dir + "/dataset{0}/out-mencoder.avi".format(epoch_id)
            assert os.path.isfile(vid_path)
            frame_count = cm.frame_count(vid_path)
            cap = cv2.VideoCapture(vid_path)

            csv_path = data_dir + "/dataset%i/data.csv" % epoch_id
            assert os.path.isfile(csv_path)
            rows = cm.fetch_csv_data(csv_path)
            # one steering row per video frame
            assert frame_count == len(rows)
            yy = [[float(row['angle'])] for row in rows]

            # Pair each frame with its steering label so the two lists
            # stay the same length even when blank frames are skipped.
            for label in yy:
                ret, img = cap.read()
                if not ret:
                    break
                # skip completely black/blank frames together with their label
                if img.any():
                    imgs[p].append(preprocess.preprocess(img))
                    wheels[p].append(label)

            assert len(imgs[p]) == len(wheels[p])
            cap.release()
def load_imgs():
    """Read every epoch's front-camera video and steering CSV and append
    the preprocessed frames / angles to the module-level ``imgs`` and
    ``wheels`` containers, keyed by purpose ('train'/'val')."""
    global imgs
    global wheels

    for purpose in purposes:
        for epoch_id in epochs[purpose]:
            print('processing and loading "{}" epoch {} into memory, current num of imgs is {}...'.format(
                purpose, epoch_id, len(imgs[purpose])))

            vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
            assert os.path.isfile(vid_path)
            frame_count = cm.frame_count(vid_path)
            cap = cv2.VideoCapture(vid_path)

            csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
            assert os.path.isfile(csv_path)
            rows = cm.fetch_csv_data(csv_path)
            # the CSV must carry exactly one steering row per frame
            assert frame_count == len(rows)
            angles = [[float(row['wheel'])] for row in rows]

            # drain the capture; cap.read() returns ret=False at end of video
            while True:
                ok, frame = cap.read()
                if not ok:
                    break
                imgs[purpose].append(preprocess.preprocess(frame))

            wheels[purpose].extend(angles)
            assert len(imgs[purpose]) == len(wheels[purpose])
            cap.release()
def load_imgs():
    """Load dataset videos and steering CSVs into the module-level
    ``imgs`` / ``wheels`` containers, keyed by purpose.

    Bug fix: blank frames (``img.any()`` false) were skipped while the
    full label list was still extended into ``wheels``, breaking the
    imgs/wheels length invariant asserted below. Each frame is now read
    alongside its label so both are kept or dropped together.
    """
    global imgs
    global wheels
    for p in purposes:
        for epoch_id in epochs[p]:
            print('processing and loading "{}" datasets {} into memory, current num of imgs is {}...'.format(p, epoch_id, len(imgs[p])))
            vid_path = data_dir + "/dataset{0}/out-mencoder.avi".format(epoch_id)
            assert os.path.isfile(vid_path)
            frame_count = cm.frame_count(vid_path)
            cap = cv2.VideoCapture(vid_path)

            csv_path = data_dir + "/dataset%i/data.csv" % epoch_id
            assert os.path.isfile(csv_path)
            rows = cm.fetch_csv_data(csv_path)
            # exactly one steering row per frame
            assert frame_count == len(rows)
            yy = [[float(row['angle'])] for row in rows]

            for angle in yy:
                ret, img = cap.read()
                if not ret:
                    break
                # drop all-zero (blank) frames and their label together,
                # preserving the alignment between images and angles
                if img.any():
                    imgs[p].append(preprocess.preprocess(img))
                    wheels[p].append(angle)

            assert len(imgs[p]) == len(wheels[p])
            cap.release()
def publish_to_car(self):
    """Replay the recorded video through the trained model and publish a
    Twist steering command per frame.

    For each frame: publish the raw frame (for RViz), preprocess it,
    predict a steering angle, and publish a Twist whose angular.z is
    derived from the change in predicted angle. The ``finally`` block
    always publishes a zero Twist to stop the car, releases the video
    source, and shuts the ROS node down.

    Fixes: bare ``except:`` narrowed to ``except Exception`` so
    SystemExit/KeyboardInterrupt are not swallowed; unused ``turned_yet``
    removed; the compound band condition rewritten as the equivalent
    ``abs(deg) > abs(prev_deg)``.
    """
    twist = Twist()
    try:
        speed = 0.5
        frame_count = cm.frame_count(self.video_location)
        prev_deg = 0   # angle predicted for the previous frame
        angz = 0       # last published angular.z

        # Restore the trained TensorFlow model.
        sess = tf.InteractiveSession()
        saver = tf.train.Saver()
        model_name = 'model.ckpt'
        model_path = cm.jn(params.save_dir, model_name)
        saver.restore(sess, model_path)

        # Predict an angle for each frame and publish a command for it.
        for frame_id in xrange(frame_count):
            ret, img = self.video_source.read()
            if not ret:  # no more frames
                return
            self.publish_frame(img)  # publish the frame for viewing in RViz
            img = preprocess.preprocess(img)
            deg = model.y.eval(feed_dict={model.x: [img], model.keep_prob: 1.0})[0][0]
            deg = round(deg * 8) / 8  # quantize to the nearest eighth

            old_angz = angz
            angz = (prev_deg - deg) * 1.5
            # Original condition `(deg < t and deg < -t) or (deg > t and deg > -t)`
            # is exactly |deg| > |prev_deg|: when the new prediction swings
            # outside the previous frame's angle band, keep the old rate.
            if abs(deg) > abs(prev_deg):
                angz = old_angz

            # Publish the motion command for this frame.
            twist.linear.x = speed
            twist.linear.y = 0
            twist.linear.z = 0
            twist.angular.x = 0
            twist.angular.y = 0
            twist.angular.z = angz
            self.twist_pub.publish(twist)

            prev_deg = deg
            time.sleep(0.06)
    except Exception:
        # Best-effort: any failure falls through to the stop/shutdown below.
        return
    finally:
        # Always stop the car and shut down cleanly.
        twist.linear.x = 0
        twist.linear.y = 0
        twist.linear.z = 0
        twist.angular.x = 0
        twist.angular.y = 0
        twist.angular.z = 0
        self.twist_pub.publish(twist)
        self.video_source.release()
        time.sleep(5)
        rospy.signal_shutdown("Shutting down")
def process_epoch(epoch_id):
    """Run inference over one epoch's video and return the list of
    predicted steering angles (degrees), one per frame."""
    print('---------- processing video for epoch {} ----------'.format(epoch_id))

    vid_path = cm.jn(params.data_dir, 'out-video-{}.avi'.format(epoch_id))
    frame_count = cm.frame_count(vid_path)

    # Rescale to 1280x720 once and cache the result next to the original.
    vid_scaled_path = cm.jn(params.data_dir, 'out-video-{}-scaled.avi'.format(epoch_id))
    if not os.path.exists(vid_scaled_path):
        assert os.path.isfile(vid_path)
        cmd = "ffmpeg -i " + vid_path + " -vf scale=1280:720 " + vid_scaled_path
        os.system(cmd)
        print(cmd)
    vid_path = vid_scaled_path

    cap = cv2.VideoCapture(vid_path)
    machine_steering = []

    print('performing inference...')
    time_start = time.time()
    for frame_id in xrange(frame_count):
        ret, img = cap.read()
        assert ret  # the scaled video must contain frame_count frames

        prep_start = time.time()
        img = preprocess.preprocess(img)
        pred_start = time.time()
        rad = model.y.eval(feed_dict={model.x: [img], model.keep_prob: 1.0})[0][0]
        deg = rad2deg(rad)
        pred_end = time.time()
        # per-frame timings, kept for ad-hoc profiling
        prep_time = pred_start - prep_start
        pred_time = pred_end - pred_start

        machine_steering.append(deg)
    cap.release()

    fps = frame_count / (time.time() - time_start)
    print('completed inference, total frames: {}, average fps: {} Hz'.format(frame_count, round(fps, 1)))
    return machine_steering
def load_imgs_v2():
    """Load every epoch's frames and steering angles, randomly assigning
    each frame to the train or val set with probability ``params.train_pct``.

    Fills the module-level ``imgs`` / ``wheels`` dicts (keys 'train'/'val').
    Epochs whose video file is missing are silently skipped.

    Cleanup: removed leftover 'ppppppppppppppppp' debug prints, the
    commented-out legacy paths, a redundant pre-assert debug print, and
    fixed the 'memorys' typo in the progress message.
    """
    global imgs
    global wheels

    for epoch_id in epochs['all']:
        print('processing and loading epoch {} into memory. train:{}, val:{}'.format(
            epoch_id, len(imgs['train']), len(imgs['val'])))

        vid_path = cm.jn(data_dir, 'out-video-{}.avi'.format(epoch_id))
        if not os.path.isfile(vid_path):
            # some epoch ids have no recorded video; skip them
            continue
        frame_count = cm.frame_count(vid_path)
        cap = cv2.VideoCapture(vid_path)

        csv_path = cm.jn(data_dir, 'out-key-{}.csv'.format(epoch_id))
        assert os.path.isfile(csv_path)
        rows = cm.fetch_csv_data(csv_path)
        # one steering row per video frame
        assert frame_count == len(rows)

        for row in rows:
            ret, img = cap.read()
            if not ret:
                break
            img = preprocess.preprocess(img)
            angle = float(row['wheel'])
            # per-frame random train/val split
            if random.random() < params.train_pct:
                imgs['train'].append(img)
                wheels['train'].append([angle])
            else:
                imgs['val'].append(img)
                wheels['val'].append([angle])
        cap.release()

    print('Total data: train:{}, val:{}'.format(len(imgs['train']), len(imgs['val'])))
def load_batch(purpose):
    """Return ``(xx, yy)``: one batch of preprocessed frames and steering
    angles for ``purpose``, advancing the module-level batch cursor
    (wrapping around after the last batch)."""
    global current_batch_id

    # Look up this batch's definition and advance the cursor.
    batch_id = current_batch_id[purpose]
    assert batch_id < len(batches[purpose])
    batch = batches[purpose][batch_id]
    epoch_id = batch['epoch_id']
    frame_start = batch['frame_start']
    frame_end = batch['frame_end']
    assert epoch_id is not None and frame_start is not None and frame_end is not None
    current_batch_id[purpose] = (current_batch_id[purpose] + 1) % len(batches[purpose])

    # Open the epoch's video and seek to the batch's first frame.
    vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    assert os.path.isfile(vid_path)
    frame_count = cm.frame_count(vid_path)
    cap = cv2.VideoCapture(vid_path)
    cm.cv2_goto_frame(cap, frame_start)

    # Steering labels for the inclusive frame range.
    csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
    assert os.path.isfile(csv_path)
    rows = cm.fetch_csv_data(csv_path)
    assert frame_count == len(rows)
    yy = [[float(row['wheel'])] for row in rows[frame_start:frame_end + 1]]

    # Read and preprocess the matching frames.
    xx = []
    for _ in xrange(frame_start, frame_end + 1):
        ret, img = cap.read()
        assert ret
        xx.append(preprocess.preprocess(img))

    assert len(xx) == len(yy)
    cap.release()
    return xx, yy
def load_batch(purpose):
    """Fetch the next batch of (frames, steering angles) for ``purpose``.

    Uses the module-level ``batches`` definitions and the
    ``current_batch_id`` cursor, which wraps when the last batch has been
    consumed. Returns a pair ``(xx, yy)`` of equal length.
    """
    global current_batch_id

    idx = current_batch_id[purpose]
    assert idx < len(batches[purpose])
    spec = batches[purpose][idx]
    epoch_id, frame_start, frame_end = spec['epoch_id'], spec['frame_start'], spec['frame_end']
    assert epoch_id is not None and frame_start is not None and frame_end is not None

    # advance (and wrap) the batch cursor for the next call
    current_batch_id[purpose] = (idx + 1) % len(batches[purpose])

    vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    assert os.path.isfile(vid_path)
    frame_count = cm.frame_count(vid_path)
    cap = cv2.VideoCapture(vid_path)
    cm.cv2_goto_frame(cap, frame_start)

    csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
    assert os.path.isfile(csv_path)
    rows = cm.fetch_csv_data(csv_path)
    assert frame_count == len(rows)

    # frame range is inclusive, hence the + 1 on the slice end
    yy = [[float(r['wheel'])] for r in rows[frame_start:frame_end + 1]]

    xx = []
    frame_id = frame_start
    while frame_id <= frame_end:
        ret, img = cap.read()
        assert ret
        xx.append(preprocess.preprocess(img))
        frame_id += 1

    assert len(xx) == len(yy)
    cap.release()
    return xx, yy
# NOTE(review): this chunk appears truncated — the inner per-frame loop
# continues beyond the visible source, so only comments are added here.

# Unique epoch ids gathered across all purposes.
epoch_ids = sorted(list(set(itertools.chain(*params.epochs.values()))))
epoch_ids = [6, 6]  # DBG - heechul  # NOTE(review): debug override processes epoch 6 twice; remove for real runs
tot_time_list = []
curFrame = 0  # global frame counter, capped by NFRAMES below
for epoch_id in epoch_ids:
    print '---------- processing video for epoch {} ----------'.format(
        epoch_id)
    # vid_path = cm.jn(params.data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    vid_path = cm.jn(params.data_dir, 'out-video-{}.avi'.format(epoch_id))
    assert os.path.isfile(vid_path)
    frame_count = cm.frame_count(vid_path)
    cap = cv2.VideoCapture(vid_path)

    machine_steering = []

    print 'performing inference...'
    time_start = time.time()
    for frame_id in xrange(frame_count):
        # only process frames while the global budget NFRAMES is not exhausted
        if curFrame < NFRAMES:
            cam_start = time.time()   # timestamps for per-stage timing
            ret, img = cap.read()
            assert ret  # video must actually contain frame_count frames
            prep_start = time.time()
            img = preprocess.preprocess(img)
img_width = params.img_width
img_channels = params.img_channels

############### building the batch definitions ###############
purposes = ['train', 'val']

batches = OrderedDict()
for purpose in purposes:
    batches[purpose] = []

# Slice each epoch's video into consecutive, non-overlapping batches of
# batch_size frames; trailing remainder frames are dropped.
for purpose in epochs.keys():
    assert len(epochs[purpose]) > 0
    for epoch_id in epochs[purpose]:
        vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
        assert os.path.isfile(vid_path)
        frame_count = cm.frame_count(vid_path)
        # every epoch must hold at least one full batch
        assert batch_size <= frame_count
        batch_count = int(frame_count / batch_size)
        assert batch_count >= 1
        for batch_idx in xrange(batch_count):
            assert purpose in batches
            first = batch_idx * batch_size
            last = first + batch_size - 1  # inclusive end frame
            assert last < frame_count
            batches[purpose].append(OrderedDict([
                ('epoch_id', epoch_id),
                ('frame_start', first),
                ('frame_end', last),
            ]))
# Parse command line: <epoch_id> <skip_count>; exit on wrong arg count.
if len(sys.argv) != 3:
    sys.exit(1)
epoch_id = int(sys.argv[1])
skip_count = int(sys.argv[2])
vid_path = 'epochs/out-video-{}.avi'.format(epoch_id)
csv_path = 'epochs/out-key-{}.csv'.format(epoch_id)
conv_vid_path = 'epochs-conv/out-video-{}.avi'.format(epoch_id)
conv_csv_path = 'epochs-conv/out-key-{}.csv'.format(epoch_id)

print("epoch_id: {}, skip_count: {}".format(epoch_id, skip_count))

assert os.path.isfile(vid_path)
assert os.path.isfile(csv_path)

frame_count = cm.frame_count(vid_path)
cap = cv2.VideoCapture(vid_path)
# capture property ids: 3 = width, 4 = height, 5 = fps
cam_width, cam_height, cam_fps = (int(cap.get(prop)) for prop in (3, 4, 5))
print("w: {}, h:{}, fps: {}".format(cam_width, cam_height, cam_fps))

rows = cm.fetch_csv_data(csv_path)
assert frame_count == len(rows)

# writer for the converted output video, matching the input geometry/fps
fourcc = cv2.VideoWriter_fourcc(*'XVID')
vidfile = cv2.VideoWriter(conv_vid_path, fourcc, cam_fps,
                          (cam_width, cam_height))