Example #1
def load_imgs():
    global imgs
    global wheels

    for p in purposes:
        for epoch_id in epochs[p]:
            print 'processing and loading "{}" epoch {} into memory, current num of imgs is {}...'.format(
                p, epoch_id, len(imgs[p]))

            vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
            assert os.path.isfile(vid_path)
            frame_count = cm.frame_count(vid_path)
            cap = cv2.VideoCapture(vid_path)

            csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
            assert os.path.isfile(csv_path)
            rows = cm.fetch_csv_data(csv_path)
            assert frame_count == len(rows)
            yy = [[float(row['wheel'])] for row in rows]

            while True:
                ret, img = cap.read()
                if not ret:
                    break

                img = preprocess.preprocess(img)
                imgs[p].append(img)

            wheels[p].extend(yy)
            assert len(imgs[p]) == len(wheels[p])

            cap.release()
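
Every one of these snippets leans on a helper module imported as cm (local_common), whose source is not included here. A minimal sketch of the three helpers used above, assuming jn wraps os.path.join and frame_count queries OpenCV's frame-count property, might look like this:

import os
import csv
import cv2

def jn(*parts):
    # assumed: thin wrapper around os.path.join
    return os.path.join(*parts)

def frame_count(vid_path):
    # assumed: read the container's frame count via OpenCV 3.x constants
    cap = cv2.VideoCapture(vid_path)
    n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()
    return n

def fetch_csv_data(csv_path):
    # assumed: return the CSV rows as a list of dicts keyed by header
    with open(csv_path) as f:
        return list(csv.DictReader(f))
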
Example #2
def process_epoch(epoch_id):
    print '---------- processing video for epoch {} ----------'.format(
        epoch_id)
    vid_path = cm.jn(params.data_dir, 'out-video-{}.avi'.format(epoch_id))
    frame_count = cm.frame_count(vid_path)

    vid_scaled_path = cm.jn(params.data_dir,
                            'out-video-{}-scaled.avi'.format(epoch_id))
    if not os.path.exists(vid_scaled_path):
        assert os.path.isfile(vid_path)
        os.system("ffmpeg -i " + vid_path + " -vf scale=1280:720 " +
                  vid_scaled_path)
        print("ffmpeg -i " + vid_path + " -vf scale=1280:720 " +
              vid_scaled_path)
    vid_path = vid_scaled_path

    cap = cv2.VideoCapture(vid_path)

    machine_steering = []

    print 'performing inference...'
    time_start = time.time()
    for frame_id in xrange(frame_count):
        ret, img = cap.read()
        assert ret

        prep_start = time.time()
        img = preprocess.preprocess(img)

        pred_start = time.time()
        rad = model.y.eval(feed_dict={
            model.x: [img],
            model.keep_prob: 1.0
        })[0][0]
        deg = rad2deg(rad)
        pred_end = time.time()

        prep_time = pred_start - prep_start
        pred_time = pred_end - pred_start

        # print 'pred: {} deg. took {} ms'.format(deg, pred_time * 1000)
        # print 'pred: {} deg (rad={})'.format(deg, rad)

        machine_steering.append(deg)

    cap.release()

    fps = frame_count / (time.time() - time_start)
    print('completed inference, total frames: {}, average fps: {} Hz'.format(
        frame_count, round(fps, 1)))
    # print "Machine Steering:", machine_steering
    return machine_steering
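
process_epoch evaluates model.y on the default TensorFlow session, but the model module itself never appears in these examples. A hypothetical PilotNet-flavored stand-in exposing the three attributes the scripts touch (x, y, keep_prob), with layer sizes that are pure assumptions, could be:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 66, 200, 3])  # batch of preprocessed frames
keep_prob = tf.placeholder(tf.float32)              # dropout keep probability

h = tf.layers.conv2d(x, 24, 5, strides=2, activation=tf.nn.relu)
h = tf.layers.flatten(h)
h = tf.nn.dropout(tf.layers.dense(h, 100, activation=tf.nn.relu), keep_prob)
y = tf.layers.dense(h, 1)  # predicted steering angle in radians
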
Example #3
def load_batch(purpose):
    global current_batch_id
    xx = []
    yy = []

    # fetch the batch definition
    batch_id = current_batch_id[purpose]
    assert batch_id < len(batches[purpose])
    batch = batches[purpose][batch_id]
    epoch_id, frame_start, frame_end = batch['epoch_id'], batch[
        'frame_start'], batch['frame_end']
    assert epoch_id is not None and frame_start is not None and frame_end is not None

    # update the current batch
    current_batch_id[purpose] = (current_batch_id[purpose] + 1) % len(
        batches[purpose])

    # fetch image and steering data
    vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    assert os.path.isfile(vid_path)
    frame_count = cm.frame_count(vid_path)
    cap = cv2.VideoCapture(vid_path)
    cm.cv2_goto_frame(cap, frame_start)

    csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
    assert os.path.isfile(csv_path)
    rows = cm.fetch_csv_data(csv_path)
    assert frame_count == len(rows)
    yy = [[float(row['wheel'])] for row in rows[frame_start:frame_end + 1]]

    for frame_id in xrange(frame_start, frame_end + 1):
        ret, img = cap.read()
        assert ret

        img = preprocess.preprocess(img)

        #cv2.imwrite(os.path.abspath('output/sample_frame.jpg'), img)

        xx.append(img)

    assert len(xx) == len(yy)

    cap.release()

    return xx, yy
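
Two more unshown helpers appear in load_batch. cv2_goto_frame presumably seeks the capture to an absolute frame index, and preprocess.preprocess reduces a raw frame to the network input; plausible sketches, assuming the 200x66 crop-and-resize that Example #21 performs inline:

import cv2

def cv2_goto_frame(cap, frame_id):
    # assumed: seek using the capture's frame-position property
    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)

def preprocess(img):
    # assumed: crop away hood and sky, resize to the 200x66 model input,
    # and scale pixel values to [0, 1]
    crop = img[40:200, 50:270]
    rsz = cv2.resize(crop, (200, 66))
    return rsz / 255.
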
Example #4
def load_imgs_v2():
    global imgs
    global wheels

    for epoch_id in epochs['all']:
        print('processing and loading epoch {} into memory. train:{}, val:{}'.
              format(epoch_id, len(imgs['train']), len(imgs['val'])))

        # vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
        vid_path = cm.jn(data_dir, 'out-video-{}.avi'.format(epoch_id))
        print("ppppppppppppppppp : data_dir : ", data_dir)
        print("ppppppppppppppppp : vid_path : ", vid_path)
        if not os.path.isfile(vid_path):
            continue

        frame_count = cm.frame_count(vid_path)
        cap = cv2.VideoCapture(vid_path)

        # csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
        csv_path = cm.jn(data_dir, 'out-key-{}.csv'.format(epoch_id))
        assert os.path.isfile(csv_path)

        rows = cm.fetch_csv_data(csv_path)
        print("{}, {}".format(len(rows), frame_count))
        assert frame_count == len(rows)

        for row in rows:
            ret, img = cap.read()
            if not ret:
                break

            img = preprocess.preprocess(img)
            angle = float(row['wheel'])

            if random.random() < params.train_pct:
                imgs['train'].append(img)
                wheels['train'].append([angle])
            else:
                imgs['val'].append(img)
                wheels['val'].append([angle])

        cap.release()

    print('Total data: train:{}, val:{}'.format(len(imgs['train']),
                                                len(imgs['val'])))
Example #5
def get_human_steering(epoch_id):
    epoch_dir = params.data_dir
    assert os.path.isdir(epoch_dir)
    steering_path = cm.jn(epoch_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
    assert os.path.isfile(steering_path)
    
    rows = cm.fetch_csv_data(steering_path)
    human_steering = [float(row['wheel']) for row in rows]
    return human_steering
Example #6
    def publish_to_car(self):
        twist = Twist()

        try:
            speed = 0.5
            frame_count = cm.frame_count(self.video_location)
            tempAngle = 0 #Hold the angle
            turned_yet = False
            angz = 0

            #Open the model
            sess = tf.InteractiveSession()
            saver = tf.train.Saver()
            model_name = 'model.ckpt'
            model_path = cm.jn(params.save_dir, model_name)
            saver.restore(sess, model_path)

            #Get the predicted angle from the model for each frame and publish the angle
            for frame_id in xrange(frame_count):
                ret, img = self.video_source.read() #Get the frame
                if not ret: #Make sure the frame exists
                    return

                self.publish_frame(img) #Publish the frame for viewing in RViz

                img = preprocess.preprocess(img) #Process the image
                deg = model.y.eval(feed_dict={model.x: [img], model.keep_prob: 1.0})[0][0] #Predict the angle
                deg = round(deg * 8) / 8 #Round the angle to the nearest eighth


                old_angz = angz
                angz = (tempAngle - deg) * 1.5
                if (deg < tempAngle and deg < -tempAngle) or \
                   (deg > tempAngle and deg > -tempAngle):
                    angz = old_angz

                #Create a twist message that determines if a turn is necessary and publish it
                twist.linear.x = speed; twist.linear.y = 0; twist.linear.z = 0
                twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = angz;
                self.twist_pub.publish(twist)

                #Update the temporary angle value to the angle of the current frame
                tempAngle = deg
                time.sleep(0.06)
            """end of code section"""


        except Exception as e:
            rospy.loginfo(e)
            return

        finally:
            twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0;
            twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0;
            self.twist_pub.publish(twist)
            self.video_source.release()
            time.sleep(5)
            rospy.signal_shutdown("Shutting down")
Example #7
def load_batch(purpose):
    global current_batch_id
    xx = []
    yy = []

    # fetch the batch definition
    batch_id = current_batch_id[purpose]
    assert batch_id < len(batches[purpose])
    batch = batches[purpose][batch_id]
    epoch_id, frame_start, frame_end = batch['epoch_id'], batch['frame_start'], batch['frame_end']
    assert epoch_id is not None and frame_start is not None and frame_end is not None

    # update the current batch
    current_batch_id[purpose] = (current_batch_id[purpose] + 1) % len(batches[purpose])

    # fetch image and steering data
    vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    assert os.path.isfile(vid_path)
    frame_count = cm.frame_count(vid_path)
    cap = cv2.VideoCapture(vid_path)
    cm.cv2_goto_frame(cap, frame_start)

    csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
    assert os.path.isfile(csv_path)
    rows = cm.fetch_csv_data(csv_path)
    assert frame_count == len(rows)
    yy = [[float(row['wheel'])] for row in rows[frame_start:frame_end+1]]

    for frame_id in xrange(frame_start, frame_end+1):
        ret, img = cap.read()
        assert ret

        img = preprocess.preprocess(img)
        
        #cv2.imwrite(os.path.abspath('output/sample_frame.jpg'), img)            

        xx.append(img)

    assert len(xx) == len(yy)

    cap.release()

    return xx, yy
Example #8
    def publish_to_car(self):
        twist = Twist()

        try:
            speed = 0.5
            tempAngle = 0 #Hold the previous frame's angle
            angz = 0 #Previous angular velocity, referenced before its first update

            #Open the model
            sess = tf.InteractiveSession()
            saver = tf.train.Saver()
            model_name = 'model.ckpt'
            model_path = cm.jn(params.save_dir, model_name)
            saver.restore(sess, model_path)

            while True:
                snapshot = urllib2.urlopen(self.stream_source)
                original_frame = snapshot.read()

                frame = np.asarray(bytearray(original_frame), dtype="uint8")
                frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                self.publish_snapshot(frame) #Publish the frame for viewing in RViz

                img = preprocess.preprocess(frame) #Process the image
                deg = model.y.eval(feed_dict={model.x: [img], model.keep_prob: 1.0})[0][0] #Predict the angle
                deg = round(deg * 8) / 8 #Round the angle to the nearest eighth


                old_angz = angz
                angz = (tempAngle - deg) * 1.5
                if (deg < tempAngle and deg < -tempAngle) or \
                   (deg > tempAngle and deg > -tempAngle):
                    angz = old_angz

                #Create a twist message that determines if a turn is necessary and publish it
                twist.linear.x = speed; twist.linear.y = 0; twist.linear.z = 0
                twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = angz;
                self.twist_pub.publish(twist)

                #Update the temporary angle value to the angle of the current frame
                tempAngle = deg
                time.sleep(0.06)


        except (KeyboardInterrupt, urllib2.URLError, BadStatusLine) as e:
            rospy.loginfo(e)
            return

        finally:
            twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0;
            twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0;
            self.twist_pub.publish(twist)
            rospy.signal_shutdown("Shutting down")
            sys.exit(0)
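
publish_snapshot here (and publish_frame in Example #6) are also unshown. A common way to publish an OpenCV frame for viewing in RViz is cv_bridge, sketched with an assumed image topic:

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

bridge = CvBridge()
image_pub = rospy.Publisher('/camera/image', Image, queue_size=1)  # topic assumed

def publish_snapshot(frame):
    # convert the BGR OpenCV image to a ROS Image message and publish it
    image_pub.publish(bridge.cv2_to_imgmsg(frame, encoding='bgr8'))
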
Example #9

def rad2deg(rad):
    return 180.0 * rad / math.pi


NCPU = int(sys.argv[1])
config = tf.ConfigProto(intra_op_parallelism_threads=NCPU, inter_op_parallelism_threads=NCPU, \
                        allow_soft_placement=True, device_count = {'CPU': 1})
# sess = tf.Session(config=config)

NFRAMES = 1000

sess = tf.InteractiveSession(config=config)
saver = tf.train.Saver()
model_load_path = cm.jn(params.save_dir, params.model_load_file2)
saver.restore(sess, model_load_path)

epoch_ids = sorted(list(set(itertools.chain(*params.epochs.values()))))

epoch_ids = [6, 6]  # DBG - heechul

tot_time_list = []

curFrame = 0
for epoch_id in epoch_ids:
    print '---------- processing video for epoch {} ----------'.format(
        epoch_id)
    # vid_path = cm.jn(params.data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))

    vid_path = cm.jn(params.data_dir, 'out-video-{}.avi'.format(epoch_id))
Example #10
if use_dnn:
    print("Load TF")
    import tensorflow as tf
    model = __import__(params.model)
    import local_common as cm
    import preprocess

    print("Load Model")
    config = tf.ConfigProto(intra_op_parallelism_threads=NCPU,
                            inter_op_parallelism_threads=NCPU, \
                            allow_soft_placement=True,
                            device_count = {'CPU': 1})

    sess = tf.InteractiveSession(config=config)
    saver = tf.train.Saver()
    model_load_path = cm.jn(params.save_dir, params.model_load_file)
    saver.restore(sess, model_load_path)
    print("Done..")

# null_frame = np.zeros((cfg_cam_res[0],cfg_cam_res[1],3), np.uint8)
# cv2.imshow('frame', null_frame)

g = g_tick()
start_ts = time.time()

frame_arr = []
angle_arr = []
# enter main loop
while True:
    if use_thread:
        time.sleep(next(g))
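
g_tick is undefined in this excerpt; from its use with time.sleep(next(g)), it is presumably a generator that yields how long to sleep to hold a fixed loop rate. One common implementation, with the 50 ms period being an assumption:

import time

def g_tick(period=0.05):
    # yield the time remaining until the next fixed-rate deadline
    t = time.time()
    while True:
        t += period
        yield max(t - time.time(), 0)
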
Example #11
    currentSpeed = NEU_SPEED
    print "stop"
    os.system("echo 0=%dus > /dev/servoblaster" % currentSpeed)


#Stop source warnings from appearing
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

#Dataset that will be used for predicting the angle
dataset_id = 12

#Open the model
sess = tf.InteractiveSession()
saver = tf.train.Saver()
model_name = 'model.ckpt'
model_path = cm.jn(params.save_dir, model_name)
saver.restore(sess, model_path)

#Determine if the camera (1) or a video (0) is used for predicting the angle
useCamera = 1

#Start the car
ffw(40)

#If using the camera
if useCamera:
    cap = cv2.VideoCapture(0)  #Capture the input from the camera

    tempAngle = 0  #Hold the angle

    #Get the predicted angle from the model for each frame and have the car turn accordingly
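
ffw and NEU_SPEED come from surrounding motor-control code that is not shown. Judging from the servoblaster write above, ffw likely widens the throttle pulse; this sketch is speculative, and the channel number and pulse arithmetic are pure assumptions:

import os

NEU_SPEED = 1500  # neutral throttle pulse width in microseconds (assumed)

def ffw(pct):
    # speculative: drive forward by adding pct * 5 us on top of neutral
    pulse_us = NEU_SPEED + pct * 5
    os.system("echo 0=%dus > /dev/servoblaster" % pulse_us)
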
Example #12
epochs = params.epochs
img_height = params.img_height
img_width = params.img_width
img_channels = params.img_channels

############### building the batch definitions ###############
purposes = ['train', 'val']
batches = OrderedDict()
for purpose in purposes:
    batches[purpose] = []

# determine the epoch_id, frame_start, frame_end
for purpose in epochs.keys():
    assert len(epochs[purpose]) > 0
    for epoch_id in epochs[purpose]:
        vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
        assert os.path.isfile(vid_path)
        frame_count = cm.frame_count(vid_path)
        assert batch_size <= frame_count

        batch_count = int(frame_count / batch_size)
        assert batch_count >= 1
        for b in xrange(batch_count):
            assert purpose in batches
            frame_start = b * batch_size
            frame_end = frame_start + batch_size - 1
            assert frame_end < frame_count
            batches[purpose].append(OrderedDict([
                ('epoch_id', epoch_id),
                ('frame_start', frame_start),
                ('frame_end', frame_end),
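
For reference, the batch table built here is what load_batch (Examples #3 and #7) walks through cyclically. A minimal driver loop, assuming the globals those functions expect are in place:

current_batch_id = {'train': 0, 'val': 0}

for step in xrange(10):
    # each call returns batch_size frames and the matching wheel angles,
    # then advances current_batch_id['train'] modulo the batch count
    xx, yy = load_batch('train')
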
Example #13
    def publish_to_car(self):
        twist = Twist()

        try:
            speed = 0.5
            tempAngle = 0  #Hold the previous frame's angle
            angz = 0  #Previous angular velocity, referenced before its first update

            #Open the model
            sess = tf.InteractiveSession()
            saver = tf.train.Saver()
            model_name = 'model.ckpt'
            model_path = cm.jn(params.save_dir, model_name)
            saver.restore(sess, model_path)

            while True:
                snapshot = urllib2.urlopen(self.stream_source)
                original_frame = snapshot.read()

                frame = np.asarray(bytearray(original_frame), dtype="uint8")
                frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                self.publish_snapshot(
                    frame)  #Publish the frame for viewing in RViz

                img = preprocess.preprocess(frame)  #Process the image
                deg = model.y.eval(feed_dict={
                    model.x: [img],
                    model.keep_prob: 1.0
                })[0][0]  #Predict the angle
                deg = round(
                    deg * 8) / 8  #Round the angle to the nearest eighth

                old_angz = angz
                angz = (tempAngle - deg) * 1.5
                if (deg < tempAngle and deg < -tempAngle) or \
                   (deg > tempAngle and deg > -tempAngle):
                    angz = old_angz

                #Create a twist message that determines if a turn is necessary and publish it
                twist.linear.x = speed
                twist.linear.y = 0
                twist.linear.z = 0
                twist.angular.x = 0
                twist.angular.y = 0
                twist.angular.z = angz
                self.twist_pub.publish(twist)

                #Update the temporary angle value to the angle of the current frame
                tempAngle = deg
                time.sleep(0.06)

        except (KeyboardInterrupt, urllib2.URLError, BadStatusLine) as e:
            rospy.loginfo(e)
            return

        finally:
            twist.linear.x = 0
            twist.linear.y = 0
            twist.linear.z = 0
            twist.angular.x = 0
            twist.angular.y = 0
            twist.angular.z = 0
            self.twist_pub.publish(twist)
            rospy.signal_shutdown("Shutting down")
            sys.exit(0)
Example #14
        global currentSpeed
        currentSpeed = NEU_SPEED
        print "stop"
	os.system("echo 0=%dus > /dev/servoblaster" % currentSpeed)

#Stop source warnings from appearing
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

#Dataset that will be used for predicting the angle
dataset_id = 12

#Open the model
sess = tf.InteractiveSession()
saver = tf.train.Saver()
model_name = 'model.ckpt'
model_path = cm.jn(params.save_dir, model_name)
saver.restore(sess, model_path)

#Determine if the camera (1) or a video (0) is used for predicting the angle
useCamera = 1

#Start the car
ffw(40)

#If using the camera
if useCamera:
    cap = cv2.VideoCapture(0) #Capture the input from the camera

    tempAngle = 0 #Hold the angle

    #Get the predicted angle from the model for each frame and have the car turn accordingly
Example #15
def visualize(epoch_id,
              machine_steering,
              out_dir,
              timeArr,
              perform_smoothing=False,
              verbose=False,
              verbose_progress_step=100,
              frame_count_limit=None):
    epoch_dir = params.data_dir
    human_steering = get_human_steering(epoch_id)
    assert len(human_steering) == len(machine_steering)

    # testing: artificially magnify steering to test steering wheel visualization
    # human_steering = list(np.array(human_steering) * 10)
    # machine_steering = list(np.array(machine_steering) * 10)

    # testing: artificially alter machine steering to test that the disagreement coloring is working
    # delta = 0
    # for i in xrange(len(machine_steering)):
    #     delta += random.uniform(-1, 1)
    #     machine_steering[i] += delta

    if perform_smoothing:
        machine_steering = list(cm.smooth(np.array(machine_steering)))
        #human_steering = list(cm.smooth(np.array(human_steering)))

    steering_min = min(
        np.min(human_steering) * MAX_ANGLE,
        np.min(machine_steering) * MAX_ANGLE)
    steering_max = max(
        np.max(human_steering) * MAX_ANGLE,
        np.max(machine_steering) * MAX_ANGLE)

    assert os.path.isdir(epoch_dir)

    front_vid_path = epoch_dir + "/dataset{0}/out-mencoder2.avi".format(
        epoch_id)
    assert os.path.isfile(front_vid_path)

    dash_vid_path = cm.jn(epoch_dir, 'epoch{:0>2}_dash.mkv'.format(epoch_id))
    dash_exists = os.path.isfile(dash_vid_path)

    front_cap = cv2.VideoCapture(front_vid_path)
    dash_cap = cv2.VideoCapture(dash_vid_path) if dash_exists else None

    assert os.path.isdir(out_dir)
    vid_size = cm.video_resolution_to_size('720p', width_first=True)
    out_path = cm.jn(out_dir, 'epoch{:0>2}_human_machine.mkv'.format(epoch_id))
    vw = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'X264'), 30,
                         vid_size)
    w, h = vid_size

    totalError = 0  #Sum of all errors, used to calculate the average error per frame
    errorArr = []  #Individual errors, used to calculate the running standard deviation

    for f_cur in xrange(len(machine_steering)):
        if (f_cur != 0) and (f_cur % verbose_progress_step == 0):
            print 'completed {} of {} frames'.format(f_cur,
                                                     len(machine_steering))

        if (frame_count_limit is not None) and (f_cur >= frame_count_limit):
            break

        rret, rimg = front_cap.read()
        assert rret

        if dash_exists:
            dret, dimg = dash_cap.read()
            assert dret
        else:
            dimg = rimg.copy()
            dimg[:] = (0, 0, 0)

        ry0, rh = 80, 500
        dimg = dimg[100:, :930]
        dimg = cm.cv2_resize_by_height(dimg, h - rh)

        fimg = rimg.copy()
        fimg[:] = (0, 0, 0)
        fimg[:rh] = rimg[ry0:ry0 + rh]
        dh, dw = dimg.shape[:2]
        fimg[rh:, :dw] = dimg[:]

        ########################## plot ##########################
        plot_size = (500, dh)
        win_before, win_after = 150, 150

        xx, hh, mm = [], [], []
        for f_rel in xrange(-win_before, win_after + 1):
            f_abs = f_cur + f_rel
            if f_abs < 0 or f_abs >= len(machine_steering):
                continue
            xx.append(f_rel / 30)
            hh.append(human_steering[f_abs] * MAX_ANGLE)
            mm.append(machine_steering[f_abs] * MAX_ANGLE)

        fig = plt.figure()
        axis = fig.add_subplot(1, 1, 1)

        steering_range = max(abs(steering_min), abs(steering_max))
        #ylim = [steering_min, steering_max]
        ylim = [-steering_range, steering_range]
        # ylim[0] = min(np.min(hh), np.min(mm))
        # ylim[1] = max(np.max(hh), np.max(mm))

        axis.set_xlabel('Current Time (secs)')
        axis.set_ylabel('Steering Angle')
        axis.axvline(x=0, color='k', ls='dashed')
        axis.plot(xx, hh)
        axis.plot(xx, mm)
        axis.set_xlim([-win_before / 30, win_after / 30])
        axis.set_ylim(ylim)
        #axis.set_ylabel(y_label, fontsize=18)
        axis.label_outer()
        #axes.append(axis)

        buf = io.BytesIO()
        # http://stackoverflow.com/a/4306340/627517
        sx, sy = plot_size
        sx, sy = round(sx / 100, 1), round(sy / 100, 1)

        fig.set_size_inches(sx, sy)
        fig.tight_layout()
        fig.savefig(buf, format="png", dpi=100)
        buf.seek(0)
        buf_img = PIL.Image.open(buf)
        pimg = np.asarray(buf_img)
        plt.close(fig)

        pimg = cv2.resize(pimg, plot_size)
        pimg = pimg[:, :, :3]

        ph, pw = pimg.shape[:2]
        pimg = 255 - pimg

        fimg[rh:, -pw:] = pimg[:]

        ####################### human steering wheels ######################
        wimg = cm.imread(os.path.abspath("images/wheel-tesla-image-150.png"),
                         cv2.IMREAD_UNCHANGED)

        human_wimg = cm.rotate_image(wimg, (human_steering[f_cur] * MAX_ANGLE))
        wh, ww = human_wimg.shape[:2]
        fimg = cm.overlay_image(fimg,
                                human_wimg,
                                y_offset=rh + 50,
                                x_offset=dw + 60)

        ####################### machine steering wheels ######################
        disagreement = abs((machine_steering[f_cur] * MAX_ANGLE) -
                           (human_steering[f_cur] * MAX_ANGLE))
        machine_wimg = cm.rotate_image(wimg,
                                       (machine_steering[f_cur] * MAX_ANGLE))
        red_machine_wimg = machine_wimg.copy()
        green_machine_wimg = machine_wimg.copy()
        red_machine_wimg[:, :, 2] = 255
        green_machine_wimg[:, :, 1] = 255
        #r = disagreement / (steering_max - steering_min)
        max_disagreement = 10
        r = min(1., disagreement / max_disagreement)
        g = 1 - r
        assert r >= 0
        assert g <= 1
        machine_wimg = cv2.addWeighted(red_machine_wimg, r, green_machine_wimg,
                                       g, 0)
        wh, ww = machine_wimg.shape[:2]
        fimg = cm.overlay_image(fimg,
                                machine_wimg,
                                y_offset=rh + 50,
                                x_offset=dw + 260)

        ####################### text ######################
        timg_green_agree = cm.imread(
            os.path.abspath("images/text-green-agree.png"),
            cv2.IMREAD_UNCHANGED)
        timg_ground_truth = cm.imread(
            os.path.abspath("images/text-ground-truth.png"),
            cv2.IMREAD_UNCHANGED)
        timg_learned_control = cm.imread(
            os.path.abspath("images/text-learned-control.png"),
            cv2.IMREAD_UNCHANGED)
        timg_red_disagree = cm.imread(
            os.path.abspath("images/text-red-disagree.png"),
            cv2.IMREAD_UNCHANGED)
        timg_tesla_control_autopilot = cm.imread(
            os.path.abspath("images/text-tesla-control-autopilot.png"),
            cv2.IMREAD_UNCHANGED)
        timg_tesla_control_human = cm.imread(
            os.path.abspath("images/text-tesla-control-human.png"),
            cv2.IMREAD_UNCHANGED)

        textColor = (255, 255, 255)  #Color of the metrics text
        bgColor = (0, 0, 0)  #Color of the metrics background
        newImage = Image.new('RGBA', (300, 200),
                             bgColor)  #Create blank canvas for metrics
        drawer = ImageDraw.Draw(
            newImage)  #Create ImageDraw for drawing the metrics

        error = abs((human_steering[f_cur] * MAX_ANGLE) -
                    (machine_steering[f_cur] *
                     MAX_ANGLE))  #calculate the error for the current frame
        totalError += error  #Add the current error to the total error
        errorArr.append(error)  #Add the current error to the error array

        stdDev = np.std(
            errorArr
        )  #Calculate the standard deviation as of the current frame
        avgError = totalError / (
            f_cur + 1)  #Calculate the average error as of the current frame

        drawer.text((10, 10), "Frame #%i" % f_cur,
                    fill=textColor)  #Print the frame number
        drawer.text((150, 10),
                    "Time to get angle: %i ms" % (timeArr[f_cur] * 1000),
                    fill=textColor)  #Print the forward pass in milliseconds
        drawer.text((10, 30), "Angles:",
                    fill=textColor)  #Print the steering angles
        drawer.text((10, 40),
                    "Actual: %.3f" % (human_steering[f_cur] * MAX_ANGLE),
                    fill=textColor)  #Print the actual steering angle
        drawer.text((150, 40),
                    "Predicted: %.3f" % (machine_steering[f_cur] * MAX_ANGLE),
                    fill=textColor)  #Print the predicted steering angle
        drawer.text((10, 60), "Error: %.3f" % (error),
                    fill=textColor)  #Print the error for the current frame
        drawer.text(
            (150, 60), "Average error: %.3f" % (avgError),
            fill=textColor)  #Print the average error as of the current frame
        drawer.text((10, 70),
                    "Standard Deviation: %.3f" % (stdDev),
                    fill=textColor
                    )  #Print the standard deviation as of the current frame

        # timg_ = cm.imread(os.path.abspath("images/text-.png"), cv2.IMREAD_UNCHANGED)

        #timg_test2 = cm.imread(timg_test, cv2.IMREAD_UNCHANGED)
        newImage.save("images/stats.png")
        timg_stats = cm.imread(os.path.abspath("images/stats.png"),
                               cv2.IMREAD_UNCHANGED)

        fimg = cm.overlay_image(fimg,
                                timg_tesla_control_autopilot,
                                y_offset=rh + 8,
                                x_offset=dw + 83)
        fimg = cm.overlay_image(fimg,
                                timg_learned_control,
                                y_offset=rh + 8,
                                x_offset=dw + 256)
        fimg = cm.overlay_image(fimg,
                                timg_ground_truth,
                                y_offset=rh + 205,
                                x_offset=dw + 90)
        fimg = cm.overlay_image(fimg,
                                timg_red_disagree,
                                y_offset=rh + 205,
                                x_offset=dw + 230)
        fimg = cm.overlay_image(fimg,
                                timg_green_agree,
                                y_offset=rh + 205,
                                x_offset=dw + 345)
        fimg = cm.overlay_image(fimg, timg_stats, y_offset=rh,
                                x_offset=0)  #Draw the metrics to the screen
        #fimg = cm.overlay_image(fimg, timg_work, y_offset = 0, x_offset = 0)

        if (frame_count_limit is not None) and (frame_count_limit == 1):
            cv2.imwrite(out_path.replace('mkv', 'jpg'), fimg)
            sys.exit()

        vw.write(fimg)

    front_cap.release()
    if dash_exists:
        dash_cap.release()
    vw.release()

    cm.mkv_to_mp4(out_path, remove_mkv=True)
Example #16
def visualize(epoch_id,
              machine_steering,
              out_dir,
              perform_smoothing=False,
              verbose=False,
              verbose_progress_step=100,
              frame_count_limit=None):
    epoch_dir = params.data_dir
    human_steering = get_human_steering(epoch_id)
    print("epoch_id=%d, h_len=%d, m_len=%d" %
          (epoch_id, len(human_steering), len(machine_steering)))
    assert len(human_steering) == len(machine_steering)

    # testing: artificially magnify steering to test steering wheel visualization
    # human_steering = list(np.array(human_steering) * 10)
    # machine_steering = list(np.array(machine_steering) * 10)

    # testing: artificially alter machine steering to test that the disagreement coloring is working
    # delta = 0
    # for i in xrange(len(machine_steering)):
    #     delta += random.uniform(-1, 1)
    #     machine_steering[i] += delta

    if perform_smoothing:
        machine_steering = list(cm.smooth(np.array(machine_steering)))
        #human_steering = list(cm.smooth(np.array(human_steering)))

    steering_min = min(np.min(human_steering), np.min(machine_steering))
    steering_max = max(np.max(human_steering), np.max(machine_steering))

    assert os.path.isdir(epoch_dir)

    # front_vid_path = cm.jn(epoch_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    front_vid_path = cm.jn(epoch_dir,
                           'out-video-{}-scaled.avi'.format(epoch_id))
    assert os.path.isfile(front_vid_path)

    dash_vid_path = cm.jn(epoch_dir, 'epoch{:0>2}_dash.mkv'.format(epoch_id))
    dash_exists = os.path.isfile(dash_vid_path)

    front_cap = cv2.VideoCapture(front_vid_path)
    dash_cap = cv2.VideoCapture(dash_vid_path) if dash_exists else None

    assert os.path.isdir(out_dir)
    vid_size = cm.video_resolution_to_size('720p', width_first=True)
    # out_path = cm.jn(out_dir, 'epoch{:0>2}_human_machine.mkv'.format(epoch_id))
    out_path = cm.jn(out_dir,
                     'out-video-{}-human_machine.mkv'.format(epoch_id))
    vw = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'X264'), 30,
                         vid_size)
    w, h = vid_size

    for f_cur in xrange(len(machine_steering)):
        if (f_cur != 0) and (f_cur % verbose_progress_step == 0):
            print 'completed {} of {} frames'.format(f_cur,
                                                     len(machine_steering))

        if (frame_count_limit is not None) and (f_cur >= frame_count_limit):
            break

        rret, rimg = front_cap.read()
        assert rret

        if dash_exists:
            dret, dimg = dash_cap.read()
            assert dret
        else:
            dimg = rimg.copy()
            dimg[:] = (0, 0, 0)

        ry0, rh = 80, 500
        dimg = dimg[100:, :930]
        dimg = cm.cv2_resize_by_height(dimg, h - rh)

        fimg = rimg.copy()
        fimg[:] = (0, 0, 0)
        fimg[:rh] = rimg[ry0:ry0 + rh]
        dh, dw = dimg.shape[:2]
        fimg[rh:, :dw] = dimg[:]

        ########################## plot ##########################
        plot_size = (500, dh)
        win_before, win_after = 150, 150

        xx, hh, mm = [], [], []
        pp = []
        for f_rel in xrange(-win_before, win_after + 1):
            f_abs = f_cur + f_rel
            if f_abs < 0 or f_abs >= len(machine_steering):
                continue
            xx.append(f_rel / 30)
            hh.append(human_steering[f_abs])
            mm.append(machine_steering[f_abs])
            if params.use_picar_mini:
                pp.append(get_degree_picar_mini(machine_steering[f_abs]))

        fig = plt.figure()
        axis = fig.add_subplot(1, 1, 1)

        steering_range = max(abs(steering_min), abs(steering_max))
        #ylim = [steering_min, steering_max]
        ylim = [-steering_range, steering_range]
        # ylim[0] = min(np.min(hh), np.min(mm))
        # ylim[1] = max(np.max(hh), np.max(mm))

        axis.set_xlabel('Current Time (secs)')
        axis.set_ylabel('Steering Angle')
        axis.axvline(x=0, color='k', ls='dashed')
        axis.plot(xx, hh)
        axis.plot(xx, mm)
        if params.use_picar_mini:
            axis.plot(xx, pp)  # picar-mini-v2.0
        axis.set_xlim([-win_before / 30, win_after / 30])
        axis.set_ylim(ylim)
        #axis.set_ylabel(y_label, fontsize=18)
        axis.label_outer()
        #axes.append(axis)

        buf = io.BytesIO()
        # http://stackoverflow.com/a/4306340/627517
        sx, sy = plot_size
        sx, sy = round(sx / 100, 1), round(sy / 100, 1)

        fig.set_size_inches(sx, sy)
        fig.tight_layout()
        fig.savefig(buf, format="png", dpi=100)
        buf.seek(0)
        buf_img = PIL.Image.open(buf)
        pimg = np.asarray(buf_img)
        plt.close(fig)

        pimg = cv2.resize(pimg, plot_size)
        pimg = pimg[:, :, :3]

        ph, pw = pimg.shape[:2]
        pimg = 255 - pimg

        fimg[rh:, -pw:] = pimg[:]

        ####################### human steering wheels ######################
        wimg = cm.imread(os.path.abspath("images/wheel-tesla-image-150.png"),
                         cv2.IMREAD_UNCHANGED)

        human_wimg = cm.rotate_image(wimg, -human_steering[f_cur])
        wh, ww = human_wimg.shape[:2]
        fimg = cm.overlay_image(fimg,
                                human_wimg,
                                y_offset=rh + 50,
                                x_offset=dw + 60)

        ####################### machine steering wheels ######################
        disagreement = abs(machine_steering[f_cur] - human_steering[f_cur])
        machine_wimg = cm.rotate_image(wimg, -machine_steering[f_cur])
        red_machine_wimg = machine_wimg.copy()
        green_machine_wimg = machine_wimg.copy()
        red_machine_wimg[:, :, 2] = 255
        green_machine_wimg[:, :, 1] = 255
        #r = disagreement / (steering_max - steering_min)
        max_disagreement = 10
        r = min(1., disagreement / max_disagreement)
        g = 1 - r
        assert r >= 0
        assert g <= 1
        machine_wimg = cv2.addWeighted(red_machine_wimg, r, green_machine_wimg,
                                       g, 0)
        wh, ww = machine_wimg.shape[:2]
        fimg = cm.overlay_image(fimg,
                                machine_wimg,
                                y_offset=rh + 50,
                                x_offset=dw + 260)

        ####################### text ######################
        timg_green_agree = cm.imread(
            os.path.abspath("images/text-green-agree.png"),
            cv2.IMREAD_UNCHANGED)
        timg_ground_truth = cm.imread(
            os.path.abspath("images/text-ground-truth.png"),
            cv2.IMREAD_UNCHANGED)
        timg_learned_control = cm.imread(
            os.path.abspath("images/text-learned-control.png"),
            cv2.IMREAD_UNCHANGED)
        timg_red_disagree = cm.imread(
            os.path.abspath("images/text-red-disagree.png"),
            cv2.IMREAD_UNCHANGED)
        timg_tesla_control_autopilot = cm.imread(
            os.path.abspath("images/text-tesla-control-autopilot.png"),
            cv2.IMREAD_UNCHANGED)
        timg_tesla_control_human = cm.imread(
            os.path.abspath("images/text-tesla-control-human.png"),
            cv2.IMREAD_UNCHANGED)
        # timg_ = cm.imread(os.path.abspath("images/text-.png"), cv2.IMREAD_UNCHANGED)

        fimg = cm.overlay_image(fimg,
                                timg_tesla_control_autopilot,
                                y_offset=rh + 8,
                                x_offset=dw + 83)
        fimg = cm.overlay_image(fimg,
                                timg_learned_control,
                                y_offset=rh + 8,
                                x_offset=dw + 256)
        fimg = cm.overlay_image(fimg,
                                timg_ground_truth,
                                y_offset=rh + 205,
                                x_offset=dw + 90)
        fimg = cm.overlay_image(fimg,
                                timg_red_disagree,
                                y_offset=rh + 205,
                                x_offset=dw + 230)
        fimg = cm.overlay_image(fimg,
                                timg_green_agree,
                                y_offset=rh + 205,
                                x_offset=dw + 345)

        if (frame_count_limit is not None) and (frame_count_limit == 1):
            cv2.imwrite(out_path.replace('mkv', 'jpg'), fimg)
            sys.exit()

        vw.write(fimg)

    front_cap.release()
    if dash_exists:
        dash_cap.release()
    vw.release()

    cm.mkv_to_mp4(out_path, remove_mkv=True)
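
get_degree_picar_mini converts the model's output to the picar-mini's servo range before plotting; its body is not shown, and this sketch (including the +/-30 degree travel limit) is an assumption:

def get_degree_picar_mini(steering):
    # speculative: scale normalized steering to degrees and clamp to the
    # servo's assumed +/-30 degree travel
    deg = steering * 30.0
    return max(-30.0, min(30.0, deg))
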
Example #17
def visualize(epoch_id, machine_steering, out_dir, timeArr, perform_smoothing=False,
              verbose=False, verbose_progress_step = 100, frame_count_limit = None):
    epoch_dir = params.data_dir
    human_steering = get_human_steering(epoch_id)
    assert len(human_steering) == len(machine_steering)

    # testing: artificially magnify steering to test steering wheel visualization
    # human_steering = list(np.array(human_steering) * 10)
    # machine_steering = list(np.array(machine_steering) * 10)

    # testing: artificially alter machine steering to test that the disagreement coloring is working
    # delta = 0
    # for i in xrange(len(machine_steering)):
    #     delta += random.uniform(-1, 1)
    #     machine_steering[i] += delta
    
    if perform_smoothing:
        machine_steering = list(cm.smooth(np.array(machine_steering)))
        #human_steering = list(cm.smooth(np.array(human_steering)))

    steering_min = min(np.min(human_steering) * MAX_ANGLE, np.min(machine_steering) * MAX_ANGLE)
    steering_max = max(np.max(human_steering) * MAX_ANGLE, np.max(machine_steering) * MAX_ANGLE)

    assert os.path.isdir(epoch_dir)

    front_vid_path = epoch_dir + "/dataset{0}/out-mencoder2.avi".format(epoch_id)
    assert os.path.isfile(front_vid_path)
    
    dash_vid_path = cm.jn(epoch_dir, 'epoch{:0>2}_dash.mkv'.format(epoch_id))
    dash_exists = os.path.isfile(dash_vid_path)

    front_cap = cv2.VideoCapture(front_vid_path)
    dash_cap = cv2.VideoCapture(dash_vid_path) if dash_exists else None
    
    assert os.path.isdir(out_dir)
    vid_size = cm.video_resolution_to_size('720p', width_first=True)
    out_path = cm.jn(out_dir, 'epoch{:0>2}_human_machine.mkv'.format(epoch_id))
    vw = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'X264' ), 30, vid_size)
    w, h = vid_size

    totalError = 0 #Sum of all errors, used to calculate the average error per frame
    errorArr = [] #Individual errors, used to calculate the running standard deviation

    for f_cur in xrange(len(machine_steering)):
        if (f_cur != 0) and (f_cur % verbose_progress_step == 0):
            print 'completed {} of {} frames'.format(f_cur, len(machine_steering))

        if (frame_count_limit is not None) and (f_cur >= frame_count_limit):
            break
            
        rret, rimg = front_cap.read()
        assert rret

        if dash_exists:
            dret, dimg = dash_cap.read()
            assert dret
        else:
            dimg = rimg.copy()
            dimg[:] = (0, 0, 0)
        
        ry0, rh = 80, 500
        dimg = dimg[100:, :930]
        dimg = cm.cv2_resize_by_height(dimg, h-rh)

        fimg = rimg.copy()
        fimg[:] = (0, 0, 0)
        fimg[:rh] = rimg[ry0:ry0+rh]
        dh, dw = dimg.shape[:2]
        fimg[rh:,:dw] = dimg[:]
        
        ########################## plot ##########################
        plot_size = (500, dh)
        win_before, win_after = 150, 150

        xx, hh, mm = [], [], []
        for f_rel in xrange(-win_before, win_after+1):
            f_abs = f_cur + f_rel
            if f_abs < 0 or f_abs >= len(machine_steering):
                continue
            xx.append(f_rel/30)
            hh.append(human_steering[f_abs] * MAX_ANGLE)
            mm.append(machine_steering[f_abs] * MAX_ANGLE)

        fig = plt.figure()
        axis = fig.add_subplot(1, 1, 1)

        steering_range = max(abs(steering_min), abs(steering_max))
        #ylim = [steering_min, steering_max]
        ylim = [-steering_range, steering_range]
        # ylim[0] = min(np.min(hh), np.min(mm))
        # ylim[1] = max(np.max(hh), np.max(mm))
        
        axis.set_xlabel('Current Time (secs)')
        axis.set_ylabel('Steering Angle')
        axis.axvline(x=0, color='k', ls='dashed')
        axis.plot(xx, hh)
        axis.plot(xx, mm)
        axis.set_xlim([-win_before/30, win_after/30])
        axis.set_ylim(ylim)
        #axis.set_ylabel(y_label, fontsize=18)
        axis.label_outer()
        #axes.append(axis)

        buf = io.BytesIO()
        # http://stackoverflow.com/a/4306340/627517
        sx, sy = plot_size
        sx, sy = round(sx / 100, 1), round(sy / 100, 1)

        fig.set_size_inches(sx, sy)
        fig.tight_layout()
        fig.savefig(buf, format="png", dpi=100)
        buf.seek(0)
        buf_img = PIL.Image.open(buf)
        pimg = np.asarray(buf_img)
        plt.close(fig)

        pimg = cv2.resize(pimg, plot_size)
        pimg = pimg[:,:,:3]

        ph, pw = pimg.shape[:2]
        pimg = 255 - pimg
        
        fimg[rh:,-pw:] = pimg[:]

        ####################### human steering wheels ######################
        wimg = cm.imread(os.path.abspath("images/wheel-tesla-image-150.png"), cv2.IMREAD_UNCHANGED)

        human_wimg = cm.rotate_image(wimg, (human_steering[f_cur] * MAX_ANGLE))
        wh, ww = human_wimg.shape[:2]
        fimg = cm.overlay_image(fimg, human_wimg, y_offset = rh+50, x_offset = dw+60)

        ####################### machine steering wheels ######################
        disagreement = abs((machine_steering[f_cur] * MAX_ANGLE) - (human_steering[f_cur] * MAX_ANGLE))
        machine_wimg = cm.rotate_image(wimg, (machine_steering[f_cur] * MAX_ANGLE))
        red_machine_wimg = machine_wimg.copy()
        green_machine_wimg = machine_wimg.copy()
        red_machine_wimg[:,:,2] = 255
        green_machine_wimg[:,:,1] = 255
        #r = disagreement / (steering_max - steering_min)
        max_disagreement = 10
        r = min(1., disagreement / max_disagreement)
        g = 1 - r
        assert r >= 0
        assert g <= 1
        machine_wimg = cv2.addWeighted(red_machine_wimg, r, green_machine_wimg, g, 0)
        wh, ww = machine_wimg.shape[:2]
        fimg = cm.overlay_image(fimg, machine_wimg, y_offset = rh+50, x_offset = dw+260)
        
        ####################### text ######################
        timg_green_agree = cm.imread(os.path.abspath("images/text-green-agree.png"), cv2.IMREAD_UNCHANGED)
        timg_ground_truth = cm.imread(os.path.abspath("images/text-ground-truth.png"), cv2.IMREAD_UNCHANGED)
        timg_learned_control = cm.imread(os.path.abspath("images/text-learned-control.png"), cv2.IMREAD_UNCHANGED)
        timg_red_disagree = cm.imread(os.path.abspath("images/text-red-disagree.png"), cv2.IMREAD_UNCHANGED)
        timg_tesla_control_autopilot = cm.imread(os.path.abspath("images/text-tesla-control-autopilot.png"), cv2.IMREAD_UNCHANGED)
        timg_tesla_control_human = cm.imread(os.path.abspath("images/text-tesla-control-human.png"), cv2.IMREAD_UNCHANGED)

        textColor = (255,255,255) #Color of the metrics text
        bgColor = (0,0,0) #Color of the metrics background
        newImage = Image.new('RGBA', (300, 200), bgColor) #Create blank canvas for metrics
        drawer = ImageDraw.Draw(newImage) #Create ImageDraw for drawing the metrics

        error = abs((human_steering[f_cur]*MAX_ANGLE) - (machine_steering[f_cur]*MAX_ANGLE)) #calculate the error for the current frame
        totalError += error #Add the current error to the total error
        errorArr.append(error) #Add the current error to the error array

        stdDev = np.std(errorArr) #Calculate the standard deviation as of the current frame
        avgError = totalError / (f_cur + 1) #Calculate the average error as of the current frame

        drawer.text((10, 10), "Frame #%i" % f_cur, fill=textColor) #Print the frame number
        drawer.text((150, 10), "Time to get angle: %i ms" % (timeArr[f_cur] * 1000), fill=textColor) #Print the forward pass in milliseconds
        drawer.text((10,30), "Angles:", fill=textColor) #Print the steering angles
        drawer.text((10,40), "Acutal: %.3f" % (human_steering[f_cur] * MAX_ANGLE), fill = textColor) #Print the actual steering angle
        drawer.text((150,40), "Predicted: %.3f" % (machine_steering[f_cur]* MAX_ANGLE), fill = textColor) #Print the predicted steering angle
        drawer.text((10,60), "Error: %.3f" % (error), fill=textColor) #Print the error for the current frame
        drawer.text((150,60), "Average error: %.3f" % (avgError), fill=textColor) #Print the average error as of the current frame
        drawer.text((10,70), "Standard Deviation: %.3f" % (stdDev), fill=textColor) #Print the standard deviation as of the current frame

        # timg_ = cm.imread(os.path.abspath("images/text-.png"), cv2.IMREAD_UNCHANGED)

        #timg_test2 = cm.imread(timg_test, cv2.IMREAD_UNCHANGED)
        newImage.save("images/stats.png")
        timg_stats = cm.imread(os.path.abspath("images/stats.png"), cv2.IMREAD_UNCHANGED)

        fimg = cm.overlay_image(fimg, timg_tesla_control_autopilot, y_offset = rh+8, x_offset = dw+83)
        fimg = cm.overlay_image(fimg, timg_learned_control, y_offset = rh+8, x_offset = dw+256)
        fimg = cm.overlay_image(fimg, timg_ground_truth, y_offset = rh+205, x_offset = dw+90)
        fimg = cm.overlay_image(fimg, timg_red_disagree, y_offset = rh+205, x_offset = dw+230)
        fimg = cm.overlay_image(fimg, timg_green_agree, y_offset = rh+205, x_offset = dw+345)
        fimg = cm.overlay_image(fimg, timg_stats, y_offset = rh, x_offset=0) #Draw the metrics to the screen
        #fimg = cm.overlay_image(fimg, timg_work, y_offset = 0, x_offset = 0)

        if (frame_count_limit is not None) and (frame_count_limit == 1):
            cv2.imwrite(out_path.replace('mkv', 'jpg'), fimg)
            sys.exit()

        vw.write(fimg)

    front_cap.release()
    if dash_exists:
        dash_cap.release()
    vw.release()

    cm.mkv_to_mp4(out_path, remove_mkv=True)
Example #18
epochs = params.epochs
img_height = params.img_height
img_width = params.img_width
img_channels = params.img_channels

############### building the batch definitions ###############
purposes = ['train', 'val']
batches = OrderedDict()
for purpose in purposes:
    batches[purpose] = []

# determine the epoch_id, frame_start, frame_end
for purpose in epochs.keys():
    assert len(epochs[purpose]) > 0
    for epoch_id in epochs[purpose]:
        vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
        assert os.path.isfile(vid_path)
        frame_count = cm.frame_count(vid_path)
        assert batch_size <= frame_count

        batch_count = int(frame_count / batch_size)
        assert batch_count >= 1
        for b in xrange(batch_count):
            assert purpose in batches
            frame_start = b * batch_size
            frame_end = frame_start + batch_size - 1
            assert frame_end < frame_count
            batches[purpose].append(
                OrderedDict([
                    ('epoch_id', epoch_id),
                    ('frame_start', frame_start),
Example #19
import os
import local_common as cm
import params
from shutil import copyfile

data_dir = ""
csv_path = cm.jn(data_dir, 'data/output.txt')
rows = cm.fetch_csv_data(csv_path)

count = len(params.epochs['train']) + len(params.epochs['val'])
num = len(rows)/count
name = []
name.extend(params.epochs['train'])
name.extend(params.epochs['val'])
cur = num
index = 0

print len(rows), num

data_dir = 'epochs/data'+str(name[index])
if not os.path.isdir(data_dir):
	os.mkdir(data_dir)
f = open(data_dir + '/output.txt', 'w')
f.write("img,wheel\n")
for row in rows:
	cur = cur - 1
	if cur < 0:
		index = index + 1
		cur = num
		data_dir = 'epochs/data'+str(name[index])
		if not os.path.isdir(data_dir):
Example #20
import cv2
import subprocess as sp
import itertools
import params
import sys
import os
import preprocess
import visualize
import time

import tensorflow as tf
import local_common as cm

sess = tf.InteractiveSession()
saver = tf.train.Saver()
model_name = 'model.ckpt'
model_path = cm.jn(params.save_dir, model_name)
saver.restore(sess, model_path)

epoch_ids = sorted(list(set(itertools.chain(*params.epochs.values()))))

for epoch_id in epoch_ids:
    print '---------- processing video for epoch {} ----------'.format(
        epoch_id)
    vid_path = cm.jn(params.data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    assert os.path.isfile(vid_path)
    frame_count = cm.frame_count(vid_path)
    cap = cv2.VideoCapture(vid_path)

    machine_steering = []

    print 'performing inference...'
Example #21
def load_imgs():
    global imgs
    global wheels

    for p in purposes:
        print p
        for epoch_id in epochs[p]:
            print 'processing and loading "{}" epoch {} into memory, current num of imgs is {}...'.format(
                p, epoch_id, len(imgs[p]))

            # csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
            csv_path = cm.jn(data_dir, 'data{}/output.txt'.format(epoch_id))
            assert os.path.isfile(csv_path)

            print "DBG:", csv_path
            rows = cm.fetch_csv_data(csv_path)

            # print len(rows)
            #print len(rows), frame_count
            #assert frame_count == len(rows)

            for row in rows:
                # print row
                yy = float(row['wheel'])
                # yy = yy / 22500.

                print yy, "before"
                yy = 1.0 * scale_down(yy, params.input_max,
                                      params.scale) / params.scale
                print yy, "after ==="

                img_path = row['img']
                # print "img"
                # print img_path
                # print yy

                ############ using opencv to read img here
                img_path = cm.jn(data_dir,
                                 'data{}/{}'.format(epoch_id, img_path))
                if not os.path.isfile(img_path):
                    continue
                img = cv2.imread(img_path)

                ########### doing data augmentation

                # move along height of photo
                # for i in range(-2,3,1):
                # for i in range(-1,2,1):
                for i in range(1):
                    i *= 2

                    # move along width of photo
                    # for j in range(-1,2,1):
                    for j in range(1):
                        j *= 2
                        crop = img[40 + i:200 + i, 50 + j:270 + j]

                        # resize to 200*66
                        rsz = cv2.resize(crop, (200, 66))
                        rsz = rsz / 255.
                        imgs[p].append(rsz)
                        wheels[p].append(yy)

                        # flip image and add again
                        flip = np.flip(rsz, 1)
                        imgs[p].append(flip)
                        wheels[p].append(-yy)

                # img = preprocess.preprocess(img)
                # imgs[p].append(img)
                # wheels[p].append(yy)
            # print "wheels"
            # print wheels
            # print imgs[p][0]
            assert len(imgs[p]) == len(wheels[p])
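
scale_down is another unshown helper. From the call site, it appears to clamp the raw wheel reading to [-input_max, input_max] and rescale it to [-scale, scale], so that the final division by params.scale leaves a value in [-1, 1]. A hedged sketch:

def scale_down(value, input_max, scale):
    # assumed: clamp to [-input_max, input_max], then rescale to [-scale, scale]
    value = max(-input_max, min(input_max, value))
    return value * scale / float(input_max)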