Example 1
def dvs_ucf101_exp(ucf101_dir, ucf101_stats_path, recording_save_path,
                   viewer_id, screen_height, screen_width, work_win_scale,
                   bg_color, fps):
    """Setup an experiment for UCF-101 dataset.

    Parameters
    ----------
    ucf101_dir : string
        absolute path of UCF-101 dataset
        e.g. /home/user/UCF-101
    ucf101_stats_path : string
        path to UCF-101 dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    """
    # create the data folder if it does not exist
    if not os.path.exists(recording_save_path):
        os.mkdir(recording_save_path)
    # Load UCF-101 stats (open in binary mode for pickle)
    f = open(ucf101_stats_path, "rb")
    ucf101_stats = pickle.load(f)
    f.close()

    ucf101_list = ucf101_stats["ucf101_list"]

    # Video reading helper
    read_video = helpers.read_video

    # Create full background
    background = (np.ones(
        (screen_height, screen_width, 3)) * bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-UCF101-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Main routine
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for class_name in ucf101_list:
        class_path = os.path.join(recording_save_path, class_name)
        if not os.path.exists(class_path):
            os.mkdir(class_path)
        for video_name in ucf101_stats[class_name]:
            video_path = str(os.path.join(ucf101_dir, class_name, video_name))
            print video_path

            frames, num_frames = read_video(video_path)
            new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                    bg_color)

            new_frames = gui.create_border_sequence(new_frames, screen_height,
                                                    screen_width, bg_color)

            cv2.imshow(window_title, new_frames[0])
            print "[MESSAGE] Adapting video sequence %s" % str(video_name)
            cv2.waitKey(delay=2000)
            tools.start_log_dvs(s, recording_save_path,
                                str(class_name + "/" + video_name[:-4]),
                                viewer_id)
            for i in xrange(num_frames):
                cv2.imshow(window_title, new_frames[i])
                key = cv2.waitKey(delay=int(1000 / fps)) & 0xFF
                if key == 27:
                    cv2.destroyAllWindows()
                    quit()

            cv2.imshow(window_title, new_frames[-1])
            tools.stop_log_dvs(s, viewer_id)
            print "[MESSAGE] Releasing video sequence %s" % str(video_name)
            cv2.waitKey(delay=2000)
            cv2.imshow(window_title, background)
            cv2.waitKey(delay=1000)
            tools.reset_dvs_time(s)
            print "[MESSAGE] Video sequence %s is logged." % str(video_name)

    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
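A minimal calling sketch for dvs_ucf101_exp; every path and display value below is a hypothetical placeholder, not something taken from the original project.

# Hypothetical invocation; adjust paths and display parameters to your setup.
dvs_ucf101_exp(ucf101_dir="/home/user/UCF-101",
               ucf101_stats_path="/home/user/stats/ucf101_stats.pkl",
               recording_save_path="/home/user/recordings/ucf101/",
               viewer_id=1,               # 1 = jAER viewer on Linux
               screen_height=1080,
               screen_width=1920,
               work_win_scale=0.9,
               bg_color=[127, 127, 127],  # mid-gray background
               fps=30)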
Example 2
cv2.waitKey(delay=3000)

# s = tools.init_dvs()
# tools.reset_dvs_time(s)
# tools.start_log_dvs(s, "/home/inilab/data/",
#                     "led_test",
#                     1)

print "[MESSAGE] Displaying from gray to white"
frames = []
box_height = 100
box_width = 100
for color in xrange(127, 256, 8):
    frame = np.ones((box_height, box_width, 3), dtype=np.uint8) * color
    frames.append(frame)
frames = gui.create_border_sequence(frames, screen_height, screen_width,
                                    bg_color)
for frame in frames:
    cv2.imshow(window_title, background)
    cv2.waitKey(delay=int(1000 / fps) * 3)
    cv2.imshow(window_title, frame)
    key = cv2.waitKey(delay=int(1000 / fps))
    if key == 27:
        break

print "[MESSAGE] Releasing"
cv2.imshow(window_title, background)
cv2.waitKey(delay=3000)

# from gray to black

print "[MESSAGE] Displaying from gray to black"
Example 3
def dvs_vot_exp(vot_dir, vot_stats_path, recording_save_path, viewer_id,
                screen_height, screen_width, work_win_scale, bg_color, fps):
    """Setup an experiment for VOT dataset.

    Parameters
    ----------
    vot_dir : string
        absolute path of VOT dataset
        e.g. /home/user/vot2015
    vot_stats_path : string
        path to vot dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    """
    if not os.path.exists(str(recording_save_path)):
        os.mkdir(str(recording_save_path))
    # Load VOT stats (open in binary mode for pickle)
    f = open(vot_stats_path, "rb")
    vot_stats = pickle.load(f)
    f.close()

    vot_list = vot_stats['vot_list']
    num_frames = vot_stats['num_frames']

    # Load groundtruth and image lists
    print "[MESSAGE] Loading image lists."
    lists = []
    for i in xrange(len(num_frames)):
        list_path = os.path.join(vot_dir, vot_list[i])
        temp_list = tools.create_vot_image_list(list_path, num_frames[i])
        lists.append(temp_list)
    print "[MESSAGE] Ground truths and image lists are loaded."

    # Create full background
    background = (np.ones(
        (screen_height, screen_width, 3)) * bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-VOT-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Init a general UDP socket
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for k in xrange(len(num_frames)):
        print "[MESSAGE] Display video sequence %i" % (k + 1)
        frames = []
        for i in xrange(num_frames[k]):
            frames.append(cv2.imread(lists[k][i]))

        new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                bg_color)
        new_frames = gui.create_border_sequence(new_frames, screen_height,
                                                screen_width, bg_color)
        cv2.imshow(window_title, new_frames[0])
        print "[MESSAGE] Adapting video sequence %i" % (k + 1)
        cv2.waitKey(delay=2000)
        tools.start_log_dvs(s, recording_save_path, vot_list[k], viewer_id)
        for i in xrange(num_frames[k]):
            cv2.imshow(window_title, new_frames[i])
            key = cv2.waitKey(delay=int(1000 / fps)) & 0xFF
            if key == 27:
                cv2.destroyAllWindows()
                quit()

        cv2.imshow(window_title, new_frames[-1])
        tools.stop_log_dvs(s, viewer_id)
        print "[MESSAGE] Releasing video sequence %i" % (k + 1)
        cv2.waitKey(delay=2000)
        cv2.imshow(window_title, background)
        cv2.waitKey(delay=1000)
        tools.reset_dvs_time(s)
        print "[MESSAGE] Video sequence %i is logged." % (k + 1)
    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
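The fps docstring note means the requested rate is only approximated: the display loop hands cv2.waitKey a whole-millisecond delay of int(1000 / fps), so the effective rate is whatever that truncated delay yields. A small standalone illustration of the arithmetic (not project code):

# Frame-pacing arithmetic used by the display loops: the delay is
# truncated to whole milliseconds, so the effective rate can differ
# slightly from the requested fps (rendering time not included).
def effective_fps(requested_fps):
    delay_ms = int(1000 / requested_fps)  # delay passed to cv2.waitKey
    return 1000.0 / delay_ms

print(effective_fps(30))  # 33 ms delay -> ~30.3 fps
print(effective_fps(60))  # 16 ms delay -> ~62.5 fps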
Example 4
def dvs_ucf50_exp(caltech256_dir, caltech256_stats_path, recording_save_path,
                  viewer_id, screen_height, screen_width, saccade_size,
                  work_win_scale, bg_color, fps, start_class):
    """Setup an experiment for Caltech-256 dataset.

    Parameters
    ----------
    caltech256_dir : string
        absolute path of Caltech-256 dataset
        e.g. /home/user/Caltech-256
    caltech256_stats_path : string
        path to Caltech-256 dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    saccade_size : int
        the step length of each saccade
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    start_class : int
        1-based index of the class to start from
    """
    # create the data folder if it does not exist
    if not os.path.exists(recording_save_path):
        os.mkdir(recording_save_path)
    # Load Caltech-256 stats (open in binary mode for pickle)
    f = open(caltech256_stats_path, "rb")
    caltech256_stats = pickle.load(f)
    f.close()

    caltech256_list = caltech256_stats["caltech256_list"]
    caltech256_list = caltech256_list[(start_class - 1):]

    # Create full background

    background = (np.ones(
        (screen_height, screen_width, 3)) * bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-CALTECH-256-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Main routine
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for class_name in caltech256_list:
        class_path = os.path.join(recording_save_path, class_name)
        if not os.path.exists(class_path):
            os.mkdir(class_path)
        for img_name in caltech256_stats[class_name]:
            img_path = os.path.join(caltech256_dir, class_name, img_name)
            img_n, img_ex = os.path.splitext(img_name)

            frames, num_frames = gui.gen_image_frames(img_path, fps, 1)
            frames = gui.create_saccade_sequence(frames, saccade_size,
                                                 bg_color)

            new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                    bg_color)
            new_frames = gui.create_border_sequence(new_frames, screen_height,
                                                    screen_width, bg_color)

            cv2.imshow(window_title, new_frames[0])
            print "[MESSAGE] Adapting image %s" % str(img_n)
            cv2.waitKey(delay=1000)
            tools.start_log_dvs(s, recording_save_path,
                                str(class_name + "/" + img_n), viewer_id)
            for frame in new_frames:
                cv2.imshow(window_title, frame)
                key = cv2.waitKey(delay=int(1000 / fps)) & 0xFF
                if key == 27:
                    cv2.destroyAllWindows()
                    quit()

            cv2.imshow(window_title, new_frames[-1])
            tools.stop_log_dvs(s, viewer_id)
            print "[MESSAGE] Releasing image %s" % str(img_n)
            cv2.waitKey(delay=1000)
            cv2.imshow(window_title, background)
            cv2.waitKey(delay=1000)
            tools.reset_dvs_time(s)
            print "[MESSAGE] Image %s is logged." % str(img_n)

    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
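gui.create_saccade_sequence is not shown here; judging by its arguments it shifts each frame by saccade_size pixels to emulate small eye movements. The sketch below is only an assumption about that behavior, not the helper's actual implementation.

import numpy as np

def saccade_sequence_sketch(frames, saccade_size):
    """Rough stand-in for gui.create_saccade_sequence: shift frames
    alternately left and right by saccade_size pixels. The real helper
    may pad with bg_color and follow a different trajectory."""
    shifted = []
    for i, frame in enumerate(frames):
        offset = saccade_size if i % 2 == 0 else -saccade_size
        shifted.append(np.roll(frame, offset, axis=1))
    return shifted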
Example 5
def dvs_lipreading_exp(lipreading_dir,
                       lipreading_stats_path,
                       recording_save_path,
                       viewer_id,
                       screen_height,
                       screen_width,
                       work_win_scale,
                       bg_color,
                       fps):
    """Setup an experiment for lipreading dataset.

    Parameters
    ----------
    lipreading_dir : string
        absolute path of lipreading dataset
        e.g. /home/user/lipreading
    lipreading_stats_path : string
        path to lipreading dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    """
    # create the data folder if it does not exist
    if not os.path.exists(recording_save_path):
        os.mkdir(recording_save_path)
    # Load lipreading stats (open in binary mode for pickle)
    f = open(lipreading_stats_path, "rb")
    lipreading_stats = pickle.load(f)
    f.close()

    lipreading_list = lipreading_stats["lipreading_list"]

    # Video reading helper
    read_video = helpers.read_video

    # Create full background
    background = (np.ones((screen_height,
                           screen_width, 3))*bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-LIPREADING-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Main routine
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for class_name in lipreading_list:
        class_path = os.path.join(recording_save_path, class_name)
        if not os.path.exists(class_path):
            os.mkdir(class_path)
        for video_name in lipreading_stats[class_name]:
            video_path = str(os.path.join(lipreading_dir, class_name,
                                          video_name))

            frames, num_frames = read_video(video_path)
            new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                    bg_color)
            new_frames = gui.create_border_sequence(new_frames, screen_height,
                                                    screen_width, bg_color)

            cv2.imshow(window_title, new_frames[0])
            print "[MESSAGE] Adapting video sequence %s" % str(video_name)
            cv2.waitKey(delay=2000)
            tools.start_log_dvs(s, recording_save_path,
                                str(class_name+"/"+video_name[:-4]),
                                viewer_id)
            for i in xrange(num_frames):
                cv2.imshow(window_title, new_frames[i])
                key = cv2.waitKey(delay=int(1000/fps)) & 0xFF
                if key == 27:
                    cv2.destroyAllWindows()
                    quit()

            cv2.imshow(window_title, new_frames[-1])
            tools.stop_log_dvs(s, viewer_id)
            print "[MESSAGE] Releasing video sequence %s" % str(video_name)
            cv2.waitKey(delay=2000)
            cv2.imshow(window_title, background)
            cv2.waitKey(delay=1000)
            tools.reset_dvs_time(s)
            print "[MESSAGE] Video sequence %s is logged." % str(video_name)

    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
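helpers.read_video is referenced but not defined in these snippets; given how its return values are used (a list of frames plus a frame count), a minimal OpenCV reader with the same contract might look like the sketch below. This is an assumption about the helper's behavior, not its actual code.

import cv2

def read_video_sketch(video_path):
    """Return (frames, num_frames), matching how read_video is used above."""
    cap = cv2.VideoCapture(video_path)
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()
    return frames, len(frames)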
Example 6
def dvs_ucf50_exp(caltech256_dir,
                  caltech256_stats_path,
                  recording_save_path,
                  viewer_id,
                  screen_height,
                  screen_width,
                  saccade_size,
                  work_win_scale,
                  bg_color,
                  fps,
                  start_class):
    """Setup an experiment for Caltech-256 dataset.

    Parameters
    ----------
    caltech256_dir : string
        absolute path of Caltech-256 dataset
        e.g. /home/user/Caltech-256
    caltech256_stats_path : string
        path to Caltech-256 dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    saccade_size : int
        the step length of each saccade
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    start_class : int
        1-based index of the class to start from
    """
    # create the data folder if it does not exist
    if not os.path.exists(recording_save_path):
        os.mkdir(recording_save_path)
    # Load Caltech-256 stats (open in binary mode for pickle)
    f = open(caltech256_stats_path, "rb")
    caltech256_stats = pickle.load(f)
    f.close()

    caltech256_list = caltech256_stats["caltech256_list"]
    caltech256_list = caltech256_list[(start_class-1):]

    # Create full background (uint8 so cv2.imshow shows 0-255 values)
    background = (np.ones(
        (screen_height, screen_width, 3)) * bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-CALTECH-256-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Main routine
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for class_name in caltech256_list:
        class_path = os.path.join(recording_save_path, class_name)
        if not os.path.exists(class_path):
            os.mkdir(class_path)
        for img_name in caltech256_stats[class_name]:
            img_path = os.path.join(caltech256_dir, class_name, img_name)
            img_n, img_ex = os.path.splitext(img_name)

            frames, num_frames = gui.gen_image_frames(img_path, fps, 1)
            frames = gui.create_saccade_sequence(frames, saccade_size,
                                                 bg_color)

            new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                    bg_color)
            new_frames = gui.create_border_sequence(new_frames, screen_height,
                                                    screen_width, bg_color)

            cv2.imshow(window_title, new_frames[0])
            print "[MESSAGE] Adapting image %s" % str(img_n)
            cv2.waitKey(delay=1000)
            tools.start_log_dvs(s, recording_save_path,
                                str(class_name+"/"+img_n),
                                viewer_id)
            for frame in new_frames:
                cv2.imshow(window_title, frame)
                key = cv2.waitKey(delay=int(1000/fps)) & 0xFF
                if key == 27:
                    cv2.destroyAllWindows()
                    quit()

            cv2.imshow(window_title, new_frames[-1])
            tools.stop_log_dvs(s, viewer_id)
            print "[MESSAGE] Releasing image %s" % str(img_n)
            cv2.waitKey(delay=1000)
            cv2.imshow(window_title, background)
            cv2.waitKey(delay=1000)
            tools.reset_dvs_time(s)
            print "[MESSAGE] Image %s is logged." % str(img_n)

    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
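Because the class list is sliced with start_class - 1, an interrupted run can be resumed from a later class. A hypothetical resume call (all paths and display values are placeholders):

# Hypothetical resume call: start_class=42 skips the first 41 classes.
dvs_ucf50_exp(caltech256_dir="/home/user/Caltech-256/",
              caltech256_stats_path="/home/user/stats/caltech256_stats.pkl",
              recording_save_path="/home/user/recordings/caltech256/",
              viewer_id=1,
              screen_height=1080,
              screen_width=1920,
              saccade_size=3,
              work_win_scale=0.9,
              bg_color=[127, 127, 127],
              fps=30,
              start_class=42)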
Example 7
def dvs_ucf101_exp(ucf101_dir,
                   ucf101_stats_path,
                   recording_save_path,
                   viewer_id,
                   screen_height,
                   screen_width,
                   work_win_scale,
                   bg_color,
                   fps):
    """Setup an experiment for UCF-101 dataset.

    Parameters
    ----------
    ucf101_dir : string
        absolute path of UCF-101 dataset
        e.g. /home/user/UCF-101
    ucf101_stats_path : string
        path to UCF-101 dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    """
    ucf101_dir = tools.check_path(ucf101_dir)  # fix path if it's not valid

    # Load UCF-101 stats (open in binary mode for pickle)
    f = open(ucf101_stats_path, "rb")
    ucf101_stats = pickle.load(f)
    f.close()

    ucf101_list = ucf101_stats["ucf101_list"]

    # Set read video function based on platform
    if _platform == "darwin":
        read_video = helpers.read_video_macosx
    elif _platform == "linux2":
        # NOTE: the Linux branch currently reuses the macOS reader
        read_video = helpers.read_video_macosx

    # Create full background (uint8 so cv2.imshow shows 0-255 values)
    background = (np.ones(
        (screen_height, screen_width, 3)) * bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-UCF101-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Main routine
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for class_name in ucf101_list:
        if not os.path.exists(str(recording_save_path+class_name+"/")):
            os.mkdir(str(recording_save_path+class_name+"/"))
        for video_name in ucf101_stats[class_name]:
            video_path = str(ucf101_dir+class_name+"/"+video_name)

            frames, num_frames = read_video(video_path)
            new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                    bg_color)

            new_frames = gui.create_border_sequence(new_frames, screen_height,
                                                    screen_width, bg_color)

            cv2.imshow(window_title, new_frames[0])
            print "[MESSAGE] Adapting video sequence %s" % str(video_name)
            cv2.waitKey(delay=2000)
            tools.start_log_dvs(s, recording_save_path,
                                str(class_name+"/"+video_name[:-4]),
                                viewer_id)
            for i in xrange(num_frames):
                cv2.imshow(window_title, new_frames[i])
                key = cv2.waitKey(delay=int(1000/fps)) & 0xFF
                if key == 27:
                    cv2.destroyAllWindows()
                    quit()

            cv2.imshow(window_title, new_frames[-1])
            tools.stop_log_dvs(s, viewer_id)
            print "[MESSAGE] Releasing video sequence %s" % str(video_name)
            cv2.waitKey(delay=2000)
            cv2.imshow(window_title, background)
            cv2.waitKey(delay=1000)
            tools.reset_dvs_time(s)
            print "[MESSAGE] Video sequence %s is logged." % str(video_name)

    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
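tools.check_path is described only as "fix path if it's not valid"; since the function later builds paths by plain string concatenation (for example recording_save_path + class_name + "/"), it presumably guarantees a trailing separator. A sketch of such a normalizer, as an assumption about the helper rather than its real code:

import os

def check_path_sketch(path):
    """Assumed behavior: ensure a directory path ends with a separator
    so that plain string concatenation produces valid paths."""
    if not path.endswith(os.sep):
        path += os.sep
    return path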
Example 8
f.close()

caltech256_list = caltech256_stats["caltech256_list"]
class_name = caltech256_list[0]

cv2.namedWindow("test", cv2.WND_PROP_FULLSCREEN)
swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                     win_w=screen_width,
                                     scale=0.9, window_title="test",
                                     bg_color=bg_color)


for image_name in caltech256_stats[class_name]:
    print "[MESSAGE] Loading image: "+class_name+"/"+image_name

    img_path = os.path.join(base_dir, class_name, image_name)

    frames, num_frames = gui.gen_image_frames(img_path, fps, 1)
    frames = gui.create_saccade_sequence(frames, 3, bg_color)

    new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                            bg_color)
    new_frames = gui.create_border_sequence(new_frames, screen_height,
                                            screen_width, bg_color)

    for frame in new_frames:
        cv2.imshow("test", frame)
        key = cv2.waitKey(delay=30)

cv2.destroyAllWindows()
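In the preview loop above the key code is read but never checked, so there is no way to abort early. A small variation of the inner loop with ESC handling (key code 27), mirroring the logging experiments above:

# Variation of the inner display loop that lets ESC abort the preview.
for frame in new_frames:
    cv2.imshow("test", frame)
    key = cv2.waitKey(delay=30) & 0xFF
    if key == 27:
        break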
Example 9
def dvs_vot_exp(tracking_dir, tracking_stats_path, recording_save_path,
                viewer_id, screen_height, screen_width, work_win_scale,
                bg_color, fps):
    """Setup an experiment for VOT dataset.

    Parameters
    ----------
    tracking_dir : string
        absolute path of Tracking dataset
        e.g. /home/user/vot2015
    tracking_stats_path : string
        path to tracking dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    """
    tracking_dir = tools.check_path(tracking_dir)  # fix path if it's not valid
    recording_save_path = tools.check_path(str(recording_save_path))
    if not os.path.exists(str(recording_save_path)):
        os.mkdir(str(recording_save_path))
    # Load tracking dataset stats (open in binary mode for pickle)
    f = open(tracking_stats_path, "rb")
    tracking_stats = pickle.load(f)
    f.close()

    # primary list
    pl = tracking_stats["primary_list"]
    # secondary list
    sl = tracking_stats["secondary_list"]

    # Create full background
    background = (np.ones(
        (screen_height, screen_width, 3)) * bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-TRACKING-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Init a general UDP socket
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for pcg in pl:
        # skip the Kalal sequences until more memory is available
        if pcg != "Kalal":
            for scg in sl[pcg]:
                print "[MESSAGE] Display video sequence " + scg
                seq_base_path = tracking_dir + pcg + "/" + scg + "/"
                frames = []
                for fn in tracking_stats[scg]:
                    frames.append(cv2.imread(seq_base_path + fn))

                frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                    bg_color)
                frames = gui.create_border_sequence(frames, screen_height,
                                                    screen_width, bg_color)
                cv2.imshow(window_title, frames[0])
                print "[MESSAGE] Adapting video sequence " + scg
                cv2.waitKey(delay=2000)
                tools.start_log_dvs(s, recording_save_path, scg, viewer_id)
                for frame in frames:
                    cv2.imshow(window_title, frame)
                    key = cv2.waitKey(delay=int(1000 / fps)) & 0xFF
                    if key == 27:
                        cv2.destroyAllWindows()
                        quit()

                cv2.imshow(window_title, frames[-1])
                tools.stop_log_dvs(s, viewer_id)
                print "[MESSAGE] Releasing video sequence " + scg
                cv2.waitKey(delay=2000)
                cv2.imshow(window_title, background)
                cv2.waitKey(delay=1000)
                tools.reset_dvs_time(s)
                print "[MESSAGE] Video sequence " + scg + " is logged."
    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
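The tracking_stats dictionary is indexed three ways above: a primary category list, a mapping from each primary category to its sequences, and a per-sequence list of frame filenames. A hypothetical layout consistent with that usage (illustrative only; the real pickle may hold more keys, and only "Kalal" is a name taken from the snippet):

tracking_stats = {
    "primary_list": ["SomeGroup", "Kalal"],
    "secondary_list": {
        "SomeGroup": ["seq_a", "seq_b"],
        "Kalal": ["seq_c"],
    },
    "seq_a": ["0001.jpg", "0002.jpg", "0003.jpg"],
    "seq_b": ["0001.jpg", "0002.jpg"],
    "seq_c": ["0001.jpg"],
}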
Example 10
def dvs_vot_exp(vot_dir,
                vot_stats_path,
                recording_save_path,
                viewer_id,
                screen_height,
                screen_width,
                work_win_scale,
                bg_color,
                fps):
    """Setup an experiment for VOT dataset.

    Parameters
    ----------
    vot_dir : string
        absolute path of VOT dataset
        e.g. /home/user/vot2015
    vot_stats_path : string
        path to vot dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        ID of the jAER viewer (1 on Linux, 2 on Mac OS X)
    screen_height : int
        height of the screen in pixels
    screen_width : int
        width of the screen in pixels
    work_win_scale : float
        scaling factor that determines the working window size
    bg_color : list
        background color definition
    fps : int
        frames per second while displaying the video;
        the rate is rounded to the closest achievable value
    """
    vot_dir = tools.check_path(vot_dir)  # fix path if it's not valid
    recording_save_path = tools.check_path(str(recording_save_path))
    if not os.path.exists(str(recording_save_path)):
        os.mkdir(str(recording_save_path))
    # Load VOT stats (open in binary mode for pickle)
    f = open(vot_stats_path, "rb")
    vot_stats = pickle.load(f)
    f.close()

    vot_list = vot_stats['vot_list']
    num_frames = vot_stats['num_frames']

    # Load groundtruth and image lists
    print "[MESSAGE] Loading ground truth and image lists."
    gts = []
    for i in xrange(len(num_frames)):
        temp_gt = np.loadtxt(vot_dir+vot_list[i]+"/groundtruth.txt",
                             dtype=float, delimiter=",")
        temp_gt = np.reshape(temp_gt, (temp_gt.shape[0], 4, 2))
        gts.append(temp_gt)

    lists = []
    for i in xrange(len(num_frames)):
        temp_list = tools.create_vot_image_list(vot_dir+vot_list[i]+"/",
                                                num_frames[i])
        lists.append(temp_list)
    print "[MESSAGE] Ground truths and image lists are loaded."

    # Create full background (uint8 so cv2.imshow shows 0-255 values)
    background = (np.ones(
        (screen_height, screen_width, 3)) * bg_color).astype(np.uint8)

    # Setup OpenCV display window
    window_title = "DVS-VOT-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)

    # Experiment setup calibration
    # Note: without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)

    # Init a general UDP socket
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for k in xrange(len(num_frames)):
        print "[MESSAGE] Display video sequence %i" % (k+1)
        frames = []
        for i in xrange(num_frames[k]):
            frames.append(cv2.imread(lists[k][i]))

        new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                bg_color)
        # new_frames = gui.draw_poly_box_sequence(new_frames, gts[k])
        new_frames = gui.create_border_sequence(new_frames,
                                                screen_height, screen_width,
                                                bg_color)
        cv2.imshow(window_title, new_frames[0])
        print "[MESSAGE] Adapting video sequence %i" % (k+1)
        cv2.waitKey(delay=2000)
        tools.start_log_dvs(s, recording_save_path, vot_list[k], viewer_id)
        for i in xrange(num_frames[k]):
            cv2.imshow(window_title, new_frames[i])
            key = cv2.waitKey(delay=int(1000/fps)) & 0xFF
            if key == 27:
                cv2.destroyAllWindows()
                quit()

        cv2.imshow(window_title, new_frames[-1])
        tools.stop_log_dvs(s, viewer_id)
        print "[MESSAGE] Releasing video sequence %i" % (k+1)
        cv2.waitKey(delay=2000)
        cv2.imshow(window_title, background)
        cv2.waitKey(delay=1000)
        tools.reset_dvs_time(s)
        print "[MESSAGE] Video sequence %i is logged." % (k+1)
    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
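The ground-truth loader above reads groundtruth.txt with np.loadtxt and reshapes each row into a 4x2 polygon, i.e. every line holds eight comma-separated numbers forming four (x, y) corners. A tiny standalone sketch of that parsing, using a made-up example line:

import numpy as np

# One hypothetical groundtruth.txt row: x1,y1,x2,y2,x3,y3,x4,y4
line = "173.27,238.69,278.93,238.69,278.93,318.44,173.27,318.44"
row = np.array([float(v) for v in line.split(",")])
polygon = row.reshape(4, 2)  # four (x, y) corners, as in gts[k][frame_idx]
print(polygon)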