def run():
    match = Matcher()
    # Load the working image
    img = CVImage('/home/cesar/Documentos/Computer_Vision/01/image_0')
    img.read_image()
    img.copy_image()
    img.acquire()
    # Height and width of the current (new) image
    h = img.new_image.shape[0]
    w = img.new_image.shape[1]
    n = 2  # ROIs per image dimension (the image is split into an n x n grid)
    size = np.array([[w / n], [h / n]], np.int32)   # ROI size, column vector [width; height]
    start = np.array([[0], [0]], np.int32)          # start (top-left corner) of the first ROI

    # First ROI (top-left)
    correlate_roi(match, img, size, start)

    # Second ROI (top-right)
    start = np.array([[w / n], [0]], np.int32)
    correlate_roi(match, img, size, start)

    # Third ROI (bottom-left)
    start = np.array([[0], [h / n]], np.int32)
    correlate_roi(match, img, size, start)

    # Last ROI (bottom-right)
    start = np.array([[w / n], [h / n]], np.int32)
    correlate_roi(match, img, size, start)
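
    # A minimal sketch (hypothetical, not part of the pipeline above) of how
    # the four explicit calls could be generalised to any n x n grid. It
    # assumes correlate_roi keeps its (match, img, size, start) signature.
    def correlate_all_rois(matcher, image, grid_n, roi_size):
        for row in range(grid_n):
            for col in range(grid_n):
                roi_start = np.array([[col * roi_size[0, 0]],
                                      [row * roi_size[1, 0]]], np.int32)
                correlate_roi(matcher, image, roi_size, roi_start)
    # correlate_all_rois(match, img, n, size)  # would redo the work done above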

    # Each original keypoint has been stored twice in curr_kp and prev_kp, so
    # keep every other element to drop the duplicates
    match.curr_kp = match.curr_kp[::2]
    match.prev_kp = match.prev_kp[::2]

    # The same applies to the descriptors
    match.curr_dsc = match.curr_dsc[::2]
    match.prev_dsc = match.prev_dsc[::2]
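
    # Optional sanity check (commented out): the keypoint and descriptor lists
    # should stay the same length after dropping the duplicates.
    # assert len(match.curr_kp) == len(match.curr_dsc)
    # assert len(match.prev_kp) == len(match.prev_dsc)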

    # Print the first element of the current and global keypoint lists
    print match.curr_kp[0].pt
    print match.global_kpts1[0]
    # Print the total number of keypoints encountered
    print("Total number of keypoints encountered: {}".format(
        get_number_keypoints(match)))

    # Plotting helpers, kept commented out for testing
    # plot_same_figure(match, img)
    # plot_one(match, img)
    # plot_save(match, img)

    # Estimate the fundamental matrix. Note the argument order: global_kpts2
    # is passed first and global_kpts1 second (see the "swapping" comment
    # before the triangulation below).
    vo = VisualOdometry()
    print "Type of match.global_kpts1: ", type(match.global_kpts1)
    match.global_kpts1, match.global_kpts2 = \
        vo.EstimateF_multiprocessing(match.global_kpts2, match.global_kpts1)
    # plot_one(match, img)
    # plot_one_np(vo.outlier_points_new, img)
    # plot_together_np(match.global_kpts1, vo.outlier_points_new, img)
    print("Total number of keypoints encountered: \
          {}".format(get_number_keypoints(match)))

    # Triangulate. To recover the actual camera motion we "swap" the scene:
    # the first camera is cam1.P and its keypoints are global_kpts1, while the
    # second camera is cam2.P and its keypoints are global_kpts2.
    scene = get_structure(match, img, vo)
    print "ESCENA", scene[:, :20]
    print "PROYECCION EN SEGUNDA", vo.cam1.project(scene[:, :20])
    print "SEGUNDA", match.global_kpts1[:20]
    print "CORREGIDOS SEGUNDA", vo.correctedkpts1[:, :20]
    print "PROYECCION EN PRIMERA", vo.cam2.project(scene[:, :20])
    print "PRIMERA", match.global_kpts2[:20]
    print "CORREGIDOS EN PRIMERA", vo.correctedkpts2[:, :20]
    # Plot the triangulated 3-D structure
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot(scene[0], scene[1], scene[2], 'ko')
    plt.axis('equal')
    plt.show()