def test_list_flattening(self):
        # Build a matcher with two reference logos, then match a third image against them.
        ezsift_matcher = EZSiftImageMatcher()

        logo_1 = "example.png"
        image = cv2.imread(os.path.abspath(logo_1))
        grey_scale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        grey_scale_image_1 = np.array(grey_scale_image)
        ezsift_matcher.add_reference_image(logo_1, grey_scale_image_1)

        logo_2 = "logo2.png"
        image = cv2.imread(os.path.abspath(logo_2))
        grey_scale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        grey_scale_image_2 = np.array(grey_scale_image)
        ezsift_matcher.add_reference_image(logo_2, grey_scale_image_2)

        real_photo = "index.png"
        image = cv2.imread(os.path.abspath(real_photo))
        grey_scale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        grey_scale_image_3 = np.array(grey_scale_image)

        print ezsift_matcher.match(grey_scale_image_3)
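
# The three blocks above repeat the same load -> greyscale -> register steps; a
# small helper like this (hypothetical, not part of the original test) would
# factor out that duplication:
def load_greyscale(path):
    # Read an image from disk and return it as a single-channel grey array.
    image = cv2.imread(os.path.abspath(path))
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)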
from embedding_data import StudyImageMDSVisualizer2D
import itertools

import cv2
import numpy as np
import matplotlib.pyplot as plt
# EZSiftImageMatcher is used below and is assumed to be importable from the
# project; its module is not shown here.

# Colour cycle for plotting; the original listed green twice, which looks like a
# typo, so the third entry is assumed to be blue.
color_cycle = itertools.cycle([[255, 0, 0], [0, 255, 0], [0, 0, 255]])

ezsift_matcher = EZSiftImageMatcher()

num_images = 100

# Register every image on disk as a reference, keyed by its index.
for i in range(num_images):
    path = "./img/image-{}.png".format(i)
    print path
    img1 = cv2.imread(path)
    g1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    ezsift_matcher.add_reference_image(str(i), g1)


conf_matrix = ezsift_matcher.get_reference_image_confusion_matrix()

np_conf_mat = np.array(conf_matrix)


# Mirror the upper triangle into the lower triangle so the plotted matrix is
# symmetric.
for i in range(num_images):
    for j in range(num_images):
        if i > j:
            np_conf_mat[i][j] = np_conf_mat[j][i]
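
# Equivalent vectorised form (a sketch, assuming the upper triangle holds the
# valid scores):
#     np_conf_mat = np.triu(np_conf_mat) + np.triu(np_conf_mat, 1).T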

plt.figure(0)
c = plt.imshow(np_conf_mat, interpolation="none")
plt.colorbar(c)
plt.show()  # assumed: render the confusion-matrix figure before continuing
# Capture loop: press 'c' to store the current frame as a reference for the next
# angle in angles_to_capture, Esc to quit. vidgrab, angles_to_capture and current
# are assumed to be defined earlier in the original file.
cap = True
while cap:

    gray = vidgrab.grab_frame_return_grey()
    grey_scale_image = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    grey_scale_image = np.array(grey_scale_image)

    cv2.imshow('frame', grey_scale_image)

    k = cv2.waitKey(33) & 0xFF
    if k == 27:    # Esc key to stop
        break
    elif k == -1:  # normally -1 returned,so don't print it
        continue
    elif k == ord('c'):
        ezsift_matcher.add_reference_image(str(angles_to_capture[current]), grey_scale_image)
        print "Reference Added", angles_to_capture[current]
        current += 1
        if current >= len(angles_to_capture):
            cap = False
        time.sleep(0.1)


# Print the confusion matrix between the captured reference images.
for row in ezsift_matcher.get_reference_image_confusion_matrix():
    print row

# Continuously grab frames from the video source.
while True:

    gray = vidgrab.grab_frame_return_grey()
    grey_scale_image = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    grey_scale_image = np.array(grey_scale_image)
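
    # A sketch of how this loop body might continue, following the calls used
    # earlier in the file (hypothetical; the original body is not shown):
    print ezsift_matcher.match(grey_scale_image)
    if cv2.waitKey(33) & 0xFF == 27:  # Esc key to stop
        break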
import itertools
import os

import cv2
import scipy.misc
from scipy import misc  # note: scipy.misc.imread/imresize were removed in newer SciPy releases
import matplotlib.pyplot as plt
# ImageFromFileGrabber and EZSiftImageMatcher are assumed to be importable from
# the project; their modules are not shown here.

color_cycle = itertools.cycle([[255, 0, 0], [0, 255, 0]])


video_grabber = ImageFromFileGrabber(os.path.abspath("data/"))


ezsift_matcher = EZSiftImageMatcher()


logo_1 = "left.png"
image = misc.imread(logo_1, flatten=True) #cv2.imread(os.path.abspath(logo_1))
import matplotlib.pyplot as plt
#grey_scale_image1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#grey_scale_image1 = np.array(grey_scale_image1)
ezsift_matcher.add_reference_image(logo_1, image)

logo_2 = "feld.png"
image = misc.imread(logo_2, flatten=True) #cv2.imread(os.path.abspath(logo_2))
#grey_scale_image2 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#grey_scale_image2 = np.array(grey_scale_image2)
ezsift_matcher.add_reference_image(logo_2, image)




# Grab frames from the file-backed grabber.
while True:
    gray, color = video_grabber.grab_frame_return_grey()
    gray = scipy.misc.imresize(gray, 1.0)    #grey_scale_image = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    #grey_scale_image = np.array(grey_scale_image)
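
    # A sketch of how this loop body might continue (hypothetical; the original
    # is cut off here): match the frame, show it, and quit on Esc.
    print ezsift_matcher.match(gray)
    cv2.imshow('frame', gray)
    if cv2.waitKey(33) & 0xFF == 27:
        break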