Code example #1
    # Requires: import os, cv2, and the project's EZSiftImageMatcher.
    def test_list_flattening(self):
        # Register two greyscale reference logos, then match a third image against them.
        ezsift_matcher = EZSiftImageMatcher()

        logo_1 = "example.png"
        image = cv2.imread(os.path.abspath(logo_1))
        grey_scale_image_1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ezsift_matcher.add_reference_image(logo_1, grey_scale_image_1)

        logo_2 = "logo2.png"
        image = cv2.imread(os.path.abspath(logo_2))
        grey_scale_image_2 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ezsift_matcher.add_reference_image(logo_2, grey_scale_image_2)

        real_photo = "index.png"
        image = cv2.imread(os.path.abspath(real_photo))
        grey_scale_image_3 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        print(ezsift_matcher.match(grey_scale_image_3))
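
For context, this is the interface the test relies on. EZSiftImageMatcher appears to be the project's own wrapper around ezSIFT; the sketch below lists only the methods that actually occur in these examples, with placeholder bodies rather than the real implementation:

class EZSiftImageMatcher(object):
    """Interface assumed by the examples; method bodies are placeholders."""

    def add_reference_image(self, key, grey_image):
        """Register a greyscale reference image under the given key."""
        raise NotImplementedError

    def match(self, grey_image):
        """Match a greyscale query image against all registered references and
        return a result object exposing get_match_coord_lst(key)."""
        raise NotImplementedError

    def get_reference_image_confusion_matrix(self):
        """Return rows describing how strongly the references match each other."""
        raise NotImplementedError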
Code example #2
        # ... tail of the capture loop: advance to the next reference angle.
        current += 1
        if current >= len(angles_to_capture):
            cap = False
        time.sleep(0.1)


# Print the confusion matrix of the registered reference images, one row per reference.
for row in ezsift_matcher.get_reference_image_confusion_matrix():
    print(row)

while True:
    # Grab a frame and convert it to a single-channel greyscale image for matching.
    gray = vidgrab.grab_frame_return_grey()
    grey_scale_image = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)

    matching_result = ezsift_matcher.match(grey_scale_image)

    angle = []         # per-angle match counts: [angle, number of matches]
    angle_average = 0  # sum of angle * match count, for a weighted average
    lensall = 0        # total number of matches across all reference angles
    for logo_key in angles_to_capture:
        coords_1 = matching_result.get_match_coord_lst(str(logo_key))
        c = next(color_cycle)
        angle_average += int(logo_key) * len(coords_1)
        lensall += len(coords_1)
        angle.append([int(logo_key), len(coords_1)])
        # Mark every match in the frame with three concentric circles in this angle's colour.
        for e in coords_1:
            cv2.circle(gray, (e[2], e[3]), 1, c)
            cv2.circle(gray, (e[2], e[3]), 2, c)
            cv2.circle(gray, (e[2], e[3]), 3, c)

    if lensall < 10:
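
The loop accumulates angle_average and lensall so that a match-count-weighted average of the reference angles can be taken, and the snippet cuts off at the lensall < 10 guard. A minimal, self-contained sketch of that estimate (a hypothetical helper, not part of the original script) could look like this:

def estimate_angle(match_counts):
    """match_counts: list of [angle, number_of_matches] pairs, as built in `angle` above.

    Returns the match-count-weighted average angle, or None when there are
    too few matches to trust the estimate (mirroring the lensall < 10 guard)."""
    total = sum(count for _, count in match_counts)
    if total < 10:
        return None
    return sum(a * count for a, count in match_counts) / float(total)


# Example: 12 matches at 0 degrees and 4 matches at 30 degrees give roughly 7.5 degrees.
print(estimate_angle([[0, 12], [30, 4]]))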
Code example #3
logo_2 = "feld.png"
# flatten=True loads the image directly as greyscale, so no cv2.cvtColor step is needed.
# Note: scipy.misc.imread and scipy.misc.imresize were removed in SciPy 1.2+.
image = misc.imread(logo_2, flatten=True)
ezsift_matcher.add_reference_image(logo_2, image)


while True:
    gray, color = video_grabber.grab_frame_return_grey()
    gray = scipy.misc.imresize(gray, 1.0)  # factor 1.0 keeps the original frame size

    matching_result = ezsift_matcher.match(gray)

    coords_1 = matching_result.get_match_coord_lst(logo_1)
    coords_2 = matching_result.get_match_coord_lst(logo_2)

    print(coords_1)
    print(coords_2)

    # Draw the matches onto the colour frame: red squares for logo_1, green for logo_2.
    img = Image.fromarray(color)
    draw = ImageDraw.Draw(img)
    c = next(color_cycle)  # unused below; the rectangles use fixed colours
    for e in coords_1:
        draw.rectangle((e[2] - 2, e[3] - 2, e[2] + 2, e[3] + 2), fill=(255, 0, 0))

    for e in coords_2:
        draw.rectangle((e[2] - 2, e[3] - 2, e[2] + 2, e[3] + 2), fill=(0, 255, 0))
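
The loop draws onto the PIL image but never shows or saves it. A hedged sketch of one way to finish each iteration, assuming numpy is imported as np, cv2 is available for display, and the colour frame is in the RGB order PIL expects (window name and exit key are illustrative choices, not from the original):

    # Convert the annotated PIL image back to a NumPy array and hand it to OpenCV,
    # which expects BGR channel order, then show it and let Esc end the preview loop.
    annotated = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    cv2.imshow("matches", annotated)
    if cv2.waitKey(1) & 0xFF == 27:
        break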