Example #1
0
def compute_rotation(contour1, contour2):
    """Find the rotation (and optional horizontal flip) that best aligns
    contour2 onto contour1.

    Both contours are rasterized onto a small binary raster; contour2 is
    rotated through ``num_angles`` evenly spaced angles, each scored by
    pixel overlap against contour1 and against a horizontally flipped
    contour1.

    Returns:
        (rot, flipped, rot_without_flipping):
            rot -- best rotation in radians when flipping is allowed,
            flipped -- True if the flipped variant scored best,
            rot_without_flipping -- best rotation with no flip allowed.
    """
    resolution = 50
    scale = 0.5
    val = 1  # set to 1 for use, 255 for debugging

    shape1 = contour_to_normalized_points(contour1, resolution, scale)
    shape2 = contour_to_normalized_points(contour2, resolution, scale)
    num_angles = 15
    img1 = np.zeros((resolution, resolution), np.uint8)
    cv2.fillPoly(img1, [shape1], val)
    img1_flipped = cv2.flip(img1, 1)
    show(img1)

    similarities = []
    transforms = []
    # range (not Python-2-only xrange) so this runs on both Python 2 and 3.
    for rotation in [x * math.pi * 2.0 / num_angles for x in range(num_angles)]:
        for flip in (False, True):
            transforms.append((rotation, flip))
    img2 = np.zeros((resolution, resolution), np.uint8)
    cv2.fillPoly(img2, [shape2], val)

    for rotation, flip in transforms:
        # // keeps the rotation center integral on Python 3 as well
        # (Python 2 int / int already floored).
        M = cv2.getRotationMatrix2D((resolution // 2, resolution // 2),
                                    rotation * 180.0 / math.pi, 1.0)
        img2_rotated = cv2.warpAffine(img2, M, (resolution, resolution))
        show(img2_rotated)
        overlap = np.multiply((img1_flipped if flip else img1), img2_rotated)
        # Normalize the overlap score by the raster area.
        similarities.append(np.sum(overlap) * 1.0 / (resolution ** 2))

    best_idx = max(range(len(similarities)), key=lambda i: similarities[i])
    # transforms alternates (rot, False), (rot, True), ... so the even
    # indices are exactly the non-flipped candidates.
    best_non_flipped_idx = max(range(0, len(similarities), 2),
                               key=lambda i: similarities[i])
    rot, flipped = transforms[best_idx]
    rot_without_flipping = transforms[best_non_flipped_idx][0]
    return rot, flipped, rot_without_flipping
Example #2
0
def photo_demo():
    """Run the processing pipeline once on a stored camera frame.

    Loads images/camera-view.png, applies the perspective correction and
    mirror flips, runs process_image, and publishes the result through the
    module-level IMAGE / OUTPUT globals.
    """
    # Capture frame-by-frame
    frame = cv2.imread("images/camera-view.png")
    if frame is None:
        # cv2.imread signals a missing/unreadable file by returning None,
        # which would otherwise crash obscurely inside cv2.resize.
        raise IOError("could not read images/camera-view.png")
    frame = cv2.resize(frame, IMAGE_SIZE)
    show(frame, 'image')
    # frame = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    frame = cv2.warpPerspective(frame, perspective_matrix, IMAGE_SIZE)
    # flip(1) followed by flip(-1) composes to a single vertical flip(0).
    frame = cv2.flip(frame, 0)
    show(frame, 'transformed')

    frame, output = process_image(frame)

    global IMAGE
    IMAGE = frame
    global OUTPUT
    OUTPUT = output
    if SHOW_SINGLE_CAPTURE_AND_DUMP:
        cv2.imshow('label', frame)
        cv2.waitKey(0)

    # cap.release()
    cv2.destroyAllWindows()
Example #3
0
def photo_demo():
    """Process a single saved camera frame and publish it via the
    IMAGE / OUTPUT module globals."""
    global IMAGE, OUTPUT

    # Capture frame-by-frame
    img = cv2.imread("images/camera-view.png")
    img = cv2.resize(img, IMAGE_SIZE)
    show(img, 'image')
    # img = cv2.undistort(img, mtx, dist, None, newcameramtx)
    img = cv2.warpPerspective(img, perspective_matrix, IMAGE_SIZE)
    img = cv2.flip(img, 1)
    img = cv2.flip(img, -1)
    show(img, 'transformed')

    img, result = process_image(img)

    IMAGE = img
    OUTPUT = result

    if SHOW_SINGLE_CAPTURE_AND_DUMP:
        cv2.imshow('label', img)
        cv2.waitKey(0)

    # cap.release()
    cv2.destroyAllWindows()
Example #4
0
def video_loop():
    """Continuously capture, transform and process frames from the camera.

    Publishes each processed frame through the IMAGE / OUTPUT module
    globals. Loops until SHOW_SINGLE_CAPTURE_AND_DUMP breaks out; the
    capture device is always released on exit.
    """
    global IMAGE, OUTPUT
    cap = cv2.VideoCapture(CAMERA_NUM)
    try:
        while True:
            # Capture frame-by-frame. The blanking toggles around the grab
            # presumably keep the projected image out of the capture --
            # TODO(review): confirm against the blanking module.
            if BLANKING:
                blanking.set_blank(True)
            wait(0.1)
            if BLANKING:
                blanking.set_blank(False)
            wait(0.1)
            cap.grab()
            # blanking.set_blank(False)
            ret, frame = cap.retrieve()
            if not ret or frame is None:
                # Dropped/failed frame: skip it instead of crashing in resize.
                continue
            frame = cv2.resize(frame, IMAGE_SIZE)
            # frame = cv2.undistort(frame, mtx, dist, None, newcameramtx)
            frame = cv2.warpPerspective(frame, perspective_matrix, IMAGE_SIZE)
            frame = cv2.flip(frame, 1)
            frame = cv2.flip(frame, -1)

            frame, output = process_image(frame)

            cv2.imshow('image', frame)
            IMAGE = frame
            OUTPUT = output
            if SHOW_SINGLE_CAPTURE_AND_DUMP:
                cv2.waitKey(0)
                break
            if BLANKING:
                wait(0.7)
            else:
                wait(0.01)
    finally:
        # Release the camera even if process_image or cv2 raises.
        cap.release()
        cv2.destroyAllWindows()
Example #5
0
def compute_rotation(contour1, contour2):
    """Find the rotation (and optional horizontal flip) that best aligns
    contour2 onto contour1.

    Both contours are rasterized onto a resolution x resolution binary
    image; contour2 is rotated through num_angles evenly spaced angles and
    each candidate is scored by pixel overlap against contour1 (and
    against a horizontally flipped contour1).

    Returns:
        (rot, flipped, rot_without_flipping):
            rot -- best rotation in radians when flipping is allowed,
            flipped -- True if the flipped variant scored best,
            rot_without_flipping -- best rotation with no flip allowed.
    """
    resolution = 50
    scale = 0.5
    val = 1  # set to 1 for use, 255 for debugging

    shape1 = contour_to_normalized_points(contour1, resolution, scale)
    shape2 = contour_to_normalized_points(contour2, resolution, scale)
    # print shape1
    num_angles = 15
    img1 = np.zeros((resolution, resolution), np.uint8)
    cv2.fillPoly(img1, [shape1], val)
    img1_flipped = cv2.flip(img1, 1)  # horizontal (mirror) flip
    show(img1)

    similarities = []
    transforms = []
    # Candidates alternate (rotation, False), (rotation, True), ... so
    # even indices are exactly the non-flipped variants (used below).
    for rotation in [
            x * math.pi * 2.0 / num_angles for x in xrange(num_angles)
    ]:
        for flip in (False, True):
            transforms.append((rotation, flip))
    img2 = np.zeros((resolution, resolution), np.uint8)
    cv2.fillPoly(img2, [shape2], val)

    for rotation, flip in transforms:
        # getRotationMatrix2D takes degrees; rotation is held in radians.
        M = cv2.getRotationMatrix2D((resolution / 2, resolution / 2),
                                    rotation * 180.0 / math.pi, 1.0)
        img2_rotated = cv2.warpAffine(img2, M, (resolution, resolution))
        show(img2_rotated)
        # Elementwise product counts pixels covered by both shapes.
        overlap = np.multiply((img1_flipped if flip else img1), img2_rotated)
        # Overlap score normalized by raster area.
        x = np.sum(overlap) * 1.0 / (resolution**2)
        similarities.append(x)

    best_idx = max(range(len(similarities)), key=lambda i: similarities[i])
    # Stride 2 over even indices restricts the argmax to flip=False.
    best_non_flipped_idx = max(range(0, len(similarities), 2),
                               key=lambda i: similarities[i])
    rot, flipped = transforms[best_idx]
    rot_without_flipping = transforms[best_non_flipped_idx][0]
    return rot, flipped, rot_without_flipping