Example #1
def smallest_candidates(images, best_index):
    '''It is unclear whether we should compare best -> others or others -> best, or even whether we should use the smallest or the largest flow.'''
    stitcher = Stitcher()

    imageA = images[best_index]
    gray = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    grays = []
    for index, imageB in enumerate(images):
        if index == best_index: continue

        transformed = stitcher.stitch([imageA, imageB],
                                      showMatches=False,
                                      reprojThresh=4.0)
        grayB = cv2.cvtColor(transformed, cv2.COLOR_BGR2GRAY)
        grays.append(grayB)

    # np.float was removed in NumPy 1.24; use the explicit float64 dtype
    total_flow = np.zeros(len(grays), dtype=np.float64)
    for i in range(len(grays)):
        flowB = cv2.calcOpticalFlowFarneback(grays[i], gray, None, 0.5, 5, 15,
                                             3, 5, 1.2, 0)
        total = np.mean(np.linalg.norm(flowB, axis=-1))
        total_flow[i] = total

    # argsort orders the reduced list (reference frame removed), so shift
    # indexes at or above best_index back up by one
    best_indexes = np.argsort(total_flow)
    best_indexes[best_indexes >= best_index] += 1

    return np.hstack(([best_index], best_indexes))
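A minimal usage sketch (the file names are placeholders; cv2, numpy, and the Stitcher class are assumed to be imported at module level, as in the examples themselves):

images = [cv2.imread(p) for p in ["frame0.jpg", "frame1.jpg", "frame2.jpg"]]
order = smallest_candidates(images, best_index=1)
print(order)  # the reference index first, then the rest sorted by increasing flow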
Example #2
def smallest_flow(images):
    stitcher = Stitcher()

    imageA = images[0]
    gray = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    grays = [gray]
    for imageB in images[1:]:
        # stitch the images together to create a panorama
        transformed = stitcher.stitch([imageA, imageB],
                                      showMatches=False,
                                      reprojThresh=4.0)
        grayB = cv2.cvtColor(transformed, cv2.COLOR_BGR2GRAY)
        grays.append(grayB)

    # np.float was removed in NumPy 1.24; use the explicit float64 dtype
    total_flow = np.zeros((len(grays), len(grays)), dtype=np.float64)
    for i in range(len(grays)):
        for j in range(len(grays)):
            flowB = cv2.calcOpticalFlowFarneback(grays[i], grays[j], None,
                                                 0.5, 2, 15, 3, 5, 1.1, 0)
            # previous parameter set: 0.5, 5, 15, 3, 5, 1.2, 0
            total = np.mean(np.linalg.norm(flowB, axis=-1))
            total_flow[i, j] = total

    total_sum = np.sum(total_flow, axis=-1)
    print(total_sum)  # debug: total flow of each frame against all others
    best_index = np.argmin(total_sum)
    return best_index, total_sum[best_index]
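A sketch of how the two helpers above could be combined; this pairing is an assumption, not shown in the source:

best_index, best_sum = smallest_flow(images)    # frame with the least total flow
order = smallest_candidates(images, best_index) # order the rest relative to it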
Example #3
def panorama():  # build a panorama from three images
    os.chdir("/home/pi/panorama-stitching/fujii_images")  # move to the directory containing the images

    imageA = cv2.imread("test2.jpg")
    imageB = cv2.imread("test1.jpg")
    angle = 270  # rotation angle, counterclockwise (unused in this snippet)

    stitcher = Stitcher()
    (preresult, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    vis = cv2.flip(vis, 1)
    #cv2.imshow("Keypoint Matches1", vis)
    #cv2.imshow("preResult", preresult)
    cv2.imwrite("pre_result.jpg", preresult)

    imageC = cv2.imread("pre_result.jpg")
    imageD = cv2.imread("test3.jpg")
    imageC = cv2.flip(imageC, 1)
    imageD = cv2.flip(imageD, 1)

    (result, vis2) = stitcher.stitch([imageC, imageD], showMatches=True)
    #cv2.imshow("Keypoint Matches2", vis2)

    result = cv2.flip(result, 1)
    #cv2.imshow("Result", result)
    cv2.imwrite("result.jpg", result)
    cv2.waitKey(0)
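The flips above work around a stitcher that always warps the second image onto the right side of the first: mirroring both inputs and un-mirroring the output lets the panorama grow to the left instead. A minimal sketch of that pattern as a helper (the function name is hypothetical):

def stitch_leftward(stitcher, imageA, imageB):
    # mirror both inputs, stitch as usual, then mirror the result back
    mirroredA = cv2.flip(imageA, 1)
    mirroredB = cv2.flip(imageB, 1)
    (result, vis) = stitcher.stitch([mirroredA, mirroredB], showMatches=True)
    return cv2.flip(result, 1)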
Example #4
def stitch(imageA, imageB):
    imageA = cv2.imread(imageA)
    imageB = cv2.imread(imageB)
    imageA = imutils.resize(imageA, width=400)
    imageB = imutils.resize(imageB, width=400)

    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    # crop out the blank border around the stitched panorama
    img = result
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    # OpenCV 4.x returns (contours, hierarchy); OpenCV 3.x returns three values
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # take the first external contour as the panorama's bounding region
    cnt = contours[0]
    x, y, w, h = cv2.boundingRect(cnt)
    crop = img[y:y + h, x:x + w]

    return (imageA, imageB, crop, vis)
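A hypothetical call with two image paths (the file names are placeholders):

imageA, imageB, crop, vis = stitch("left.jpg", "right.jpg")
cv2.imwrite("panorama.jpg", crop)  # panorama with the blank border cropped away
cv2.imwrite("matches.jpg", vis)    # keypoint-match visualization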
Example #5
    finally:
        print("Parameters :", args)

    fp = open(args, 'r')
    filenames = [each.rstrip('\r\n') for each in fp.readlines()]
    print(filenames)
    images = [cv2.resize(cv2.imread(each), (480, 320)) for each in filenames]
    count = len(images)

    imageA = images[0]
    stitcher = Stitcher()
    for imageB in images[1:]:
        # stitch the images together to create a panorama
        (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
        imageA = result

    # show the images
    cv2.imshow("Image A", imageA)
    cv2.imshow("Image B", imageB)
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)
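The loop above folds the image list into a single panorama from left to right. The same pattern as a reusable helper, sketched here with a hypothetical name:

def stitch_all(stitcher, images):
    # repeatedly stitch the next image onto the running panorama
    result = images[0]
    for imageB in images[1:]:
        (result, vis) = stitcher.stitch([result, imageB], showMatches=True)
    return result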
Example #6
def match_and_clean(first, second, binary, threshold, verbose, percentile):
    imageA = cv2.imread(first)
    max_width = imageA.shape[1]
    gray = enhance(imageA, binary)

    stitcher = Stitcher()

    # np.float was removed in NumPy 1.24; use the explicit float64 dtype
    diff = np.zeros(imageA.shape[:2]).astype(np.float64)
    warped = [gray]
    for second_file in second:
        imageB = cv2.imread(second_file)
        imageB = imutils.resize(imageB, width=max_width)
        # stitch the images together to create a panorama
        transformed = stitcher.stitch([imageA, imageB],
                                      showMatches=False,
                                      reprojThresh=4.0)
        grayB = enhance(transformed, binary)

        flow = cv2.calcOpticalFlowFarneback(gray, grayB, None, 0.5, 2, 15, 3,
                                            5, 1.1, 0)
        flowB = revert_flow(transformed, flow)
        warped_gray = enhance(flowB, binary)
        warped.append(warped_gray)
        diff += diff_image(gray, warped_gray)
    diff /= len(second)

    # print(np.histogram(diff, 100))

    others = np.median(np.array(warped), axis=0).astype(np.uint8)
    #others = ((np.max(np.array(warped), axis=0) + np.mean(np.array(warped), axis=0)) / 2).astype(np.uint8)
    others_percentile = np.percentile(np.array(warped), percentile,
                                      axis=0).astype(np.uint8)

    black, white = estimate_black_white(gray)
    print('black', black, 'white', white)

    # compute a threshold
    if threshold == 0:
        threshold = threshold_for_most_dark(diff)
    mask = diff > threshold
    red_mask = get_red_mask(imageA)
    total_mask = np.logical_or(mask, red_mask)
    # cast to int before averaging so the uint8 values cannot overflow
    black_mask = np.logical_and(total_mask, others < (int(black) + white) / 2)
    white_mask = np.logical_and(total_mask, others > (int(black) + white) / 2)

    print('total to remove black pixels', np.sum(black_mask), 'white pixels',
          np.sum(white_mask))
    corrected = np.copy(gray)
    corrected[black_mask] = others[black_mask]
    corrected[white_mask] = ndimage.median_filter(gray, size=20)[white_mask]
    #corrected[total_mask] = ndimage.median_filter(gray, size=20)[total_mask]

    #cv2.imshow('flow', draw_flow(grayB, flow))
    # show the images
    if verbose:
        cv2.imshow("Image A", imutils.resize(imageA, width=1280))
        cv2.imshow("corrected", imutils.resize(corrected, width=1280))
        cv2.imshow('diff', imutils.resize(diff.astype(np.uint8), width=1280))
        cv2.imshow('mask',
                   imutils.resize(mask.astype(np.uint8) * 255, width=1280))
        cv2.imshow(
            'blackmask',
            imutils.resize(black_mask.astype(np.uint8) * 255, width=1280))
        cv2.imshow(
            'whitemask',
            imutils.resize(white_mask.astype(np.uint8) * 255, width=1280))
        cv2.imshow("others", imutils.resize(others, width=1280))
        cv2.waitKey(0)

    return corrected, others_percentile, diff
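The helpers enhance, revert_flow, diff_image, estimate_black_white, threshold_for_most_dark, and get_red_mask are defined elsewhere and are not part of this snippet. Purely as an illustration, a plausible sketch of diff_image, assuming it is a per-pixel absolute difference computed in float:

def diff_image(grayA, grayB):
    # absolute per-pixel difference, in float to avoid uint8 wrap-around
    return np.abs(grayA.astype(np.float64) - grayB.astype(np.float64))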
Example #7
def panorama():
    stitcher = Stitcher()
    br = CvBridge()
    rospy.init_node('panorama', anonymous=True)

    # Load parameters
    input_image_topic = "camera/compressed"
    if rospy.has_param('~input_image_topic'):
        input_image_topic = rospy.get_param('~input_image_topic')
        input_image_topic = '%s/compressed' % input_image_topic
    else:
        rospy.logwarn(
            "input image topic not provided on param 'input_image_topic'; using %s"
            % input_image_topic)

    output_image_topic = "panorama/compressed"
    if rospy.has_param('~output_image_topic'):
        output_image_topic = rospy.get_param('~output_image_topic')
        output_image_topic = '%s/compressed' % output_image_topic
    else:
        rospy.logwarn(
            "output image topic not provided on param 'output_image_topic'; using %s"
            % output_image_topic)

    num_segments = 2
    if rospy.has_param('~num_segments'):
        num_segments = rospy.get_param('~num_segments')
    else:
        rospy.logwarn(
            "number of panorama segments not provided on param 'num_segments'; using %s"
            % num_segments)

    # Create publisher
    panorama_publisher = rospy.Publisher(output_image_topic,
                                         CompressedImage,
                                         queue_size=1)

    # Create panorama
    rospy.loginfo("Waiting for image on topic %s" % input_image_topic)
    imageA = rospy.wait_for_message(input_image_topic, CompressedImage)
    imageA = br.compressed_imgmsg_to_cv2(imageA)
    for n in range(1, num_segments):
        # the camera is assumed to move/rotate between captures
        rospy.loginfo("Waiting for image on topic %s" % input_image_topic)
        imageB = rospy.wait_for_message(input_image_topic, CompressedImage)
        imageB = br.compressed_imgmsg_to_cv2(imageB)
        rospy.loginfo("Stitching...")
        (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
        imageA = result

    # Show the images
    cv2.imshow("Image A", imageA)
    cv2.imshow("Image B", imageB)
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)

    # Output panorama to file
    cv2.imwrite("output.jpg", result)
    # Publish panorama to ROS
    compressed_img_message = br.cv2_to_compressed_imgmsg(result)
    rospy.loginfo("Publishing panorama to topic %s" % output_image_topic)
    panorama_publisher.publish(compressed_img_message)
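A hypothetical ROS 1 invocation of the node above; the package and script names are assumptions, while the _param:=value syntax is standard rosrun private-parameter remapping:

rosrun panorama_pkg panorama.py _input_image_topic:=camera _output_image_topic:=panorama _num_segments:=3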