Code Example #1
import os
import cv2
from pyimagesearch.panorama import Stitcher  # Stitcher import used by the other examples on this page

def panorama():  # stitch three images into a panorama
	os.chdir("/home/pi/panorama-stitching/fujii_images")  # move to the directory containing the images

	imageA = cv2.imread("test2.jpg")
	imageB = cv2.imread("test1.jpg")
	angle = 270  # rotation angle in degrees, counter-clockwise (unused in this snippet)

	stitcher = Stitcher()
	(preresult, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

	vis = cv2.flip(vis,1)
	#cv2.imshow("Keypoint Matches1", vis)
	#cv2.imshow("preResult", preresult)
	cv2.imwrite("pre_result.jpg", preresult)

	imageC = cv2.imread("pre_result.jpg")
	imageD = cv2.imread("test3.jpg")
	imageC = cv2.flip(imageC,1)
	imageD = cv2.flip(imageD,1)

	(result, vis2) = stitcher.stitch([imageC, imageD], showMatches=True)
	#cv2.imshow("Keypoint Matches2", vis2)

	result = cv2.flip(result,1)
	#cv2.imshow("Result", result)
	cv2.imwrite("result.jpg", result)
	cv2.waitKey(0)
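
Every snippet on this page relies on a `Stitcher` class, usually imported from `pyimagesearch.panorama`. As a reference for reading the examples, here is a minimal sketch in the spirit of that class; it is an illustration, not the exact class each project uses (several forks add parameters such as `diraction`, cache the homography for video, or swap the feature detector). It assumes OpenCV >= 4.4, where SIFT lives in the main module. Note the return convention, which explains how the snippets unpack the result: None on failure, the panorama alone when showMatches=False, and a (result, vis) tuple when showMatches=True.

import numpy as np
import cv2


class Stitcher:
    def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
        # unpack the images (left-to-right order), detect keypoints and features
        (imageB, imageA) = images
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        # match the features and estimate a homography between the two views
        M = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
        if M is None:
            return None  # too few matches; callers check for this

        (matches, H, status) = M
        # warp the right image onto the left image's plane, then paste the left image in
        result = cv2.warpPerspective(
            imageA, H, (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB

        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches, status)
            return (result, vis)
        return result

    def detectAndDescribe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        descriptor = cv2.SIFT_create()
        (kps, features) = descriptor.detectAndCompute(gray, None)
        # convert KeyPoint objects to a plain array of (x, y) coordinates
        kps = np.float32([kp.pt for kp in kps])
        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
        matcher = cv2.BFMatcher()
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        # Lowe's ratio test keeps only confident matches
        matches = [(m[0].trainIdx, m[0].queryIdx) for m in rawMatches
                   if len(m) == 2 and m[0].distance < m[1].distance * ratio]
        if len(matches) <= 4:
            return None  # a homography needs at least four correspondences
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)
        return (matches, H, status)

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # place the two images side by side and draw a line per inlier match
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            if s == 1:
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
        return vis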
Code Example #2
import cv2
import numpy as np
from pyimagesearch.panorama import Stitcher  # import path may differ per project


def smallest_flow(images):
    stitcher = Stitcher()

    imageA = images[0]
    gray = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    grays = [gray]
    for imageB in images[1:]:
        # stitch the images together to create a panorama
        transformed = stitcher.stitch([imageA, imageB],
                                      showMatches=False,
                                      reprojThresh=4.0)
        grayB = cv2.cvtColor(transformed, cv2.COLOR_BGR2GRAY)
        grays.append(grayB)

    total_flow = np.zeros((len(grays), len(grays)), dtype=np.float64)
    for i in range(len(grays)):
        for j in range(len(grays)):
            flowB = cv2.calcOpticalFlowFarneback(grays[i], grays[j], None, 0.5,
                                                 2, 15, 3, 5, 1.1,
                                                 0)  #0.5, 5, 15, 3, 5, 1.2, 0)
            total = np.mean(np.linalg.norm(flowB, axis=-1))
            total_flow[i, j] = total

    total_sum = np.sum(total_flow, axis=-1)
    print(total_sum)
    best_index = np.argmin(total_sum)
    return best_index, total_sum[best_index]
Code Example #3
File: stitch.py Project: LaserSaver/Workbench
import imutils
import cv2
from pyimagesearch.panorama import Stitcher


def stitch_images(imageA, imageB):

    ''' Stitches 2 images together
        Args:
            imageA: First image
            imageB: Second image

        Returns:
            result: the images stitched together

    '''
    # resize both images to a width of 400 pixels (for faster processing);
    # imutils.resize keeps the aspect ratio, so only the width is needed
    imageA = imutils.resize(imageA, width=400)
    imageB = imutils.resize(imageB, width=400)

    # stitch the images together to create a panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
    # show the images (commented out for using pi through ssh)
    #cv2.imshow("Image A", imageA)
    #cv2.imshow("Image B", imageB)
    #cv2.imshow("Keypoint Matches", vis)
    #cv2.imshow("Result", result)
    #cv2.waitKey(0)
    return result
Code Example #4
import cv2
import numpy as np
from pyimagesearch.panorama import Stitcher


def smallest_candidates(images, best_index):
    '''It is unclear whether we should stitch best -> others or others -> best,
    or even whether to use the smallest or the largest flow.'''
    stitcher = Stitcher()

    imageA = images[best_index]
    gray = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    grays = []
    for index, imageB in enumerate(images):
        if index == best_index: continue

        transformed = stitcher.stitch([imageA, imageB],
                                      showMatches=False,
                                      reprojThresh=4.0)
        grayB = cv2.cvtColor(transformed, cv2.COLOR_BGR2GRAY)
        grays.append(grayB)

    total_flow = np.zeros((len(grays)), dtype=np.float64)
    for i in range(len(grays)):
        flowB = cv2.calcOpticalFlowFarneback(grays[i], gray, None, 0.5, 5, 15,
                                             3, 5, 1.2, 0)
        total = np.mean(np.linalg.norm(flowB, axis=-1))
        total_flow[i] = total

    best_indexes = np.argsort(total_flow)
    best_indexes[best_indexes >= best_index] += 1

    return np.hstack(([best_index], best_indexes))
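
A hypothetical driver for the two helpers above (Example #2's smallest_flow and this smallest_candidates); the file names below are illustrative, not from the original project:

import cv2

# load the candidate frames
paths = ["frame0.jpg", "frame1.jpg", "frame2.jpg", "frame3.jpg"]
images = [cv2.imread(p) for p in paths]

# pick the frame whose warped neighbours show the least total optical flow,
# then order the remaining frames by flow distance to it
best_index, score = smallest_flow(images)
order = smallest_candidates(images, best_index)
print("reference frame:", best_index, "score:", score)
print("suggested stitching order:", order)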
Code Example #5
import imutils
import cv2
from pyimagesearch.panorama import Stitcher


def stitch(imageA, imageB):
    imageA = cv2.imread(imageA)
    imageB = cv2.imread(imageB)
    imageA = imutils.resize(imageA, width=400)
    imageB = imutils.resize(imageB, width=400)

    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    # crop away the blank space that warping leaves around the panorama
    img = result
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    cnt = max(contours, key=cv2.contourArea)  # largest contour = the stitched content
    x, y, w, h = cv2.boundingRect(cnt)
    crop = img[y:y + h, x:x + w]

    return (imageA, imageB, crop, vis)
Code Example #6
# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
motion = BasicMotionDetector(minArea=2000)
total = 0
count = 1

while True:
    ret, Left = cap1.read()
    ret, Right = cap2.read()

    if Left is None or Right is None:
        continue

    result = stitcher.stitch([Left, Right])

    if result is None:
        print("[INFO] homography could not be computed")
        continue

    # convert the panorama to grayscale, blur it slightly, update
    # the motion detector
    result_temp = result.copy()
    # mask out the non-overlapping borders (black_img is a black patch
    # defined earlier in the original script)
    result_temp[0:480, 0:350] = black_img
    result_temp[0:480, 930:1280] = black_img
    gray = cv2.cvtColor(result_temp, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    locs = motion.update(gray)

    # only process the panorama for motion if a nice average has
    # been built up
Code Example #7
File: stitch.py Project: OlofHarrysson/imageanalysis
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2
import numpy as np

img1 = cv2.imread("images/femfel1.png")
img2 = cv2.imread("images/femfel2.png")

stitcher = Stitcher()
(result, vis) = stitcher.stitch([img1, img2], showMatches=True)

img_diff = cv2.absdiff(result, img1)

kernel = np.ones((2, 2), np.uint8)
img_eroded = cv2.erode(img_diff, kernel, iterations=1)

gray = cv2.cvtColor(img_eroded, cv2.COLOR_BGR2GRAY)
retval, dest = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY)

image, contours, hierarchy = cv2.findContours(dest, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_NONE)

for c in contours:
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(img_eroded, (x, y), (x + w, y + h), (0, 0, 255), 1)

# Initiate ORB detector
orb = cv2.ORB_create()

# find the keypoints and descriptors with ORB
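kp1, des1 = orb.detectAndCompute(img1, None)  # plausible continuation; the
kp2, des2 = orb.detectAndCompute(img2, None)  # snippet is cut off here, so this is an assumption, not the project's actual code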
Code Example #8
    finally:
        print("Parameters : ", args)

    fp = open(args, 'r')
    filenames = [each.rstrip('\r\n') for each in fp.readlines()]
    print(filenames)
    images = [cv2.resize(cv2.imread(each), (480, 320)) for each in filenames]
    count = len(images)

    #left_list, right_list, center_im = [], [],None

    # load the two images and resize them to have a width of 400 pixels
    # (for faster processing)
    #imageA = cv2.imread(args["first"])
    #imageB = cv2.imread(args["second"])
    #imageA = imutils.resize(imageA, width=400)
    #imageB = imutils.resize(imageB, width=400)
    imageA = images[0]
    stitcher = Stitcher()
    for imageB in images[1:]:
        # stitch the images together to create a panorama
        (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
        imageA = result

    # show the images
    cv2.imshow("Image A", imageA)
    cv2.imshow("Image B", imageB)
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)
Code Example #9
    #3
    left_check, left_frame = left.read()
    right_check, right_frame = right.read()
    # stitching code below this

    # load the two images and resize them to have a width of 400 pixels
    # (for faster processing)
    imageA = left_frame
    imageB = right_frame

    imageA = imutils.resize(imageA, width=400)
    imageB = imutils.resize(imageB, width=400)

    # stitch the images together to create a panorama
    stitcher = Stitcher()
    (frame, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    ##########################
    blob = cv.dnn.blobFromImage(frame,
                                1 / 255, (inpWidth, inpHeight), [0, 0, 0],
                                1,
                                crop=False)

    # Set the input to the net
    net.setInput(blob)
    outs = net.forward(getOutputsNames(net))

    postprocess(frame, outs)

    #show the image
    cv.imshow(winName, frame)
Code Example #10
# loop over frames from the video streams
while True:
	# grab the frames from their respective video streams
	left = leftStream.read()
	right = rightStream.read()

	# resize the frames
	left = imutils.resize(left, width=400)
	right = imutils.resize(right, width=400)

	# stitch the frames together to form the panorama
	# IMPORTANT: you might have to change this line of code
	# depending on how your cameras are oriented; frames
	# should be supplied in left-to-right order
	result = stitcher.stitch([left, right])

	# no homography could be computed
	if result is None:
		print("[INFO] homography could not be computed")
		break

	# convert the panorama to grayscale, blur it slightly, update
	# the motion detector
	gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
	#gray = cv2.equalizeHist(gray)
	gray = cv2.GaussianBlur(gray, (21, 21), 0)
	locs = motion.update(gray)

	# only process the panorama for motion if a nice average has
	# been built up
Code Example #11
File: stitch.py Project: thugbobby/Python_public
ap.add_argument("-f", "--first", required=True,
	help="path to the first image")
ap.add_argument("-s", "--second", required=True,
	help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
#imageA = cv2.resize(imageA, (imageA.shape[0],imageA.shape[1]//2,))
#imageB = cv2.resize(imageB, (imageB.shape[0],imageB.shape[1]//2))
#imageA = imutils.resize(imageA, width=600)
#imageB = imutils.resize(imageB, width=600)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True, ratio=0.75, reprojThresh=4.0)

# show the images
cv2.imshow("Image A", imutils.resize(imageA, width=800))
cv2.imshow("Image B", imutils.resize(imageB, width=800))
cv2.imshow("Keypoint Matches", imutils.resize(vis, width=1280))
cv2.imshow("Result", imutils.resize(result, width=1280))
cv2.waitKey(0)
cv2.destroyAllWindows()

outdir = os.path.split(args["first"])[0]
filename = os.path.split(args["first"])[-1]
cv2.imwrite(os.path.sep.join([outdir, "stitched.png"]), result)
Code Example #12
    success_left, image_left = vidcap_left.read()
    success_right, image_right = vidcap_right.read()
    if not success_left or not success_right:
        print("frames are finished")
        break
    image_left = imutils.resize(image_left, width=600)
    image_right = imutils.resize(image_right, width=600)

    # left = imutils.resize(left, width=400)
    # right = imutils.resize(right, width=400)

    # stitch the frames together to form the panorama
    # IMPORTANT: you might have to change this line of code
    # depending on how your cameras are oriented; frames
    # should be supplied in left-to-right order
    result = stitcher.stitch([image_left, image_right])

    # no homography could be computed
    if result is None:
        print("[INFO] homography could not be computed")
        break

    # convert the panorama to grayscale, blur it slightly, update
    # the motion detector
    #gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
    #gray = cv2.GaussianBlur(gray, (21, 21), 0)
    #locs = motion.update(gray)

    # only process the panorama for motion if a nice average has
    # been built up
    """
Code Example #13
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2
import numpy as np

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True, help="path to the first image")
ap.add_argument("-s",
                "--second",
                required=True,
                help="path to the second image")
args = vars(ap.parse_args())

imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
imageA = imutils.resize(imageA, width=500)
imageB = imutils.resize(imageB, width=500)

# stitch the images together to create a panorama
stitcher = Stitcher()
result = stitcher.stitch([imageA, imageB])

# show the images
#cv2.imshow("Image A", imageA)
#cv2.imshow("Image B", imageB)
cv2.imshow("Result", result)
cv2.imshow("Stacked", np.hstack((imageA, imageB)))
cv2.imwrite("Result.jpg", result)
cv2.waitKey(0)
Code Example #14
# import the necessary packages
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2

# define basic parameter
IM_NUM = 17
result = []
stitcher = Stitcher()

for i in range(IM_NUM, 14, -1):
    im_name = "%04d.jpg" % i  # zero-pad to four digits, e.g. 0017.jpg

    im_path = "/auto/extra/b02902015/py-faster-rcnn/video_image/Compress/" + im_name
    image = cv2.imread(im_path)
    cv2.imshow(im_name, image)
    if (i == IM_NUM):
        result = image
        continue
    result = stitcher.stitch([result, image], showMatches=True, diraction=0)

cv2.imshow("Result", result)
cv2.waitKey(0)
Code Example #15
def panorama():
    stitcher = Stitcher()
    br = CvBridge()
    rospy.init_node('panorama', anonymous=True)

    # Load parameters
    input_image_topic = "camera/compressed"
    if rospy.has_param('~input_image_topic'):
        input_image_topic = rospy.get_param('~input_image_topic')
        input_image_topic = '%s/compressed' % input_image_topic
    else:
        rospy.logwarn(
            "input image topic not provided on param 'input_image_topic'; using %s"
            % input_image_topic)

    output_image_topic = "panorama/compressed"
    if rospy.has_param('~output_image_topic'):
        output_image_topic = rospy.get_param('~output_image_topic')
        output_image_topic = '%s/compressed' % output_image_topic
    else:
        rospy.logwarn(
            "output image topic not provided on param 'output_image_topic'; using %s"
            % output_image_topic)

    num_segments = 2
    if rospy.has_param('~num_segments'):
        num_segments = rospy.get_param('~num_segments')
    else:
        rospy.logwarn(
            "panorama number of segments not provided on param 'num_segments'; using %s"
            % num_segments)

    # Create publisher
    panorama_publisher = rospy.Publisher(output_image_topic,
                                         CompressedImage,
                                         queue_size=1)

    # Create panorama
    rospy.loginfo("Waiting for image on topic %s" % input_image_topic)
    imageA = rospy.wait_for_message(input_image_topic, CompressedImage)
    imageA = br.compressed_imgmsg_to_cv2(imageA)
    for n in range(1, num_segments):
        # move
        rospy.loginfo("Waiting for image on topic %s" % input_image_topic)
        imageB = rospy.wait_for_message(input_image_topic, CompressedImage)
        imageB = br.compressed_imgmsg_to_cv2(imageB)
        rospy.loginfo("Stitching...")
        (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
        imageA = result

    # Show the images
    cv2.imshow("Image A", imageA)
    cv2.imshow("Image B", imageB)
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)

    # Output panorama to file
    cv2.imwrite("output.jpg", result)
    # Publish panorama to ROS
    compressed_img_message = br.cv2_to_compressed_imgmsg(result)
    rospy.loginfo("Publishing panorama to topic %s" % output_image_topic)
    panorama_publisher.publish(compressed_img_message)
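
A node like this is normally started through rosrun or a launch file, with the three private parameters set on the command line. A hypothetical invocation (package and script names are illustrative; note the node appends /compressed to the topic names itself):

rosrun my_panorama_pkg panorama_node.py _input_image_topic:=/camera \
    _output_image_topic:=/panorama _num_segments:=3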
Code Example #16
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(ofile[i], fourcc, fps, (w * 3, 2 * h))
    ret, oldFrame = cap.read()

    x, y, z = oldFrame.shape
    bg = np.zeros(shape=(2 * x, 3 * y, z), dtype=np.uint8)
    bg[x // 2:x + x // 2, y:2 * y] = oldFrame

    frameAug = np.zeros(shape=(2 * x, 3 * y, z), dtype=np.uint8)
    stitcher = Stitcher()
    for fr in range(1, frameCount):
        ret, frame = cap.read()
        frameAug.fill(0)
        frameAug[x // 2:x + x // 2, y:2 * y] = frame

        print(fr, bg.shape)
        result = stitcher.stitch([frameAug, bg])
        out.write(result)

        cv2.imshow("out", result)
        bg = result.copy()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    out.release()
    cv2.destroyAllWindows()
    cap.release()
Code Example #17
# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
motion = BasicMotionDetector(minArea=500)
total = 0

while True:
    left = leftStream.read()
    right = rightStream.read()
    s_right = second_right.read()

    left = imutils.resize(left, width=400)
    right = imutils.resize(right, width=400)
    s_right = imutils.resize(s_right, width=400)

    result = stitcher.stitch([right, s_right])

    # check the first stitch before reusing it as input to the second one
    if result is None:
        print("[INFO] homography could not be computed")
        break

    #result0 = stitcher.stitch([left, right])
    result1 = stitcher.stitch([left, result])
    
    timestamp = datetime.datetime.now()
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(result, ts, (10, result.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.putText(result1, ts, (10, result1.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    
    cv2.imshow("First result", result)
Code Example #18
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True, help="path to the first image")
ap.add_argument("-s",
                "--second",
                required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()
result = stitcher.stitch([imageA, imageB], showMatches=False, diraction=0)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
# cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
Code Example #19
File: align.py Project: dongwang218/alignment
        left = []
        right = []
        left_file = os.path.join(dir_left, os.path.basename(first))
        right_file = os.path.join(dir_right, os.path.basename(first))
        left.append(left_file)
        right.append(right_file)
        cv2.imwrite(left_file, imageA[:, :split_index])
        cv2.imwrite(right_file, imageA[:, split_index:])

        stitcher = Stitcher()

        for second_file in second:
            imageB = cv2.imread(second_file)
            imageB = imutils.resize(imageB, width=max_width)
            # stitch the images together to create a panorama
            transformed = stitcher.stitch([imageA, imageB],
                                          showMatches=False,
                                          reprojThresh=4.0)
            n_split_index = is_gray_double_page(transformed)
            if n_split_index is None:
                # fall back to the split point found for the first page
                n_split_index = split_index
            left_file = os.path.join(dir_left, os.path.basename(second_file))
            right_file = os.path.join(dir_right, os.path.basename(second_file))
            left.append(left_file)
            right.append(right_file)
            cv2.imwrite(left_file, transformed[:, :n_split_index])
            cv2.imwrite(right_file, transformed[:, n_split_index:])
            if len(left) > args.number:
                break
        corrected_left, others_left, diff_left = match_and_clean(
            left[0], left[1:], args.binary, args.threshold, args.verbose,
            args.percentile)
Code Example #20
# loop over frames from the video streams
while True:
	# grab the frames from their respective video streams
	left = leftStream.read()
	right = rightStream.read()

	# resize the frames
	left = imutils.resize(left, width=400)
	right = imutils.resize(right, width=400)

	# stitch the frames together to form the panorama
	# IMPORTANT: you might have to change this line of code
	# depending on how your cameras are oriented; frames
	# should be supplied in left-to-right order
	result = stitcher.stitch([left, right])

	# no homography could be computed
	if result is None:
		print("[INFO] homography could not be computed")
		break

	# convert the panorama to grayscale, blur it slightly, update
	# the motion detector
	gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (21, 21), 0)
	locs = motion.update(gray)

	# only process the panorama for motion if a nice average has
	# been built up
	if total > 32 and len(locs) > 0:
Code Example #21
File: align.py Project: dongwang218/alignment
def match_and_clean(first, second, binary, threshold, verbose, percentile):
    imageA = cv2.imread(first)
    max_width = imageA.shape[1]
    gray = enhance(imageA, binary)

    stitcher = Stitcher()

    diff = np.zeros(imageA.shape[:2]).astype(np.float64)
    warped = [gray]
    for second_file in second:
        imageB = cv2.imread(second_file)
        imageB = imutils.resize(imageB, width=max_width)
        # stitch the images together to create a panorama
        transformed = stitcher.stitch([imageA, imageB],
                                      showMatches=False,
                                      reprojThresh=4.0)
        grayB = enhance(transformed, binary)

        flow = cv2.calcOpticalFlowFarneback(gray, grayB, None, 0.5, 2, 15, 3,
                                            5, 1.1, 0)
        flowB = revert_flow(transformed, flow)
        warped_gray = enhance(flowB, binary)
        warped.append(warped_gray)
        diff += diff_image(gray, warped_gray)
    diff /= len(second)

    # print(np.histogram(diff, 100))

    others = np.median(np.array(warped), axis=0).astype(np.uint8)
    #others = ((np.max(np.array(warped), axis=0) + np.mean(np.array(warped), axis=0)) / 2).astype(np.uint8)
    others_percentile = np.percentile(np.array(warped), percentile,
                                      axis=0).astype(np.uint8)

    black, white = estimate_black_white(gray)
    print('black', black, 'white', white)

    # compute a threshold
    if threshold == 0:
        threshold = threshold_for_most_dark(diff)
    mask = diff > threshold
    red_mask = get_red_mask(imageA)
    total_mask = np.logical_or(mask, red_mask)
    black_mask = np.logical_and(total_mask, others < (int(black) + white) / 2)
    white_mask = np.logical_and(total_mask, others > (int(black) + white) / 2)

    print('total to remove black pixels', np.sum(black_mask), 'white pixels',
          np.sum(white_mask))
    corrected = np.copy(gray)
    corrected[black_mask] = others[black_mask]
    corrected[white_mask] = ndimage.median_filter(gray, size=20)[white_mask]
    #corrected[total_mask] = ndimage.median_filter(gray, size=20)[total_mask]

    #cv2.imshow('flow', draw_flow(grayB, flow))
    # show the images
    if verbose:
        cv2.imshow("Image A", imutils.resize(imageA, width=1280))
        cv2.imshow("corrected", imutils.resize(corrected, width=1280))
        cv2.imshow('diff', imutils.resize(diff.astype(np.uint8), width=1280))
        cv2.imshow('mask',
                   imutils.resize(mask.astype(np.uint8) * 255, width=1280))
        cv2.imshow(
            'blackmask',
            imutils.resize(black_mask.astype(np.uint8) * 255, width=1280))
        cv2.imshow(
            'whitemask',
            imutils.resize(white_mask.astype(np.uint8) * 255, width=1280))
        cv2.imshow("others", imutils.resize(others, width=1280))
        cv2.waitKey(0)

    return corrected, others_percentile, diff
Code Example #22
File: stitch.py Project: Sailja/Assignment2
from pyimagesearch.panorama import Stitcher
import argparse
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
	help="path to the first image")
ap.add_argument("-s", "--second", required=True,
	help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
#imageA = imutils.resize(imageA, width=4000)
#imageB = imutils.resize(imageB, width=4000)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
cv2.imwrite('mosaic.jpg', result)

Code Example #23
  pts1_2 = np.float32([coords2[0], coords2[1], coords2[2], coords2[3]])
  pts2_2 = np.float32([[0, 0], [W2, 0], [W2, H2], [0, H2]])
  matrix2 = cv2.getPerspectiveTransform(pts1_2, pts2_2)
  result2 = cv2.warpPerspective(image2, matrix2, (W2, H2))

  warped1 = four_point_transform(image1, pts1_1)
  warped1 = cv2.resize(warped1, (W1, H1))
  
  warped2 = four_point_transform(image2, pts1_2)
  warped2 = cv2.resize(warped2, (W2, H2))

  # show the warped images
  cv2.imshow("Video1", imutils.resize(warped1, width=800))
  cv2.imshow("Video2", imutils.resize(warped2, width=800))
  
  (result, vis) = stitcher.stitch([warped1, warped2], showMatches=True, ratio=0.75, reprojThresh=4.0)

  # show the images
  cv2.imshow("Keypoint Matches", imutils.resize(vis, width=1280))
  cv2.putText(result, "FC1={},FC2={}".format(fc1, fc2), (10, 25), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0))
  
  cv2.imshow("Result", imutils.resize(result, width=1280))
  
  if writer is not None:
    writer.write(result)

  #filename = os.path.split(args["image"])[-1]
  #cv2.imwrite(os.path.sep.join([outdir, filename.split('.')[0]+"_trans."+filename.split('.')[-1]]),warped)
  
  
  key = cv2.waitKey(2)