def imageStiching(self, img1, img2):
    """Read two images from disk and stitch them into a panorama.

    Parameters
    ----------
    img1, img2 : str
        Filesystem paths to the two input images (passed to ``cv2.imread``).

    Returns
    -------
    Whatever ``Stitcher.stitch`` returns for the two loaded images
    (the stitched panorama — exact type depends on the Stitcher class).
    """
    # Read the images to stitch.
    imageA = cv2.imread(img1)
    imageB = cv2.imread(img2)
    # cv2.imshow("Image A", imageA)
    # cv2.imshow("Image B", imageB)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Stitch the images together.
    stitcher = Stitcher()
    # Bug fix: the stitched result was computed and then silently discarded;
    # return it so callers can actually use the panorama.
    return stitcher.stitch([imageA, imageB])
from Stitcher import Stitcher

# Path to the .pos file that specifies the names of the tile images.
position_list_filename = '/home/sihao/SchultzBox/Sihao/Imaging/050618Grid_stack/050618aaPositionList.pos'

# Build the stitcher from the position list, run it, and write the panorama.
tile_stitcher = Stitcher(position_list_filename)
tile_stitcher.stitch()
tile_stitcher.save('stitched.png')
import argparse

import cv2

# Construct the argument parser and parse the arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True, help="path to the first image")
ap.add_argument("-s", "--second", required=True, help="path to the second image")
args = vars(ap.parse_args())

# Load the two images and resize them to have a width of 400 pixels
# (for faster processing).
# Bug fix: the original immediately overwrote imageA/imageB with hard-coded
# imread("IMG_0090.jpg") / imread("IMG_0091.jpg") calls, silently ignoring
# the command-line arguments it had just parsed; those overrides are removed.
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
# NOTE(review): imutils and Stitcher are used but not imported in this
# snippet — presumably imported elsewhere in the original file; verify.
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# Stitch the images together to create a panorama.
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# Show the images.
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
from Stitcher import Stitcher
import cv2

# Read the two images to stitch.
imageA = cv2.imread("image/left_01.png")
imageB = cv2.imread("image/right_01.png")

# Stitch the images into a panorama.
stitcher = Stitcher()
# Bug fix: the call unpacks (result, vis) but did not request the match
# visualisation. Every other call site in this file that unpacks two values
# passes showMatches=True; without it Stitcher.stitch typically returns only
# the result, making this unpack fail — pass showMatches=True for consistency.
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# Display all images.
cv2.imshow("Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import imutils
from Stitcher import Stitcher
import copy
import numpy as np

if __name__ == "__main__":
    imageA = cv2.imread("images/random_forest2.jpg")
    imageB = cv2.imread("images/random_forest1_lowexposure.jpg")
    # imageA = cv2.imread("images/boat1.jpg")
    # imageB = cv2.imread("images/boat2.jpg")
    imageA = imutils.resize(imageA, width=2000)
    imageB = imutils.resize(imageB, width=2000)

    # Stitch the images together to create a panorama; H is the homography
    # mapping imageB into imageA's frame.
    stitcher = Stitcher()
    (result, H, vis) = stitcher.stitch(imageB, imageA, showMatches=True)

    # Warp imageB with the homography; this warped image is used for blending.
    wrapped_image = cv2.warpPerspective(
        imageB, H, (imageA.shape[1] + imageB.shape[1], imageB.shape[0]))
    src = copy.deepcopy(wrapped_image)

    # Convert to a grayscale image.
    # Bug fix: cv2.imread returns BGR, so use COLOR_BGR2GRAY (the original
    # used COLOR_RGB2GRAY, which swaps the channel weights).
    im_bw = cv2.cvtColor(wrapped_image, cv2.COLOR_BGR2GRAY)

    # Threshold the image so the warped (non-black) region becomes a mask.
    ret, thresh_im = cv2.threshold(im_bw, 0, 255, 0)

    # Calculate the contours of the warped region.
    # Bug fixes: (1) the thresholded image was computed but the original
    # passed the un-thresholded grayscale to findContours — use thresh_im;
    # (2) index [-2] keeps the call compatible with both OpenCV 3.x (which
    # returns image, contours, hierarchy) and 4.x (contours, hierarchy).
    contours = cv2.findContours(thresh_im, cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]

    # Approximate the contour by a polygon; this polygon is used as the
    # mask for the blending.
    epsilon = 0.1 * cv2.arcLength(contours[0], True)
    polygon = cv2.approxPolyDP(contours[0], epsilon, True)
def main():
    """Stitch three camera views per frame, track points, and display them.

    Relies on module-level names not visible here: Stitcher, Transformer,
    Tracker, crop_img, config_scale, videos_path, videos,
    image_down_scale_factor, H_left_mid, H_mid_right, and the legacy
    ``cv`` module (cv2.cv) — TODO(review): confirm these are defined at
    module level in the full file.
    """
    stitcher = Stitcher()

    # Load the (optionally pre-scaled) background image.
    if config_scale:
        background = cv2.imread('images/background_scaled.jpg')
    else:
        background = cv2.imread('images/background.jpg')
    transformer = Transformer(config_scale)

    # Open the left / middle / right camera videos.
    cap_left = cv2.VideoCapture(videos_path + videos[0])
    cap_mid = cv2.VideoCapture(videos_path + videos[1])
    cap_right = cv2.VideoCapture(videos_path + videos[2])
    frame_width = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
    frame_count = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_COUNT))

    # Initial tracked point positions, keyed by label (presumably
    # (row, col) pixel coordinates — verify against Tracker).
    init_points = {'C0': (71, 1153),
                   'R0': (80, 761), 'R1': (80, 1033), 'R2': (95, 1127),
                   'R3': (54, 1156), 'R4': (65, 1185), 'R5': (61, 1204),
                   'R6': (56, 1217), 'R7': (69, 1213), 'R8': (67, 1253),
                   'R9': (75, 1281), 'R10': (92, 1347),
                   'B0': (71, 1409), 'B1': (72, 1016), 'B2': (47, 1051),
                   'B3': (58, 1117), 'B4': (74, 1139), 'B5': (123, 1156),
                   'B6': (61, 1177), 'B7': (48, 1198), 'B8': (102, 1353)}
    points = init_points.values()
    tracker = Tracker(background, config_scale, init_points.values())

    # cap_left.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_mid.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_right.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    for fr in range(frame_count):
        print(fr)
        status_left, frame_left = cap_left.read()
        status_mid, frame_mid = cap_mid.read()
        status_right, frame_right = cap_right.read()

        # Bug fix: use integer (floor) division — cv2.resize requires an
        # integer size tuple, and '/' yields a float on Python 3.
        scaled_size = (frame_width // image_down_scale_factor,
                       frame_height // image_down_scale_factor)
        frame_left = cv2.resize(frame_left, scaled_size)
        frame_mid = cv2.resize(frame_mid, scaled_size)
        frame_right = cv2.resize(frame_right, scaled_size)

        # Adjust the brightness difference between cameras.
        frame_mid = cv2.convertScaleAbs(frame_mid, alpha=0.92)
        if status_left and status_mid and status_right:
            # Stitch left+mid, then stitch the right view onto that.
            warped_left_mid = stitcher.stitch(frame_mid, frame_left,
                                              H_left_mid)
            warped_left_mid_right = stitcher.stitch(warped_left_mid,
                                                    frame_right, H_mid_right)
            warped_left_mid_right_cropped = crop_img(warped_left_mid_right)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.waitKey(0)
            points = tracker.tracking(warped_left_mid_right_cropped)
            # Draw the tracked points; tracker yields (row, col), while
            # cv2.circle expects (x, y), hence the index swap.
            for i in range(len(points)):
                cv2.circle(warped_left_mid_right_cropped,
                           (points[i][1], points[i][0]), 3, (0, 0, 255), -1)
            height, width = warped_left_mid_right_cropped.shape[:2]
            # Bug fix: same integer-division issue as scaled_size above.
            warped_left_mid_right_cropped = cv2.resize(
                warped_left_mid_right_cropped, (width // 2, height // 2))
            cv2.imshow('Objects', warped_left_mid_right_cropped)
            cv2.waitKey(1)
            # background = transformer.transform(points)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.imshow('Objects', background)
            # cv2.waitKey(30)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cap_left.release()
    cap_mid.release()
    cap_right.release()
print('start')
device1_cap = cv2.VideoCapture(device1_url)  # open video stream 1
device2_cap = cv2.VideoCapture(device2_url)  # open video stream 2

# Performance fix: construct the Stitcher once, outside the frame loop,
# instead of re-instantiating it on every iteration.
stitcher = Stitcher()

while device1_cap.isOpened() or device2_cap.isOpened():
    print('success')
    # Robustness fix: track whether each read actually produced a frame;
    # the original used frames that could be None (failed read) or even
    # undefined (capture never opened).
    device1_ret = device2_ret = False
    if device1_cap.isOpened():
        device1_ret, device1_frame = device1_cap.read()
        if device1_ret:
            cv2.imshow('frame1', device1_frame)
    if device2_cap.isOpened():
        device2_ret, device2_frame = device2_cap.read()
        if device2_ret:
            cv2.imshow('frame2', device2_frame)
    if device1_ret and device2_ret:
        # Stitch the two frames into a panoramic view.
        (result, vis) = stitcher.stitch([device1_frame, device2_frame],
                                        showMatches=True)
        print("panomanic mode")
        cv2.imshow("Result", result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

print('end')
device1_cap.release()
device2_cap.release()
cv2.destroyAllWindows()
def main():
    """Stitch three camera views per frame, track points, and display them.

    Relies on module-level names not visible here: Stitcher, Transformer,
    Tracker, crop_img, config_scale, videos_path, videos,
    image_down_scale_factor, H_left_mid, H_mid_right, and the legacy
    ``cv`` module (cv2.cv) — TODO(review): confirm these are defined at
    module level in the full file.
    """
    stitcher = Stitcher()

    # Load the (optionally pre-scaled) background image.
    if config_scale:
        background = cv2.imread('images/background_scaled.jpg')
    else:
        background = cv2.imread('images/background.jpg')
    transformer = Transformer(config_scale)

    # Open the left / middle / right camera videos.
    cap_left = cv2.VideoCapture(videos_path + videos[0])
    cap_mid = cv2.VideoCapture(videos_path + videos[1])
    cap_right = cv2.VideoCapture(videos_path + videos[2])
    frame_width = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
    frame_count = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_COUNT))

    # Initial tracked point positions, keyed by label (presumably
    # (row, col) pixel coordinates — verify against Tracker).
    init_points = {'C0': (71, 1153),
                   'R0': (80, 761), 'R1': (80, 1033), 'R2': (95, 1127),
                   'R3': (54, 1156), 'R4': (65, 1185), 'R5': (61, 1204),
                   'R6': (56, 1217), 'R7': (69, 1213), 'R8': (67, 1253),
                   'R9': (75, 1281), 'R10': (92, 1347),
                   'B0': (71, 1409), 'B1': (72, 1016), 'B2': (47, 1051),
                   'B3': (58, 1117), 'B4': (74, 1139), 'B5': (123, 1156),
                   'B6': (61, 1177), 'B7': (48, 1198), 'B8': (102, 1353)}
    points = init_points.values()
    tracker = Tracker(background, config_scale, init_points.values())

    # cap_left.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_mid.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_right.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    for fr in range(frame_count):
        print(fr)
        status_left, frame_left = cap_left.read()
        status_mid, frame_mid = cap_mid.read()
        status_right, frame_right = cap_right.read()

        # Bug fix: use integer (floor) division — cv2.resize requires an
        # integer size tuple, and '/' yields a float on Python 3.
        scaled_size = (frame_width // image_down_scale_factor,
                       frame_height // image_down_scale_factor)
        frame_left = cv2.resize(frame_left, scaled_size)
        frame_mid = cv2.resize(frame_mid, scaled_size)
        frame_right = cv2.resize(frame_right, scaled_size)

        # Adjust the brightness difference between cameras.
        frame_mid = cv2.convertScaleAbs(frame_mid, alpha=0.92)
        if status_left and status_mid and status_right:
            # Stitch left+mid, then stitch the right view onto that.
            warped_left_mid = stitcher.stitch(frame_mid, frame_left,
                                              H_left_mid)
            warped_left_mid_right = stitcher.stitch(warped_left_mid,
                                                    frame_right, H_mid_right)
            warped_left_mid_right_cropped = crop_img(warped_left_mid_right)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.waitKey(0)
            points = tracker.tracking(warped_left_mid_right_cropped)
            # Draw the tracked points; tracker yields (row, col), while
            # cv2.circle expects (x, y), hence the index swap.
            for i in range(len(points)):
                cv2.circle(warped_left_mid_right_cropped,
                           (points[i][1], points[i][0]), 3, (0, 0, 255), -1)
            height, width = warped_left_mid_right_cropped.shape[:2]
            # Bug fix: same integer-division issue as scaled_size above.
            warped_left_mid_right_cropped = cv2.resize(
                warped_left_mid_right_cropped, (width // 2, height // 2))
            cv2.imshow('Objects', warped_left_mid_right_cropped)
            cv2.waitKey(1)
            # background = transformer.transform(points)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.imshow('Objects', background)
            # cv2.waitKey(30)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cap_left.release()
    cap_mid.release()
    cap_right.release()
from Stitcher import Stitcher
import cv2

# Load the two input tiles (B is the left image, A the right one).
imageB = cv2.imread("image/left_03.jpg")
imageA = cv2.imread("image/right_03.jpg")

# Resize both tiles to 500x500 with bicubic interpolation before matching.
imgA = cv2.resize(imageA, (500, 500), interpolation=cv2.INTER_CUBIC)
imgB = cv2.resize(imageB, (500, 500), interpolation=cv2.INTER_CUBIC)

# Build the panorama plus the keypoint-match visualisation.
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imgA, imgB], showMatches=True)

# Display the originals, the match visualisation, and the stitched result.
for window_title, window_image in (("Image A", imageA),
                                   ("Image B", imageB),
                                   ("Keypoint Matches", vis),
                                   ("Result", result)):
    cv2.imshow(window_title, window_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
mypath = 'images'

# Collect the sorted list of image paths in the folder.
# NOTE(review): listdir/isfile/join are used but not imported in this
# snippet — presumably `from os import listdir` / `from os.path import
# isfile, join` elsewhere in the file; verify.
files = [join(mypath, f) for f in listdir(mypath) if isfile(join(mypath, f))]
files = sorted(files)
# print(files)

stitcher = Stitcher()

# Start from the last image and stitch the earlier ones onto it in turn.
j = files[-1]
im1 = cv2.imread(j)
# cv2.imshow('i', im1)
# Bug fix: `print i` and `xrange` are Python-2-only; use print() and
# range() so the script runs on both Python 2 and 3.
for i in reversed(range(len(files) - 1)):
    print(i)
    im2 = cv2.imread(files[i])
    # cv2.imshow('n', im2)
    result = stitcher.stitch(im1, im2)
    im1 = result

# Show the accumulated panorama in a resizable window.
cv2.namedWindow('m', cv2.WINDOW_NORMAL)
cv2.resizeWindow('m', 5000, 500)
cv2.imshow('m', im1)
cv2.waitKey(0)