def one_stitch(queue):
    stitcher = Stitcher()
    while not queue.empty():
        filename, images = queue.get()
        print(
            f'Processing video {queue.maxsize - queue.qsize()} of {queue.maxsize}, filename {filename}',
            flush=True)
        result = stitcher.multistitch(images, manual=False, os="win")
        cv2.imwrite(filename, result)
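# A minimal sketch (not part of the original script) of how one_stitch might
# be driven: fill a queue with (output_filename, frames) jobs and hand it to
# a couple of worker threads. The input/output paths and the frame count are
# hypothetical placeholders.
import queue
import threading

from video_reader import frame_generator

jobs = [("in/clip1.mp4", "out/clip1.png"),
        ("in/clip2.mp4", "out/clip2.png")]
q = queue.Queue(maxsize=len(jobs))
for infile, outfile in jobs:
    q.put((outfile, frame_generator(infile, frames=10)))

workers = [threading.Thread(target=one_stitch, args=(q,)) for _ in range(2)]
for w in workers:
    w.start()
for w in workers:
    w.join()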
def partialform(first, second):
    imageA = cv2.imread(first)
    imageB = cv2.imread(second)
    imageA = imutils.resize(imageA, width=800)
    imageB = imutils.resize(imageB, width=800)
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
    displayImg(imageA, True)
    displayImg(imageB, True)
    displayImg(vis, True)
    displayImg(result, True)
    # write the panorama next to the first input image
    cv2.imwrite(os.path.join(os.path.dirname(first), 'stitched_.png'), result)
def originFunc():
    # initialize the video streams and allow them to warm up
    print("[INFO] starting cameras...")
    leftStream = VideoStream(src=1).start()
    rightStream = VideoStream(src=0).start()
    time.sleep(2.0)

    # initialize the image stitcher and the total number of frames read
    stitcher = Stitcher()
    total = 0

    # loop over frames from the video streams
    while True:
        # grab the frames from their respective video streams
        left = leftStream.read()
        right = rightStream.read()

        # resize the frames
        left = imutils.resize(left, width=400)
        right = imutils.resize(right, width=400)

        # stitch the frames together to form the panorama
        # IMPORTANT: you might have to change this line of code
        # depending on how your cameras are oriented; frames
        # should be supplied in left-to-right order
        result = stitcher.stitch([left, right])

        # no homography could be computed
        if result is None:
            print("[INFO] homography could not be computed")
            break

        # show the output images
        cv2.imshow("Result", result)
        cv2.imshow("Left Frame", left)
        cv2.imshow("Right Frame", right)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    print("[INFO] cleaning up...")
    cv2.destroyAllWindows()
    leftStream.stop()
    rightStream.stop()
from panorama import Stitcher
import cv2

# im1, im2, im3, res, vis = s.stitchAll('test_images/halfdome-00.png',
#     'test_images/halfdome-01.png', 'test_images/halfdome-02.png')
# s.show(im1, im2, im3, res, vis)

stitcher = Stitcher()

perspective = [
    'test_images/halfdome-00.png',
    'test_images/halfdome-01.png',
    'test_images/halfdome-02.png',
    'test_images/halfdome-03.png',
    'test_images/halfdome-04.png',
    'test_images/halfdome-05.png'
]
perspectiveReverse = [
    'test_images/halfdome-05.png',
    'test_images/halfdome-04.png',
    'test_images/halfdome-03.png',
    'test_images/halfdome-02.png',
    'test_images/halfdome-01.png',
    'test_images/halfdome-00.png'
]
affine = [
    'test_images/t1.jpg',
    'test_images/t2.jpg',
    'test_images/t3.jpg',
    'test_images/t4.jpg',
    'test_images/t5.jpg',
    'test_images/t6.jpg',
    'test_images/t7.jpg',
    'test_images/t8.jpg',
    'test_images/t9.jpg',
    'test_images/t10.jpg'
]
diagonalLeftRight = [
    'test_images/b0.jpg',
    'test_images/b1.jpg',
    'test_images/b2.jpg',
    'test_images/b3.jpg'
]
diagonalRightLeft = [
    'test_images/b3.jpg',
    'test_images/b2.jpg',
    'test_images/b1.jpg',
    'test_images/b0.jpg'
]
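# A minimal sketch (not from the original file) of how one of the lists above
# might be folded into a single panorama, assuming the two-image stitch() call
# form used by the other scripts in this collection; the output filename is a
# hypothetical placeholder.
def stitch_sequence(paths):
    result = cv2.imread(paths[0])
    for path in paths[1:]:
        nxt = cv2.imread(path)
        result = stitcher.stitch([result, nxt])
        if result is None:
            raise RuntimeError("homography could not be computed for " + path)
    return result

cv2.imwrite("halfdome_panorama.png", stitch_sequence(perspective))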
from panorama import Stitcher
from imutils.video import VideoStream
import datetime
import imutils
import time
import cv2

# initialize the video streams and allow them to warm up
print("[INFO] starting cameras...")
leftStream = VideoStream(src=0).start()
rightStream = VideoStream(src=1).start()
time.sleep(2.0)

# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
total = 0

# loop over frames from the video streams
while True:
    # grab the frames from their respective video streams
    left = leftStream.read()
    right = rightStream.read()

    # resize the frames (note: imutils.resize preserves the aspect ratio,
    # so the height argument is ignored when a width is supplied)
    left = imutils.resize(left, width=640, height=480)
    right = imutils.resize(right, width=640, height=480)

    # stitch the frames together to form the panorama
    # IMPORTANT: you might have to change this line of code
    # depending on how your cameras are oriented; frames
    # should be supplied in left-to-right order
    result = stitcher.stitch([left, right])

    # no homography could be computed
    if result is None:
        print("[INFO] homography could not be computed")
        break

    # show the output images
    cv2.imshow("Result", result)
    cv2.imshow("Left Frame", left)
    cv2.imshow("Right Frame", right)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
leftStream.stop()
rightStream.stop()
import tkinter as tk
from tkinter import filedialog

from panorama import Stitcher
from video_reader import frame_generator
import cv2

root = tk.Tk()
root.title("HyperMapStitcher")
root.wm_iconbitmap("hms.ico")

stitcher = Stitcher()
frame = tk.Frame(root, width=300, height=250, padx=15, pady=15)

input_filename = ""
input_hint_label = tk.Label(frame, text="Must specify an input file")
output_filename = ""
output_hint_label = tk.Label(frame, text="Must specify an output file")
manual_checkbox = tk.IntVar()


def input_file():
    global input_filename
    input_filename = filedialog.askopenfilename(
        initialdir=".",
        title="Select input video",
        filetypes=(("video files", "*.mp4 *.avi"), ("all files", "*.*")))


def output_file():
    global output_filename
    # assumed: a save dialog symmetric to input_file()
    output_filename = filedialog.asksaveasfilename(
        initialdir=".",
        title="Select output image",
        filetypes=(("PNG image", "*.png"), ("all files", "*.*")))
        count = count + 1
    return count, nameArray


# function to count files
fileCount, names = countFiles()

# take 4 files at a time and stitch them
for i in range(0, fileCount, 4):
    imageA = cv2.imread(names[i])
    imageB = cv2.imread(names[i + 1])
    imageC = cv2.imread(names[i + 2])
    imageD = cv2.imread(names[i + 3])

    # stitch the first pair of images together to create a panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    # stitch the second pair of images together to create a panorama
    stitcher = Stitcher()
    (result2, vis2) = stitcher.stitch([imageC, imageD], showMatches=True)

    imageE = result
    imageF = result2

    # stitch the two partial panoramas together
    stitcher = Stitcher()
    (result3, vis3) = stitcher.stitch([imageE, imageF], showMatches=True)

    # rename the file with date and time stamp
    anomalyId = str(uuid.uuid4())
    notificationId = str(uuid.uuid4())
# import the necessary packages
from panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="path to the first image")
ap.add_argument("-s", "--second", required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageB = cv2.imread("bryce_right_01.png")
imageA = cv2.imread("bryce_right_02.png")
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# preview the inputs
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.waitKey(0)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
angle_list = []
score_list = []
start_time = time.time()
for img_idx in range(img_cnt):
    # img_idx = 8
    fix_img_name = os.path.join(data_in_dir, sub_dir[ihc_idx], "HE",
                                "level" + str(lv_idx), str(img_idx) + ".jpg")
    float_img_name = os.path.join(data_in_dir, sub_dir[ihc_idx],
                                  sub_dir_IHC[ihc_idx],
                                  "level" + str(lv_idx), str(img_idx) + ".jpg")
    print(float_img_name)
    print(fix_img_name)
    Img_fix_col = Image.open(fix_img_name)
    Img_float_col = Image.open(float_img_name)
    # Img_fix = sp.misc.fromimage(Img_fix_col, True)  # flatten=True converts to graylevel
    # Img_float = sp.misc.fromimage(Img_float_col, True)
    Img_fix = np.array(Img_fix_col)
    Img_float = np.array(Img_float_col)
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([Img_fix, Img_float], showMatches=True)
    # cv2.imwrite("ImageA.jpg", cv2.cvtColor(Img_fix, cv2.COLOR_BGR2RGB))
    # cv2.imwrite("ImageB.jpg", cv2.cvtColor(Img_float, cv2.COLOR_BGR2RGB))
    # cv2.imwrite("matches.jpg", cv2.cvtColor(vis, cv2.COLOR_BGR2RGB))
    # cv2.imwrite("Result.jpg", cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
    # print("OK")
    '''
    if the rotation angle is confirmed to be 0, we can use the method below
    to distill the offset.
    '''
    # returnMatchCoord already applies the RANSAC algorithm to match key points
    matches, ptsA, ptsB, H, status = stitcher.returnMatchCoord(
        [Img_fix, Img_float])
    matched_ptsA = []
    matched_ptsB = []
    slops = []
imageD = cv2.imread(args["fourth"])

hA, wA = imageA.shape[:2]  # height and width of each image
hB, wB = imageB.shape[:2]
hC, wC = imageC.shape[:2]
hD, wD = imageD.shape[:2]

# resize each image to a width of 400 while preserving its aspect ratio
imageA = cv2.resize(imageA, (400, int(400 * hA / wA)),
                    interpolation=cv2.INTER_CUBIC)
imageB = cv2.resize(imageB, (400, int(400 * hB / wB)),
                    interpolation=cv2.INTER_CUBIC)
imageC = cv2.resize(imageC, (400, int(400 * hC / wC)),
                    interpolation=cv2.INTER_CUBIC)
imageD = cv2.resize(imageD, (400, int(400 * hD / wD)),
                    interpolation=cv2.INTER_CUBIC)

images = [imageB, imageA, imageD, imageC]  # order of the original images => CDAB
order_set = []  # to correct the order of each image

# stitch the images together to create a panorama
stitcher = Stitcher()  # declaration
order_set = stitcher.detectKeyPoint(images)  # returns the correct order of the images
result = stitcher.stitch(images[order_set[0]],
                         images[order_set[1]])  # group the images in pairs and stitch them
result2 = stitcher.stitch(images[order_set[2]], images[order_set[3]])
result3 = stitcher.stitch(result, result2)

cv2.imshow("Result", result)    # first stitching
cv2.imshow("Result2", result2)  # second stitching
cv2.imshow("Result3", result3)  # stitching of the first and second results
def raw_reg(fixed_img, float_img, init_offset, down_rate, lv, method="FFT"):
    if type(fixed_img) == Image.Image:
        # flatten=True converts the images into graylevel images
        Img_fix = sp.misc.fromimage(fixed_img, True)
        Img_float = sp.misc.fromimage(float_img, True)
    else:
        Img_fix = fixed_img
        Img_float = float_img

    if method == "FFT":
        con_s = dict(angle=[0, 0], scale=[1, 1])
        sim = ird.similarity(Img_fix, Img_float, constraints=con_s)
        tvec = sim["tvec"].round(4)
        score = sim["success"]
        offset = [tvec[1], tvec[0]]
    elif method == "ECC":
        warp_mode = cv2.MOTION_TRANSLATION
        warp_matrix = np.eye(2, 3, dtype=np.float32)
        number_of_iterations = 500  # specify the number of iterations
        # threshold of the increment in the correlation coefficient
        # between two iterations
        termination_eps = 1e-8
        # define the termination criteria
        criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,
                    number_of_iterations, termination_eps)
        try:
            (score, warp_matrix) = cv2.findTransformECC(Img_fix, Img_float,
                                                        warp_matrix,
                                                        warp_mode, criteria)
            offset = [warp_matrix[0, 2], warp_matrix[1, 2]]
        except cv2.error:
            logging.warning("Unaligned patch")
            offset = [0, 0]
            score = 0
    elif method == "SIFT":
        stitcher = Stitcher()
        Img_fix = np.array(fixed_img)
        Img_float = np.array(float_img)
        # returnMatchCoord already applies the RANSAC algorithm to match key points
        matches, ptsA, ptsB, H, status = stitcher.returnMatchCoord(
            [Img_fix, Img_float])
        matched_ptsA = []
        matched_ptsB = []
        slops = []
        offsets = []
        for m_idx, m in enumerate(ptsA):
            if status[m_idx] == 1:
                matched_ptsA.append(m)
                matched_ptsB.append(ptsB[m_idx])
                if (ptsB[m_idx][0] - m[0]) == 0:
                    s = 999999
                else:
                    s = (ptsB[m_idx][1] - m[1]) / (ptsB[m_idx][0] - m[0])
                offsetY = ptsB[m_idx][1] - m[1]
                offsetX = ptsB[m_idx][0] - m[0]
                slops.append(s)
                offsets.append([offsetX, offsetY])
        offset = np.mean(offsets, 0)  # use the mean offset as the offset
        score = np.mean(np.std(offsets, 0))  # use the std as the score
    elif method == "SIFT_ENH":
        stitcher = Stitcher()
        Img_fix = np.array(fixed_img)
        Img_float = np.array(float_img)
        # returnMatchCoord already applies the RANSAC algorithm to match key points
        matches, ptsA, ptsB, H, status = stitcher.returnMatchCoord(
            [Img_fix, Img_float])
        matched_ptsA = []
        matched_ptsB = []
        slops = []
        offsets = []
        for m_idx, m in enumerate(ptsA):
            if status[m_idx] == 1:
                matched_ptsA.append(m)
                matched_ptsB.append(ptsB[m_idx])
                if (ptsB[m_idx][0] - m[0]) == 0:
                    s = 999999
                else:
                    s = (ptsB[m_idx][1] - m[1]) / (ptsB[m_idx][0] - m[0])
                offsetY = ptsB[m_idx][1] - m[1]
                offsetX = ptsB[m_idx][0] - m[0]
                slops.append(s)
                offsets.append([offsetX, offsetY])
        # use a more elaborate way to distill the matched key points: keep
        # only the matches whose slope falls into the most populated
        # histogram bin
        if len(slops) > 0:
            max_slop = np.amax(slops)
            min_slop = np.amin(slops)
            bins = int(len(slops) / 2)
            slop_hist = histogram(slops, min_slop, max_slop, bins)
            step = (max_slop - min_slop) / bins
            idx_max_count = slop_hist.index(max(slop_hist))
            if type(idx_max_count) == list:
                idx_max_count = idx_max_count[0]
            low_range = min_slop + idx_max_count * step
            high_range = min_slop + (idx_max_count + 1) * step
            idx_s_list = []
            for idx_s, s in enumerate(slops):
                if low_range <= s <= high_range:
                    idx_s_list.append(idx_s)
            offset = np.mean([offsets[i] for i in idx_s_list], 0)
            score = 1 / (np.mean(np.std([offsets[i] for i in idx_s_list], 0))
                         + 0.00000001)
        else:
            logging.warning("Unable to match this patch")
            return [0, 0], 0
    else:
        return [0, 0], 0

    offset = [
        offset[0] + init_offset[0] / down_rate[lv],
        offset[1] + init_offset[1] / down_rate[lv]
    ]
    return offset, score
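# Hypothetical usage of raw_reg (the patch paths and the down_rate values
# are illustrative assumptions): register a floating patch against a fixed
# patch with the FFT method at pyramid level 0.
from PIL import Image

fixed_patch = Image.open("patches/fixed_0.jpg")
float_patch = Image.open("patches/float_0.jpg")
offset, score = raw_reg(fixed_patch, float_patch,
                        init_offset=[0, 0],
                        down_rate=[1, 2, 4],  # assumed per-level downsample factors
                        lv=0,
                        method="FFT")
print("offset:", offset, "score:", score)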
        count = count + 1
    return count, nameArray


fileCount, names = countFiles()

for i in range(0, fileCount, 4):
    imageA = cv2.imread(names[i])
    imageB = cv2.imread(names[i + 1])
    imageC = cv2.imread(names[i + 2])
    imageD = cv2.imread(names[i + 3])

    # stitch the first pair of images together to create a panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    # stitch the second pair of images together to create a panorama
    stitcher = Stitcher()
    (result2, vis2) = stitcher.stitch([imageC, imageD], showMatches=True)

    imageE = result
    imageF = result2

    # merging the two results
    height, width, channels = imageE.shape
    # create a blank image with numpy (all zeros) twice as wide as imageE
    blank_image = np.zeros((height, width * 2, channels), np.uint8)
# import the necessary packages
from panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="path to the first image")
ap.add_argument("-s", "--second", required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
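# Typical invocation of this driver script (the image paths below are
# placeholders):
#   python stitch.py --first images/left.png --second images/right.png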
# leftStream = VideoStream(src=2).start()
# rightStream = VideoStream(src=1).start()
# time.sleep(2.0)

leftVideo = cv2.VideoCapture(0)
print("left started")
rightVideo = cv2.VideoCapture(1)
print("right started")

# set the capture resolution to 320x240
# (property 3 is CAP_PROP_FRAME_WIDTH, 4 is CAP_PROP_FRAME_HEIGHT)
leftVideo.set(3, 320)
leftVideo.set(4, 240)
rightVideo.set(3, 320)
rightVideo.set(4, 240)

print("before init")
# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
total = 0
print("after init")

# loop over frames from the video streams
while True:
    print("...")
    # grab the frames from their respective video streams
    # left = leftStream.read()
    # right = rightStream.read()
    # print("left: ", left)
    # print("right: ", right)

    # resize the frames
    # left = imutils.resize(left, width=400)
    # right = imutils.resize(right, width=400)
import cv2
import imutils
from panorama import Stitcher
import numpy as np


def sharpen(img):
    # 3x3 sharpening kernel: boost the center pixel, subtract the neighbors
    kern = np.array([[-1, -1, -1],
                     [-1, 9, -1],
                     [-1, -1, -1]])
    return cv2.filter2D(img, -1, kern)


a = cv2.imread("frames/002.jpg")
b = cv2.imread("frames/003.jpg")
a = sharpen(a)
b = sharpen(b)

stitcher = Stitcher()
(result, vis) = stitcher.stitch([a, b], showMatches=True)

cv2.imshow("a", vis)
cv2.waitKey(0)
cv2.imshow("a", result)
cv2.waitKey(0)
cv2.imwrite("vis.jpg", vis)
cv2.imwrite("res.jpg", result)
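# An alternative preprocessing step (not in the original script): unsharp
# masking via a Gaussian blur, which sharpens while tending to amplify noise
# less than the fixed 3x3 kernel above. The sigma and amount values are
# illustrative.
def unsharp(img, sigma=2.0, amount=1.0):
    blurred = cv2.GaussianBlur(img, (0, 0), sigma)
    # out = (1 + amount) * img - amount * blurred
    return cv2.addWeighted(img, 1.0 + amount, blurred, -amount, 0)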
def main(args):
    Pano = 1
    rospy.init_node('panorama', anonymous=True)
    ic = image_converter()
    stitcher = Stitcher()
    M_left_center = None
    M_right_center = None
    while True:
        try:
            # if ic.left_image is not None and ic.center_image is not None and ic.right_image is not None and M_left_center is None and M_center_right is None:
            #     print("Got here!")
            #     (M_left_center, M_center_right) = stitcher.transformationsCalculator([ic.left_image, ic.center_image, ic.right_image], ratio=0.8, reprojThresh=4.0) and ic.left_image_seg is not None and ic.center_image_seg is not None and ic.right_image_seg is not None
            if ic.left_image is not None and ic.center_image is not None and ic.right_image is not None:
                if M_left_center is None and M_right_center is None:
                    (M_left_center, M_right_center) = stitcher.transformationsCalculator(
                        [ic.left_image, ic.center_image, ic.right_image],
                        ratio=0.8, reprojThresh=4.0)

            if ic.left_image is not None and ic.center_image is not None and ic.right_image is not None:
                if Pano == 0:
                    if M_left_center is not None and M_right_center is not None:
                        result = stitcher.stitch(
                            [ic.left_image, ic.center_image, ic.right_image],
                            M_left_center, M_right_center,
                            ratio=0.8, reprojThresh=4.0)
                        if result is None:
                            print("There was an error in the stitching procedure")
                        else:
                            pub_panorama(result)
                            ic.clean_images()
                    else:
                        print("Transformation matrices have not been computed")
                else:
                    continue

            if ic.left_image_seg is not None and ic.center_image_seg is not None and ic.right_image_seg is not None:
                if Pano == 1:
                    if M_left_center is not None and M_right_center is not None:
                        result = stitcher.stitch(
                            [ic.left_image_seg, ic.center_image_seg,
                             ic.right_image_seg],
                            M_left_center, M_right_center,
                            ratio=0.8, reprojThresh=4.0)
                        if result is None:
                            print("There was an error in the stitching procedure")
                        else:
                            pub_panorama_seg(result)
                            ic.clean_images()
                    else:
                        print("Transformation matrices have not been computed")
                else:
                    continue
        except KeyboardInterrupt:
            print("Shutting down")
from video_reader import frame_generator
from panorama import Stitcher
import cv2

infile = "in/hell2.mkv"
outfile = "out/hell2.png"

stitcher = Stitcher()
images = frame_generator(infile, frames=10, width=600,
                         image_crop=(186, 694, 5, 1905))
result = stitcher.multistitch(images, manual=False, os="win")
cv2.imwrite(outfile, result)
import imutils
import cv2
from panorama import Stitcher

imageA = cv2.imread("./img/first.jpg")
imageB = cv2.imread("./img/second.jpg")
imageC = cv2.imread("./img/third.jpg")
imageD = cv2.imread("./img/fourth.jpg")
imageE = cv2.imread("./img/fifth.jpg")

imageA = imutils.resize(imageA, width=200)
imageB = imutils.resize(imageB, width=200)
imageC = imutils.resize(imageC, width=200)
imageD = imutils.resize(imageD, width=200)
imageE = imutils.resize(imageE, width=200)

images = [imageA, imageB, imageC, imageD, imageE]
# note: this relies on a Stitcher constructor that accepts the image list
# directly, unlike the two-step Stitcher()/stitch() API used elsewhere in
# this collection
Stitcher(images, showMatches=False)
output_file = "stitched.jpg"

fp = open(input_file_list, 'r')
_images = [each.rstrip('\r\n') for each in fp.readlines()]

begin_time = datetime.datetime.now()
for x in _images:
    print(x)

try:
    images = []
    for _image in _images:
        images.append(load_image(_image))

    # stitch the first two images
    stitcher = Stitcher()
    result, kps, features, deg = stitcher.stitch([images[0], images[1]],
                                                 firstTime=True)

    # stitch the running result with the image at idx until all images
    # in the array have been stitched
    for idx in range(2, len(images)):
        stitcher = Stitcher()
        result, kps, features, deg = stitcher.stitch([result, images[idx]],
                                                     firstTime=False,
                                                     l_ori_kps=kps,
                                                     l_features=features,
                                                     l_deg=deg)
    cv2.imwrite(output_file, result)
except Exception as e:
    print(e)
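# The input file list is assumed to be a plain text file with one image path
# per line (the paths below are hypothetical), e.g.:
#   images/pano_00.jpg
#   images/pano_01.jpg
#   images/pano_02.jpg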
    fix_img_name = os.path.join(data_in_dir, sub_dir[ihc_idx], "HE",
                                "level" + str(lv_idx), str(img_idx) + ".jpg")
    float_img_name = os.path.join(data_in_dir, sub_dir[ihc_idx],
                                  sub_dir_IHC[ihc_idx],
                                  "level" + str(lv_idx), str(img_idx) + ".jpg")
    print(float_img_name)
    print(fix_img_name)
    Img_fix_col = Image.open(fix_img_name)
    Img_float_col = Image.open(float_img_name)
    # Img_fix = sp.misc.fromimage(Img_fix_col, True)  # flatten=True converts to graylevel
    # Img_float = sp.misc.fromimage(Img_float_col, True)
    Img_fix = np.array(Img_fix_col)
    Img_float = np.array(Img_float_col)
    stitcher = Stitcher()
    # (result, vis) = stitcher.stitch([Img_fix, Img_float], showMatches=True)
    # cv2.imwrite("ImageA.jpg", cv2.cvtColor(Img_fix, cv2.COLOR_BGR2RGB))
    # cv2.imwrite("ImageB.jpg", cv2.cvtColor(Img_float, cv2.COLOR_BGR2RGB))
    # cv2.imwrite("matches.jpg", cv2.cvtColor(vis, cv2.COLOR_BGR2RGB))
    # cv2.imwrite("Result.jpg", cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
    # print("OK")
    '''
    if the rotation angle is confirmed to be 0, we can use the method below
    to distill the offset.
    '''
    # returnMatchCoord already applies the RANSAC algorithm to match key points
    matches, ptsA, ptsB, H, status = stitcher.returnMatchCoord(
        [Img_fix, Img_float])
    matched_ptsA = []
    matched_ptsB = []
# USAGE
# python stitch.py --first images/sedona_left_01.png \
#     --second images/sedona_right_01.png
# python stitch.py --first E:\BetterLightning\ISS045-E-3622.jpg --second E:\BetterLightning\ISS045-E-3622.jpg

# import the necessary packages
from panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="path to the first image")
ap.add_argument("-s", "--second", required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()  # this is in the panorama.py file
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
import cv2
import numpy as np
from panorama import Stitcher
import imutils

# read the images
kep1 = cv2.imread("1.jpg")
kep2 = cv2.imread("2.jpg")

# resize
kep1 = imutils.resize(kep1, height=400)
kep2 = imutils.resize(kep2, height=400)

# stitch together
stitcher = Stitcher()
(panorama, kpontok) = stitcher.stitch([kep1, kep2], showMatches=True)
panorama = imutils.resize(panorama, height=400)

# display and save
cv2.imshow("első kép", kep1)
cv2.imshow("második kép", kep2)
cv2.imshow("Kulcspontok", kpontok)
cv2.imshow("Panoramakep", panorama)
cv2.imwrite("Panoramakep.png", panorama)
cv2.waitKey(0)
# import the necessary packages
from panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="path to the first image")
ap.add_argument("-s", "--second", required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()
result = stitcher.stitch([imageA, imageB])

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Result", result)
cv2.waitKey(0)
ap.add_argument("-f", "--frames", required=False,
                help="take every Nth frame, where N is this parameter; default is 10",
                default=10)
ap.add_argument("-m", "--manual", required=False,
                help="manual fix mode; defaults to False",
                default=False)
ap.add_argument("-o", "--os", required=False,
                help="OS; can be win or linux; determines the keycodes for manual mode",
                default="linux")
args = vars(ap.parse_args())

stitcher = Stitcher()
images = frame_generator(args["video"], frames=int(args["frames"]))
result = stitcher.multistitch(images, manual=args["manual"], os=args["os"])

cv2.imshow("Result", result)
last_key = cv2.waitKeyEx(0)
# if Enter (keycode 13) was pressed, save the result
if last_key == 13:
    filepath = args["name"]
    if filepath[-4:] != ".png":
        filepath = filepath + ".png"
    cv2.imwrite(filepath, result)
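# Hypothetical invocation (the script name and the -v/-n flag spellings are
# assumptions; only --frames, --manual, and --os appear in the fragment above):
#   python stitch_video.py -v in/clip.mp4 -n out/panorama -f 10 -o win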
def open_stream(self):
    total_frame = 0
    # collecting images from the stream
    print('Starting the camera stream at: ', self.client_address)
    e1 = cv2.getTickCount()
    # fetch the stream images one by one
    try:
        myfont = pygame.font.SysFont("monospace", 15)
        screen = pygame.display.set_mode((200, 200), 0, 24)
        label = myfont.render("Press q or x to end\n the program.", 1,
                              (255, 255, 0))
        screen.blit(label, (0, 0))
        pygame.display.flip()
        cam = cv2.VideoCapture(0)  # assumed local camera index
        while self.corriendo_programa:
            # read the length of the image as a 32-bit unsigned int; if the
            # length is zero, quit the loop
            image_len = struct.unpack(
                '<L', self.connection.read(struct.calcsize('<L')))[0]
            if not image_len:
                print('Ended by client')
                break
            # construct a stream to hold the image data and read the image
            # data from the connection
            image_stream = io.BytesIO()
            image_stream.write(self.connection.read(image_len))
            image_stream.seek(0)
            jpg = image_stream.read()
            image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            image = cv2.rectangle(image, (0, 120), (318, 238),
                                  (30, 230, 30), 1)
            # image = cv2.flip(image, -1)
            # save the image
            cv2.imwrite('streamtest_img/frame{:>05}.jpg'.format(total_frame),
                        image)
            # show the image
            imageA = imutils.resize(image, width=400)
            # grab a frame from the local camera before resizing (resizing
            # the VideoCapture object directly would fail)
            ret, cam_frame = cam.read()
            imageB = imutils.resize(cam_frame, width=400)
            stitcher = Stitcher()
            (result, vis) = stitcher.stitch([imageA, imageB],
                                            showMatches=True)
            cv2.imshow('Computer Vision', result)
            total_frame += 1
            screen.blit(
                myfont.render(("Total Frames: " + str(total_frame)), 1,
                              (255, 255, 0), (0, 0, 0)), (60, 0))
            for event in pygame.event.get():
                if event.type == KEYDOWN:
                    key_input = pygame.key.get_pressed()
                    if key_input[pygame.K_x] or key_input[pygame.K_q]:
                        print("Stopping the stream")
                        self.corriendo_programa = False
                        break
        e2 = cv2.getTickCount()
        # compute the total streaming time
        time0 = (e2 - e1) / cv2.getTickFrequency()
        print("Streaming duration:", time0)
        print('Total frames: ', total_frame)
    finally:
        pygame.quit()
        self.connection.close()
        self.server_socket.close()
        cv2.destroyAllWindows()
        os.system("pause")
from panorama import Stitcher
import cv2
import numpy as np
import os

sequences = ['legumes', 'magasin', 'neige', 'parc', 'studio', 'visages']
stitcher = Stitcher()

try:
    os.mkdir('Resultats Stitch')
except OSError as error:
    print(error)

# load the images into a 2D list
for i, sequence in enumerate(sequences):
    index = 0
    try:
        os.mkdir('Resultats Stitch/' + sequence)
    except OSError as error:
        print(error)
    while True:
        path1 = ('SYS809_projet2021_sequences/' + sequence + 'A-'
                 + str(index).zfill(2) + '.jpg')
        path2 = ('SYS809_projet2021_sequences/' + sequence + 'A-'
                 + str(index + 1).zfill(2) + '.jpg')
        image1 = cv2.imread(path1)
        image2 = cv2.imread(path2)
        if image2 is not None:
            (result, vis) = stitcher.stitch([image1, image2],
                                            showMatches=True)