def main():
    """Continuously grab frames from three cameras, stitch them into a
    panorama, and display the result until the process is killed."""
    stitcher = Stitcher()
    # BUG FIX: the VideoCapture objects were re-created on every loop
    # iteration, leaking device handles and re-negotiating the camera each
    # frame. Open each camera exactly once, before the loop.
    leftCam = cv2.VideoCapture(2)
    rearCam = cv2.VideoCapture(1)
    rightCam = cv2.VideoCapture(0)
    while True:
        # read() returns (ok, frame); only the frame is used here.
        images = [cam.read()[1] for cam in (leftCam, rearCam, rightCam)]
        # Mirror each frame and normalize width so the stitcher sees
        # consistently sized input.
        images = [imutils.resize(cv2.flip(x, 1), width=400) for x in images]
        try:
            result = stitcher.stitch(images)
            cv2.imshow("Stitched", result)
        except RuntimeError:
            # Stitching can fail (e.g. insufficient overlap); wait and retry.
            cv2.waitKey(100)
            continue
        cv2.waitKey(100)
def main():
    """CLI entry point: fetch map tiles for a lat/lng bounding box at a
    given zoom level, then stitch them into one big image.

    Required flags: --start, --end (comma-separated lat/lng pairs),
    --zoom (int), --out (output directory). Optional: --transit.
    """
    parser = argparse.ArgumentParser(
        description='A small utility that creates a big map.')
    parser.add_argument(
        '--start',
        required=True,
        help=
        'top-left point of the region of interest (comma-separated lat/lng pair)'
    )
    parser.add_argument(
        '--end',
        required=True,
        help=
        'bottom-right point of the region of interest (comma-separated lat/lng pair)'
    )
    parser.add_argument('--zoom', type=int, required=True, help='zoom level')
    parser.add_argument('--out', required=True, help='output directory')
    parser.add_argument('--transit',
                        help='enable transit layer',
                        action='store_true')
    args = parser.parse_args()

    # BUG FIX: map() returns a lazy, single-use iterator in Python 3;
    # materialize the coordinate pairs so they can be indexed and iterated
    # more than once downstream.
    start = tuple(map(float, args.start.split(',')))
    end = tuple(map(float, args.end.split(',')))

    tile_size_px = 500

    print('[bigmapmaker] Starting.')
    screenshotter = Screenshotter(start, end, args.zoom, args.out,
                                  args.transit, tile_size_px)
    screenshotter.fetch_tiles()
    print('[bigmapmaker] Done with fetching, moving on to stitching.')
    stitcher = Stitcher(args.out, tile_size_px)
    stitcher.stitch()
    print('[bigmapmaker] Done.')
def template(directory: str,
             in_file: str,
             out_file: str = None,
             passes: int = -1,
             logger=None):
    """Stitch *in_file* inside *directory* and report the outcome.

    Args:
        directory: Directory containing the input file.
        in_file: Name of the file to stitch.
        out_file: Output file name; defaults to 'out_' + in_file.
        passes: Number of stitching passes (-1 presumably means the
            stitcher's default — TODO confirm against Stitcher).
        logger: Optional logger; output falls back to print() when None.

    Raises:
        Exception: Any stitcher failure is logged and then re-raised.
    """
    OUT_PREFIX = 'out_'
    FEEDBACK = 'Finished stitching in {0}'
    ERROR = 'Error: {0}!'
    # Identity comparison is the idiomatic None check (PEP 8); `== None`
    # can be defeated by custom __eq__ implementations.
    if out_file is None:
        out_file = OUT_PREFIX + in_file
    try:
        Stitcher(directory, in_file, out_file, passes, logger)
    except Exception as e:
        if logger is None:
            print(ERROR.format(e))
        else:
            logger.error(e)
        raise
    else:
        # Success path kept out of the try body so only the stitch call
        # itself is guarded.
        msg = FEEDBACK.format(os.path.join(directory, out_file))
        if logger is None:
            print(msg)
        else:
            logger.info(msg)
    print()
    return
def process(self):
    """Main capture loop: read a frame from every external camera,
    optionally stitch the frames into a panorama, display everything,
    and release the cameras when 'q' is pressed."""
    # Construct the stitcher once. Previously a new Stitcher was built on
    # every loop iteration, paying its setup cost per frame in the hot loop.
    stitcher = Stitcher(self.debug)
    while True:
        start_time = time.time()
        # Capture frame-by-frame from each external camera.
        self.extFrames.clear()
        for cam in self.extCams:
            ret, frame = cam.read()
            frame = self.f.unwarp(frame)
            self.extFrames.append(frame)
        # Display the resulting frame(s).
        if self.stitch:
            pano = stitcher.stitch(self.extFrames)
            cv2.imshow("pano", pano)
        if self.debug:
            # Show each raw camera feed scaled down, plus its FOURCC code.
            for i in range(len(self.extCams)):
                cv2.imshow(
                    'frame' + str(i),
                    cv2.resize(self.extFrames[i], (480, 270),
                               interpolation=cv2.INTER_AREA))
            for cam in self.extCams:
                print(self.decode_fourcc(cam.get(cv2.CAP_PROP_FOURCC)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        fps = 1.0 / (time.time() - start_time)
        print("[INFO] approx. FPS: {:.2f}".format(fps))
    # Cleanly exit.
    for cam in self.extCams:
        cam.release()
    cv2.destroyAllWindows()
def stitchFolder(self, targetFolder):
    """Stitch every .jpg directly inside *targetFolder* into a single
    .tiff named after the folder, logging progress to a sibling file,
    then optionally queue the folder for archiving."""
    stitcherHandler = Stitcher()
    # Collect plain .jpg files; subdirectories are ignored. os.path.join
    # replaces the previous manual "/" concatenation for portability.
    filesToStitch = [
        os.path.join(targetFolder, f) for f in os.listdir(targetFolder)
        if isfile(join(targetFolder, f)) and f.endswith(".jpg")
    ]
    # Fewer than two images cannot be stitched.
    if len(filesToStitch) < 2:
        return
    folder = os.path.dirname(filesToStitch[0])
    parentName = os.path.split(folder)[1]
    outputFile = os.path.join(folder, parentName + ".tiff")
    logFile = os.path.join(folder, parentName + "_log.txt")
    filesToStitch.sort()
    print(filesToStitch)
    print(outputFile)
    print("Logging to: " + logFile)
    # NOTE: the old `if outputFile is None: exit()` check was dead code —
    # os.path.join always returns a string.
    stitcherHandler.stitchFileList(filesToStitch, outputFile, logFile,
                                   self.progressCallback,
                                   self.maskBox.IsChecked(), self.scalePath,
                                   self.verticalCore.IsChecked())
    if self.archiveImages.IsChecked():
        self.ArchiveQueue.put(targetFolder)
def __init__(self, path=None):
    """Initialize with an optional source *path*.

    When *path* is None, no Stitcher is created (one must be supplied
    later); otherwise a Stitcher for the path is built immediately.
    The quilt starts out unset either way.
    """
    self.path = path
    # Identity comparison is the idiomatic None check (PEP 8);
    # the original used `path == None`.
    if path is None:
        self.stitcher = None
    else:
        self.stitcher = Stitcher(self.path)
    self.quilt = None
def homography_matrices(video):
    """Compute a per-frame homography for *video*, anchored to keyframes.

    Pass 1: every 20th frame becomes a keyframe (identity matrix stored,
    prev_frame updated); other frames store their homography relative to
    the most recent keyframe.
    Pass 2: accumulates keyframe-to-keyframe homographies into reduced_H
    and left-multiplies it into every frame's matrix, chaining each frame
    back toward the first keyframe.

    NOTE(review): a fresh Stitcher() is constructed per find_homography
    call, and the module-level `stitcher` built first is never used —
    looks wasteful but is preserved as-is.
    """
    stitcher = Stitcher()
    initialized = False
    for i, frame in enumerate(video):
        # Progress indicator on one terminal line.
        stdout.write('{}\r'.format(i))
        stdout.flush()
        if initialized is False:
            # First frame: seed the matrix stack with identity.
            initialized = True
            homography_matrices = np.array([np.identity(3)])
            prev_frame = frame
            continue
        if i % 20 == 0:
            # Keyframe: store identity and re-anchor prev_frame here.
            homography_matrices = np.append(homography_matrices,
                                            [np.identity(3)],
                                            axis=0)
            prev_frame = frame
            continue
        # Regular frame: homography relative to the last keyframe.
        H = Stitcher().find_homography(frame, prev_frame)
        homography_matrices = np.append(homography_matrices, [H], axis=0)
    initialized = False
    for i, frame in enumerate(video):
        stdout.write('{}\r'.format(i))
        stdout.flush()
        if initialized is False:
            # Start the accumulated keyframe chain at identity.
            initialized = True
            reduced_H = np.identity(3)
            prev_frame = frame
            continue
        if i % 20 == 0:
            # Fold the new keyframe-to-keyframe homography into the chain.
            H = Stitcher().find_homography(frame, prev_frame)
            reduced_H = reduced_H @ H
            prev_frame = frame
            # H = reduced_H @ H
        # Chain every frame's matrix back through the accumulated keyframes.
        homography_matrices[i] = reduced_H @ homography_matrices[i]
    return homography_matrices
def main(_):
    """Run model inference over a picpac image database and save visualized
    results into a Gallery at FLAGS.out.

    Large images (beyond FLAGS.max_size) are skipped; when FLAGS.patch is
    set, each image is split into patches, scored, and re-stitched.
    """
    assert FLAGS.out
    assert FLAGS.db and os.path.exists(FLAGS.db)
    # Streaming/perturbation configuration; note perturb=False below
    # disables the pert_* entries at runtime — they are kept for reference.
    picpac_config = dict(
        seed=2016,
        #loop=True,
        shuffle=True,
        reshuffle=True,
        #resize_width=256,
        #resize_height=256,
        round_div=FLAGS.stride,
        batch=1,
        split=1,
        split_fold=0,
        annotate='json',
        channels=FLAGS.channels,
        stratify=True,
        pert_color1=20,
        pert_angle=20,
        pert_min_scale=0.8,
        pert_max_scale=1.2,
        #pad=False,
        pert_hflip=True,
        pert_vflip=True,
        channel_first=False  # this is tensorflow specific
        # Caffe's dimension order is different.
    )
    stream = picpac.ImageStream(FLAGS.db,
                                perturb=False,
                                loop=False,
                                **picpac_config)
    gal = Gallery(FLAGS.out)
    cc = 0  # count of images actually processed
    with Model(FLAGS.model, name=FLAGS.name, prob=True) as model:
        for images, _, _ in stream:
            #images *= 600.0/1500
            #images -= 800
            #images *= 3000 /(2000-800)
            _, H, W, _ = images.shape
            # Skip images larger than the configured bound.
            if FLAGS.max_size:
                if max(H, W) > FLAGS.max_size:
                    continue
            if FLAGS.patch:
                # Patch mode: score per-patch and stitch probabilities back.
                stch = Stitcher(images, FLAGS.patch)
                probs = stch.stitch(model.apply(stch.split()))
            else:
                probs = model.apply(images)
            cc += 1
            save(gal.next(), images, probs)
            # Stop early once the requested number of images is reached.
            if FLAGS.max and cc >= FLAGS.max:
                break
    gal.flush()
    pass
def __init__(self, bot):
    """Wire up the cog: database access, helper objects, and the
    top.gg (DBL) client with its vote webhook."""
    self.bot = bot
    # Database and helper objects.
    self.sql_query = SQLQuery(initialize_connection())
    self.manager = Manager()
    self.stitcher = Stitcher()
    # Tokens are read via config() — presumably from the environment.
    self.dbl_token = config('DBL_TOKEN')
    self.webhook_auth_token = config('ALICE_WEBHOOK_AUTH_TOKEN')
    # The DBL client autoposts stats and listens for vote webhooks on
    # the platform-assigned port (default 8000).
    webhook_port = environ.get("PORT", 8000)
    self.dblpy = dbl.DBLClient(
        self.bot,
        self.dbl_token,
        autopost=True,
        webhook_path='/dblwebhook',
        webhook_auth=self.webhook_auth_token,
        webhook_port=webhook_port,
    )
def startNoah(self, path):
    """Stitch the document at *path* and display the first slide of the
    resulting quilt along with its (cleaned-up) script text."""
    self.stitcher = Stitcher(path)
    self.stitcher.stitch()
    self.pdfifier = PDFifier()
    self.quilt = self.stitcher.get_quilt()
    self.image_num = 0
    # Show the first quilt entry: its slide image and its script text.
    first_entry = self.quilt[0]
    pixmap = QPixmap(first_entry.slide_path)
    # fix_text returns an iterable of lines — joined for the text widget.
    script_lines = self.fix_text(first_entry.script)
    self.slideImage.setPixmap(pixmap)
    self.textArea.setText('\n'.join(script_lines))
    self.repaint()
def stitch(img1, img2, sigma=2.0, levels=None, flag_fast=False):
    """Stitch two images into a panorama.

    Args:
        img1, img2: Input images (H x W x C arrays).
        sigma: Blending parameter forwarded to the stitcher.
        levels: Pyramid level count forwarded to the stitcher (None = default).
        flag_fast: When True, downscale inputs wider than 400 px
            (preserving aspect ratio) to trade quality for speed.

    Returns:
        The panorama produced by Stitcher.stitch.
    """
    if flag_fast:
        # BUG FIX: `width` was referenced but never defined, so any input
        # wider than 400 px raised NameError. Bind it to the same 400 px
        # threshold used by the size checks.
        width = 400
        if img1.shape[1] > width:
            img1 = cv2.resize(
                img1,
                (width, int(img1.shape[0] * float(width) / img1.shape[1])))
        if img2.shape[1] > width:
            img2 = cv2.resize(
                img2,
                (width, int(img2.shape[0] * float(width) / img2.shape[1])))
    # Stitch the images together to create a panorama.
    stitcher = Stitcher()
    return stitcher.stitch([img1, img2], sigma=sigma, levels=levels)
def write(self, name='fullfile.csv'):
    """Import every available data source and write the combined CSV.

    Each import is deliberately best-effort: a failure in one source is
    reported and the remaining sources are still attempted.

    Args:
        name: Output CSV file name.
    """
    stitch = Stitcher(self.fpath)
    # Number of VF files to import — presumably excludes a header/sentinel
    # entry in self.successful; TODO confirm.
    files = len(self.successful) - 1
    try:
        stitch.import_vf(files)
    except Exception as e:
        print("VF files failed. Reason: {}".format(e))
    try:
        stitch.import_franklin()
    # BUG FIX: narrowed from a bare `except:` which also swallowed
    # KeyboardInterrupt/SystemExit; best-effort behavior is unchanged.
    except Exception:
        print("Whoops, no franklin.")
    try:
        stitch.import_hamilton()
    except Exception as e:
        print("whoops, no Hamilton: {}".format(e))
    stitch.write(fname=name)
def stitch(self):
    """Start stitching self.fileList on a background thread.

    The output name is derived from the files' parent folder plus the
    input count; the resulting path is recorded in self.finishedFiles.
    """
    handler = Stitcher()
    print(self.fileList)
    print(self.outputFile)
    if self.outputFile is None:
        exit()
    source_dir = os.path.dirname(self.fileList[0])
    parent_name = os.path.split(source_dir)[1]
    output_file = os.path.join(
        source_dir, parent_name + str(len(self.fileList)) + ".tiff")
    self.finishedFiles = [output_file]
    # Run the long stitch off the calling (UI) thread.
    _thread.start_new_thread(
        handler.stitchFileList,
        (self.fileList, output_file, self.progressCallback,
         self.maskImages.get()))
def run(self, path):
    """Stitch *path* into a quilt, render it to a randomly named PDF,
    clean up intermediate artifacts, and return the PDF's base name."""
    self.stitcher = Stitcher(path)
    self.stitcher.stitch()
    self.pdfifier = PDFifier()
    self.quilt = self.stitcher.get_quilt()
    pdf_name = self.random_string(16)
    self.pdfifier.makePDF(self.quilt, pdf_name)
    # Remove intermediate audio and slide artifacts left by stitching.
    audio_file = path + ".mp3"
    if os.path.isfile(audio_file):
        os.remove(audio_file)
    for temp_dir in ("audio_segments", "slides"):
        if os.path.isdir(temp_dir):
            shutil.rmtree(temp_dir)
    return pdf_name
def __init__(self, logger, ip, port, configfps, ingestproxy, mcip):
    """Set up the server plumbing: channels, shared in-memory DB, HTTP
    request handler class, and the RTP stitcher.

    Args:
        logger: Parent logger; child loggers are derived for sub-components.
        ip: Address the HTTP server will bind to (stored, server created later).
        port: Port the HTTP server will bind to.
        configfps: Iterable of channel configuration inputs, one per channel.
        ingestproxy: Ingest proxy forwarded to the request handler class.
        mcip: Multicast IP forwarded to the request handler class.
    """
    self._logger = logger
    self._ip = ip
    self._port = port
    # read all config files and add channels
    for configfp in configfps:
        Channel.append(configfp)
    # create shared memdb
    self._memdb = MemDB()
    # handler class for responding to http requests
    self._myhandler = makehandlerclass(self._logger.getChild("HTTPServer"),
                                       ingestproxy, mcip, self._memdb)
    # self._myhandler.protocol_version = "HTTP/1.1" #-->Do not use HTTP1.1 because handler does not support 206 and byte requests easily.
    self._myhandler.server_version = "m2u"
    # Actual HTTPServer instance is created later (lazily), not here.
    self._httpd = None
    # stitcher for concatenating RTP slices into fragments
    self._stitcher = Stitcher(name="stitcher",
                              args=(self._logger.getChild("Stitcher"),
                                    self._memdb))
def stitchFolder(self, targetFolder):
    """Stitch every .jpg directly inside *targetFolder* into a single
    .tiff named after the folder, on a background thread."""
    stitcherHandler = Stitcher()
    filesToStitch = []
    # Plain files only; subdirectories are ignored.
    onlyFiles = [
        f for f in os.listdir(targetFolder) if isfile(join(targetFolder, f))
    ]
    for files in onlyFiles:
        if files.endswith(".jpg"):
            filesToStitch.append(targetFolder + "/" + files)
    # BUG FIX: with no .jpg files present, filesToStitch[0] below raised
    # IndexError; fewer than two images also cannot be stitched. The
    # sibling stitchFolder implementation already has this guard.
    if len(filesToStitch) < 2:
        return
    parentName = os.path.split(os.path.dirname(filesToStitch[0]))[1]
    outputFile = os.path.join(os.path.dirname(filesToStitch[0]),
                              parentName + ".tiff")
    print(filesToStitch)
    print(outputFile)
    # NOTE: the old `if outputFile is None: exit()` check was dead code —
    # os.path.join always returns a string.
    _thread.start_new_thread(
        stitcherHandler.stitchFileList,
        (filesToStitch, outputFile, "test.txt", self.progressCallback,
         self.maskImages.get(), None))
"Stack several image files to create digital long exposure photographies") parser.add_argument("--align", action="store_true", help="run only the aligner, do not compress") parser.add_argument("--transform", action="store_true", help="run only the aligner and transform, do not compress") parser.add_argument("--stitch", action="store_true", help="stitch images for panoramic formats") args = parser.parse_args() # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- aligner = Aligner() stitcher = Stitcher() stacker = Stacker(aligner) input_images_aligner = [] input_images_stitcher = [] input_images_stacker = [] # transform to absolute paths BASE_DIR = os.path.dirname(os.path.realpath(__file__)) # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- # init aligner if args.align or args.transform: # expand all paths
# NOTE(review): this is a mid-script fragment — ny, nx, nF, params, info,
# and args are defined earlier and not visible here.
# Build a synthetic flow field for the first segment: u grows linearly
# with frame index, v twice as fast — presumably test data; confirm intent.
u0 = np.zeros((ny, nx, nF[0]))
v0 = np.zeros((ny, nx, nF[0]))
for i in range(nF[0].astype(int)):
    u0[:, :, i] = i / 5.
    v0[:, :, i] = 2 * i / 5.

#Initialize data
us = np.zeros((ny, nx, np.sum(nF)))
vs = np.zeros((ny, nx, np.sum(nF)))
us[:, :, 0:nF[0]] = u0
vs[:, :, 0:nF[0]] = v0

#Load in optic flow data
vidx = 1

#Load MFSF data
u1 = np.zeros((ny, nx, nF[vidx]))
v1 = np.zeros((ny, nx, nF[vidx]))
# NOTE(review): loop bound uses nF[0] while u1/v1 are sized by nF[vidx];
# this looks suspicious if segment lengths differ — confirm.
for i in range(nF[0].astype(int)):
    v1[:, :, i] = -i / 5.

#Make a Stitcher
thestitch = Stitcher(u1, v1)
# NOTE(review): aliasing the stitcher to `self` at module level shadows
# nothing here but is unusual — presumably for interactive debugging.
self = thestitch
(u, v) = thestitch.run(u0, v0)
# Write the second segment's stitched flow into the combined arrays.
us[:, :, np.sum(nF[0:vidx]):np.sum(nF[0:vidx + 1])] = u
vs[:, :, np.sum(nF[0:vidx]):np.sum(nF[0:vidx + 1])] = v

#Save output matrix
#mdict = {'u':us, 'v':vs, 'parmsOF':params, 'info':info}
#savemat(args.fn_out, mdict)
from stitcher import Stitcher

# Source frames to be combined into a single output image.
image_list = ["cam4.png", "cam8.png"]  #,"images/image_3.png"]
stitcher = Stitcher(image_list)
stitcher.process_images()