def compute(self, *args):
    """Stitch the two loaded images into a single blended composite.

    Warps both images onto a sphere when the translation motion model is
    selected, maps the right image into the left image's frame via the
    inverse of the computed mapping, sizes the output canvas from the
    transformed corners of the right image, then alpha-blends the two
    warped images 50/50 and displays the result.

    Does nothing when either image is missing, the focal length is
    non-positive, or the computed mapping is singular.
    """
    if self.leftImage is not None and self.rightImage is not None:
        focalLength = self.getFocalLength()
        k1 = self.getK1()
        k2 = self.getK2()
        if focalLength <= 0:
            return
        if self.motionModelVar.get() == alignment.eTranslate:
            # Spherical warp is only meaningful for the translation model.
            left = warp.warpSpherical(self.leftImage, focalLength, k1, k2)
            right = warp.warpSpherical(
                self.rightImage, focalLength, k1, k2
            )
        else:
            left = self.leftImage
            right = self.rightImage
        mapping = self.computeMapping(left, right)
        height, width, _ = right.shape
        # A degenerate correspondence set can yield a singular mapping;
        # bail out instead of crashing the UI callback with LinAlgError.
        try:
            mapping = np.linalg.inv(mapping)
        except np.linalg.LinAlgError:
            return
        # Normalize so the homography's scale factor is 1.
        mapping /= mapping[2, 2]
        # Corners of the right image in homogeneous coordinates.
        points = np.array([
            [0, 0, 1],
            [width, 0, 1],
            [0, height, 1],
            [width, height, 1],
        ], dtype=float)
        trans_points = np.dot(mapping, points.T).T
        # De-homogenize each transformed corner.
        trans_points /= trans_points[:, 2][:, np.newaxis]
        # Bounding box over both original and transformed corners so
        # neither image is cropped.
        all_points = np.vstack([points, trans_points])
        minX = np.min(all_points[:, 0])
        maxX = np.max(all_points[:, 0])
        minY = np.min(all_points[:, 1])
        maxY = np.max(all_points[:, 1])
        # Create an accumulator image
        newWidth = int(np.ceil(maxX) - np.floor(minX))
        newHeight = int(np.ceil(maxY) - np.floor(minY))
        # Shift everything so the bounding box starts at the origin.
        translation = np.array([[1, 0, -minX],
                                [0, 1, -minY],
                                [0, 0, 1]])
        warpedRightImage = cv2.warpPerspective(
            right, np.dot(translation, mapping), (newWidth, newHeight)
        )
        warpedLeftImage = cv2.warpPerspective(
            left, translation, (newWidth, newHeight)
        )
        # Equal-weight blend of the overlapping images.
        alpha = 0.5
        beta = 1.0 - alpha
        gamma = 0.0
        dst = cv2.addWeighted(
            warpedLeftImage, alpha, warpedRightImage, beta, gamma
        )
        self.setImage(dst)
def compute(self, *args):
    """Stitch the two loaded images and display a 50/50 blend.

    The right image is mapped into the left image's frame using the
    inverse of the computed mapping; the output canvas size is derived
    from two transformed corners of the right image.
    """
    if self.leftImage is None or self.rightImage is None:
        return
    focalLength = self.getFocalLength()
    k1 = self.getK1()
    k2 = self.getK2()
    if focalLength <= 0:
        return
    if self.motionModelVar.get() == alignment.eTranslate:
        # Spherical pre-warp for the translation motion model.
        left = warp.warpSpherical(self.leftImage, focalLength, k1, k2)
        right = warp.warpSpherical(self.rightImage, focalLength, k1, k2)
    else:
        left, right = self.leftImage, self.rightImage
    mapping = self.computeMapping(left, right)
    height, width, _ = right.shape
    # TODO what if the mapping is singular?
    mapping = np.linalg.inv(mapping)
    # Transform two corners of the right image to size the canvas.
    # (Homogeneous coordinates; divide through by w after mapping.)
    corner_a = np.dot(mapping, np.array([width, height, 1]))
    corner_a = corner_a / corner_a[2]
    corner_b = np.dot(mapping, np.array([width, 0, 1]))
    corner_b = corner_b / corner_b[2]
    newHeight = int(abs(corner_a[1] - corner_b[1]))
    newWidth = int(max(corner_a[0], corner_b[0]))
    warpedRightImage = cv2.warpPerspective(
        right, mapping, (newWidth, newHeight)
    )
    # Identity warp just resizes the left image onto the new canvas.
    warpedLeftImage = cv2.warpPerspective(
        left, np.eye(3, 3), (newWidth, newHeight)
    )
    # Equal-weight blend of the two warped images.
    alpha = 0.5
    beta = 1.0 - alpha
    gamma = 0.0
    dst = cv2.addWeighted(warpedLeftImage, alpha, warpedRightImage, beta, gamma)
    self.setImage(dst)
def warpImage(self, *args):
    """Spherically warp the currently loaded image and display it.

    Reads the focal length from the UI entry widget; shows an error
    dialog (instead of crashing with ValueError) when the entry is not
    a positive number. When no image is loaded and the method was
    triggered by a direct button click (no event args), an error dialog
    is shown.
    """
    if self.image is not None:
        # Validate the user-typed focal length instead of letting a
        # ValueError escape the UI callback.
        try:
            focalLength = float(self.focalLengthEntry.get())
        except ValueError:
            uiutils.error('Focal length must be a number!')
            return
        # Match the guard used by the compute() handlers: a non-positive
        # focal length makes the spherical warp meaningless.
        if focalLength <= 0:
            uiutils.error('Focal length must be positive!')
            return
        k1 = self.getK1()
        k2 = self.getK2()
        warpedImage = warp.warpSpherical(self.image, focalLength, k1, k2)
        self.setImage(warpedImage)
        self.set_status('Warped image with focal length ' + str(focalLength))
    elif len(args) == 0:
        # i.e., click on the button
        uiutils.error('Select an image before warping!')
def compute(self, *args):
    """Build a panorama from the loaded image sequence and display it.

    Optionally pre-warps every image onto a sphere (translation model),
    chains pairwise mappings to accumulate each image's transform into
    the first image's frame, then hands the per-image transforms to the
    blender. When the 360 checkbox is set, the first image is appended
    again with the wrap-around transform so the panorama closes.
    """
    if self.images is not None and len(self.images) > 0:
        f = self.getFocalLength()
        if f <= 0:
            return
        k1 = self.getK1()
        k2 = self.getK2()
        if self.motionModelVar.get() == alignment.eTranslate:
            processedImages = [
                warp.warpSpherical(i, f, k1, k2) for i in self.images
            ]
        else:
            processedImages = self.images
        # t accumulates the transform from image i's frame back to
        # image 0's frame; each ImageInfo stores its inverse.
        t = np.eye(3)
        ipv = []
        for i in range(len(processedImages) - 1):
            self.set_status(
                'Computing mapping from {0} to {1}'.format(i, i+1)
            )
            ipv.append(
                blend.ImageInfo('', processedImages[i], np.linalg.inv(t))
            )
            t = self.computeMapping(
                processedImages[i], processedImages[i+1]
            ).dot(t)
        ipv.append(blend.ImageInfo(
            '', processedImages[-1], np.linalg.inv(t))
        )
        if self.is360Var.get():
            # Wrap-around mapping (last -> first) is only needed for a
            # closed 360 panorama, so only compute it in that case.
            t = self.computeMapping(
                processedImages[-1], processedImages[0]
            ).dot(t)
            ipv.append(blend.ImageInfo(
                '', processedImages[0], np.linalg.inv(t))
            )
        self.set_status('Blending Images')
        self.setImage(blend.blendImages(
            ipv,
            int(self.blendWidthSlider.get()),
            self.is360Var.get() == 1
        ))
        self.set_status('Panorama generated')
    else:
        uiutils.error(
            'Select a folder with images before creating the panorama!'
        )
def setUp(self):
    '''Run the warps once (independent of thresholds).

    Builds a blank 40x40 white image, warps it spherically with fixed
    parameters, and loads the expected reference result from disk so
    individual tests can compare against it.
    '''
    # Blank white image (uint8, 3 channels).
    blank = np.asarray(np.ones((40, 40, 3)) * 255.0, dtype=np.uint8)
    # NOTE(review): the original also built an unused grid test image
    # here; it was never warped, compared, or stored, so it was removed.
    # (focal length, k1, k2) used for all warps in this fixture.
    parameters = (20, 0.1, -0.1)
    # Expected warped result, precomputed and stored on disk.
    resBl = np.load('testMat/warpBlank.npy')
    self.img_bl = warp.warpSpherical(
        blank, parameters[0], parameters[1], parameters[2]
    )
    self.org_bl = resBl
def pano(set_n, use_algorithm):
    """Build a spherical panorama from the image set ``data/ep<set_n>``.

    Loads and downscales every image in the set, spherically warps each
    one, chains pairwise mappings into a common frame, blends the result,
    sharpens it, and writes the output JPEGs. Prints the elapsed time.

    Parameters:
        set_n: numeric suffix of the image set directory ("ep<set_n>").
        use_algorithm: feature algorithm label ("SIFT"/"SURF"/"ORB"),
            used in output file names and timing output.
    """
    name_of_set = "ep" + str(set_n)
    name_of_path0 = "data/" + name_of_set  #+ "SIFT" + "_set_Lunch/"
    if use_algorithm == "SIFT":
        scale_percent = 30
    elif use_algorithm == "SURF":
        scale_percent = 20
    elif use_algorithm == "ORB":
        scale_percent = 10
    # NOTE(review): this unconditional assignment clobbers the
    # per-algorithm scale chosen above — looks like a debug leftover.
    # Kept to preserve current output; confirm intent before removing.
    scale_percent = 20
    w_p = 200  # blend width passed to blendImages
    name_of_final = use_algorithm + "_" + name_of_set
    # Camera parameters for the spherical warp.
    focalLength = 2500
    k1 = -0.0484573
    k2 = 0.0100024
    f = focalLength
    start_time = timeit.default_timer()
    files = os.listdir(name_of_path0)
    images = [cv2.imread(os.path.join(name_of_path0, i)) for i in files]
    images_crop = []
    for img in images:
        # Downscale to scale_percent of the original dimensions.
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        images_crop.append(cv2.resize(img, (width, height)))
    processedImages = [warp.warpSpherical(i, f, k1, k2) for i in images_crop]
    # t accumulates the transform from image i's frame to image 0's frame.
    t = np.eye(3)
    ipv = []
    for i in range(len(processedImages) - 1):
        ipv.append(blend.ImageInfo('', processedImages[i], np.linalg.inv(t)))
        t = computeMapping(processedImages[i], processedImages[i + 1]).dot(t)
    ipv.append(
        blend.ImageInfo('', processedImages[-1], np.linalg.inv(t)))
    # NOTE(review): removed dead work from the original — a wrap-around
    # computeMapping(last, first) whose result was unused (is360 is
    # False below), an unused contraharmonic_mean pass, an unused
    # sharpened-only image, unused timing points, and an unused `iter`
    # local that shadowed the builtin.
    result = blend.blendImages(ipv, int(w_p), False)
    stop_time = timeit.default_timer()
    # Sharpening kernel; its weights must sum to one.
    kernel_sharpening = np.array([[-1, -1, -1],
                                  [-1, 9, -1],
                                  [-1, -1, -1]])
    median = result  #cv2.medianBlur(result, 3)
    sharpenedmedian = cv2.filter2D(median, -1, kernel_sharpening)
    cv2.imwrite("result_" + name_of_final + "_SM.jpg", sharpenedmedian)
    # Round-trip through JPEG before writing the final output.
    resave = cv2.imread("result_" + name_of_final + "_SM.jpg")
    cv2.imwrite("it3/result_" + name_of_final + ".jpg", resave)
    print('Execution time ' + name_of_set + ' ' + use_algorithm + ': ',
          stop_time - start_time)