import cv2
import numpy as np


def start():
    frame = cv2.imread("building.jpg")
    height, width = frame.shape[:2]
    aspect_ratio = width / height

    # Target size of the corrected image, preserving the source aspect ratio.
    ph = 200
    pw = int(ph * aspect_ratio)

    # Four source points in the order: top-left, top-right, bottom-left, bottom-right.
    roi = [[62, 142], [358, 48], [49, 257], [396, 276]]
    project_pts = [[0, 0], [pw, 0], [0, ph], [pw, ph]]

    src_pts = np.float32(roi)
    dest_pts = np.float32(project_pts)
    transformation_matrix = cv2.getPerspectiveTransform(src_pts, dest_pts)
    output = cv2.warpPerspective(frame, transformation_matrix, (pw, ph))
    cv2.imwrite("perspective_corrected_image.jpg", output)

    # Mark the selected corners on the source frame.
    for pts in roi:
        cv2.circle(frame, tuple(pts), 5, (0, 0, 255), -1)
    cv2.imshow("Image", frame)
    cv2.imshow("Corrected", output)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def fitToCode(image: np.ndarray, centerPoints: np.ndarray, dimension: int) -> np.ndarray:
    # Map the four detected centre points onto a square grid, leaving a
    # 3-pixel margin on every side (e.g. the quiet zone of a code).
    x0 = y0 = 3
    x1 = y1 = dimension - 4
    dest = np.array(
        [
            [x0, y0],
            [x1, y0],
            [x0, y1],
            [x1, y1],
        ],
        np.float32,
    )
    src = np.array(centerPoints, np.float32)
    matrix = cv2.getPerspectiveTransform(src, dest)
    # INTER_NEAREST keeps module edges crisp instead of blending them.
    warped = cv2.warpPerspective(
        image,
        matrix,
        (dimension, dimension),
        flags=cv2.INTER_NEAREST,
    )
    return warped
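# A minimal usage sketch for fitToCode (hypothetical data; assumes the four
# centre points are supplied in top-left, top-right, bottom-left, bottom-right
# order, matching `dest` above):
def _demo_fitToCode():
    img = np.zeros((120, 140), np.uint8)
    corners = np.float32([[20, 15], [118, 22], [14, 100], [125, 108]])
    grid = fitToCode(img, corners, 64)
    print(grid.shape)  # (64, 64)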
def draw_area(undist, binary_warped, Minv, left_fit, right_fit):
    """
    Draw the detected lane area back onto the undistorted image.

    Parameters:
        undist: undistorted input image
        binary_warped: binary bird's-eye-view image of the lane
        Minv: inverse perspective transform matrix
        left_fit: polynomial coefficients of the left lane line
        right_fit: polynomial coefficients of the right lane line
    Returns:
        result: undistorted image with the lane area overlaid
    """
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into a usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array(
        [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using the inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv,
                                  (undist.shape[1], undist.shape[0]))

    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    return result
def square_trans(Table_2D: 'four table corners', points: 'four points at the object base', lined_img=[]):
    '''If the last argument is non-empty (debug mode), display the image.'''
    # Square table with 550 mm sides.
    affine_table_2D = np.float32([[0, 0], [0, 550], [550, 0], [550, 550]])
    # Perspective transform matrix from image coordinates to table coordinates.
    M = cv2.getPerspectiveTransform(Table_2D, affine_table_2D)
    # `points` are homogeneous coordinates (x, y, 1); normalize by the third component.
    transed_points = np.matmul(points, np.transpose(M))
    for i in range(4):
        transed_points[i][0] = transed_points[i][0] / transed_points[i][2]
        transed_points[i][1] = transed_points[i][1] / transed_points[i][2]
    a = [0 for i in range(4)]
    for i in range(4):
        a[i] = (int(transed_points[i][0]), int(transed_points[i][1]))
    # Angle between the edge a[0]->a[1] and the x-axis.
    dx = a[1][0] - a[0][0]
    dy = a[1][1] - a[0][1]
    angle = np.degrees(np.arccos(dx / ((dx**2 + dy**2)**0.5)))
    if len(lined_img):  # perspective transformation (debug view)
        transed = cv2.warpPerspective(lined_img, M, (550, 550))
        cv2.line(transed, a[0], a[1], (0, 255, 0), 1)
        cv2.line(transed, a[0], a[2], (0, 255, 0), 1)
        cv2.line(transed, a[1], a[3], (0, 255, 0), 1)
        cv2.line(transed, a[2], a[3], (0, 255, 0), 1)
        cv2.imshow('perspective', transed)
    return transed_points, angle
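# A minimal call sketch for square_trans (hypothetical coordinates). Note that
# `points` must be homogeneous rows (x, y, 1), since the function divides by
# the third component, and the corner order must match affine_table_2D
# (top-left, bottom-left, top-right, bottom-right in table coordinates):
def _demo_square_trans():
    table = np.float32([[100, 80], [90, 400], [520, 70], [540, 410]])
    base_pts = np.array([[200, 300, 1],
                         [260, 300, 1],
                         [200, 360, 1],
                         [260, 360, 1]], dtype=np.float64)
    pts_mm, angle = square_trans(table, base_pts)
    print(pts_mm[:, :2], angle)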
def circle_trans(Cicle_2D: 'top, bottom, right, left', points: 'four points at the object base', lined_img=[]):
    '''If the last argument is non-empty (debug mode), display the image.'''
    # Round table with radius R = 300.
    affined_Circle_2D = np.float32([[300, 0], [300, 600], [600, 300], [0, 300]])
    # Perspective transform matrix from image coordinates to table coordinates.
    M = cv2.getPerspectiveTransform(Cicle_2D, affined_Circle_2D)
    # `points` are homogeneous coordinates (x, y, 1); normalize by the third component.
    transed_points = np.matmul(points, np.transpose(M))
    for i in range(4):
        transed_points[i][0] = transed_points[i][0] / transed_points[i][2]
        transed_points[i][1] = transed_points[i][1] / transed_points[i][2]
    a = [0 for i in range(4)]
    for i in range(4):
        a[i] = (int(transed_points[i][0]), int(transed_points[i][1]))
    if len(lined_img):  # perspective transformation (debug view)
        transed = cv2.warpPerspective(lined_img, M, (600, 600))
        cv2.line(transed, a[0], a[1], (0, 255, 0), 1)
        cv2.line(transed, a[0], a[2], (0, 255, 0), 1)
        cv2.line(transed, a[1], a[3], (0, 255, 0), 1)
        cv2.line(transed, a[2], a[3], (0, 255, 0), 1)
        cv2.imshow('perspective', transed)
    return transed_points
def rectify_3d_with_db(painting_roi, ranked_list, dst_points, src_points):
    # Use the best-ranked database image as the rectification target.
    best = max(ranked_list, key=ranked_list.get)
    match = cv2.imread(best)
    h_match = int(match.shape[0])
    w_match = int(match.shape[1])
    src_points = np.squeeze(src_points, axis=1).astype(np.float32)
    dst_points = np.squeeze(dst_points, axis=1).astype(np.float32)
    src_points = np.array(
        utils.remove_points_outside_roi(src_points, w_match, h_match))
    # At least four correspondences are needed to estimate a homography.
    if src_points.shape[0] < 4:
        return None
    H, _ = cv2.findHomography(dst_points, src_points, cv2.RANSAC, 5.0)
    if H is None:
        print("[ERROR] Homography matrix can't be estimated. Rectification aborted.")
        return None
    img_dataset_warped = cv2.warpPerspective(
        match, H, (painting_roi.shape[1], painting_roi.shape[0]))
    print("[SUCCESS] Warped from keypoints")
    # Fill the black (unmapped) pixels of the warp with the original ROI.
    mask = np.all(img_dataset_warped == [0, 0, 0], axis=-1)
    img_dataset_warped[mask] = painting_roi[mask]
    show_img(img_dataset_warped)
    return img_dataset_warped
def alineamiento(imagen, ancho, alto):
    imagen_alineada = None
    # Convert the image to grayscale.
    grises = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)
    # threshold returns the threshold value used and the binarized image
    tipoumbral, umbral = cv2.threshold(grises, 150, 255, cv2.THRESH_BINARY)
    # show the thresholded image
    cv2.imshow("Umbral", umbral)
    # findContours returns the contours and their hierarchy; keep the contours
    contorno = cv2.findContours(umbral, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[0]
    # sort the contours by area, largest first, and keep only the biggest one
    contorno = sorted(contorno, key=cv2.contourArea, reverse=True)[:1]
    # walk the remaining contours
    for c in contorno:
        # arcLength gives the contour perimeter; epsilon controls how
        # aggressively approxPolyDP smooths the curve
        epsilon = 0.01 * cv2.arcLength(c, True)
        approximacion = cv2.approxPolyDP(c, epsilon, True)
        # four vertices in the approximation mean a quadrilateral (the document)
        if len(approximacion) == 4:
            puntos = ordenarpuntos(approximacion)
            # map the ordered corners to the target width and height
            puntos1 = np.float32(puntos)
            puntos2 = np.float32([[0, 0], [ancho, 0], [0, alto], [ancho, alto]])
            # perspective transform; M stays fixed even if the camera rotates
            M = cv2.getPerspectiveTransform(puntos1, puntos2)
            imagen_alineada = cv2.warpPerspective(imagen, M, (ancho, alto))
    return imagen_alineada
def Perspective(img, c):
    # Min-area rectangle of the contour and its axis-aligned destination.
    rect = getMinAreaRectPoints(c)
    dst, maxWidth, maxHeight = func.GetDst(rect)
    rect = rect.astype(np.float32)
    dst = dst.astype(np.float32)
    M = cv2.getPerspectiveTransform(rect, dst)
    warp = cv2.warpPerspective(img, M, (maxWidth, maxHeight))
    # Draw the index of each point (debug aid):
    # for i in range(len(dst)):
    #     x = int(dst[i, 0])
    #     y = int(dst[i, 1])
    #     x += 10 if x <= 0 else -10
    #     y += 20 if y <= 0 else -20
    #     cv2.putText(warp, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    return warp
def four_point_transform(self, orig, contour):
    rect = self.order_points(contour)
    tl, tr, br, bl = rect
    # create the new image width -- the max length of the top or bottom of the contour
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    # create the new image height -- the max of the left/right of the contour
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    # now that we have the dimensions of the new image, construct the set of
    # destination points to obtain a "bird's eye view" (i.e. top-down view)
    # of the image, again specifying points in the top-left, top-right,
    # bottom-right, and bottom-left order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    return warped
def get_top_view(image, corners, make_square=True):
    # get the bounding box of the corners
    rect = cv2.minAreaRect(corners)
    box = cv2.boxPoints(rect)
    box = np.intp(box)
    # cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
    # rect = (center, shape, angle)

    # dimensions
    height = int(rect[1][1])
    width = int(rect[1][0])
    final = np.float32([[0, 0], [width, 0], [0, height], [width, height]])

    # perspective transformation matrix
    transformation_matrix = cv2.getPerspectiveTransform(corners, final)
    warped = cv2.warpPerspective(image, transformation_matrix, (width, height))

    # reject boards that are too small to read reliably
    side = max(width, height)
    if side < 200:
        return None, None, None

    # make it a square
    try:
        warped = cv2.resize(warped, (side, side), interpolation=cv2.INTER_CUBIC)
        warped = cv2.resize(warped, (450, 450), interpolation=cv2.INTER_CUBIC)
    except Exception as e:
        print(e)
    return warped, transformation_matrix, (height, width)
def get_board_from_border(img):
    # `points` is a module-level list of the four board corners
    # (top-left, top-right, bottom-left, bottom-right), filled in during calibration.
    pts2 = np.float32([[0, 0], [800, 0], [0, 800], [800, 800]])
    pts1 = np.float32([points[0], points[1], points[2], points[3]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    result = cv2.warpPerspective(img, matrix, (800, 800))
    cv2.imshow("Frame", result)
    cv2.waitKey(100)
    cv2.destroyAllWindows()
    return result
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # compute the width of the new image, which will be the maximum distance
    # between the bottom-right and bottom-left x-coordinates or the top-right
    # and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0])**2) + ((br[1] - bl[1])**2))
    widthB = np.sqrt(((tr[0] - tl[0])**2) + ((tr[1] - tl[1])**2))
    maxWidth = max(int(widthA), int(widthB))
    # compute the height of the new image, which will be the maximum distance
    # between the top-right and bottom-right y-coordinates or the top-left
    # and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0])**2) + ((tr[1] - br[1])**2))
    heightB = np.sqrt(((tl[0] - bl[0])**2) + ((tl[1] - bl[1])**2))
    maxHeight = max(int(heightA), int(heightB))
    # now that we have the dimensions of the new image, construct the set of
    # destination points to obtain a "bird's eye view" (i.e. top-down view)
    # of the image, again specifying points in the top-left, top-right,
    # bottom-right, and bottom-left order
    dst = np.array([[0, 0],
                    [maxWidth - 1, 0],
                    [maxWidth - 1, maxHeight - 1],
                    [0, maxHeight - 1]], dtype="float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    # return the warped image
    return warped
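# four_point_transform relies on an order_points helper defined elsewhere in
# the project. A common implementation (an assumption, not necessarily the
# original) plus a small synthetic test:
def order_points(pts):
    # the coordinate sum is smallest at the top-left and largest at the
    # bottom-right; the difference (y - x) is smallest at the top-right
    # and largest at the bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]
    rect[3] = pts[np.argmax(d)]
    return rect


def _demo_four_point_transform():
    image = np.full((400, 600, 3), 255, np.uint8)
    quad = np.array([[150, 80], [460, 110], [430, 330], [120, 300]], dtype="float32")
    cv2.polylines(image, [quad.astype(np.int32)], True, (0, 0, 0), 2)
    scan = four_point_transform(image, quad)
    print(scan.shape)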
def rectify_with_db(painting_roi, ranked_list, dst_points, src_points):
    # Use the best-ranked database image as the rectification reference.
    best = max(ranked_list, key=ranked_list.get)
    match = cv2.imread(best)
    h_match = int(match.shape[0])
    w_match = int(match.shape[1])
    src_points = np.squeeze(src_points, axis=1).astype(np.float32)
    dst_points = np.squeeze(dst_points, axis=1).astype(np.float32)
    if src_points.shape[0] < 4:
        # Not enough keypoint matches: fall back to corner detection on the ROI.
        src_points, bbox = get_corners(painting_roi, draw=True)
        if len(src_points) < 4:
            print("[ERROR] Can't find enough corners")
            return None
        src_points = utils.order_corners(src_points)
        # destination corners: (0, 0), (w, 0), (0, h), (w, h)
        x, y, w, h = bbox
        dst_points = np.array([(0, 0), (w, 0), (0, h), (w, h)])
        H, _ = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
        if H is None:
            print("[ERROR] Homography matrix can't be estimated. Rectification aborted.")
            return None
        painting_roi = cv2.warpPerspective(painting_roi, H, (w, h))
        print("[SUCCESS] Warped from corners")
        show_img(painting_roi)
    else:
        H, _ = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
        if H is None:
            print("[ERROR] Homography matrix can't be estimated. Rectification aborted.")
            return None
        painting_roi = cv2.warpPerspective(painting_roi, H, (w_match, h_match))
        print("[SUCCESS] Warped from keypoints")
        show_img(painting_roi)
    return True
def full_view(filename1, filename2, dirname):
    leftgray = cv2.imread(dirname + filename1)
    rightgray = cv2.imread(dirname + filename2)

    # SURF with a Hessian threshold of 400; the larger the threshold,
    # the fewer features are detected.
    hessian = 400
    surf = cv2.xfeatures2d.SURF_create(hessian)
    kp1, des1 = surf.detectAndCompute(leftgray, None)   # keypoints and descriptors
    kp2, des2 = surf.detectAndCompute(rightgray, None)

    # FLANN matcher parameters: KD-tree index with 5 trees, 50 search checks.
    # FlannBasedMatcher is currently the fastest feature matcher (nearest-neighbour search).
    FLANN_INDEX_KDTREE = 0
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(des1, des2, k=2)

    # Keep only strong matches: the best distance must be well below the
    # second-best (Lowe's ratio test; 0.3 is stricter than the usual 0.7).
    good = []
    for m, n in matches:
        if m.distance < 0.3 * n.distance:
            good.append(m)

    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # query image points
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # train (template) image points
    H, _ = cv2.findHomography(src_pts, dst_pts)  # transform matrix

    h, w = leftgray.shape[:2]
    h1, w1 = rightgray.shape[:2]
    # Shift right by the left image's width so the warp fits on the canvas.
    shft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])
    M = np.dot(shft, H)  # projection from the left image into the right image's frame
    # The doubled-width canvas can hold both images in full.
    dst_corners = cv2.warpPerspective(leftgray, M, (w * 2, h))
    dst_corners[0:h, w:w + w1] = rightgray  # place the second image on the right

    # Remove blank (all-zero) columns.
    sum_col = np.sum(np.sum(dst_corners, axis=0), axis=1)
    result_img = dst_corners[:, sum_col != 0]

    result_name = get_full_view_result_name(filename1, filename2)
    cv2.imwrite(dirname + result_name, result_img)
    cv2.waitKey()
    cv2.destroyAllWindows()
    return result_name
def wrap_perspective(img, edges):
    global imgOutput
    # Map the four detected edge points to a fixed 250x350 output.
    width, height = 250, 350
    pts1 = np.float32([edges[0], edges[1], edges[2], edges[3]])
    pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOutput = cv2.warpPerspective(img, matrix, (width, height))
    cv2.imshow("Output Image", imgOutput)
def project(I, T, pts, dim):
    # Append a row of ones to get homogeneous coordinates, apply T,
    # then normalize by the third (scale) component.
    ptsh = np.concatenate((pts, np.ones((1, 4))), 0)
    ptsh = np.matmul(T, ptsh)
    ptsh = ptsh / ptsh[2]
    # Return the projected points scaled into the [0, 1] range of the output.
    ptsret = ptsh[:2]
    ptsret = ptsret / dim
    Iroi = cv2.warpPerspective(I, T, (dim, dim), borderValue=0.0,
                               flags=cv2.INTER_LINEAR)
    return Iroi, ptsret
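# A self-contained sketch of how project() might be driven; the transform T is
# a hypothetical square-to-square homography built with getPerspectiveTransform:
def _demo_project():
    I = np.zeros((240, 320, 3), np.uint8)
    quad = np.float32([[60, 40], [250, 55], [240, 200], [70, 190]])  # TL, TR, BR, BL
    dim = 128
    square = np.float32([[0, 0], [dim, 0], [dim, dim], [0, dim]])
    T = cv2.getPerspectiveTransform(quad, square)
    pts = quad.T.astype(float)  # 2x4 array, one column per point
    Iroi, pts_rel = project(I, T, pts, dim)
    print(Iroi.shape, pts_rel)  # warped ROI and points in [0, 1] coordinates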
def transform(original, corners, x=450, y=450):
    # corner points of the new image
    new_size = np.float32([[0, 0], [x, 0], [0, y], [x, y]])
    M = cv2.getPerspectiveTransform(corners, new_size)
    # dimensions of the new image; dsize must be a tuple of ints
    result = cv2.warpPerspective(original, M, (x, y))
    return result
def warp(image, corners, warp_size):
    image_copy = image.copy()
    destination = np.array([[0, 0],
                            [warp_size - 1, 0],
                            [warp_size - 1, warp_size - 1],
                            [0, warp_size - 1]], dtype="float32")
    transform_matrix = cv2.getPerspectiveTransform(corners, destination)
    # inverse to do the opposite transformation later
    transform_matrix_inv = cv2.getPerspectiveTransform(destination, corners)
    warped = cv2.warpPerspective(image_copy, transform_matrix,
                                 (warp_size, warp_size))
    return warped, transform_matrix_inv
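# A round-trip sketch for warp() on synthetic data (hypothetical corner
# values, in the top-left, top-right, bottom-right, bottom-left order that
# `destination` expects), showing what the returned inverse matrix is for:
def _demo_warp_round_trip():
    img = np.zeros((300, 300, 3), np.uint8)
    corners = np.float32([[40, 30], [260, 50], [250, 270], [60, 250]])
    top_down, M_inv = warp(img, corners, 200)
    # project the square view back into the original image frame
    restored = cv2.warpPerspective(top_down, M_inv, (img.shape[1], img.shape[0]))
    print(top_down.shape, restored.shape)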
def CorrectPerspective(img, pts):
    ptsVec = np.float32(pts)
    width = img.shape[1]
    height = img.shape[0]
    ptsVec2 = np.float32(((0, 0), (width, 0), (width, height), (0, height)))
    matrix = cv2.getPerspectiveTransform(ptsVec, ptsVec2)
    result = cv2.warpPerspective(img, matrix, (width, height))
    return result
def __bird_eye(self, img):
    h, w, _ = img.shape
    before = np.array([(0, h), (w / 4, h / 2), (3 * w / 4, h / 2), (w, h)],
                      np.float32)
    after = np.array([(w / 4, h), (w / 4, 0), (3 * w / 4, 0), (0.79 * w, h)],
                     np.float32)
    M = cv2.getPerspectiveTransform(before, after)
    dst = cv2.warpPerspective(img, M, (w, h))
    return dst
def create_transformed(baseImg):
    ## transform the image and place it onto the second image;
    ## `locations` and `newSize` are defined at module level (the four source
    ## corners and the (width, height) of the output canvas)
    movePoints = [[0, 0], [0, newSize[1]], [newSize[0], newSize[1]], [newSize[0], 0]]
    H, _ = cv2.findHomography(np.array(locations), np.array(movePoints))
    # warp and resize
    warped = cv2.warpPerspective(baseImg, H, newSize)
    return warped
def reconstruct(Iorig, I, Y, out_size, threshold=.9):
    # Network output geometry: stride 16, detection side of 7.75 cells.
    net_stride = 2**4
    side = ((208. + 40.) / 2.) / net_stride  # 7.75
    Probs = Y[..., 0]
    Affines = Y[..., 2:]

    # Cells whose objectness probability clears the threshold.
    xx, yy = np.where(Probs > threshold)
    WH = getWH(I.shape)
    MN = WH / net_stride

    vxx = vyy = 0.5  # alpha
    # Canonical unit square, as homogeneous column vectors.
    base = lambda vx, vy: np.matrix([[-vx, -vy, 1.],
                                     [vx, -vy, 1.],
                                     [vx, vy, 1.],
                                     [-vx, vy, 1.]]).T

    labels = []
    for i in range(len(xx)):
        y, x = xx[i], yy[i]
        affine = Affines[y, x]
        prob = Probs[y, x]
        mn = np.array([float(x) + .5, float(y) + .5])

        # Clamp the scale terms so the affine cannot mirror the square.
        A = np.reshape(affine, (2, 3))
        A[0, 0] = max(A[0, 0], 0.)
        A[1, 1] = max(A[1, 1], 0.)

        # Warp the unit square by the predicted affine, shift to the cell
        # centre, and normalize to relative image coordinates.
        pts = np.array(A * base(vxx, vyy))  # *alpha
        pts_MN_center_mn = pts * side
        pts_MN = pts_MN_center_mn + mn.reshape((2, 1))
        pts_prop = pts_MN / MN.reshape((2, 1))

        labels.append(DLabel(0, pts_prop, prob))

    final_labels = nms(labels, .1)

    TLps = []
    if len(final_labels):
        final_labels.sort(key=lambda x: x.prob(), reverse=True)
        for i, label in enumerate(final_labels):
            # Rectify each detected plate to the requested output size.
            t_ptsh = getRectPts(0, 0, out_size[0], out_size[1])
            ptsh = np.concatenate((label.pts * getWH(Iorig.shape).reshape((2, 1)),
                                   np.ones((1, 4))))
            H = find_T_matrix(ptsh, t_ptsh)
            Ilp = cv2.warpPerspective(Iorig, H, out_size, borderValue=.0)
            TLps.append(Ilp)

    return final_labels, TLps
def getWarp(img, biggest, widthImg, heightImg):
    biggest = reorder(biggest)
    pts1 = np.float32(biggest)
    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOutput = cv2.warpPerspective(img, matrix, (widthImg, heightImg))
    # Trim a 10-pixel border to drop warp artifacts, then resize back.
    imgCropped = imgOutput[10:imgOutput.shape[0] - 10, 10:imgOutput.shape[1] - 10]
    imgCropped = cv2.resize(imgCropped, (widthImg, heightImg))
    return imgCropped
def warpImg(img, points, w, h, pad=15):
    points = reorder(points)
    pts1 = np.float32(points)
    pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgWarp = cv2.warpPerspective(img, matrix, (w, h))
    # Trim `pad` pixels on every side to drop edge artifacts from the warp.
    imgWarp = imgWarp[pad:imgWarp.shape[0] - pad, pad:imgWarp.shape[1] - pad]
    return imgWarp
import random

from PIL import Image


def _apply_func_perspective(image):
    """Apply a random perspective distortion to an image."""
    rgb_image = image.convert('RGBA')
    img_arr = np.array(rgb_image)
    a = img_arr
    # Note: here `w` is the number of rows and `h` the number of columns.
    w, h = a.shape[0], a.shape[1]
    # Very wide strips just get padding and a resize instead of a warp.
    if h // w > 3:
        img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGBA2BGRA)
        img = cv2.copyMakeBorder(img, 20, 20, 0, 0, cv2.BORDER_CONSTANT,
                                 value=[255, 255, 255])
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA))
        img = img.resize((48, 48), Image.LANCZOS)  # ANTIALIAS in older Pillow
        return img

    # Set random vertices for the target quadrilateral, leaning either way.
    random_flag = random.uniform(0, 2)
    if random_flag > 1:
        vertex1 = [0, 0]
        vertex4 = [random.uniform(1.0000, 1.1618) * (w - 1), 0]
        lens = vertex4[0] - vertex1[0]
        vertex2 = [random.uniform(0.1, 0.1618) * (w - 1), h - 1]
        vertex3 = [vertex2[0] + lens * random.uniform(0.932, 1), h - 1]
    else:
        vertex4 = [(w - 1) * random.uniform(1.0000, 1.1618), 0]
        vertex1 = [random.uniform(0.1000, 0.2618) * (w - 1), 0]
        lens = vertex4[0] - vertex1[0]
        vertex2 = [random.uniform(0.0000, 0.0618) * (w - 1), h - 1]
        vertex3 = [vertex2[0] + lens * random.uniform(0.932, 1), h - 1]

    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])
    pts1 = np.float32([vertex1, vertex2, vertex3, vertex4])

    # Get the 3x3 transform matrix M and warp into an offset-adjusted canvas.
    M = cv2.getPerspectiveTransform(pts, pts1)
    dsize = get_perspective_offset(M, w, h)
    dst = cv2.warpPerspective(a, M, dsize)

    img_arr = np.array(dst)
    img = Image.fromarray(np.uint8(img_arr))
    img = img.resize((48, 48), Image.LANCZOS)  # ANTIALIAS in older Pillow
    return img
def sudoku_main(image):
    ## find the sudoku contour
    sudoku_contour = find_sudoku(image)
    if sudoku_contour is None:
        return image

    ## get the corners of the contour
    corners = get_corners(sudoku_contour)
    if corners is None:
        return image
    corners = sort_corners(corners)

    ## get a square top view of the board
    top_view, transformation_matrix, original_shape = get_top_view(image, corners)
    if top_view is None:
        return image

    ## OCR
    grid = read_grid(top_view)
    if grid is None:
        return image
    print(grid)

    # hard-coded test sudoku, used to check the OCR output
    test = "740030010019068502000004300056370001001800095090020600103407200500200008080001470"
    if grid == test:
        print("true")

    # solving the sudoku (note: this solves the hard-coded test puzzle, not `grid`)
    solved = solve(test)

    # write the solution over the top view, only into the boxes that were empty
    empty_boxes = [[0 for j in range(9)] for i in range(9)]
    k = 0
    for i in range(9):
        for j in range(9):
            if grid[k] == '0':
                empty_boxes[i][j] = 1
            k = k + 1
    written = write_solution(top_view, empty_boxes, solved)

    # convert the top view back to the original size
    resized = cv2.resize(top_view, (original_shape[1], original_shape[0]),
                         interpolation=cv2.INTER_CUBIC)

    # reverse the perspective transform
    warped = cv2.warpPerspective(resized, transformation_matrix,
                                 (image.shape[1], image.shape[0]),
                                 flags=cv2.WARP_INVERSE_MAP)

    # overlay on the original image wherever the warp has content
    result = np.where(warped.sum(axis=-1, keepdims=True) != 0, warped, image)
    return result
def wrap_perspective(path):
    img = cv2.imread(path)
    w, h = 250, 350
    pts1 = np.float32([[111, 219], [287, 188], [154, 482], [352, 440]])
    pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOutput = cv2.warpPerspective(img, matrix, (w, h))
    cv2.imshow('Images', img)
    cv2.imshow('Output', imgOutput)
    cv2.waitKey(0)
def warp2Images(dst, src, H, t):
    [xmin, xmax, ymin, ymax] = outputLimits(H, dst.shape[:2], src.shape[:2])
    sizeCheck(xmin, ymin, xmax, ymax)
    # Translation that shifts the warp by t = (ty, tx) so it stays in frame.
    Ht = np.array([[1, 0, t[1]], [0, 1, t[0]], [0, 0, 1]])
    # Unmapped pixels stay zero and are filled from dst below.
    src_warped = cv2.warpPerspective(src, Ht.dot(H),
                                     (dst.shape[1], dst.shape[0]))
    src_warped[src_warped == 0] = dst[src_warped == 0]
    return src_warped
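# warp2Images depends on outputLimits and sizeCheck from elsewhere in the
# project. A plausible reconstruction of the former (an assumption, not the
# original): project the source corners through H and return the extents of
# the warped footprint.
def outputLimits(H, dst_shape, src_shape):
    # dst_shape is unused in this sketch; kept to match the call signature
    h, w = src_shape
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, H.astype(np.float64)).reshape(-1, 2)
    xmin, ymin = warped.min(axis=0)
    xmax, ymax = warped.max(axis=0)
    return [xmin, xmax, ymin, ymax]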
def orb_stitcher(imgs):
    # find the keypoints with ORB
    orb1 = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    kp_master, des_master = orb1.detectAndCompute(imgs[0], None)
    kp_secondary, des_secondary = orb1.detectAndCompute(imgs[1], None)

    matches = bf.match(des_secondary, des_master)
    # Sort them in the order of their distance and keep only close matches.
    matches = sorted(matches, key=lambda x: x.distance)
    selected = [m for m in matches if m.distance < 40]

    out_img = cv2.drawMatches(imgs[1], kp_secondary, imgs[0], kp_master,
                              selected, None)
    cv2.namedWindow('www', cv2.WINDOW_NORMAL)
    cv2.imshow('www', out_img)
    # cv2.imwrite('matches.jpg', out_img)
    cv2.waitKey(0)

    warped = None
    if len(selected) > 10:
        dst_pts = np.float32([kp_master[m.trainIdx].pt for m in selected]).reshape(-1, 1, 2)
        src_pts = np.float32([kp_secondary[m.queryIdx].pt for m in selected]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        # Project the corners to size the output canvas.
        h, w = imgs[0].shape[0:2]
        pts = np.float32([[0, 0], [w, 0], [w, h], [0, h], [0, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        max_extent = np.max(dst, axis=0)[0].astype(int)[::-1]
        sz_out = (max(max_extent[1], imgs[0].shape[1]),
                  max(max_extent[0], imgs[0].shape[0]))

        cv2.namedWindow('w', cv2.WINDOW_NORMAL)
        warped = cv2.warpPerspective(imgs[1], M, dsize=sz_out)
        # Overlay the master image's green channel for visual alignment check.
        img_for_show = warped.copy()
        img_for_show[0:imgs[0].shape[0], 0:imgs[0].shape[1], 1] = imgs[0][:, :, 1]
        cv2.imshow('w', img_for_show)
        cv2.waitKey(0)
    return warped
def projective_transformation(img, vp):
    rows, cols = img.shape[:2]
    src_points = np.float32([[0, 0], [cols - 1, 0],
                             [0, rows - 1], [cols - 1, rows - 1]])
    # Pull the top edge in toward the vanishing point and push the bottom edge out.
    # dst_points = np.float32([[int(0.33*cols), int(rows/2)], [int(0.33*cols) + 100, int(rows/2)], [0, rows-1], [cols-1, rows-1]])
    dst_points = np.float32([[vp[0] - 100, vp[1]], [vp[0] + 100, vp[1]],
                             [-cols, 2 * rows], [2 * cols, 2 * rows]])
    projective_matrix = cv2.getPerspectiveTransform(src_points, dst_points)
    img_output = cv2.warpPerspective(img, projective_matrix, (cols, rows))
    return img_output
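# A quick usage sketch for projective_transformation with synthetic inputs
# (the vanishing point below is arbitrary):
def _demo_projective_transformation():
    img = np.zeros((480, 640, 3), np.uint8)
    cv2.line(img, (0, 240), (640, 240), (255, 255, 255), 2)
    vp = (320, 100)  # hypothetical vanishing point
    out = projective_transformation(img, vp)
    print(out.shape)  # (480, 640, 3)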