def square(self, image):
    """Pad a non-square image with white borders so it becomes square."""
    height = image.shape[0]
    width = image.shape[1]
    white = [255, 255, 255]
    if height != width:
        if height > width:
            dif = height - width
            # split the padding so odd differences still yield an exactly square result
            left = dif // 2
            right = dif - left
            squared = cv2.copyMakeBorder(image, 0, 0, left, right,
                                         cv2.BORDER_CONSTANT, value=white)
            # print(squared.shape)
            return squared
        else:
            dif = width - height
            top = dif // 2
            bottom = dif - top
            squared = cv2.copyMakeBorder(image, top, bottom, 0, 0,
                                         cv2.BORDER_CONSTANT, value=white)
            # print(squared.shape)
            return squared
    else:
        return image

def resize_frame(img, frame_height, frame_width):
    # prop_size indicates which dimension is kept fixed ("h" for height, "w" for width)
    # while the other one is recomputed to preserve the aspect ratio
    prop_size = ""
    height, width = img.shape[:2]
    if height > width:
        new_height = frame_height
        new_width = width * frame_height / height
        prop_size = "h"
        if new_width > frame_width:
            height = new_height
            width = new_width
            new_width = frame_width
            new_height = height * frame_width / width
            prop_size = "w"
    elif width > height:
        new_width = frame_width
        new_height = height * frame_width / width
        prop_size = "w"
        if new_height > frame_height:
            height = new_height
            width = new_width
            new_height = frame_height
            new_width = width * frame_height / height
            prop_size = "h"
    else:
        # square input (unhandled in the original): fit it to the smaller frame
        # dimension and pad the other one
        new_height = new_width = min(frame_height, frame_width)
        prop_size = "h" if frame_width >= frame_height else "w"
    img_r = cv2.resize(img, (int(new_width), int(new_height)),
                       interpolation=cv2.INTER_AREA)
    if prop_size == "h":
        dim_bordo = frame_width - img_r.shape[1]
        frame = cv2.copyMakeBorder(img_r, top=0, bottom=0,
                                   left=int(dim_bordo / 2), right=int(dim_bordo / 2),
                                   borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])
    elif prop_size == "w":
        dim_bordo = frame_height - img_r.shape[0]
        frame = cv2.copyMakeBorder(img_r, top=int(dim_bordo / 2), bottom=int(dim_bordo / 2),
                                   left=0, right=0,
                                   borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])
    # if rounding to int left the bordered frame a few pixels off the requested size,
    # resize it to the exact dimensions
    if frame.shape[0] != frame_height or frame.shape[1] != frame_width:
        frame = cv2.resize(frame, (frame_width, frame_height),
                           interpolation=cv2.INTER_AREA)
    return frame

def ridimensiona(img, frame_height, frame_width):
    # dim_esatta: "a" if the height matches frame_height, "l" otherwise
    dim_esatta = ""
    alt, larg = img.shape[:2]
    if alt > larg:
        nuova_alt = frame_height
        nuova_larg = larg * frame_height / alt
        dim_esatta = "a"
        if nuova_larg > frame_width:
            alt = nuova_alt
            larg = nuova_larg
            nuova_larg = frame_width
            nuova_alt = alt * frame_width / larg
            dim_esatta = "l"
    elif larg > alt:
        nuova_larg = frame_width
        nuova_alt = alt * frame_width / larg
        dim_esatta = "l"
        if nuova_alt > frame_height:
            alt = nuova_alt
            larg = nuova_larg
            nuova_alt = frame_height
            nuova_larg = larg * frame_height / alt
            dim_esatta = "a"
    else:
        # square input (unhandled in the original): fit it to the smaller frame
        # dimension and pad the other one
        nuova_alt = nuova_larg = min(frame_height, frame_width)
        dim_esatta = "a" if frame_width >= frame_height else "l"
    img_r = cv2.resize(img, (int(nuova_larg), int(nuova_alt)),
                       interpolation=cv2.INTER_AREA)
    if dim_esatta == "a":
        dim_bordo = frame_width - img_r.shape[1]
        frame = cv2.copyMakeBorder(img_r, top=0, bottom=0,
                                   left=int(dim_bordo / 2), right=int(dim_bordo / 2),
                                   borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])
    elif dim_esatta == "l":
        dim_bordo = frame_height - img_r.shape[0]
        frame = cv2.copyMakeBorder(img_r, top=int(dim_bordo / 2), bottom=int(dim_bordo / 2),
                                   left=0, right=0,
                                   borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])
    # if rounding to int left the bordered frame a few pixels off the requested size,
    # resize it to the exact dimensions
    if frame.shape[0] != frame_height or frame.shape[1] != frame_width:
        frame = cv2.resize(frame, (frame_width, frame_height),
                           interpolation=cv2.INTER_AREA)
    return frame

def resize_image(image, height=IMAGE_SIZE, width=IMAGE_SIZE):
    top, bottom, left, right = (0, 0, 0, 0)
    # get the image dimensions
    h, w, _ = image.shape
    # for non-square images, find the longer side
    longest_edge = max(h, w)
    # compute how many pixels the shorter side needs to match the longer one
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left
    else:
        pass
    # RGB colour used for the padding
    BLACK = [0, 0, 0]
    # pad the image so height and width are equal; cv2.BORDER_CONSTANT fills with `value`
    constant = cv2.copyMakeBorder(image, top, bottom, left, right,
                                  cv2.BORDER_CONSTANT, value=BLACK)
    # resize to the requested size and return (cv2.resize expects (width, height))
    return cv2.resize(constant, (width, height))

def resize_keep_aspectratio(image_src, dst_size):
    src_h, src_w = image_src.shape[:2]
    # print(src_h, src_w)
    dst_h, dst_w = dst_size
    # decide which side to use for the proportional scaling
    h = dst_w * (float(src_h) / src_w)  # scale proportionally based on the width
    w = dst_h * (float(src_w) / src_h)  # scale proportionally based on the height
    h = int(h)
    w = int(w)
    if h <= dst_h:
        image_dst = cv2.resize(image_src, (dst_w, int(h)))
    else:
        image_dst = cv2.resize(image_src, (int(w), dst_h))
    h_, w_ = image_dst.shape[:2]
    # print(h_, w_)
    # centre the image and add a fixed 10-pixel margin on every side
    top = int(((dst_h - h_) / 2) + 10)
    down = int(((dst_h - h_ + 1) / 2) + 10)
    left = int(((dst_w - w_) / 2) + 10)
    right = int(((dst_w - w_ + 1) / 2) + 10)
    value = [255, 255, 255]
    borderType = cv2.BORDER_CONSTANT
    # print(top, down, left, right)
    image_dst = cv2.copyMakeBorder(image_dst, top, down, left, right,
                                   borderType, None, value)
    return image_dst

def createPano(hMat, imOrder, imgRGB):
    # `images` is assumed to be defined at module level alongside this function
    limits = np.zeros((len(images), 4))
    for i in range(0, len(images) - 1):
        limits[i] = outputLimits(hMat[i + 1],
                                 imgRGB[imOrder[i]].shape[:2],
                                 imgRGB[imOrder[i + 1]].shape[:2])
    xmin = round(np.min(limits[:, 0]))
    xmax = round(np.max(limits[:, 1]))
    ymin = round(np.min(limits[:, 2]))
    ymax = round(np.max(limits[:, 3]))
    pano = imgRGB[imOrder[0]]
    pad_widths = [
        -ymin, max(ymax, pano.shape[0]) - pano.shape[0],
        -xmin, max(xmax, pano.shape[1]) - pano.shape[1]
    ]
    pano_pad = cv2.copyMakeBorder(pano, *pad_widths, cv2.BORDER_CONSTANT)
    t = [-ymin, -xmin]
    for i in range(1, len(hMat)):
        pano_pad = warp2Images(pano_pad, imgRGB[imOrder[i]], hMat[i], t)
    cv2.imshow(' ', pano_pad)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return pano_pad

def resize_and_pad(image, size_w, size_h, pad_value=114):
    image_h, image_w = image.shape[:2]
    # decide whether the width or the height is the limiting dimension
    is_based_w = float(size_h) >= (image_h * size_w / float(image_w))
    if is_based_w:
        target_w = size_w
        target_h = int(np.round(image_h * size_w / float(image_w)))
    else:
        target_w = int(np.round(image_w * size_h / float(image_h)))
        target_h = size_h
    image = cv.resize(image, (target_w, target_h), 0, 0, interpolation=cv.INTER_NEAREST)
    # image = cv.resize(image, (target_w, target_h), 0, 0, interpolation=cv.INTER_LINEAR)
    top = int(max(0, np.round((size_h - target_h) / 2)))
    left = int(max(0, np.round((size_w - target_w) / 2)))
    bottom = size_h - top - target_h
    right = size_w - left - target_w
    image = cv.copyMakeBorder(image, top, bottom, left, right, cv.BORDER_CONSTANT,
                              value=[pad_value, pad_value, pad_value])
    return image

def letterbox_image(image, desired_size):
    """Resize image with unchanged aspect ratio using padding. desired_size is (width, height)."""
    ih, iw = image.shape[:2]
    w, h = desired_size
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)
    resized_image = cv2.resize(image, (nw, nh))
    delta_w = w - nw
    delta_h = h - nh
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    color = [128, 128, 128]
    new_image = cv2.copyMakeBorder(resized_image, top, bottom, left, right,
                                   cv2.BORDER_CONSTANT, value=color)
    return new_image

def scale_and_centre(img, size, margin=20, background=0):
    """Scales and centres an image onto a new background square."""
    h, w = img.shape[:2]

    def centre_pad(length):
        """Handles centering for a given length that may be odd or even."""
        if length % 2 == 0:
            side1 = int((size - length) / 2)
            side2 = side1
        else:
            side1 = int((size - length) / 2)
            side2 = side1 + 1
        return side1, side2

    def scale(r, x):
        return int(r * x)

    if h > w:
        t_pad = int(margin / 2)
        b_pad = t_pad
        ratio = (size - margin) / h
        w, h = scale(ratio, w), scale(ratio, h)
        l_pad, r_pad = centre_pad(w)
    else:
        l_pad = int(margin / 2)
        r_pad = l_pad
        ratio = (size - margin) / w
        w, h = scale(ratio, w), scale(ratio, h)
        t_pad, b_pad = centre_pad(h)

    img = cv2.resize(img, (w, h))
    img = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad, r_pad,
                             cv2.BORDER_CONSTANT, None, background)
    return cv2.resize(img, (size, size))

def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True,
              scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT,
                             value=color)  # add border
    return img, ratio, (dw, dh)

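# A minimal usage sketch for letterbox(): it assumes a detector that returns pixel-space
# boxes as (x1, y1, x2, y2) on the letterboxed image. The helper name and the detector
# variables below are hypothetical; only the inverse mapping via `ratio` and (dw, dh)
# follows from letterbox() itself.
def undo_letterbox(boxes, ratio, pad):
    """Map (x1, y1, x2, y2) boxes from the letterboxed image back to the original image."""
    dw, dh = pad
    out = []
    for x1, y1, x2, y2 in boxes:
        out.append(((x1 - dw) / ratio[0], (y1 - dh) / ratio[1],
                    (x2 - dw) / ratio[0], (y2 - dh) / ratio[1]))
    return out

# img_lb, ratio, pad = letterbox(original_img, new_shape=640, auto=False)
# boxes_on_original = undo_letterbox(boxes_on_letterboxed, ratio, pad)
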
def showIfWindows(model, images, orig_labels):
    if platform.system() == "Windows":
        # show 5 randomly chosen images (y_train is assumed to be in scope at module level)
        randomIndices = np.random.choice(len(y_train), 5)
        for i in randomIndices:
            rand_im = images[i]
            rand_im_lbl_orig = orig_labels[i]
            rand_im_lbl_pred = model.predict(np.array([rand_im * 255]))
            rand_im = cv2.copyMakeBorder(rand_im, 200, 200, 200, 200,
                                         cv2.BORDER_CONSTANT, value=0)
            # print(f"Y: {rand_im_lbl_orig}, Y_pred: {rand_im_lbl_pred}")
            cv2.putText(
                rand_im,
                f"Y: {rand_im_lbl_orig}, Y_pred: {[np.where(r == 1)[0][0] for r in rand_im_lbl_pred]}",
                (5, 100),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (255, 0, 0),
                2,
                cv2.LINE_AA,
            )
            cv2.imshow("Sample", rand_im)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
    else:
        printInColor("Showing Images only works on OS with GUI")

def resize_to_fit(img: object, new_size: int) -> object:
    """Resizes the image to a new square dimension (new_size x new_size), keeping the
    proportions and padding with black pixels."""
    # shape returns (h, w)
    ratio = new_size / (max(img.shape[:2]))
    new_shape = tuple([int(ratio * size) for size in img.shape[:2]])
    # resize with the computed ratio; cv2.resize expects (w, h)
    new_img = cv2.resize(img, (new_shape[1], new_shape[0]))
    # compute the padding for each dimension (w and h)
    total_padw = new_size - new_img.shape[1]
    pad_right = total_padw // 2
    pad_left = total_padw - pad_right
    total_padh = new_size - new_img.shape[0]
    pad_top = total_padh // 2
    pad_bottom = total_padh - pad_top
    new_img = cv2.copyMakeBorder(
        new_img, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT
    )
    return new_img

def render(self):
    # Stretch the image to the window size while keeping the aspect ratio.
    # Note: before the if/else, height and width refer to original-image pixels;
    # afterwards they refer to screen pixels.
    height, width = self.display_area.height, self.display_area.width
    if height / width > self.win_h / self.win_w:
        height, width = self.win_h, int(self.win_h * width / height)
    else:
        height, width = int(self.win_w * height / width), self.win_w
    # region to display
    display = np.array(self.img)
    scaled = cv2.resize(display[self.display_area], (width, height),
                        interpolation=cv2.INTER_NEAREST)
    # fill the rest of the window with a black border
    top = (self.win_h - height) // 2
    bot = self.win_h - height - top
    left = (self.win_w - width) // 2
    right = self.win_w - width - left
    self.display = cv2.copyMakeBorder(scaled, top, bot, left, right,
                                      cv2.BORDER_CONSTANT, value=(0, 0, 0))
    # variables used to project mouse clicks back to original-image coordinates
    self.scaled_size = Area.AreaTuple(height, width)
    self.top_border = top
    self.left_border = left

def Prewitt(originImg, rows, columns, threshold, mask_1, mask_2):
    image = np.zeros(originImg.shape, np.uint8)
    # reflect-pad by one pixel so the 3x3 masks can be applied at the image border
    imageBordered = cv2.copyMakeBorder(originImg, 1, 1, 1, 1, cv2.BORDER_REFLECT)
    maskRows = len(mask_1)
    maskColumns = len(mask_1[0])
    for r in range(rows):
        for c in range(columns):
            p1, p2 = 0, 0
            for maskR in range(maskRows):
                for maskC in range(maskColumns):
                    # pixel (r, c) of the original sits at (r + 1, c + 1) in the padded
                    # image, so the 3x3 window spans rows r..r+2 and columns c..c+2
                    p1 += int(imageBordered[r + maskR, c + maskC]) * mask_1[maskR][maskC]
                    p2 += int(imageBordered[r + maskR, c + maskC]) * mask_2[maskR][maskC]
            gradient = math.sqrt(p1**2 + p2**2)
            if gradient >= threshold:
                image[r, c] = 0
            else:
                image[r, c] = 255
    cv2.imshow("Prewitt", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return image

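# Usage sketch for Prewitt(): the caller supplies the two gradient masks. The kernels
# below are the standard Prewitt row/column operators; the input file name and the
# threshold value are illustrative, not taken from the original code.
# img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)
# prewitt_row = [[-1, -1, -1], [0, 0, 0], [1, 1, 1]]
# prewitt_col = [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]
# edges = Prewitt(img, img.shape[0], img.shape[1], 24, prewitt_row, prewitt_col)
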
def img_stitch():
    # Used for image stitching: grab the paths to the input images and initialize our images list
    imgPaths = sorted(list(paths.list_images(this_path + "imgToStitch")))
    imgs = []

    # loop over the image paths, load each one, and add them to our images-to-stitch list
    for imgPath in imgPaths:
        img = cv2.imread(imgPath)
        imgs.append(img)

    # initialize OpenCV's image stitcher object and then perform the image stitching
    stitcher = cv2.createStitcher() if imutils.is_cv3() else cv2.Stitcher_create()
    (status, stitched) = stitcher.stitch(imgs)

    # if the status is '0', then OpenCV successfully performed image stitching
    if status == 0:
        # create a 10 pixel border surrounding the stitched image
        stitched = cv2.copyMakeBorder(stitched, 10, 10, 10, 10,
                                      cv2.BORDER_CONSTANT, value=(0, 0, 0))
        # get the number of images already in the ./stitchUnlabel directory
        imgCount = len([name for name in os.listdir(this_path + 'stitchUnlabel')
                        if os.path.isfile(os.path.join(this_path + 'stitchUnlabel', name))]) - 1
        # write the output stitched image to disk (adding 1 to imgCount because the first image is gsv1, not gsv0)
        cv2.imwrite(this_path + 'stitchUnlabel' + '/gsv' + str(imgCount + 1) + '.jpg', stitched)
    # otherwise the stitching failed, likely because not enough keypoints were detected
    else:
        print("[INFO] image stitching failed ({})".format(status))

def compass(originImg, rows, columns, threshold, mask):
    image = np.zeros(originImg.shape, np.uint8)
    # reflect-pad by one pixel so the 8-neighbourhood exists at the image border
    borderImg = cv2.copyMakeBorder(originImg, 1, 1, 1, 1, cv2.BORDER_REFLECT)
    for r in range(rows):
        for c in range(columns):
            # pixel (r, c) of the original sits at (r + 1, c + 1) in the padded image;
            # list its 8 neighbours clockwise, starting from the top-left, and cast to
            # Python int so the weighted sums below do not overflow uint8
            imageList = [int(v) for v in (
                borderImg[r, c], borderImg[r, c + 1], borderImg[r, c + 2],
                borderImg[r + 1, c + 2], borderImg[r + 2, c + 2], borderImg[r + 2, c + 1],
                borderImg[r + 2, c], borderImg[r + 1, c]
            )]
            maxK = 0
            for offset in range(8):
                tempSum = 0
                for num in range(8):
                    tempSum += imageList[num] * mask[(num + offset) % 8]
                if tempSum > maxK:
                    maxK = tempSum
            if maxK >= threshold:
                image[r, c] = 0
            else:
                image[r, c] = 255
    cv2.imshow("Compass", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return image

def extract_section_coordinates_from_image(self, image, threshold_breakpoint):
    # Takes a raw image with a bright white background, looks for the contour of a
    # rectangular object, and returns the coordinates of a polygon approximating that
    # object. A threshold needs to be provided representing an acceptable breaking
    # point at which the coordinates will be returned.

    # The width of the border used to wrap the image in case the object overflows the image
    BORDER_WIDTH = 100
    image = cv2.copyMakeBorder(image, BORDER_WIDTH, BORDER_WIDTH, BORDER_WIDTH, BORDER_WIDTH,
                               cv2.BORDER_CONSTANT, value=[255, 255, 255])
    image_grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Calculate the image area
    image_height, image_width = image.shape[:2]
    image_area = image_height * image_width

    # Repeat until the right threshold value for finding a rectangle is hit
    found = False
    # Increment the threshold until a contour is found
    threshold_current = threshold_breakpoint
    while found is False:
        if threshold_breakpoint < 200:
            threshold_breakpoint = threshold_current + 5
            threshold_current = threshold_breakpoint
        # Extract contours using the threshold
        _, threshold = cv2.threshold(image_grayscale, threshold_breakpoint, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        # Go through each contour that could be extracted from the image
        for contour in contours:
            contour_area = cv2.contourArea(contour)
            if contour_area > (image_area / 6) and contour_area < (image_area / 1.01):
                epsilon = 0.1 * cv2.arcLength(contour, True)
                # Close open lines into a complete wrapped shape
                approx = cv2.approxPolyDP(contour, epsilon, True)
                # When the shape can be wrapped, the contour rectangle has been found
                if len(approx) == 4:
                    found = True
                # Otherwise keep decrementing the threshold value until it is found
                else:
                    threshold_breakpoint = threshold_breakpoint - 1
                break

    # Set and return coordinates from the approximation, compensating for the added border
    coordinates = numpy.empty((4, 2), dtype="float32")
    # Top-left
    coordinates[0] = approx[0] - BORDER_WIDTH
    # Top-right
    coordinates[1] = approx[1] - BORDER_WIDTH
    # Bottom-right
    coordinates[2] = approx[2] - BORDER_WIDTH
    # Bottom-left
    coordinates[3] = approx[3] - BORDER_WIDTH
    return coordinates

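# A hedged usage sketch: the four returned coordinates are typically fed into a
# perspective warp to "flatten" the detected rectangle. The corner ordering produced by
# approxPolyDP is not guaranteed, so a real caller would normally re-order the points
# first; the placeholder names and the 500x500 destination size are illustrative only.
# coordinates = extractor.extract_section_coordinates_from_image(image, 100)
# dst = numpy.array([[0, 0], [499, 0], [499, 499], [0, 499]], dtype="float32")
# matrix = cv2.getPerspectiveTransform(coordinates_reordered_to_match_dst, dst)
# warped = cv2.warpPerspective(image, matrix, (500, 500))
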
def pad_image(image):
    (h, w) = image.shape[:2]
    padW = int((140 - w) / 2.0)
    padH = int((140 - h) / 2.0)
    image = cv2.copyMakeBorder(image, padH, padH, padW, padW, cv2.BORDER_CONSTANT)
    image = cv2.resize(image, (140, 140))
    return image

def add_borders(img):
    borderType = cv.BORDER_CONSTANT
    dst = cv.copyMakeBorder(img, BBOX_WIDTH, BBOX_WIDTH, BBOX_WIDTH, BBOX_WIDTH,
                            borderType, value=BBOX_COLOR)
    return dst

def draw_box_with_text(img, text=None, edge_color=(255, 255, 255), border=2, mode=0):
    """Draws a coloured border around the image and an optional text label in the top band."""
    # width, height = img.shape[1::-1]
    # scale = max(width, height) / 400
    font_scale, font_thickness = .8, 2
    font_color = (0, 0, 0)
    if mode == 0:  # standard mode
        img = cv2.copyMakeBorder(img, 10 * border, border, border, border,
                                 cv2.BORDER_CONSTANT, value=edge_color)
    elif mode == 1:  # low vision
        img = cv2.copyMakeBorder(img, 10 * border, border, border, border,
                                 cv2.BORDER_CONSTANT, value=edge_color)
        font_scale, font_thickness = 1.6, 2
    if text is not None:
        x = y = border
        img = cv2.putText(img, text, (x + 2, y + 15), cv2.FONT_HERSHEY_SIMPLEX,
                          font_scale, font_color, font_thickness, lineType=cv2.LINE_AA)
    return img

def _apply_func_perspective(image):
    """Apply a perspective transform to an image."""
    rgb_image = image.convert('RGBA')
    img_arr = np.array(rgb_image)
    a = img_arr
    w, h = a.shape[0], a.shape[1]  # note: shape[0] is the pixel height, shape[1] the width
    if h // w > 3:
        img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGBA2BGRA)
        img = cv2.copyMakeBorder(img, 20, 20, 0, 0, cv2.BORDER_CONSTANT, value=[255, 255, 255])
        # img = cv2.imread(img)
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA))
        img = img.resize((48, 48), Image.ANTIALIAS)
        return img

    # pick random vertices for the target quadrilateral
    random_flag = random.uniform(0, 2)
    if random_flag > 1:
        vertex1 = [0, 0]
        vertex4 = [random.uniform(1.0000, 1.1618) * (w - 1), 0]
        lens = vertex4[0] - vertex1[0]
        vertex2 = [random.uniform(0.1, 0.1618) * (w - 1), h - 1]
        vertex3 = [vertex2[0] + lens * random.uniform(0.932, 1), h - 1]
    else:
        vertex4 = [(w - 1) * random.uniform(1.0000, 1.1618), 0]
        vertex1 = [random.uniform(0.1000, 0.2618) * (w - 1), 0]
        lens = vertex4[0] - vertex1[0]
        vertex2 = [random.uniform(0.0000, 0.0618) * (w - 1), h - 1]
        vertex3 = [vertex2[0] + lens * random.uniform(0.932, 1), h - 1]

    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])
    pts1 = np.float32([vertex1, vertex2, vertex3, vertex4])
    # get the 3x3 transform matrix M
    M = cv2.getPerspectiveTransform(pts, pts1)
    dsize = get_perspective_offset(M, w, h)
    dst = cv2.warpPerspective(a, M, dsize)
    img_arr = np.array(dst)
    img = Image.fromarray(np.uint8(img_arr))
    img = img.resize((48, 48), Image.ANTIALIAS)
    return img

def __init__(self, global_grid, agent_handler):
    self._no_of_agents = Config.NO_OF_AGENTS
    self._grid_len = Config.GRID_LEN
    self._grid_width = Config.GRID_WIDTH
    self._sensor_range = Config.SENSOR_RANGE
    self._agent_color_list = agent_handler.get_all_agent_color_list()
    self._global_grid = copy.copy(global_grid)
    self._mapped_grid = 224 * np.ones(
        shape=[self._grid_len, self._grid_width, 3], dtype=np.uint8)
    self._mapped_grid = cv2.copyMakeBorder(self._mapped_grid, 10, 10, 10, 10,
                                           cv2.BORDER_CONSTANT)

def get_highlighted_image(self, target_width: int = None) -> np.ndarray:
    return cv2.copyMakeBorder(
        (self.get_resized_image(target_width)
         if target_width is not None
         else self.get_data())[self.BORDER_WIDTH:-self.BORDER_WIDTH,
                               self.BORDER_WIDTH:-self.BORDER_WIDTH, ],
        top=self.BORDER_WIDTH,
        bottom=self.BORDER_WIDTH,
        left=self.BORDER_WIDTH,
        right=self.BORDER_WIDTH,
        borderType=cv2.BORDER_ISOLATED,
        value=self.BORDER_COLOR,
    )

def draw_class(self, image, class_name):
    height, width = image.shape[:2]
    bordered = cv2.copyMakeBorder(image, top=0, bottom=50, left=0, right=0,
                                  borderType=cv2.BORDER_CONSTANT, value=[245, 222, 179])
    cv2.putText(bordered, class_name, (2, height + 30), cv2.FONT_HERSHEY_SIMPLEX,
                0.7, (0, 0, 0), 2)
    return bordered

def readData(path, h, w, imgs, labs):
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename = path + '/' + filename
            img = cv2.imread(filename)
            # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            top, bottom, left, right = getPaddingSize(img)
            # pad the image edges so it becomes square before resizing
            img = cv2.copyMakeBorder(img, top, bottom, left, right,
                                     cv2.BORDER_CONSTANT, value=[0, 0, 0])
            img = cv2.resize(img, (h, w))
            imgs.append(img)
            labs.append(path)
    return imgs, labs

def morphEx(img, morph_type, ksize):
    bsize = ksize // 2
    bordered = cv2.copyMakeBorder(img, bsize, bsize, bsize, bsize,
                                  borderType=cv2.BORDER_CONSTANT, value=0)
    kernel = np.ones((ksize, ) * 2, np.uint8)
    filtered = cv2.morphologyEx(bordered, morph_type, kernel, borderValue=0)
    result = filtered[bsize:-bsize, bsize:-bsize]
    assert result.shape == img.shape, (result.shape, img.shape)
    return result

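# A small usage sketch for morphEx(): it applies a morphological operation with an
# explicit zero border, so structures touching the image edge are treated as if the
# outside were background. The input file name and kernel sizes are illustrative.
# mask = cv2.imread("mask.png", cv2.IMREAD_GRAYSCALE)
# opened = morphEx(mask, cv2.MORPH_OPEN, ksize=3)
# closed = morphEx(mask, cv2.MORPH_CLOSE, ksize=5)
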
def filter_image(im, filter):
    # output has the same height/width as the input
    im_f = np.zeros((np.size(im, 0), np.size(im, 1)))
    # padding: replicate the edge pixels so the 3x3 window stays inside the image
    im_temp = cv2.copyMakeBorder(im, top=1, bottom=1, left=1, right=1,
                                 borderType=cv2.BORDER_REPLICATE)
    for i in range(0, np.size(im, 0)):
        for j in range(0, np.size(im, 1)):
            # w = im_temp[i:i+3, j:j+3]
            # element-wise product of the 3x3 window with the filter, then sum
            im_f[i, j] = np.sum(np.multiply(im_temp[i:i + 3, j:j + 3], filter))
    return im_f

def NevatiaBabu(originImg, rows, columns, threshold, mask_1, mask_2):
    image = np.zeros(originImg.shape, np.uint8)
    # replicate-pad by two pixels so the 5x5 masks can be applied at the image border
    borderImg = cv2.copyMakeBorder(originImg, 2, 2, 2, 2, cv2.BORDER_REPLICATE)
    mask_3 = arrayClockwise(mask_1)
    mask_4 = arrayClockwise(mask_2)
    mask_6 = arrayFlip(mask_4)
    mask_5 = arrayClockwise(mask_6)
    masks = [mask_1, mask_2, mask_6, mask_3, mask_4, mask_5]
    for r in range(rows):
        for c in range(columns):
            maxK = 0
            for num in range(6):
                tempsum = 0
                for maskN in range(len(mask_1)):
                    for maskC in range(len(mask_1[0])):
                        # pixel (r, c) of the original sits at (r + 2, c + 2) in the
                        # padded image, so the 5x5 window spans rows r..r+4, cols c..c+4
                        tempsum += int(borderImg[r + maskN, c + maskC]) * masks[num][maskN, maskC]
                if tempsum > maxK:
                    maxK = tempsum
            if maxK >= threshold:
                image[r, c] = 0
            else:
                image[r, c] = 255
    cv2.imshow("NevatiaBabu", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return image

def resize_img(im):
    old_size = im.shape[:2]  # old_size is in (height, width) format
    ratio = float(img_size) / max(old_size)
    new_size = tuple([int(x * ratio) for x in old_size])  # new_size is also (height, width)
    # cv2.resize expects (width, height)
    im = cv2.resize(im, (new_size[1], new_size[0]))
    delta_w = img_size - new_size[1]
    delta_h = img_size - new_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    new_im = cv2.copyMakeBorder(im, top, bottom, left, right,
                                cv2.BORDER_CONSTANT, value=[0, 0, 0])
    return new_im, ratio, top, left

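# A minimal usage sketch for resize_img(): the returned ratio and offsets let you map
# points from the original image into the padded square. img_size is assumed to be a
# module-level constant, as in the function above; the coordinate names are illustrative.
# padded, ratio, top, left = resize_img(original)
# x_pad = x_orig * ratio + left
# y_pad = y_orig * ratio + top
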
def resize_image(image, size=config.image_size[0]):
    scale = 0.0  # scale factor
    w_s = 0.0    # horizontal offset
    h_s = 0.0    # vertical offset
    # get the image dimensions
    h, w, _ = image.shape
    if h > w:
        scale = h / size
        w_s = (size - (w / scale)) / 2
    else:
        scale = w / size
        h_s = (size - (h / scale)) / 2
    top, bottom, left, right = (0, 0, 0, 0)
    # for non-square images, find the longer side
    longest_edge = max(h, w)
    # compute how many pixels of padding the shorter side needs to match the longer one
    # (the longer side gets no padding, only the shorter side does)
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left
    else:
        pass  # already square, nothing to do
    # padding colour (mid-grey, despite the variable name)
    BLACK = [128, 128, 128]
    # pad the image so height and width are equal; top/bottom/left/right are the border
    # widths and cv2.BORDER_CONSTANT fills them with `value`
    constant = cv.copyMakeBorder(image, top, bottom, left, right,
                                 cv.BORDER_CONSTANT, value=BLACK)
    # resize to the target size; this keeps later computation and memory usage small
    return cv.resize(constant, (size, size)), scale, w_s, h_s

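# A hedged usage sketch for this resize_image(): the returned scale and offsets can be
# used to map a point detected on the resized image back to the original. The coordinate
# names below are illustrative, not part of the original code.
# resized, scale, w_s, h_s = resize_image(frame)
# x_orig = (x_resized - w_s) * scale
# y_orig = (y_resized - h_s) * scale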