def surf_image_match(left_image, right_image):
    gray_left_image = cv.cvtColor(left_image, cv.COLOR_BGR2GRAY)
    gray_right_image = cv.cvtColor(right_image, cv.COLOR_BGR2GRAY)
    gpu_gray_left_image = cv.cuda_GpuMat(gray_left_image)
    gpu_gray_right_image = cv.cuda_GpuMat(gray_right_image)

    # SURF on the GPU with a high Hessian threshold
    surf = cv.cuda.SURF_CUDA_create(4000)
    gpu_left_key_points, gpu_left_descriptors = surf.detectWithDescriptors(
        gpu_gray_left_image, None)
    gpu_right_key_points, gpu_right_descriptors = surf.detectWithDescriptors(
        gpu_gray_right_image, None)

    matcher = cv.cuda.DescriptorMatcher_createBFMatcher(cv.NORM_L2)
    matches = matcher.knnMatch(gpu_left_descriptors, gpu_right_descriptors, k=2)

    # Lowe's ratio test; drawMatchesKnn expects a list of DMatch lists
    good_matches = [[m] for m, n in matches if m.distance < 0.7 * n.distance]

    # Download keypoints from the GPU
    left_key_points = surf.downloadKeypoints(gpu_left_key_points)
    right_key_points = surf.downloadKeypoints(gpu_right_key_points)

    return cv.drawMatchesKnn(left_image, left_key_points, right_image,
                             right_key_points, good_matches, None, flags=2)

def test_convolution(self):
    npMat = (np.random.random((128, 128)) * 255).astype(np.float32)
    npDims = np.array(npMat.shape)
    kernel = (np.random.random((3, 3)) * 1).astype(np.float32)
    kernelDims = np.array(kernel.shape)
    iS = (kernelDims / 2).astype(int)
    iE = npDims - kernelDims + iS

    cuMat = cv.cuda_GpuMat(npMat)
    cuKernel = cv.cuda_GpuMat(kernel)
    cuMatDst = cv.cuda_GpuMat(tuple(npDims - kernelDims + 1), cuMat.type())
    conv = cv.cuda.createConvolution()

    self.assertTrue(np.allclose(
        conv.convolve(cuMat, cuKernel, ccorr=True).download(),
        cv.filter2D(npMat, -1, kernel,
                    anchor=(-1, -1))[iS[0]:iE[0] + 1, iS[1]:iE[1] + 1]))

    conv.convolve(cuMat, cuKernel, cuMatDst, True)
    self.assertTrue(np.allclose(
        cuMatDst.download(),
        cv.filter2D(npMat, -1, kernel,
                    anchor=(-1, -1))[iS[0]:iE[0] + 1, iS[1]:iE[1] + 1]))

def __init__(self):
    self.is_pylon_data = False
    self.image_imput = ImageImput()
    self.camera_publish = rospy.Publisher("/camera_image", Image, queue_size=10)
    self.convert_x_publish = rospy.Publisher("/data_x", Float32, queue_size=1)
    self.convert_y_publish = rospy.Publisher("/data_y", Float32, queue_size=1)
    self.z_publish = rospy.Publisher("/data_z", Float32, queue_size=1)
    self.h_publish = rospy.Publisher("/data_h", Float32, queue_size=1)
    self.depth_publish = rospy.Publisher("/data_depth", Float32, queue_size=1)

    # Create the image array
    self.image = np.arange(27).reshape(3, 3, 3)

    # Create the data buffers
    self.z_datas = [0] * self.PYLON_NUMBER
    self.h_datas = [0] * self.PYLON_NUMBER
    self.convert_x_datas = [0] * self.PYLON_NUMBER

    # Allocate GPU memory
    self.img_gpu_src = cv2.cuda_GpuMat()
    self.img_gpu_dst = cv2.cuda_GpuMat()

    # Create the Gaussian filter for CUDA
    self.gaussian_filter = cv2.cuda.createGaussianFilter(
        cv2.CV_8UC3, cv2.CV_8UC3,
        ksize=self.GAUSSIAN_KERNEL_SIZE,
        sigma1=0, sigma2=0)

def apply_gpu(img1, img2, bbox1, bbox2, kp_center1, kp_center2):
    """ Still in development """
    cuMat1 = cv.cuda_GpuMat(img1)
    cuMat2 = cv.cuda_GpuMat(img2)
    c_surf = cv.cuda.SURF_CUDA_create(500)

    # Detect, download and filter keypoints of the first image, then
    # recompute descriptors from the filtered keypoints
    kp = c_surf.detect(cuMat1, None)
    kp = c_surf.downloadKeypoints(kp)
    kp = kp_filtersort_L2(kp, img1, bbox1, kp_center1)
    kp, des = c_surf.detectWithDescriptors(cuMat1, None, cv.cuda_GpuMat(kp))

    # Same for the second image
    kp2 = c_surf.detect(cuMat2, None)
    kp2 = c_surf.downloadKeypoints(kp2)
    kp2 = kp_filtersort_L2(kp2, img2, bbox2, kp_center2)
    kp2, des2 = c_surf.compute(img2, kp2)

    # Brute force matcher (L1 norm)
    cbf = cv.cuda_DescriptorMatcher.createBFMatcher(cv.NORM_L1)
    cmatches = cbf.match(des, des2)

    # Sort matches by score and keep only the best 15%
    cmatches.sort(key=lambda x: x.distance, reverse=False)
    numGoodMatches = int(len(cmatches) * 0.15)
    cmatches = cmatches[:numGoodMatches]

    return (kp, des), (kp2, des2), cmatches

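# kp_filtersort_L2 is referenced above but not defined in this snippet. Below
# is a minimal sketch of what it might look like, assuming it keeps keypoints
# that fall inside the bounding box and sorts them by L2 distance to a
# reference point. This is a hypothetical implementation, not the original.
def kp_filtersort_L2(keypoints, img, bbox, kp_center):
    x, y, w, h = bbox
    # Keep only keypoints inside the bounding box
    inside = [kp for kp in keypoints
              if x <= kp.pt[0] <= x + w and y <= kp.pt[1] <= y + h]
    # Sort by squared Euclidean (L2) distance to the reference point
    inside.sort(key=lambda kp: (kp.pt[0] - kp_center[0]) ** 2 +
                               (kp.pt[1] - kp_center[1]) ** 2)
    return inside
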
def run(use_random=True, visualize=False):
    cuda_stream = cv2.cuda_Stream()
    backSub = cv2.cuda.createBackgroundSubtractorMOG2(history=100,
                                                      varThreshold=16,
                                                      detectShadows=False)
    # backSub = cv2.createBackgroundSubtractorKNN(history=30, dist2Threshold=400.0, detectShadows=True)

    warped_list = np.load("X:/frank.npy", mmap_mode='r')
    N = 1000
    bg_list = []

    for frame_counter in range(N):
        if use_random:
            index = random.randint(0, len(warped_list) - 1)
        else:
            index = frame_counter % len(warped_list)
        warped = warped_list[index]

        # Upload the frame and update the background model on the GPU
        warped_gpu = cv2.cuda_GpuMat()
        warped_gpu.upload(warped)
        fgMask = backSub.apply(warped_gpu, -1, cuda_stream)
        fgMask = fgMask.download()
        fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_CLOSE,
                                  kernel=np.ones((5, 5), np.uint8))
        if visualize:
            cv2.imshow('FG Mask', fgMask)

        # Fetch the current background estimate
        bg = cv2.cuda_GpuMat(warped_gpu.size(), warped_gpu.type())
        backSub.getBackgroundImage(cuda_stream, bg)
        bg = bg.download()
        if visualize:
            cv2.imshow('BG', bg)
            key = cv2.waitKey(1)
            if key == 27:
                break

    cv2.imwrite("X:/final.png", bg)
    if visualize:
        cv2.waitKey(0)
    cv2.destroyAllWindows()

def test_surf(self):
    img_path = os.environ['OPENCV_TEST_DATA_PATH'] + "/gpu/features2d/aloe.png"
    hessianThreshold = 100
    nOctaves = 3
    nOctaveLayers = 2
    extended = False
    keypointsRatio = 0.05
    upright = False

    npMat = cv.cvtColor(cv.imread(img_path), cv.COLOR_BGR2GRAY)
    cuMat = cv.cuda_GpuMat(npMat)

    try:
        cuSurf = cv.cuda_SURF_CUDA.create(hessianThreshold, nOctaves,
                                          nOctaveLayers, extended,
                                          keypointsRatio, upright)
        surf = cv.xfeatures2d_SURF.create(hessianThreshold, nOctaves,
                                          nOctaveLayers, extended, upright)
    except cv.error as e:
        self.assertEqual(e.code, cv.Error.StsNotImplemented)
        self.skipTest("OPENCV_ENABLE_NONFREE is not enabled in this build.")

    cuKeypoints = cuSurf.detect(cuMat, cv.cuda_GpuMat())
    keypointsHost = cuSurf.downloadKeypoints(cuKeypoints)
    keypoints = surf.detect(npMat)
    self.assertTrue(len(keypointsHost) == len(keypoints))

    cuKeypoints, cuDescriptors = cuSurf.detectWithDescriptors(
        cuMat, cv.cuda_GpuMat(), cuKeypoints, useProvidedKeypoints=True)
    keypointsHost = cuSurf.downloadKeypoints(cuKeypoints)
    descriptorsHost = cuDescriptors.download()
    keypoints, descriptors = surf.compute(npMat, keypoints)
    self.assertTrue(len(keypointsHost) == len(keypoints) and
                    descriptorsHost.shape == descriptors.shape)

def test_cudaarithm_logical(self):
    npMat1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
    npMat2 = (np.random.random((128, 128)) * 255).astype(np.uint8)

    cuMat1 = cv.cuda_GpuMat()
    cuMat2 = cv.cuda_GpuMat()
    cuMat1.upload(npMat1)
    cuMat2.upload(npMat2)

    self.assertTrue(np.allclose(cv.cuda.bitwise_or(cuMat1, cuMat2).download(),
                                cv.bitwise_or(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.bitwise_and(cuMat1, cuMat2).download(),
                                cv.bitwise_and(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.bitwise_xor(cuMat1, cuMat2).download(),
                                cv.bitwise_xor(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.bitwise_not(cuMat1).download(),
                                cv.bitwise_not(npMat1)))
    self.assertTrue(np.allclose(cv.cuda.min(cuMat1, cuMat2).download(),
                                cv.min(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.max(cuMat1, cuMat2).download(),
                                cv.max(npMat1, npMat2)))

def test_cudafeatures2d(self):
    npMat1 = self.get_sample("samples/data/right01.jpg")
    npMat2 = self.get_sample("samples/data/right02.jpg")

    cuMat1 = cv.cuda_GpuMat()
    cuMat2 = cv.cuda_GpuMat()
    cuMat1.upload(npMat1)
    cuMat2.upload(npMat2)
    cuMat1 = cv.cuda.cvtColor(cuMat1, cv.COLOR_RGB2GRAY)
    cuMat2 = cv.cuda.cvtColor(cuMat2, cv.COLOR_RGB2GRAY)

    fast = cv.cuda_FastFeatureDetector.create()
    _kps = fast.detectAsync(cuMat1)

    orb = cv.cuda_ORB.create()
    _kps1, descs1 = orb.detectAndComputeAsync(cuMat1, None)
    _kps2, descs2 = orb.detectAndComputeAsync(cuMat2, None)
    self.assertTrue(len(orb.convert(_kps1)) == _kps1.size()[0])
    self.assertTrue(len(orb.convert(_kps2)) == _kps2.size()[0])

    bf = cv.cuda_DescriptorMatcher.createBFMatcher(cv.NORM_HAMMING)
    matches = bf.match(descs1, descs2)
    self.assertGreater(len(matches), 0)
    matches = bf.knnMatch(descs1, descs2, 2)
    self.assertGreater(len(matches), 0)
    matches = bf.radiusMatch(descs1, descs2, 0.1)
    self.assertGreater(len(matches), 0)

    self.assertTrue(True)  # It is sufficient that no exceptions have been thrown

def test_cudaarithm(self):
    npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)

    cuMat = cv.cuda_GpuMat(npMat)
    cuMatDst = cv.cuda_GpuMat(cuMat.size(), cuMat.type())
    cuMatB = cv.cuda_GpuMat(cuMat.size(), cv.CV_8UC1)
    cuMatG = cv.cuda_GpuMat(cuMat.size(), cv.CV_8UC1)
    cuMatR = cv.cuda_GpuMat(cuMat.size(), cv.CV_8UC1)

    self.assertTrue(np.allclose(cv.cuda.merge(cv.cuda.split(cuMat)), npMat))

    cv.cuda.split(cuMat, [cuMatB, cuMatG, cuMatR])
    cv.cuda.merge([cuMatB, cuMatG, cuMatR], cuMatDst)
    self.assertTrue(np.allclose(cuMatDst.download(), npMat))

    shift = (np.random.random((cuMat.channels(),)) * 8).astype(np.uint8).tolist()

    self.assertTrue(np.allclose(cv.cuda.rshift(cuMat, shift).download(),
                                npMat >> shift))
    cv.cuda.rshift(cuMat, shift, cuMatDst)
    self.assertTrue(np.allclose(cuMatDst.download(), npMat >> shift))

    self.assertTrue(np.allclose(cv.cuda.lshift(cuMat, shift).download(),
                                (npMat << shift).astype('uint8')))
    cv.cuda.lshift(cuMat, shift, cuMatDst)
    self.assertTrue(np.allclose(cuMatDst.download(),
                                (npMat << shift).astype('uint8')))

def test_cudafeatures2d(self):
    npMat1 = self.get_sample("samples/data/right01.jpg")
    npMat2 = self.get_sample("samples/data/right02.jpg")

    cuMat1 = cv.cuda_GpuMat()
    cuMat2 = cv.cuda_GpuMat()
    cuMat1.upload(npMat1)
    cuMat2.upload(npMat2)
    cuMat1 = cv.cuda.cvtColor(cuMat1, cv.COLOR_RGB2GRAY)
    cuMat2 = cv.cuda.cvtColor(cuMat2, cv.COLOR_RGB2GRAY)

    fast = cv.cuda_FastFeatureDetector.create()
    kps = fast.detectAsync(cuMat1)

    orb = cv.cuda_ORB.create()
    kps1, descs1 = orb.detectAndComputeAsync(cuMat1, None)
    kps2, descs2 = orb.detectAndComputeAsync(cuMat2, None)

    bf = cv.cuda_DescriptorMatcher.createBFMatcher(cv.NORM_HAMMING)
    matches = bf.match(descs1, descs2)
    self.assertGreater(len(matches), 0)
    matches = bf.knnMatch(descs1, descs2, 2)
    self.assertGreater(len(matches), 0)
    matches = bf.radiusMatch(descs1, descs2, 0.1)
    self.assertGreater(len(matches), 0)

    self.assertTrue(True)  # It is sufficient that no exceptions have been thrown

def test_calc(self):
    frame1 = os.environ['OPENCV_TEST_DATA_PATH'] + '/gpu/opticalflow/frame0.png'
    frame2 = os.environ['OPENCV_TEST_DATA_PATH'] + '/gpu/opticalflow/frame1.png'
    npMat1 = cv.cvtColor(cv.imread(frame1), cv.COLOR_BGR2GRAY)
    npMat2 = cv.cvtColor(cv.imread(frame2), cv.COLOR_BGR2GRAY)

    cuMat1 = cv.cuda_GpuMat(npMat1)
    cuMat2 = cv.cuda_GpuMat(npMat2)

    try:
        nvof = cv.cuda_NvidiaOpticalFlow_1_0.create(
            cuMat1.shape[1], cuMat1.shape[0], 5, False, False, False, 0)
        flow = nvof.calc(cuMat1, cuMat2, None)
        self.assertTrue(flow.shape[1] > 0 and flow.shape[0] > 0)
        flowUpSampled = nvof.upSampler(flow[0], cuMat1.shape[1],
                                       cuMat1.shape[0], nvof.getGridSize(),
                                       None)
        nvof.collectGarbage()
    except cv.error as e:
        if e.code == cv.Error.StsBadFunc or e.code == cv.Error.StsBadArg or e.code == cv.Error.StsNullPtr:
            self.skipTest("Algorithm is not supported in the current environment")

    self.assertTrue(flowUpSampled.shape[1] > 0 and flowUpSampled.shape[0] > 0)

def surf_image_stitch(left_frame, right_frame):
    surf = cv.cuda.SURF_CUDA_create(200)
    gray_left_frame = cv.cvtColor(left_frame, cv.COLOR_BGR2GRAY)
    gray_right_frame = cv.cvtColor(right_frame, cv.COLOR_BGR2GRAY)
    gpu_gray_left_frame = cv.cuda_GpuMat(gray_left_frame)
    gpu_gray_right_frame = cv.cuda_GpuMat(gray_right_frame)

    gpu_left_key_points, gpu_left_descriptors = surf.detectWithDescriptors(
        gpu_gray_left_frame, None)
    gpu_right_key_points, gpu_right_descriptors = surf.detectWithDescriptors(
        gpu_gray_right_frame, None)

    matcher = cv.cuda.DescriptorMatcher_createBFMatcher(cv.NORM_L2)
    matches = matcher.knnMatch(gpu_left_descriptors, gpu_right_descriptors, k=2)

    # Lowe's ratio test
    good_matches = [m for m, n in matches if m.distance < 0.5 * n.distance]

    left_key_points = surf.downloadKeypoints(gpu_left_key_points)
    right_key_points = surf.downloadKeypoints(gpu_right_key_points)

    left_points = np.array([left_key_points[m.queryIdx].pt for m in good_matches])
    right_points = np.array([right_key_points[m.trainIdx].pt for m in good_matches])

    # findHomography returns (H, mask); the homography itself is h_matrix[0]
    h_matrix = cv.findHomography(left_points, right_points)

    left_height, left_width = left_frame.shape[:2]
    right_height, right_width = right_frame.shape[:2]

    # Shift the warped left frame onto the right half of the output canvas
    transform_matrix = np.array([[1.0, 0, left_width],
                                 [0, 1.0, 0],
                                 [0, 0, 1.0]])
    m_matrix = np.dot(transform_matrix, h_matrix[0])

    corners = cv.warpPerspective(left_frame, m_matrix,
                                 (left_width * 2, left_height))
    corners[0:left_height, left_width:left_width * 2] = right_frame
    return corners

def run(use_random=True, visualize=False):
    cuda_stream = cv2.cuda_Stream()
    backSub = cv2.cuda.createBackgroundSubtractorMOG2(history=100,
                                                      varThreshold=16,
                                                      detectShadows=False)
    # backSub = cv2.createBackgroundSubtractorKNN(history=30, dist2Threshold=400.0, detectShadows=True)

    warped_list = np.load("X:/warped.npy", mmap_mode='r')
    valid_mask_list = np.load("X:/mask.npy", mmap_mode='r')

    i = 0
    frame_counter = 0
    mode = True
    N = 1000
    bg_list = []

    for frame_counter in range(N):
        if use_random:
            index = random.randint(0, len(warped_list) - 1)
        else:
            index = frame_counter % len(warped_list)
        warped = warped_list[index]
        valid_mask = valid_mask_list[index]
        frame_counter += 1

        if frame_counter > 500 and random.randint(0, N - frame_counter) < 200:
            # Occasionally replay an earlier background estimate
            # (warped_final is not used further in this snippet)
            warped_final = bg_list[random.randint(0, len(bg_list) - 1)]
        else:
            if frame_counter < 200:
                # No reliable background yet: fill invalid areas with grey
                mean_canvas = np.ones(warped.shape, dtype="uint8") * 200
                mean_canvas = cv2.bitwise_or(mean_canvas, mean_canvas,
                                             mask=cv2.bitwise_not(valid_mask))
            else:
                # Fill invalid areas with the current background estimate
                mean_canvas = cv2.bitwise_or(bg, bg,
                                             mask=cv2.bitwise_not(valid_mask))
                if frame_counter % 8 == 0:
                    bg_list.append(bg)

        valid_mask = cv2.bitwise_or(warped, warped, mask=valid_mask)
        final = cv2.bitwise_or(mean_canvas, valid_mask)
        if visualize:
            cv2.imshow('Passed', final)

        # Update the MOG2 background model on the GPU
        final_gpu = cv2.cuda_GpuMat()
        final_gpu.upload(final)
        fgMask = backSub.apply(final_gpu, -1, cuda_stream)
        fgMask = fgMask.download()
        # fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_CLOSE, kernel=np.ones((5, 5), np.uint8))
        # cv2.imshow('FG Mask', fgMask)

        bg = cv2.cuda_GpuMat(final_gpu.size(), final_gpu.type())
        backSub.getBackgroundImage(cuda_stream, bg)
        bg = bg.download()
        bg_list.append(bg)

        if visualize:
            cv2.imshow('BG', bg)
            key = cv2.waitKey(1)
            if key == 27:
                break
            if key == ord('m'):
                mode = not mode
            if key == ord(' '):
                cv2.imwrite(str(i) + ".jpg", warped)
                i += 1

    cv2.imwrite("X:/final.png", bg)
    # cv2.waitKey(0)
    cv2.destroyAllWindows()

def __init__(self, width, height):
    self.width = width
    self.height = height
    self.src_gpu = [cv2.cuda_GpuMat() for i in range(4)]
    self.dst_gpu = cv2.cuda_GpuMat()
    self.merged_cpu = np.zeros((height, width, 4), dtype=np.uint8)
    self.merged_gpu = cv2.cuda_GpuMat(height, width, cv2.CV_8UC4)

def test_warp(self):
    npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
    size = npMat.shape[:2]
    M1 = create_affine_transform_matrix(size, np.pi / 2)

    cuMat = cv.cuda_GpuMat(npMat)
    cuMatDst = cv.cuda_GpuMat(size, cuMat.type())
    borderType = cv.BORDER_REFLECT101

    self.assertTrue(np.allclose(
        cv.cuda.warpAffine(cuMat, M1, size, borderMode=borderType).download(),
        cv.warpAffine(npMat, M1, size, borderMode=borderType)))
    cv.cuda.warpAffine(cuMat, M1, size, cuMatDst, borderMode=borderType)
    self.assertTrue(np.allclose(
        cuMatDst.download(),
        cv.warpAffine(npMat, M1, size, borderMode=borderType)))

    interpolation = cv.INTER_NEAREST
    flags = interpolation | cv.WARP_INVERSE_MAP
    dst_gold = cv.warpAffine(npMat, M1, size, flags=flags)

    cuMaps = cv.cuda.buildWarpAffineMaps(M1, True, size)
    dst = cv.remap(npMat, cuMaps[0].download(), cuMaps[1].download(),
                   interpolation)
    self.assertTrue(np.allclose(dst, dst_gold))

    xmap = cv.cuda_GpuMat(size, cv.CV_32FC1)
    ymap = cv.cuda_GpuMat(size, cv.CV_32FC1)
    cv.cuda.buildWarpAffineMaps(M1, True, size, xmap, ymap)
    dst = cv.remap(npMat, xmap.download(), ymap.download(), interpolation)
    self.assertTrue(np.allclose(dst, dst_gold))

    M2 = create_perspective_transform_matrix(size, np.pi / 2)
    self.assertTrue(np.allclose(
        cv.cuda.warpPerspective(cuMat, M2, size,
                                borderMode=borderType).download(),
        cv.warpPerspective(npMat, M2, size, borderMode=borderType)))
    cv.cuda.warpPerspective(cuMat, M2, size, cuMatDst, borderMode=borderType)
    self.assertTrue(np.allclose(
        cuMatDst.download(),
        cv.warpPerspective(npMat, M2, size, borderMode=borderType)))

    dst_gold = cv.warpPerspective(npMat, M2, size, flags=flags)
    cuMaps = cv.cuda.buildWarpPerspectiveMaps(M2, True, size)
    dst = cv.remap(npMat, cuMaps[0].download(), cuMaps[1].download(),
                   interpolation)
    self.assertTrue(np.allclose(dst, dst_gold))

    cv.cuda.buildWarpPerspectiveMaps(M2, True, size, xmap, ymap)
    dst = cv.remap(npMat, xmap.download(), ymap.download(), interpolation)
    self.assertTrue(np.allclose(dst, dst_gold))

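# create_affine_transform_matrix and create_perspective_transform_matrix are
# called above but not defined in this snippet. A minimal sketch, assuming
# they build a rotation about the image centre (hypothetical helpers, not the
# originals):
def create_affine_transform_matrix(size, angle_rad):
    h, w = size
    # 2x3 affine matrix rotating about the image centre
    return cv.getRotationMatrix2D((w / 2, h / 2), np.degrees(angle_rad), 1.0)


def create_perspective_transform_matrix(size, angle_rad):
    # Promote the 2x3 affine matrix to a 3x3 homography
    M = create_affine_transform_matrix(size, angle_rad)
    return np.vstack([M, [0, 0, 1]])
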
def optical_flow():
    size = (224, 224)
    cap = cv2.VideoCapture(
        "hmdb51/brush_hair/April_09_brush_hair_u_nm_np1_ba_goo_0.avi")

    ret, previous_frame = cap.read()
    if ret:
        # Resize, convert to gray and normalise to [0, 1]
        frame = cv2.resize(previous_frame, size)
        previous_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        previous_frame = np.float32(previous_frame) / 255.0

        # Upload the pre-processed frame to the GPU
        gpu_previous = cv2.cuda_GpuMat(size, cv2.CV_32FC1)
        gpu_previous.upload(previous_frame)

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            frame = cv2.resize(frame, size)
            current_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            current_frame = np.float32(current_frame) / 255.0

            gpu_current = cv2.cuda_GpuMat(size, cv2.CV_32FC1)
            gpu_current.upload(current_frame)

            # Dense Brox optical flow on the GPU
            gpu_flow = cv2.cuda_BroxOpticalFlow.create(0.197, 50.0, 0.8, 5, 150, 10)
            gpu_flow = cv2.cuda_BroxOpticalFlow.calc(
                gpu_flow, gpu_previous, gpu_current, None)
            optical_flow = gpu_flow.download()

            # Split the two-channel flow into x and y components
            gpu_flow_x = cv2.cuda_GpuMat(gpu_flow.size(), cv2.CV_32FC1)
            gpu_flow_y = cv2.cuda_GpuMat(gpu_flow.size(), cv2.CV_32FC1)
            cv2.cuda.split(gpu_flow, [gpu_flow_x, gpu_flow_y])
            optical_flow_x = gpu_flow_x.download()
            optical_flow_y = gpu_flow_y.download()

            a = np.concatenate((optical_flow, np.zeros((224, 224, 1))), axis=2)
            dist_x = optical_flow_x.max() - optical_flow_x.min()
            dist_y = optical_flow_y.max() - optical_flow_y.min()

            # Current frame becomes the previous frame for the next iteration
            gpu_previous.upload(current_frame)

def color_filter(frame, hsv_mat, gray_mat, lower, upper):
    # White range applied to the grayscale image
    lower_white = np.array([0, 0, 0], dtype="uint8")
    upper_white = np.array([0, 0, 255], dtype="uint8")

    # Build the colour mask and the white mask, then combine them
    mask_range = in_range(frame, hsv_mat, lower, upper)
    mask_white = in_range(frame, gray_mat, lower_white, upper_white)
    mask_cw = cv2.cuda.bitwise_or(mask_white, mask_range)
    mask_cw_im = cv2.cuda.bitwise_and(gray_mat, mask_cw)
    return mask_cw_im

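# in_range is called above but not defined in this snippet. A minimal sketch,
# assuming it returns a GpuMat mask of pixels within [lower, upper]; this
# hypothetical version falls back to the CPU cv2.inRange and re-uploads the
# result, rather than guessing at a CUDA equivalent:
def in_range(frame, gpu_mat, lower, upper):
    host = gpu_mat.download()
    mask = cv2.inRange(host, lower, upper)
    gpu_mask = cv2.cuda_GpuMat()
    gpu_mask.upload(mask)
    return gpu_mask
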
def cuda_test():
    for i in range(1000):
        npTmp = np.random.random((1024, 1024)).astype(np.float32)
        npMat1 = np.stack([npTmp, npTmp], axis=2)
        npMat2 = npMat1

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        cv.cuda.gemm(cuMat1, cuMat2, 1, None, 0, None, 1)

def __init__(self, width, height):
    self.width = width
    self.height = height
    self.src_gpu = cv2.cuda_GpuMat()
    self.dst_gpu = [cv2.cuda_GpuMat() for i in range(4)]
    self.color_cpu = np.zeros((height, width, 3), dtype=np.uint8)
    self.alpha_cpu = np.zeros((height, width, 1), dtype=np.uint8)
    self.color_gpu = cv2.cuda_GpuMat(height, width, cv2.CV_8UC3)
    self.alpha_gpu = cv2.cuda_GpuMat(height, width, cv2.CV_8UC1)

def main_module():
    cap = cv2.VideoCapture("video.mp4")
    img_mat = cv2.cuda_GpuMat()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # lower_black = np.array([0, 0, 0], dtype="uint8")
        # upper_black = np.array([230, 255, 65], dtype="uint8")
        # frame = cv2.imread("dahedra_armor.png")
        # cv2.imshow("frame", frame)

        img_mat.upload(frame)
        gray_mat = cv2.cuda.cvtColor(img_mat, cv2.COLOR_RGB2GRAY)

        # Median filter and min-max normalisation on the GPU
        median_filter = cv2.cuda.createMedianFilter(cv2.CV_8UC1, 7)
        median = median_filter.apply(gray_mat)
        norm = cv2.cuda.normalize(median, 0, 255, cv2.NORM_MINMAX, 1)
        norm_c = norm.download()
        cv2.imshow("norm", norm_c)

        # Perspective ("bird's eye") warp of the normalised image
        warp = bird_view(frame, norm)
        wp = warp.download()
        cv2.imshow("warp-gpu", wp)

        # canny_filter = cv2.cuda.createCannyEdgeDetector(50, 150)
        # canny = canny_filter.detect(norm)
        # canny_show = canny.download()
        # cv2.imshow("canny", canny_show)
        # hsv_mat = cv2.cuda.cvtColor(img_mat, cv2.COLOR_RGB2HSV)
        # gauss = cv2.cuda.createGaussianFilter(cv2.CV_8UC3, -1, (3, 3), 16)
        # filtered_col = color_filter(frame, hsv_mat, gray_img, lower_black, upper_black)
        # fil = filtered_col.download()
        # cv2.imshow("fil", fil)
        # gauss_filtered = gauss.apply(filtered_col)
        # gauss_filtered = gauss.apply(hsv_mat)
        # detector = cv2.cuda.createCannyEdgeDetector(50, 150)
        # detector.detect(gauss_filtered)
        # canny = detector.download()
        # cv2.imshow("Canny", canny)

        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

    # Free resources and close all windows
    cap.release()
    cv2.destroyAllWindows()

def bird_view(frame, img_mat):
    image_h, image_w, _ = frame.shape

    # Source and destination points for the perspective transform
    src = np.float32([[0, image_h], [1207, image_h], [0, 0], [image_w, 0]])
    dst = np.float32([[568, image_h], [711, image_h], [0, 0], [image_w, 0]])

    M = cv2.cuda_GpuMat()
    M_cpu = cv2.getPerspectiveTransform(src, dst)
    M.upload(M_cpu)

    bird_mat = cv2.cuda.warpPerspective(img_mat, M, (1280, 960))
    return bird_mat

def test_cudaarithm_arithmetic(self):
    npMat1 = np.random.random((128, 128, 3)) - 0.5
    npMat2 = np.random.random((128, 128, 3)) - 0.5

    cuMat1 = cv.cuda_GpuMat()
    cuMat2 = cv.cuda_GpuMat()
    cuMat1.upload(npMat1)
    cuMat2.upload(npMat2)

    self.assertTrue(np.allclose(cv.cuda.add(cuMat1, cuMat2).download(),
                                cv.add(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.subtract(cuMat1, cuMat2).download(),
                                cv.subtract(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.multiply(cuMat1, cuMat2).download(),
                                cv.multiply(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.divide(cuMat1, cuMat2).download(),
                                cv.divide(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.absdiff(cuMat1, cuMat2).download(),
                                cv.absdiff(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
                                cv.compare(npMat1, npMat2, cv.CMP_GE)))
    self.assertTrue(np.allclose(cv.cuda.abs(cuMat1).download(),
                                np.abs(npMat1)))
    self.assertTrue(np.allclose(cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
                                cv.cuda.abs(cuMat1).download()))
    self.assertTrue(np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(),
                                npMat1))
    self.assertTrue(np.allclose(cv.cuda.pow(cuMat1, 2).download(),
                                cv.pow(npMat1, 2)))

def hough_init(self):
    self.image_canny = cv2.cuda_GpuMat((self.W_size, self.H_size), cv2.CV_8U)
    self.cannyFilter = cv2.cuda.createCannyEdgeDetector(low_thresh=5,
                                                        high_thresh=20,
                                                        apperture_size=3)
    self.houghFilter = cv2.cuda.createHoughLinesDetector(rho=1,
                                                         theta=(np.pi / 60),
                                                         threshold=3,
                                                         doSort=True,
                                                         maxLines=30)
    self.houghResult_gpu = cv2.cuda_GpuMat((30, 2), cv2.CV_32FC2)

def main():
    rand = np.random.random((1024, 1024)).astype(np.float32)
    h_array1 = np.stack([rand, rand], axis=2)
    h_array2 = h_array1

    d_array1 = cv.cuda_GpuMat()
    d_array2 = cv.cuda_GpuMat()
    d_array1.upload(h_array1)
    d_array2.upload(h_array2)

    start = time.time()
    cv.cuda.gemm(d_array1, d_array2, 1, None, 0, None, 1)
    end = time.time()
    print("Time elapsed:", end - start, "sec")

def is_same_object(left_frame, right_frame, left_boxes, left_indices,
                   right_boxes, right_indices):
    gray_left_image = cv.cvtColor(left_frame, cv.COLOR_BGR2GRAY)
    gray_right_image = cv.cvtColor(right_frame, cv.COLOR_BGR2GRAY)
    gpu_gray_left_image = cv.cuda_GpuMat(gray_left_image)
    gpu_gray_right_image = cv.cuda_GpuMat(gray_right_image)

    surf = cv.cuda.SURF_CUDA_create(4000)
    gpu_left_key_points, gpu_left_descriptors = surf.detectWithDescriptors(
        gpu_gray_left_image, None)
    gpu_right_key_points, gpu_right_descriptors = surf.detectWithDescriptors(
        gpu_gray_right_image, None)

    matcher = cv.cuda.DescriptorMatcher_createBFMatcher(cv.NORM_L2)
    matches = matcher.knnMatch(gpu_left_descriptors, gpu_right_descriptors, k=2)

    # Lowe's ratio test
    good_match = [[m] for m, n in matches if m.distance < 0.7 * n.distance]

    left_key_points = surf.downloadKeypoints(gpu_left_key_points)
    right_key_points = surf.downloadKeypoints(gpu_right_key_points)

    # For each left box, look for a match whose left keypoint lies inside the
    # box and whose right keypoint lies inside one of the right boxes
    flag = []
    for left_index in left_indices:
        is_find = False
        left_idx = left_index[0]
        left_box = left_boxes[left_idx]
        for match in good_match:
            if (left_box[0] <= left_key_points[match[0].queryIdx].pt[0] <=
                    left_box[0] + left_box[2]) and (
                    left_box[1] <= left_key_points[match[0].queryIdx].pt[1] <=
                    left_box[1] + left_box[3]):
                right_match_point = right_key_points[match[0].trainIdx].pt
                for right_index in right_indices:
                    right_idx = right_index[0]
                    right_box = right_boxes[right_idx]
                    if (right_box[0] <= right_match_point[0] <=
                            right_box[0] + right_box[2]) and (
                            right_box[1] <= right_match_point[1] <=
                            right_box[1] + right_box[3]):
                        if right_idx not in flag:
                            flag.append(right_idx)
                            is_find = True
                            break
                if is_find:
                    break
        if not is_find:
            flag.append(-1)
    return flag

def test_cudaarithm_arithmetic(self):
    npMat1 = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)

    cuMat1 = cv.cuda_GpuMat(npMat1)
    cuMatDst = cv.cuda_GpuMat(cuMat1.size(), cuMat1.type())
    cuMatB = cv.cuda_GpuMat(cuMat1.size(), cv.CV_8UC1)
    cuMatG = cv.cuda_GpuMat(cuMat1.size(), cv.CV_8UC1)
    cuMatR = cv.cuda_GpuMat(cuMat1.size(), cv.CV_8UC1)

    self.assertTrue(np.allclose(cv.cuda.merge(cv.cuda.split(cuMat1)), npMat1))

    cv.cuda.split(cuMat1, [cuMatB, cuMatG, cuMatR])
    cv.cuda.merge([cuMatB, cuMatG, cuMatR], cuMatDst)
    self.assertTrue(np.allclose(cuMatDst.download(), npMat1))

def preprocess(files):
    # copy image files
    i_files = files.copy()

    # create GPU frame to hold images
    gpu_frame = cv.cuda_GpuMat()

    for i in range(len(i_files)):
        # load image (CPU)
        screenshot = cv.imread(f'../../media/{i_files[i]}')

        # fit screenshot to (GPU) frame
        gpu_frame.upload(screenshot)

        # translate colors to opencv (numpy.ndarray -> cv2.cuda_GpuMat)
        screenshot = cv.cuda.cvtColor(gpu_frame, cv.COLOR_RGB2BGR)
        screenshot = cv.cuda.cvtColor(screenshot, cv.COLOR_BGR2GRAY)

        # binary threshold at 125 (returns a (retval, GpuMat) tuple)
        screenshot = cv.cuda.threshold(screenshot, 125, 255, cv.THRESH_BINARY)

        # resize image
        screenshot = cv.cuda.resize(screenshot[1], (200, 200))

        # download image from GPU (cv2.cuda_GpuMat -> numpy.ndarray)
        screenshot = screenshot.download()

        # replace file name with new image
        i_files[i] = screenshot

    # output preprocessed images
    return i_files

def thresholding(img):
    # Canny edge detection followed by dilate/erode and a colour filter

    # CPU CODE
    # imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # CUDA CODE
    img_mat = cv2.cuda_GpuMat()
    img_mat.upload(img)
    gray_mat = cv2.cuda.cvtColor(img_mat, cv2.COLOR_RGB2GRAY)
    #CONTINUE CUDA# imgGray = gray_mat.download()

    kernel = np.ones((5, 5))

    # CPU CODE (GaussianBlur)
    # imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 0)

    # CUDA CODE (GaussianBlur ==> bilateralFilter)
    #CONTINUE CUDA# imgMat_src = cv2.cuda_GpuMat()
    #CONTINUE CUDA# imgMat_src.upload(imgGray)
    #CONTINUE CUDA# imgMat_dst = cv2.cuda.bilateralFilter(imgMat_src, 5, 5, 0)
    imgBlur = cv2.cuda.bilateralFilter(gray_mat, 5, 5, 0)
    #CONTINUE CUDA# imgBlur = imgMat_dst.download()

    #CONTINUE CUDA# imgCanny = cv2.Canny(imgBlur, 50, 100)
    cannyReady = cv2.cuda.createCannyEdgeDetector(50, 100)
    imgCanny_mat = cannyReady.detect(imgBlur)
    imgCanny = imgCanny_mat.download()

    # imgClose = cv2.morphologyEx(imgCanny, cv2.MORPH_CLOSE, np.ones((10, 10)))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=1)
    imgErode = cv2.erode(imgDial, kernel, iterations=1)

    imgColor = colorFilter(img)
    # combine the colour-filtered image with the edge image
    combinedImage = cv2.bitwise_or(imgColor, imgErode)

    return combinedImage, imgCanny, imgColor

def __init__(self, cam, gui=None):
    super(ImageProcPythonCommand, self).__init__()
    self._logger = getLogger(__name__)
    self._logger.addHandler(NullHandler())
    self._logger.setLevel(DEBUG)
    self._logger.propagate = True

    self.camera = cam
    self.Line = Line_Notify(self.camera)
    self.gui = gui

    # Reusable GpuMat buffers (source, template, result)
    self.gsrc = cv2.cuda_GpuMat()
    self.gtmpl = cv2.cuda_GpuMat()
    self.gresult = cv2.cuda_GpuMat()

def test_cudaimgproc_cvtColor(self):
    npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)

    cuMat = cv.cuda_GpuMat()
    cuMat.upload(npMat)

    self.assertTrue(np.allclose(cv.cuda.cvtColor(cuMat, cv.COLOR_BGR2HSV).download(),
                                cv.cvtColor(npMat, cv.COLOR_BGR2HSV)))

def test_cuda_interop(self):
    npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)

    cuMat = cv.cuda_GpuMat()
    cuMat.upload(npMat)
    self.assertTrue(cuMat.cudaPtr() != 0)

    stream = cv.cuda_Stream()
    self.assertTrue(stream.cudaPtr() != 0)

def flow_init(self):
    self.is_first_image = True
    self.image_prvs_gray = cv2.cuda_GpuMat(
        (self.image_size[1], self.image_size[0]), cv2.CV_8U)
    self.nvof = cv2.cuda_FarnebackOpticalFlow.create(numLevels=3,
                                                     pyrScale=0.5,
                                                     fastPyramids=False,
                                                     winSize=15,
                                                     numIters=3,
                                                     polyN=5,
                                                     polySigma=1.1,
                                                     flags=0)
    self.flow_gpu = cv2.cuda_GpuMat(
        (self.image_size[1], self.image_size[0]), cv2.CV_32FC2)

    # self.image_next_gray is expected to be allocated elsewhere in the class.
    # This warm-up call only serves to speed up the first real calculation.
    self.nvof.calc(self.image_prvs_gray, self.image_next_gray, None)

def colorFilter(img):
    # Colour filter for specific colours (yellow and white)

    # CUDA CODE
    img_mat = cv2.cuda_GpuMat()
    img_mat.upload(img)
    hsv_mat = cv2.cuda.cvtColor(img_mat, cv2.COLOR_BGR2HSV)
    hsv = hsv_mat.download()
    # CPU CODE
    # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    lowerYellow = np.array([18, 94, 140])  # yellow colour range
    upperYellow = np.array([48, 255, 255])
    lowerWhite = np.array([0, 0, 200])
    upperWhite = np.array([255, 255, 255])

    maskedWhite = cv2.inRange(hsv, lowerWhite, upperWhite)
    maskedYellow = cv2.inRange(hsv, lowerYellow, upperYellow)
    combinedImage = cv2.bitwise_or(maskedWhite, maskedYellow)

    #CONTINUE CUDA# hsv_mat => hsv
    #CONTINUE CUDA# maskedWhite_mat = cv2.cuda.threshold(hsv_mat, lowerWhite, upperWhite)
    #CONTINUE CUDA# maskedYellow_mat = cv2.cuda.threshold(hsv_mat, lowerYellow, upperYellow)
    #CONTINUE CUDA# combine_mat = cv2.cuda.bitwise_or(maskedWhite_mat, maskedYellow_mat)
    #CONTINUE CUDA# combinedImage = combine_mat.download()

    return combinedImage

def test_cuda_filter_laplacian(self):
    npMat = (np.random.random((200, 200)) * 255).astype(np.uint16)

    gpuMat = cv.cuda_GpuMat()
    gpuMat.upload(npMat)
    gpuMat = cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3).apply(gpuMat)

    self.assertTrue(np.allclose(gpuMat.download(),
                                cv.Laplacian(npMat, cv.CV_16UC1, ksize=3)))

def test_cuda_imgproc_cvtColor(self):
    npMat = (np.random.random((200, 200, 3)) * 255).astype(np.uint8)

    gpuMat = cv.cuda_GpuMat()
    gpuMat.upload(npMat)
    gpuMat2 = cv.cuda.cvtColor(gpuMat, cv.COLOR_BGR2HSV)

    self.assertTrue(np.allclose(gpuMat2.download(),
                                cv.cvtColor(npMat, cv.COLOR_BGR2HSV)))

def test_cudaarithm_arithmetic(self):
    npMat1 = np.random.random((128, 128, 3)) - 0.5
    npMat2 = np.random.random((128, 128, 3)) - 0.5

    cuMat1 = cv.cuda_GpuMat()
    cuMat2 = cv.cuda_GpuMat()
    cuMat1.upload(npMat1)
    cuMat2.upload(npMat2)

    self.assertTrue(np.allclose(cv.cuda.add(cuMat1, cuMat2).download(),
                                cv.add(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.subtract(cuMat1, cuMat2).download(),
                                cv.subtract(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.multiply(cuMat1, cuMat2).download(),
                                cv.multiply(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.divide(cuMat1, cuMat2).download(),
                                cv.divide(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.absdiff(cuMat1, cuMat2).download(),
                                cv.absdiff(npMat1, npMat2)))
    self.assertTrue(np.allclose(cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
                                cv.compare(npMat1, npMat2, cv.CMP_GE)))
    self.assertTrue(np.allclose(cv.cuda.abs(cuMat1).download(),
                                np.abs(npMat1)))
    self.assertTrue(np.allclose(cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
                                cv.cuda.abs(cuMat1).download()))
    self.assertTrue(np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(),
                                npMat1))
    self.assertTrue(np.allclose(cv.cuda.pow(cuMat1, 2).download(),
                                cv.pow(npMat1, 2)))

def test_cudaimgproc(self):
    npC1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
    npC3 = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
    npC4 = (np.random.random((128, 128, 4)) * 255).astype(np.uint8)

    cuC1 = cv.cuda_GpuMat()
    cuC3 = cv.cuda_GpuMat()
    cuC4 = cv.cuda_GpuMat()
    cuC1.upload(npC1)
    cuC3.upload(npC3)
    cuC4.upload(npC4)

    cv.cuda.cvtColor(cuC3, cv.COLOR_RGB2HSV)
    cv.cuda.demosaicing(cuC1, cv.cuda.COLOR_BayerGR2BGR_MHT)
    cv.cuda.gammaCorrection(cuC3)
    cv.cuda.alphaComp(cuC4, cuC4, cv.cuda.ALPHA_XOR)
    cv.cuda.calcHist(cuC1)
    cv.cuda.equalizeHist(cuC1)
    cv.cuda.evenLevels(3, 0, 255)
    cv.cuda.meanShiftFiltering(cuC4, 10, 5)
    cv.cuda.meanShiftProc(cuC4, 10, 5)
    cv.cuda.bilateralFilter(cuC3, 3, 16, 3)
    cv.cuda.blendLinear

    cv.cuda.meanShiftSegmentation(cuC4, 10, 5, 5).download()

    clahe = cv.cuda.createCLAHE()
    clahe.apply(cuC1, cv.cuda_Stream.Null())

    histLevels = cv.cuda.histEven(cuC3, 20, 0, 255)
    cv.cuda.histRange(cuC1, histLevels)

    detector = cv.cuda.createCannyEdgeDetector(0, 100)
    detector.detect(cuC1)

    detector = cv.cuda.createHoughLinesDetector(3, np.pi / 180, 20)
    detector.detect(cuC1)

    detector = cv.cuda.createHoughSegmentDetector(3, np.pi / 180, 20, 5)
    detector.detect(cuC1)

    detector = cv.cuda.createHoughCirclesDetector(3, 20, 10, 10, 20, 100)
    detector.detect(cuC1)

    detector = cv.cuda.createGeneralizedHoughBallard()
    # BUG: detect accepts only Mat!
    # Even if generate_gpumat_decls is set to True, it only wraps overloaded CUDA functions.
    # The problem is that Mat and GpuMat are not fully compatible to enable system-wide overloading.
    # detector.detect(cuC1, cuC1, cuC1)

    detector = cv.cuda.createGeneralizedHoughGuil()
    # BUG: same as above.
    # detector.detect(cuC1, cuC1, cuC1)

    detector = cv.cuda.createHarrisCorner(cv.CV_8UC1, 15, 5, 1)
    detector.compute(cuC1)

    detector = cv.cuda.createMinEigenValCorner(cv.CV_8UC1, 15, 5, 1)
    detector.compute(cuC1)

    detector = cv.cuda.createGoodFeaturesToTrackDetector(cv.CV_8UC1)
    detector.detect(cuC1)

    matcher = cv.cuda.createTemplateMatching(cv.CV_8UC1, cv.TM_CCOEFF_NORMED)
    matcher.match(cuC3, cuC3)

    self.assertTrue(True)  # It is sufficient that no exceptions have been thrown

def test_cuda_upload_download(self):
    npMat = (np.random.random((200, 200, 3)) * 255).astype(np.uint8)

    gpuMat = cv.cuda_GpuMat()
    gpuMat.upload(npMat)

    self.assertTrue(np.allclose(gpuMat.download(), npMat))