def test_kmeans_2d(self):
    """K-means over random 2D points via G-API: check compactness and result sizes."""
    # K-means parameters.
    num_points = 100
    shape = (num_points, 2)
    clusters = 5
    flags = cv.KMEANS_RANDOM_CENTERS
    attempts = 1
    criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0)

    input_points = self.generate_random_points(shape)
    seed_labels = []

    # Build the G-API graph: distinct g_* names for graph nodes vs. results.
    g_data = cv.GArrayT(cv.gapi.CV_POINT2F)
    g_seed_labels = cv.GArrayT(cv.gapi.CV_INT)
    g_compactness, g_labels, g_centers = cv.gapi.kmeans(
        g_data, clusters, g_seed_labels, criteria, attempts, flags)
    comp = cv.GComputation(cv.GIn(g_data, g_seed_labels),
                           cv.GOut(g_compactness, g_labels, g_centers))

    compact, labels, centers = comp.apply(cv.gin(input_points, seed_labels))

    # Assert: non-negative compactness, one label per point, one center per cluster.
    self.assertTrue(compact >= 0)
    self.assertEqual(num_points, len(labels))
    self.assertEqual(clusters, len(centers))
def test_age_gender_infer2_roi(self):
    """Run age/gender inference over a list of ROIs via G-API infer2 and
    compare the outputs against a plain OpenCV DNN reference, per ROI."""
    # NB: Check IE — skip (silent return) when the IE backend cannot run on CPU.
    if cv.dnn.DNN_TARGET_CPU not in cv.dnn.getAvailableTargets(
            cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
        return

    root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
    model_path = self.find_file(root_path + '.xml',
                                [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
    weights_path = self.find_file(root_path + '.bin',
                                  [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
    device_id = 'CPU'

    # Four 62x62 face ROIs inside the test image.
    rois = [(10, 15, 62, 62), (23, 50, 62, 62),
            (14, 100, 62, 62), (80, 50, 62, 62)]
    img_path = self.find_file('cv/face/david2.jpg',
                              [os.environ.get('OPENCV_TEST_DATA_PATH')])
    img = cv.imread(img_path)

    # Reference: OpenCV DNN, one inference per ROI.
    dnn_age_list = []
    dnn_gender_list = []
    for roi in rois:
        age, gender = self.infer_reference_network(
            model_path, weights_path, self.make_roi(img, roi))
        dnn_age_list.append(age)
        dnn_gender_list.append(gender)

    # OpenCV G-API: infer2 runs the network over the whole ROI list in one graph.
    g_in = cv.GMat()
    g_rois = cv.GArrayT(cv.gapi.CV_RECT)
    inputs = cv.GInferListInputs()
    inputs.setInput('data', g_rois)

    outputs = cv.gapi.infer2("net", g_in, inputs)
    age_g = outputs.at("age_conv3")
    gender_g = outputs.at("prob")

    comp = cv.GComputation(cv.GIn(g_in, g_rois), cv.GOut(age_g, gender_g))
    pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)

    gapi_age_list, gapi_gender_list = comp.apply(
        cv.gin(img, rois), args=cv.gapi.compile_args(cv.gapi.networks(pp)))

    # Check: G-API outputs must match the DNN reference exactly (L-inf norm == 0).
    for gapi_age, gapi_gender, dnn_age, dnn_gender in zip(
            gapi_age_list, gapi_gender_list, dnn_age_list, dnn_gender_list):
        self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
        self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))
def test_garray_type(self):
    """A GArrayT must report back exactly the element type it was built with."""
    all_types = (cv.gapi.CV_BOOL, cv.gapi.CV_INT, cv.gapi.CV_DOUBLE,
                 cv.gapi.CV_FLOAT, cv.gapi.CV_STRING, cv.gapi.CV_POINT,
                 cv.gapi.CV_POINT2F, cv.gapi.CV_SIZE, cv.gapi.CV_RECT,
                 cv.gapi.CV_SCALAR, cv.gapi.CV_MAT, cv.gapi.CV_GMAT)
    for elem_type in all_types:
        arr = cv.GArrayT(elem_type)
        self.assertEqual(elem_type, arr.type())
def test_custom_op_boundingRect(self):
    """The custom boundingRect kernel must agree with cv.boundingRect."""
    pts = [(0, 0), (0, 1), (1, 0), (1, 1)]

    # Reference: plain OpenCV on the same point set.
    expected = cv.boundingRect(np.array(pts))

    # G-API graph routed through the custom kernel implementation.
    g_points = cv.GArrayT(cv.gapi.CV_POINT)
    g_rect = boundingRect(g_points)
    comp = cv.GComputation(cv.GIn(g_points), cv.GOut(g_rect))

    pkg = cv.gapi.wip.kernels((custom_boundingRect, 'custom.boundingRect'))
    actual = comp.apply(cv.gin(pts), args=cv.compile_args(pkg))

    # NOTE(review): comparing rect tuples with cv.norm — assumed element-wise; confirm.
    self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
def __new__(cls):
    """Construct the underlying GArrayT of GMat elements.

    ``__new__`` receives the class, so the parameter is named ``cls``
    (the original misleadingly called it ``self``).
    """
    return cv.GArrayT(cv.gapi.CV_GMAT)
def __new__(cls):
    """Construct the underlying GArrayT of Scalar elements.

    ``__new__`` receives the class, so the parameter is named ``cls``
    (the original misleadingly called it ``self``).
    """
    return cv.GArrayT(cv.gapi.CV_SCALAR)
def __new__(cls):
    """Construct the underlying GArrayT of Point2f elements.

    ``__new__`` receives the class, so the parameter is named ``cls``
    (the original misleadingly called it ``self``).
    """
    return cv.GArrayT(cv.gapi.CV_POINT2F)
def __new__(cls):
    """Construct the underlying GArrayT of string elements.

    ``__new__`` receives the class, so the parameter is named ``cls``
    (the original misleadingly called it ``self``).
    """
    return cv.GArrayT(cv.gapi.CV_STRING)
def __new__(cls):
    """Construct the underlying GArrayT of double elements.

    ``__new__`` receives the class, so the parameter is named ``cls``
    (the original misleadingly called it ``self``).
    """
    return cv.GArrayT(cv.gapi.CV_DOUBLE)
def __new__(cls, argtype):
    """Create a GArrayT whose element type is *argtype*."""
    array = cv.GArrayT(argtype)
    return array