Code Example #1
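A Python G-API op (custom.sum) implemented by a stateful kernel: GSumImpl keeps its latest result in a class attribute, which persists across comp.apply() calls.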
        def test_stateful_kernel(self):
            @cv.gapi.op('custom.sum', in_types=[cv.GArray.Int], out_types=[cv.GOpaque.Int])
            class GSum:
                @staticmethod
                def outMeta(arr_desc):
                    return cv.empty_gopaque_desc()


            @cv.gapi.kernel(GSum)
            class GSumImpl:
                last_result = 0

                @staticmethod
                def run(arr):
                    GSumImpl.last_result = sum(arr)
                    return GSumImpl.last_result


            g_in  = cv.GArray.Int()
            comp  = cv.GComputation(cv.GIn(g_in), cv.GOut(GSum.on(g_in)))

            s = comp.apply(cv.gin([1, 2, 3, 4]), args=cv.gapi.compile_args(cv.gapi.kernels(GSumImpl)))
            self.assertEqual(10, s)

            s = comp.apply(cv.gin([1, 2, 8, 7]), args=cv.gapi.compile_args(cv.gapi.kernels(GSumImpl)))
            self.assertEqual(18, s)

            self.assertEqual(18, GSumImpl.last_result)
Code Example #2
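Renders primitives onto an NV12 image (separate Y and UV planes) with cv.gapi.wip.draw.renderNV12 and compares the result against the reference renderer.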
        def test_render_primitives_on_nv12_graph(self):
            y_expected = np.zeros((self.size[0], self.size[1], 1),
                                  dtype=np.uint8)
            uv_expected = np.zeros((self.size[0] // 2, self.size[1] // 2, 2),
                                   dtype=np.uint8)

            y_actual = np.array(y_expected, copy=True)
            uv_actual = np.array(uv_expected, copy=True)

            # OpenCV
            self.render_primitives_nv12_ref(y_expected, uv_expected)

            # G-API
            g_y = cv.GMat()
            g_uv = cv.GMat()
            g_prims = cv.GArray.Prim()
            g_out_y, g_out_uv = cv.gapi.wip.draw.renderNV12(g_y, g_uv, g_prims)

            comp = cv.GComputation(cv.GIn(g_y, g_uv, g_prims),
                                   cv.GOut(g_out_y, g_out_uv))
            y_actual, uv_actual = comp.apply(
                cv.gin(y_actual, uv_actual, self.prims))

            self.assertEqual(0.0, cv.norm(y_expected, y_actual, cv.NORM_INF))
            self.assertEqual(0.0, cv.norm(uv_expected, uv_actual, cv.NORM_INF))
Code Example #3
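Checks that an exception raised inside a custom op's outMeta propagates out of comp.apply().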
        def test_raise_in_outMeta(self):
            @cv.gapi.op('custom.op', in_types=[cv.GMat, cv.GMat], out_types=[cv.GMat])
            class GAdd:
                @staticmethod
                def outMeta(desc0, desc1):
                    raise NotImplementedError("outMeta isn't implemented")

            @cv.gapi.kernel(GAdd)
            class GAddImpl:
                @staticmethod
                def run(img0, img1):
                    return img0 + img1

            g_in0 = cv.GMat()
            g_in1 = cv.GMat()
            g_out = GAdd.on(g_in0, g_in1)

            comp = cv.GComputation(cv.GIn(g_in0, g_in1), cv.GOut(g_out))

            img0 = np.array([1, 2, 3])
            img1 = np.array([1, 2, 3])

            with self.assertRaises(Exception):
                comp.apply(cv.gin(img0, img1),
                           args=cv.gapi.compile_args(cv.gapi.kernels(GAddImpl)))
Code Example #4
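An add/split3/mean pipeline where every standard kernel is replaced by a custom Python implementation registered by name through cv.gapi_wip_kernels.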
    def test_multiple_custom_kernels(self):
        sz = (3, 3, 3)
        in_mat1 = np.full(sz, 45, dtype=np.uint8)
        in_mat2 = np.full(sz, 50, dtype=np.uint8)

        # OpenCV
        expected = cv.mean(cv.split(cv.add(in_mat1, in_mat2))[1])

        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        g_sum = cv.gapi.add(g_in1, g_in2)
        g_b, g_r, g_g = cv.gapi.split3(g_sum)
        g_mean = cv.gapi.mean(g_b)

        comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_mean))

        pkg = cv.gapi_wip_kernels(
            (custom_add, 'org.opencv.core.math.add'),
            (custom_mean, 'org.opencv.core.math.mean'),
            (custom_split3, 'org.opencv.core.transform.split3'))

        actual = comp.apply(cv.gin(in_mat1, in_mat2),
                            args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #5
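A custom op whose outMeta returns a descriptor of the wrong type; running the computation is expected to raise.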
        def test_invalid_outMeta(self):
            @cv.gapi.op('custom.op', in_types=[cv.GMat, cv.GMat], out_types=[cv.GMat])
            class GAdd:
                @staticmethod
                def outMeta(desc0, desc1):
                    # Invalid outMeta
                    return cv.empty_gopaque_desc()

            @cv.gapi.kernel(GAdd)
            class GAddImpl:
                @staticmethod
                def run(img0, img1):
                    return img0 + img1

            g_in0 = cv.GMat()
            g_in1 = cv.GMat()
            g_out = GAdd.on(g_in0, g_in1)

            comp = cv.GComputation(cv.GIn(g_in0, g_in1), cv.GOut(g_out))

            img0 = np.array([1, 2, 3])
            img1 = np.array([1, 2, 3])

            # FIXME: Cause Bad variant access.
            # Need to provide more descriptive error message.
            with self.assertRaises(Exception):
                comp.apply(cv.gin(img0, img1),
                           args=cv.gapi.compile_args(cv.gapi.kernels(GAddImpl)))
Code Example #6
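Wraps cv.goodFeaturesToTrack as a custom G-API op (GGoodFeatures/GGoodFeaturesImpl, defined elsewhere in the test module) and compares its output against the OpenCV function.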
        def test_custom_op_goodFeaturesToTrack(self):
            # Input image (used by both the OpenCV reference and the G-API graph)
            img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
            in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)

            # NB: goodFeaturesToTrack configuration
            max_corners         = 50
            quality_lvl         = 0.01
            min_distance        = 10.0
            block_sz            = 3
            use_harris_detector = True
            k                   = 0.04

            # OpenCV
            expected = cv.goodFeaturesToTrack(in_mat, max_corners, quality_lvl,
                                              min_distance, mask=None,
                                              blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)

            # G-API
            g_in = cv.GMat()
            g_out = GGoodFeatures.on(g_in, max_corners, quality_lvl,
                                     min_distance, block_sz, use_harris_detector, k)

            comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
            pkg = cv.gapi.kernels(GGoodFeaturesImpl)
            actual = comp.apply(cv.gin(in_mat), args=cv.gapi.compile_args(pkg))

            # NB: OpenCV & G-API have different output types.
            # OpenCV - numpy array with shape (num_points, 1, 2)
            # G-API  - list of tuples with size - num_points
            # Comparison
            self.assertEqual(0.0, cv.norm(expected.flatten(),
                                          np.array(actual, dtype=np.float32).flatten(), cv.NORM_INF))
Code Example #7
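A streaming pipeline that splits each video frame into B, G and R planes with cv.gapi.split3 and compares them frame by frame against cv.split.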
    def test_video_split3(self):
        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

        # OpenCV
        cap = cv.VideoCapture(path)

        # G-API
        g_in = cv.GMat()
        b, g, r = cv.gapi.split3(g_in)
        c = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r))

        ccomp = c.compileStreaming()
        source = cv.gapi.wip.make_capture_src(path)
        ccomp.setSource(source)
        ccomp.start()

        # Assert
        max_num_frames  = 10
        proc_num_frames = 0
        while cap.isOpened():
            has_expected, frame = cap.read()
            has_actual,   actual   = ccomp.pull()

            self.assertEqual(has_expected, has_actual)

            if not has_actual:
                break

            expected = cv.split(frame)
            for e, a in zip(expected, actual):
                self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF))

            proc_num_frames += 1
            if proc_num_frames == max_num_frames:
                break
Code Example #8
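A prepare_net helper (note the cv2 alias) that builds an inference graph with cv2.gapi.infer and assembles backend-specific compile arguments for either the IE or the MX backend.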
    def prepare_net(self):
        inputs = cv2.GInferInputs()
        g_inputs = []
        for input_name in self.inputs:
            if input_name in self.const_inputs:
                continue
            g_in = cv2.GMat()
            inputs.setInput(input_name, g_in)
            g_inputs.append(g_in)

        outputs = cv2.gapi.infer("net", inputs)
        g_outputs = [outputs.at(out_name) for out_name in self.output_names]
        self.comp = cv2.GComputation(cv2.GIn(*g_inputs), cv2.GOut(*g_outputs))
        args = ['net', str(self.model)]
        if self.weights is not None:
            args.append(str(self.weights))
        args.append(self.device.upper())
        if self.backend == 'ie':
            pp = cv2.gapi.ie.params(*args)
        else:
            pp = cv2.gapi.mx.params('net', str(self.model))
        for input_name, value in self._const_inputs.items():
            pp.constInput(input_name, value)
        if self.backend == 'ie':
            self.network_args = compile_args(cv2.gapi.networks(pp))
        else:
            mvcmd_file = os.environ.get('MVCMD_FILE', '')
            self.network_args = compile_args(
                cv2.gapi.networks(pp), cv2.gapi_mx_mvcmdFile(mvcmd_file)
            )
Code Example #9
File: test_gapi_core.py (project: zzmalika/opencv)
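Runs cv.gapi.kmeans on a single GMat input and checks the compactness value and the shapes of the returned labels and centers.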
    def test_kmeans(self):
        # K-means params
        count = 100
        sz = (count, 2)
        in_mat = np.random.random(sz).astype(np.float32)
        K = 5
        flags = cv.KMEANS_RANDOM_CENTERS
        attempts = 1
        criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0)

        # G-API
        g_in = cv.GMat()
        compactness, out_labels, centers = cv.gapi.kmeans(
            g_in, K, criteria, attempts, flags)
        comp = cv.GComputation(cv.GIn(g_in),
                               cv.GOut(compactness, out_labels, centers))

        compact, labels, centers = comp.apply(cv.gin(in_mat))

        # Assert
        self.assertTrue(compact >= 0)
        self.assertEqual(sz[0], labels.shape[0])
        self.assertEqual(1, labels.shape[1])
        self.assertTrue(labels.size != 0)
        self.assertEqual(centers.shape[1], sz[1])
        self.assertEqual(centers.shape[0], K)
        self.assertTrue(centers.size != 0)
Code Example #10
File: test_gapi_core.py (project: zzmalika/opencv)
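Runs cv.gapi.threshold with cv.THRESH_TRIANGLE for every kernel package in pkgs and compares the result matrix, its dtype and the threshold value against cv.threshold.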
    def test_threshold(self):
        img_path = self.find_file('cv/face/david2.jpg',
                                  [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
        maxv = (30, 30)

        # OpenCV
        expected_thresh, expected_mat = cv.threshold(in_mat, maxv[0], maxv[0],
                                                     cv.THRESH_TRIANGLE)

        # G-API
        g_in = cv.GMat()
        g_sc = cv.GScalar()
        mat, threshold = cv.gapi.threshold(g_in, g_sc, cv.THRESH_TRIANGLE)
        comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(mat, threshold))

        for pkg_name, pkg in pkgs:
            actual_mat, actual_thresh = comp.apply(cv.gin(in_mat, maxv),
                                                   args=cv.compile_args(pkg))
            # Comparison
            self.assertEqual(0.0, cv.norm(expected_mat, actual_mat,
                                          cv.NORM_INF),
                             'Failed on ' + pkg_name + ' backend')
            self.assertEqual(expected_mat.dtype, actual_mat.dtype,
                             'Failed on ' + pkg_name + ' backend')
            self.assertEqual(expected_thresh, actual_thresh[0],
                             'Failed on ' + pkg_name + ' backend')
Code Example #11
File: test_gapi_core.py (project: zzmalika/opencv)
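Runs cv.gapi.kmeans on a GArray of 2D points together with an (empty) initial-labels array and checks the sizes of the outputs.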
    def test_kmeans_2d(self):
        # K-means 2D params
        count = 100
        sz = (count, 2)
        amount = sz[0]
        K = 5
        flags = cv.KMEANS_RANDOM_CENTERS
        attempts = 1
        criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0)
        in_vector = self.generate_random_points(sz)
        in_labels = []

        # G-API
        data = cv.GArrayT(cv.gapi.CV_POINT2F)
        best_labels = cv.GArrayT(cv.gapi.CV_INT)

        compactness, out_labels, centers = cv.gapi.kmeans(
            data, K, best_labels, criteria, attempts, flags)
        comp = cv.GComputation(cv.GIn(data, best_labels),
                               cv.GOut(compactness, out_labels, centers))

        compact, labels, centers = comp.apply(cv.gin(in_vector, in_labels))

        # Assert
        self.assertTrue(compact >= 0)
        self.assertEqual(amount, len(labels))
        self.assertEqual(K, len(centers))
Code Example #12
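Pulls the streaming metadata accessors timestamp, seqNo and seq_id alongside a video stream and checks that the sequence numbers match the frame counter.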
        def test_gapi_streaming_meta(self):
            ksize = 3
            path = self.find_file('cv/video/768x576.avi',
                                  [os.environ['OPENCV_TEST_DATA_PATH']])

            # G-API
            g_in = cv.GMat()
            g_ts = cv.gapi.streaming.timestamp(g_in)
            g_seqno = cv.gapi.streaming.seqNo(g_in)
            g_seqid = cv.gapi.streaming.seq_id(g_in)

            c = cv.GComputation(cv.GIn(g_in), cv.GOut(g_ts, g_seqno, g_seqid))

            ccomp = c.compileStreaming()
            source = cv.gapi.wip.make_capture_src(path)
            ccomp.setSource(cv.gin(source))
            ccomp.start()

            # Assert
            max_num_frames = 10
            curr_frame_number = 0
            while True:
                has_frame, (ts, seqno, seqid) = ccomp.pull()

                if not has_frame:
                    break

                self.assertEqual(curr_frame_number, seqno)
                self.assertEqual(curr_frame_number, seqid)

                curr_frame_number += 1
                if curr_frame_number == max_num_frames:
                    break
Code Example #13
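Feeds two appsink outputs of a single GStreamer test pipeline into a two-input streaming computation (cv.gapi.add) and pulls until the stream ends.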
        def test_gst_multiple_sources(self):
            if not cv.videoio_registry.hasBackend(cv.CAP_GSTREAMER):
                raise unittest.SkipTest(
                    "Backend is not available/disabled: GSTREAMER")

            gstpipeline = """videotestsrc is-live=true pattern=colors num-buffers=10 !
                             videorate ! videoscale !
                             video/x-raw,width=1920,height=1080,framerate=30/1 !
                             appsink name=sink1
                             videotestsrc is-live=true pattern=colors num-buffers=10 !
                             videorate ! videoscale !
                             video/x-raw,width=1920,height=1080,framerate=30/1 !
                             appsink name=sink2"""

            g_in1 = cv.GMat()
            g_in2 = cv.GMat()
            g_out = cv.gapi.add(g_in1, g_in2)
            c = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))

            ccomp = c.compileStreaming()

            pp = self.get_gst_pipeline(gstpipeline)
            src1 = cv.gapi.wip.get_streaming_source(pp, "sink1")
            src2 = cv.gapi.wip.get_streaming_source(pp, "sink2")

            ccomp.setSource(cv.gin(src1, src2))
            ccomp.start()

            has_frame, out = ccomp.pull()
            while has_frame:
                self.assertTrue(out.size != 0)
                has_frame, out = ccomp.pull()
Code Example #14
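A streaming pipeline that adds a constant matrix to every video frame and compares each output against cv.add on the frame read with cv.VideoCapture.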
    def test_video_add(self):
        sz = (576, 768, 3)
        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)

        path = self.find_file('cv/video/768x576.avi',
                              [os.environ['OPENCV_TEST_DATA_PATH']])

        # OpenCV
        cap = cv.VideoCapture(path)

        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        out = cv.gapi.add(g_in1, g_in2)
        c = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(out))

        ccomp = c.compileStreaming()
        source = cv.gapi.wip.make_capture_src(path)
        ccomp.setSource(cv.gin(source, in_mat))
        ccomp.start()

        # Assert
        while cap.isOpened():
            has_expected, frame = cap.read()
            has_actual, actual = ccomp.pull()

            self.assertEqual(has_expected, has_actual)

            if not has_actual:
                break

            expected = cv.add(frame, in_mat)
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #15
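A custom op over GArray.Any that concatenates two heterogeneous Python tuples.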
        def test_array_with_custom_type(self):
            @cv.gapi.op('custom.op', in_types=[cv.GArray.Any, cv.GArray.Any], out_types=[cv.GArray.Any])
            class GConcat:
                @staticmethod
                def outMeta(arr_desc0, arr_desc1):
                    return cv.empty_array_desc()

            @cv.gapi.kernel(GConcat)
            class GConcatImpl:
                @staticmethod
                def run(arr0, arr1):
                    return arr0 + arr1

            g_arr0 = cv.GArray.Any()
            g_arr1 = cv.GArray.Any()
            g_out  = GConcat.on(g_arr0, g_arr1)

            comp = cv.GComputation(cv.GIn(g_arr0, g_arr1), cv.GOut(g_out))

            arr0 = ((2, 2), 2.0)
            arr1 = (3,    'str')

            out = comp.apply(cv.gin(arr0, arr1),
                             args=cv.gapi.compile_args(cv.gapi.kernels(GConcatImpl)))

            self.assertEqual(arr0 + arr1, out)
Code Example #16
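A custom op that takes a GOpaque.Any table and a GOpaque.String key and returns the looked-up value, exercised with int, string and tuple values.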
        def test_opaq_with_custom_type(self):
            @cv.gapi.op('custom.op', in_types=[cv.GOpaque.Any, cv.GOpaque.String], out_types=[cv.GOpaque.Any])
            class GLookUp:
                @staticmethod
                def outMeta(opaq_desc0, opaq_desc1):
                    return cv.empty_gopaque_desc()

            @cv.gapi.kernel(GLookUp)
            class GLookUpImpl:
                @staticmethod
                def run(table, key):
                    return table[key]


            g_table = cv.GOpaque.Any()
            g_key   = cv.GOpaque.String()
            g_out   = GLookUp.on(g_table, g_key)

            comp = cv.GComputation(cv.GIn(g_table, g_key), cv.GOut(g_out))

            table = {
                        'int':   42,
                        'str':   'hello, world!',
                        'tuple': (42, 42)
                    }

            out = comp.apply(cv.gin(table, 'int'), args=cv.gapi.compile_args(cv.gapi.kernels(GLookUpImpl)))
            self.assertEqual(42, out)

            out = comp.apply(cv.gin(table, 'str'), args=cv.gapi.compile_args(cv.gapi.kernels(GLookUpImpl)))
            self.assertEqual('hello, world!', out)

            out = comp.apply(cv.gin(table, 'tuple'), args=cv.gapi.compile_args(cv.gapi.kernels(GLookUpImpl)))
            self.assertEqual((42, 42), out)
Code Example #17
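A pipeline mixing standard G-API operations with custom resize and transpose kernels (gapi -> custom -> custom -> gapi), compared against the equivalent OpenCV/NumPy sequence.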
        def test_pipeline_with_custom_kernels(self):
            @cv.gapi.op('custom.resize',
                        in_types=[cv.GMat, tuple],
                        out_types=[cv.GMat])
            class GResize:
                @staticmethod
                def outMeta(desc, size):
                    return desc.withSize(size)

            @cv.gapi.kernel(GResize)
            class GResizeImpl:
                @staticmethod
                def run(img, size):
                    return cv.resize(img, size)

            @cv.gapi.op('custom.transpose',
                        in_types=[cv.GMat, tuple],
                        out_types=[cv.GMat])
            class GTranspose:
                @staticmethod
                def outMeta(desc, order):
                    return desc

            @cv.gapi.kernel(GTranspose)
            class GTransposeImpl:
                @staticmethod
                def run(img, order):
                    return np.transpose(img, order)

            img_path = self.find_file(
                'cv/face/david2.jpg',
                [os.environ.get('OPENCV_TEST_DATA_PATH')])
            img = cv.imread(img_path)
            size = (32, 32)
            order = (1, 0, 2)

            # Dummy pipeline just to validate this case:
            # gapi -> custom -> custom -> gapi

            # OpenCV
            expected = cv.cvtColor(img, cv.COLOR_BGR2RGB)
            expected = cv.resize(expected, size)
            expected = np.transpose(expected, order)
            expected = cv.mean(expected)

            # G-API
            g_bgr = cv.GMat()
            g_rgb = cv.gapi.BGR2RGB(g_bgr)
            g_resized = GResize.on(g_rgb, size)
            g_transposed = GTranspose.on(g_resized, order)
            g_mean = cv.gapi.mean(g_transposed)

            comp = cv.GComputation(cv.GIn(g_bgr), cv.GOut(g_mean))
            actual = comp.apply(cv.gin(img),
                                args=cv.compile_args(
                                    cv.gapi.kernels(GResizeImpl,
                                                    GTransposeImpl)))

            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #18
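ROI-based age/gender inference with cv.gapi.infer2 on the Inference Engine backend, compared per ROI against OpenCV DNN.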
        def test_age_gender_infer2_roi(self):
            # NB: Check IE
            if cv.dnn.DNN_TARGET_CPU not in cv.dnn.getAvailableTargets(
                    cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
                return

            root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
            model_path = self.find_file(
                root_path + '.xml',
                [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
            weights_path = self.find_file(
                root_path + '.bin',
                [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
            device_id = 'CPU'

            rois = [(10, 15, 62, 62), (23, 50, 62, 62), (14, 100, 62, 62),
                    (80, 50, 62, 62)]
            img_path = self.find_file(
                'cv/face/david2.jpg',
                [os.environ.get('OPENCV_TEST_DATA_PATH')])
            img = cv.imread(img_path)

            # OpenCV DNN
            dnn_age_list = []
            dnn_gender_list = []
            for roi in rois:
                age, gender = self.infer_reference_network(
                    model_path, weights_path, self.make_roi(img, roi))
                dnn_age_list.append(age)
                dnn_gender_list.append(gender)

            # OpenCV G-API
            g_in = cv.GMat()
            g_rois = cv.GArrayT(cv.gapi.CV_RECT)
            inputs = cv.GInferListInputs()
            inputs.setInput('data', g_rois)

            outputs = cv.gapi.infer2("net", g_in, inputs)
            age_g = outputs.at("age_conv3")
            gender_g = outputs.at("prob")

            comp = cv.GComputation(cv.GIn(g_in, g_rois),
                                   cv.GOut(age_g, gender_g))
            pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)

            gapi_age_list, gapi_gender_list = comp.apply(
                cv.gin(img, rois),
                args=cv.gapi.compile_args(cv.gapi.networks(pp)))

            # Check
            for gapi_age, gapi_gender, dnn_age, dnn_gender in zip(
                    gapi_age_list, gapi_gender_list, dnn_age_list,
                    dnn_gender_list):
                self.assertEqual(0.0,
                                 cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
                self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))
Code Example #19
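Streaming goodFeaturesToTrack on video frames (converted to grayscale in the graph), compared frame by frame against the OpenCV call; the two outputs have different shapes, so both are flattened before comparison.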
    def test_video_good_features_to_track(self):
        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

        # NB: goodFeaturesToTrack configuration
        max_corners         = 50
        quality_lvl         = 0.01
        min_distance        = 10
        block_sz            = 3
        use_harris_detector = True
        k                   = 0.04
        mask                = None

        # OpenCV
        cap = cv.VideoCapture(path)

        # G-API
        g_in = cv.GMat()
        g_gray = cv.gapi.RGB2Gray(g_in)
        g_out = cv.gapi.goodFeaturesToTrack(g_gray, max_corners, quality_lvl,
                                            min_distance, mask, block_sz, use_harris_detector, k)

        c = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))

        ccomp = c.compileStreaming()
        source = cv.gapi.wip.make_capture_src(path)
        ccomp.setSource(source)
        ccomp.start()

        # Assert
        max_num_frames  = 10
        proc_num_frames = 0
        while cap.isOpened():
            has_expected, frame  = cap.read()
            has_actual,   actual = ccomp.pull()

            self.assertEqual(has_expected, has_actual)

            if not has_actual:
                break

            # OpenCV
            frame = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
            expected = cv.goodFeaturesToTrack(frame, max_corners, quality_lvl,
                                              min_distance, mask=mask,
                                              blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)
            for e, a in zip(expected, actual):
                # NB: OpenCV & G-API have different output shapes:
                # OpenCV - (num_points, 1, 2)
                # G-API  - (num_points, 2)
                self.assertEqual(0.0, cv.norm(e.flatten(),
                                              np.array(a, np.float32).flatten(),
                                              cv.NORM_INF))

            proc_num_frames += 1
            if proc_num_frames == max_num_frames:
                break
Code Example #20
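Two GStreamer file pipelines are consumed both as G-API streaming sources and through cv.VideoCapture; the NV12 frames are converted to BGR (convertNV12p2BGR, defined elsewhere) and compared.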
        def test_gst_multiple_sources_accuracy(self):
            if not cv.videoio_registry.hasBackend(cv.CAP_GSTREAMER):
                raise unittest.SkipTest(
                    "Backend is not available/disabled: GSTREAMER")

            path = self.find_file('highgui/video/big_buck_bunny.avi',
                                  [os.environ['OPENCV_TEST_DATA_PATH']])
            gstpipeline1 = """filesrc location=""" + path + """ ! decodebin ! videoconvert !
                              videoscale ! video/x-raw,format=NV12 ! appsink"""
            gstpipeline2 = """filesrc location=""" + path + """ ! decodebin !
                              videoflip method=clockwise ! videoconvert ! videoscale !
                              video/x-raw,format=NV12 ! appsink"""
            gstpipeline_gapi = gstpipeline1 + ' name=sink1 ' + gstpipeline2 + ' name=sink2'

            # G-API pipeline
            g_in1 = cv.GMat()
            g_in2 = cv.GMat()
            g_out1 = cv.gapi.copy(g_in1)
            g_out2 = cv.gapi.copy(g_in2)
            c = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out1, g_out2))

            ccomp = c.compileStreaming()

            # G-API Gst-source
            pp = self.get_gst_pipeline(gstpipeline_gapi)

            src1 = cv.gapi.wip.get_streaming_source(pp, "sink1")
            src2 = cv.gapi.wip.get_streaming_source(pp, "sink2")
            ccomp.setSource(cv.gin(src1, src2))
            ccomp.start()

            # OpenCV Gst-source
            cap1 = self.open_VideoCapture_gstreamer(gstpipeline1)
            cap2 = self.open_VideoCapture_gstreamer(gstpipeline2)

            # Assert
            max_num_frames = 10
            for _ in range(max_num_frames):
                has_expected1, expected1 = cap1.read()
                has_expected2, expected2 = cap2.read()
                has_actual, (actual1, actual2) = ccomp.pull()

                self.assertEqual(has_expected1, has_expected2)
                has_expected = has_expected1 and has_expected2
                self.assertEqual(has_expected, has_actual)

                if not has_expected:
                    break

                self.assertEqual(
                    0.0,
                    cv.norm(convertNV12p2BGR(expected1), actual1, cv.NORM_INF))
                self.assertEqual(
                    0.0,
                    cv.norm(convertNV12p2BGR(expected2), actual2, cv.NORM_INF))
Code Example #21
File: test_gapi_infer.py (project: yding10/openvino)
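Age/gender inference with cv.gapi.infer on the Inference Engine backend, compared against OpenCV DNN on the same resized image.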
    def test_age_gender_infer(self):

        # NB: Check IE
        if cv.dnn.DNN_TARGET_CPU not in cv.dnn.getAvailableTargets(
                cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path = self.find_file(
            root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        weights_path = self.find_file(
            root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        img_path = self.find_file('cv/face/david2.jpg',
                                  [os.environ.get('OPENCV_TEST_DATA_PATH')])
        device_id = 'CPU'
        img = cv.resize(cv.imread(img_path), (62, 62))

        # OpenCV DNN
        net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
        net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
        net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

        blob = cv.dnn.blobFromImage(img)

        net.setInput(blob)
        dnn_age, dnn_gender = net.forward(net.getUnconnectedOutLayersNames())

        # OpenCV G-API
        g_in = cv.GMat()
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)

        outputs = cv.gapi.infer("net", inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")

        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)

        nets = cv.gapi.networks(pp)
        args = cv.compile_args(nets)
        gapi_age, gapi_gender = comp.apply(cv.gin(img), args=args)

        # Check
        self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
        self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))
Code Example #22
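cv.gapi.streaming.size applied to a rectangle (GOpaqueT of CV_RECT), backed by the custom_sizeR kernel registered by name through cv.gapi_wip_kernels.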
    def test_custom_sizeR(self):
        # x, y, h, w
        roi = (10, 15, 100, 150)

        expected = (100, 150)

        # G-API
        g_r = cv.GOpaqueT(cv.gapi.CV_RECT)
        g_sz = cv.gapi.streaming.size(g_r)
        comp = cv.GComputation(cv.GIn(g_r), cv.GOut(g_sz))

        pkg = cv.gapi_wip_kernels((custom_sizeR, 'org.opencv.streaming.sizeR'))
        actual = comp.apply(cv.gin(roi), args=cv.compile_args(pkg))

        # cv.norm works with tuples ?
        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #23
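Renders primitives onto a BGR image with cv.gapi.wip.draw.render3ch and compares the result against the reference renderer.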
        def test_render_primitives_on_bgr_graph(self):
            expected = np.zeros(self.size, dtype=np.uint8)
            actual = np.array(expected, copy=True)

            # OpenCV
            self.render_primitives_bgr_ref(expected)

            # G-API
            g_in = cv.GMat()
            g_prims = cv.GArray.Prim()
            g_out = cv.gapi.wip.draw.render3ch(g_in, g_prims)

            comp = cv.GComputation(cv.GIn(g_in, g_prims), cv.GOut(g_out))
            actual = comp.apply(cv.gin(actual, self.prims))

            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #24
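A custom size op (GSize/GSizeImpl, defined elsewhere in the test module) returning the spatial dimensions of a GMat.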
        def test_custom_op_size(self):
            sz = (100, 150, 3)
            in_mat = np.full(sz, 45, dtype=np.uint8)

            # OpenCV
            expected = (100, 150)

            # G-API
            g_in = cv.GMat()
            g_sz = GSize.on(g_in)
            comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_sz))

            pkg = cv.gapi.kernels(GSizeImpl)
            actual = comp.apply(cv.gin(in_mat), args=cv.gapi.compile_args(pkg))

            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #25
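A custom sizeR op (GSizeR/GSizeRImpl, defined elsewhere in the test module) returning the size of a rectangle passed as GOpaque.Rect.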
        def test_custom_op_sizeR(self):
            # x, y, h, w
            roi = (10, 15, 100, 150)

            expected = (100, 150)

            # G-API
            g_r  = cv.GOpaque.Rect()
            g_sz = GSizeR.on(g_r)
            comp = cv.GComputation(cv.GIn(g_r), cv.GOut(g_sz))

            pkg = cv.gapi.kernels(GSizeRImpl)
            actual = comp.apply(cv.gin(roi), args=cv.gapi.compile_args(pkg))

            # cv.norm works with tuples ?
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #26
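A custom boundingRect op over a GArray.Point, compared against cv.boundingRect.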
        def test_custom_op_boundingRect(self):
            points = [(0,0), (0,1), (1,0), (1,1)]

            # OpenCV
            expected = cv.boundingRect(np.array(points))

            # G-API
            g_pts = cv.GArray.Point()
            g_br  = GBoundingRect.on(g_pts)
            comp  = cv.GComputation(cv.GIn(g_pts), cv.GOut(g_br))

            pkg = cv.gapi.kernels(GBoundingRectImpl)
            actual = comp.apply(cv.gin(points), args=cv.gapi.compile_args(pkg))

            # cv.norm works with tuples ?
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #27
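cv.gapi.streaming.size on a GMat, backed by the custom_size kernel registered by name through cv.gapi_wip_kernels.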
    def test_custom_size(self):
        sz = (100, 150, 3)
        in_mat = np.full(sz, 45, dtype=np.uint8)

        # OpenCV
        expected = (100, 150)

        # G-API
        g_in = cv.GMat()
        g_sz = cv.gapi.streaming.size(g_in)
        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_sz))

        pkg = cv.gapi_wip_kernels((custom_size, 'org.opencv.streaming.size'))
        actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
Code Example #28
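cv.gapi.split3 compared against cv.split for every kernel package in pkgs.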
    def test_split3(self):
        sz = (1280, 720, 3)
        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)

        # OpenCV
        expected = cv.split(in_mat)

        # G-API
        g_in = cv.GMat()
        b, g, r = cv.gapi.split3(g_in)
        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r))

        for pkg in pkgs:
            actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
            # Comparison
            for e, a in zip(expected, actual):
                self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF))
Code Example #29
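Exercises cv.gapi.streaming.desync with a custom delaying kernel (GDelay/GDelayImpl, defined elsewhere) and counts synchronized outputs, desynchronized outputs and None results.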
        def test_desync(self):
            path = self.find_file('cv/video/768x576.avi',
                                  [os.environ['OPENCV_TEST_DATA_PATH']])

            # G-API
            g_in = cv.GMat()
            g_out1 = cv.gapi.copy(g_in)
            des = cv.gapi.streaming.desync(g_in)
            g_out2 = GDelay.on(des)

            c = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out1, g_out2))

            kernels = cv.gapi.kernels(GDelayImpl)
            ccomp = c.compileStreaming(args=cv.gapi.compile_args(kernels))
            source = cv.gapi.wip.make_capture_src(path)
            ccomp.setSource(cv.gin(source))
            ccomp.start()

            # Assert
            max_num_frames = 10
            proc_num_frames = 0

            out_counter = 0
            desync_out_counter = 0
            none_counter = 0
            while True:
                has_frame, (out1, out2) = ccomp.pull()
                if not has_frame:
                    break

                if out1 is not None:
                    out_counter += 1
                if out2 is not None:
                    desync_out_counter += 1
                else:
                    none_counter += 1

                proc_num_frames += 1
                if proc_num_frames == max_num_frames:
                    ccomp.stop()
                    break

            self.assertLess(0, proc_num_frames)
            self.assertLess(desync_out_counter, out_counter)
            self.assertLess(0, none_counter)
Code Example #30
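A custom addC kernel (GAddC/GAddCImpl, defined elsewhere in the test module) adding a per-channel scalar to a matrix, compared against a NumPy reference.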
        def test_custom_op_addC(self):
            sz = (3, 3, 3)
            in_mat = np.full(sz, 45, dtype=np.uint8)
            sc = (50, 10, 20)

            # Numpy reference, make array from sc to keep uint8 dtype.
            expected = in_mat + np.array(sc, dtype=np.uint8)

            # G-API
            g_in  = cv.GMat()
            g_sc  = cv.GScalar()
            g_out = GAddC.on(g_in, g_sc, cv.CV_8UC1)
            comp  = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(g_out))

            pkg = cv.gapi.kernels(GAddCImpl)
            actual = comp.apply(cv.gin(in_mat, sc), args=cv.gapi.compile_args(pkg))

            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))