Example #1
0
def test_specify_type(jpg_file_path):
    # In write mode the type may be refined, but only within the same family.
    with qidata.open(jpg_file_path, "w") as qdfile:
        assert qdfile.type == DataType.IMAGE
        # Assigning the enum value directly works...
        qdfile.type = DataType.IMAGE_2D
        assert qdfile.type == DataType.IMAGE_2D
        # ...and so does assigning the enum member's name as a string.
        qdfile.type = "IMAGE_2D"
        assert qdfile.type == DataType.IMAGE_2D
        # Switching to an unrelated data family is rejected.
        with pytest.raises(TypeError):
            qdfile.type = DataType.AUDIO

    # The new type was persisted and survives a re-open in read mode.
    with qidata.open(jpg_file_path, "r") as qdfile:
        assert qdfile.type == DataType.IMAGE_2D
Example #2
0
def test_specialized_qidatafile(file_name, class_, datatype, valid_locs,
                                invalid_locs):
    # Opening read-only yields the expected specialized class and data type.
    with qidata.open(conftest.sandboxed(file_name), "r") as qdfile:
        assert isinstance(qdfile, class_)
        assert qdfile.type == datatype

    with qidata.open(conftest.sandboxed(file_name), "w") as qdfile:
        annot = metadata_objects.Property(key="prop", value="10")
        # A global annotation (no location) is always accepted.
        qdfile.addAnnotation("jdoe", annot, None)

        # Every invalid location must be rejected with an explicit message.
        for bad_loc in invalid_locs:
            with pytest.raises(Exception) as exc_info:
                qdfile.addAnnotation("jdoe", annot, bad_loc)
            assert exc_info.value.message == (
                'Location %s is invalid' % str(bad_loc))

        # Valid locations are accepted without raising.
        for good_loc in valid_locs:
            qdfile.addAnnotation("jdoe", annot, good_loc)
Example #3
0
    def process(self, i, o):
        """Fill a QiDataImage from the configured file and push it downstream."""
        out_image = QiDataImage()
        mode = self.params.get("mode").get()

        with qidata.open(self.params.get("image_file").get()) as qdf:
            # Pick the pixel buffer, converting the colorspace when the
            # requested mode does not match what the file provides.
            if mode == "COLOR" and qdf.raw_data.colorspace != Colorspace("BGR"):
                pixels = qdf.raw_data.render().numpy_image
                colorspace_code = 13  # AL code for BGR
            elif mode == "GRAYSCALE" and qdf.raw_data.colorspace != Colorspace("Gray"):
                pixels = cv2.cvtColor(qdf.raw_data.render().numpy_image,
                                      cv2.COLOR_BGR2GRAY)
                colorspace_code = 0  # AL code for Gray
            else:
                # Already in the requested colorspace: use the data as-is.
                pixels = qdf.raw_data.numpy_image
                colorspace_code = qdf.raw_data.colorspace.al_code
            out_image.data.fromarray(pixels)
            out_image.colorspace = colorspace_code

            # Camera pose at capture time.
            out_image.tf.tx = qdf.transform.translation.x
            out_image.tf.ty = qdf.transform.translation.y
            out_image.tf.tz = qdf.transform.translation.z
            out_image.tf.rx = qdf.transform.rotation.x
            out_image.tf.ry = qdf.transform.rotation.y
            out_image.tf.rz = qdf.transform.rotation.z
            out_image.tf.rw = qdf.transform.rotation.w

            # Capture timestamp.
            out_image.ts.seconds = qdf.timestamp.seconds
            out_image.ts.nanoseconds = qdf.timestamp.nanoseconds

            # Calibration data.
            out_image.camera_matrix.fromarray(
                numpy.array(qdf.raw_data.camera_info.camera_matrix))
            out_image.distortion_coeffs = ecto.list_of_floats(
                qdf.raw_data.camera_info.distortion_coeffs)
            out_image.rectification_matrix.fromarray(
                numpy.array(qdf.raw_data.camera_info.rectification_matrix))
            out_image.projection_matrix.fromarray(
                numpy.array(qdf.raw_data.camera_info.projection_matrix))

        o.get("qidata_image").set(out_image)
        return ecto.OK
Example #4
0
    def examineContent(self):
        """
        Examine all of the dataset's files to infer content information.

        For every supported file contained in the dataset, this function
        will:
         - check if a DataType is already defined and infer one from the
           file extension if not.
         - open the file and look for present annotations.

        Once all files have been studied, any annotation that was
        previously declared TOTAL keeps that status (whether or not it was
        seen again); previously known annotations that were neither
        rediscovered nor TOTAL are dropped.
        """
        _annotation_content = dict()
        self._files_type = dict()

        # Scan every child file: mark each (annotator, annotation_type)
        # pair as PARTIAL and group file names by stringified DataType.
        for name in self.children:
            path = os.path.join(self._folder_path, name)
            with qidata.open(path, "r") as _f:
                for annotator, annotations in _f.annotations.iteritems():
                    for annotation_type in annotations.keys():
                        _annotation_content[(
                            annotator, annotation_type
                        )] = QiDataSet.AnnotationStatus.PARTIAL
                # setdefault replaces the deprecated dict.has_key() check
                # (has_key is removed in Python 3 and discouraged in 2.x).
                self._files_type.setdefault(str(_f.type), []).append(name)

        # Frames can carry annotations too; fold them in the same way.
        for _f in self.getAllFrames():
            for annotator, annotations in _f.annotations.iteritems():
                for annotation_type in annotations.keys():
                    _annotation_content[(
                        annotator,
                        annotation_type)] = QiDataSet.AnnotationStatus.PARTIAL

        # For all discovered annotations, grab the previously known status.
        # An annotation that had a status before but was not seen now is
        # kept only if it was declared as TOTAL.
        for key in self._annotation_content:
            if QiDataSet.AnnotationStatus.TOTAL == self._annotation_content[
                    key]:
                _annotation_content[key] = QiDataSet.AnnotationStatus.TOTAL

        self._annotation_content = _annotation_content
Example #5
0
    def openChild(self, name):
        """
        Open a QiDataFile contained in this dataset.

        :param name: Name of the file or folder to open
        :type name: str
        :return: The opened QiDataFile
        :raises IOError: if ``name`` is not a child of the dataset, or if
            it is neither a file nor a folder

        .. note::
            The opening mode used to open children is the opening mode of
            the QiDataSet itself.
        """
        # Validate the name before touching the filesystem
        # (`name not in` is the idiomatic form of `not name in`).
        if name not in self.children:
            raise IOError("%s is not a child of the current dataset" % name)
        path = os.path.join(self._folder_path, name)
        if os.path.isfile(path):
            return qidata.open(path, self.mode)
        # Sub-datasets are not supported yet:
        # elif os.path.isdir(path):
        # 	return QiDataSet(path, self.mode)
        else:
            raise IOError("%s is neither a file nor a folder" % name)
Example #6
0
    def process(self, i, o):
        """
        Split a stereo QiData image file into two QiDataImage outputs.

        The main (left/top) image goes to ``qidata_image_main`` and the
        secondary (right/bottom) one to ``qidata_image_secondary``.

        :raises RuntimeError: if the opened file is not a stereo image
        """
        # NOTE: the original code also created two unused QiDataImage
        # instances (cam1/cam2); they were dead locals and are removed.
        cams = []
        mode = self.params.get("mode").get()

        with qidata.open(self.params.get("image_file").get()) as _f:
            # A stereo file exposes either a left/right or a top/bottom pair.
            images = []
            if hasattr(_f.raw_data, "left_image"):
                images.append(_f.raw_data.left_image)
                images.append(_f.raw_data.right_image)
            elif hasattr(_f.raw_data, "top_image"):
                images.append(_f.raw_data.top_image)
                images.append(_f.raw_data.bottom_image)
            else:
                raise RuntimeError("Given image is not stereo")

            for img in images:
                cam = QiDataImage()

                # Pick the pixel buffer, converting the colorspace when the
                # requested mode does not match what the file provides.
                if (mode == "COLOR"
                        and Colorspace("BGR") != _f.raw_data.colorspace):
                    # convert
                    _tmp = img.render().numpy_image
                    colorspace = 13  # AL_code for BGR
                elif (mode == "GRAYSCALE"
                      and Colorspace("Gray") != _f.raw_data.colorspace):
                    _tmp = img.render().numpy_image
                    _tmp = cv2.cvtColor(_tmp, cv2.COLOR_BGR2GRAY)
                    colorspace = 0  # AL_code for Gray
                else:
                    # no convert
                    _tmp = img.numpy_image
                    colorspace = _f.raw_data.colorspace.al_code
                cam.data.fromarray(_tmp)
                cam.colorspace = colorspace

                # Register the camera's position
                cam.tf.tx = _f.transform.translation.x
                cam.tf.ty = _f.transform.translation.y
                cam.tf.tz = _f.transform.translation.z
                cam.tf.rx = _f.transform.rotation.x
                cam.tf.ry = _f.transform.rotation.y
                cam.tf.rz = _f.transform.rotation.z
                cam.tf.rw = _f.transform.rotation.w

                # Register the image's timestamp
                cam.ts.seconds = _f.timestamp.seconds
                cam.ts.nanoseconds = _f.timestamp.nanoseconds

                # Register the calibration (per-eye camera_info)
                cam.camera_matrix.fromarray(
                    numpy.array(img.camera_info.camera_matrix))
                cam.distortion_coeffs = ecto.list_of_floats(
                    img.camera_info.distortion_coeffs)
                cam.rectification_matrix.fromarray(
                    numpy.array(img.camera_info.rectification_matrix))
                cam.projection_matrix.fromarray(
                    numpy.array(img.camera_info.projection_matrix))

                cams.append(cam)

        o.get("qidata_image_main").set(cams[0])
        o.get("qidata_image_secondary").set(cams[1])
        return ecto.OK