Example #1
class TestTutorials2(unittest.TestCase):
    def test_traversable_tutorial(self):
        # Create a Harvester object:
        self.harvester = Harvester()
        
        # Add a CTI file path:
        self.harvester.add_cti_file(
            get_cti_file_path()
        )
        self.harvester.update_device_info_list()

        # Connect to the first camera in the list:
        self.ia = self.harvester.create_image_acquirer(0)

        #
        num_images_to_acquire = 0

        # Then start image acquisition:
        self.ia.start_image_acquisition()

        while num_images_to_acquire < 100:
            #
            with self.ia.fetch_buffer() as buffer:
                # self.do_something(buffer)
                pass

            num_images_to_acquire += 1

        # We don't need the ImageAcquirer object. Destroy it:
        self.ia.destroy()
Example #2
class TestTutorials2(unittest.TestCase):
    def test_traversable_tutorial(self):
        # Create a Harvester object:
        self.harvester = Harvester()

        # The following block is just for administrative purpose;
        # you should not include it in your code:
        cti_file_path = get_cti_file_path()
        if 'TLSimu.cti' not in cti_file_path:
            return

        # Add a CTI file path:
        self.harvester.add_cti_file(cti_file_path)
        self.harvester.update_device_info_list()

        # Connect to the first camera in the list:
        self.ia = self.harvester.create_image_acquirer(0)

        #
        num_images_to_acquire = 0

        # Then start image acquisition:
        self.ia.start_image_acquisition()

        while num_images_to_acquire < 100:
            #
            with self.ia.fetch_buffer() as buffer:
                # self.do_something(buffer)
                pass

            num_images_to_acquire += 1

        # We don't need the ImageAcquirer object. Destroy it:
        self.ia.destroy()
Example #3
def main(unused_argv):

    if not create_output_dir(
            os.path.join(flags.FLAGS.local_data_dir, IMAGE_DIR_NAME)):
        print("Cannot create output annotations directory.")
        return

    use_s3 = flags.FLAGS.s3_bucket_name is not None

    if use_s3:
        if not s3_bucket_exists(flags.FLAGS.s3_bucket_name):
            use_s3 = False
            print(
                "Bucket: %s either does not exist or you do not have access to it"
                % flags.FLAGS.s3_bucket_name)
        else:
            print("Bucket: %s exists and you have access to it" %
                  flags.FLAGS.s3_bucket_name)

    h = Harvester()
    h.add_cti_file(flags.FLAGS.gentl_producer_path)
    if len(h.cti_files) == 0:
        print("No valid cti file found at %s" %
              flags.FLAGS.gentl_producer_path)
        h.reset()
        return
    print("Currently available genTL Producer CTI files: ", h.cti_files)

    h.update_device_info_list()
    if len(h.device_info_list) == 0:
        print("No compatible devices detected.")
        h.reset()
        return

    print("Available devices List: ", h.device_info_list)
    print("Using device: ", h.device_info_list[0])

    cam = h.create_image_acquirer(list_index=0)

    apply_camera_settings(cam)

    save_queue = queue.Queue()

    save_thread = threading.Thread(target=save_images,
                                   args=(
                                       save_queue,
                                       use_s3,
                                   ))

    save_thread.start()

    acquire_images(cam, save_queue)

    save_thread.join()

    # clean up
    cam.destroy()
    h.reset()

    print("Exiting.")
Example #4
    def open(self) -> None:
        """TBW."""
        if self._simulate:
            return
        h = Harvester()

        locs = [
            r"~/tools/mvImpact/lib/x86_64/mvGenTLProducer.cti",
            r"C:\Program Files\MATRIX VISION\mvIMPACT Acquire\bin\x64\mvGenTLProducer.cti"
        ]
        cti = ""
        for loc in locs:
            if Path(loc).expanduser().exists():
                cti = loc
        if not cti:
            raise FileNotFoundError(
                "Could not locate cti file: mvGenTLProducer.cti")

        cti = str(Path(cti).expanduser())
        h.add_file(cti)
        h.update()
        print(len(h.device_info_list), "device(s) found")
        print(h.device_info_list[0])
        print("creating ia....")
        ia = h.create_image_acquirer(0)
        print("ia created")
        # ia = h.create_image_acquirer(serial_number='050200047485')
        # ia.remote_device.node_map.Width.value = 1024  # max: 1312
        # ia.remote_device.node_map.Height.value = 1024  # max: 1082
        # ia.remote_device.node_map.PixelFormat.value = 'Mono12'

        from harvesters.core import ImageAcquirer
        print(ImageAcquirer.Events.__members__)
        from harvesters.core import Callback

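        # emit() below is invoked by the acquirer whenever the
        # NEW_BUFFER_AVAILABLE event fires (see add_callback() further down).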
        class CallbackOnNewBuffer(Callback):
            def __init__(self, ia: ImageAcquirer):
                #
                super().__init__()
                #
                self._ia = ia

            def emit(self, context):
                # # You would implement this method by yourself.
                # with _ia.fetch_buffer() as buffer:
                #     # Work with the fetched buffer.
                #     print(buffer)
                print(datetime.utcnow(), "New image")

        on_new_buffer = CallbackOnNewBuffer(ia)
        ia.add_callback(ia.Events.NEW_BUFFER_AVAILABLE, on_new_buffer)

        ia.start_acquisition(run_in_background=True)
        self._dev = h
        self._ia = ia
        self.on_new_buffer = on_new_buffer
Example #5
def main():
    h = Harvester()
    h.add_cti_file('C:/Program Files/JAI/SDK/bin/JaiGevTL.cti')
    # JaiUSB3vTL  JaiGevTL
    print(h.cti_files)
    h.update_device_info_list()
    print("file", h.device_info_list)
    print("dty", list(dict.fromkeys(h.device_info_list)))

    # x = unique(h.device_info_list)
    # print("pp",x)
    print("devices end")
    ia = h.create_image_acquirer(0)
    print("dsssx", ia.device)
    print("infor", ia.device.node_map.device_info)
    # ia.device.node_map.Test
    # ia.device.node_map.PixelFormat.value = 'RGB12'
    # ia.device.node_map.RGB12Packed.value = 'RGB12'
    # ia.device.node_map.has_node()
    # ia.device.node_map.TestPattern = 'HorizontalColorBar'
    # ia.acquirer.remote_device.node_map
    print(ia.is_acquiring())

    try:
        print("starting acquisition")
        ia.start_image_acquisition()
        print("acquiring:", ia.is_acquiring())
        # Fetch one buffer as a sanity check and queue it straight back so it
        # is not leaked:
        first_buffer = ia.fetch_buffer()
        print("first buffer:", first_buffer)
        first_buffer.queue()
        i = 0
        done = False
        while not done:
            with ia.fetch_buffer() as buffer:
                print("checkr 1")
                img = buffer.payload.components[0].data
                img = img.reshape(buffer.payload.components[0].height,
                                  buffer.payload.components[0].width)
                # img_copy = img.copy()
                img_copy = cv2.cvtColor(img, cv2.COLOR_BayerRG2RGB)
                cv2.namedWindow("window",
                                cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
                cv2.imshow("window", img_copy)
                fps = ia.statistics.fps
                print("FPS: ", fps)
                if cv2.waitKey(10) == ord('q'):
                    done = True
                    print('break')
                i = i + 1
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
    finally:
        ia.stop_image_acquisition()
        ia.destroy()
        cv2.destroyAllWindows()
        print('fin')
        h.reset()
Example #6
def camera_stream():
    global outputFrame, lock
    
    h = Harvester()
    h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
    h.update_device_info_list()
    ia = h.create_image_acquirer(0)
    #ia.device.node_map.PixelFormat.value = 'BayerRG8'
    #ia.device.node_map.TestPattern = 'HorizontalColorBar'
    try:
        ia.start_image_acquisition()
        i = 0
        done = False

        while not done:
            with ia.fetch_buffer() as buffer:
                img = buffer.payload.components[0].data
                img = img.reshape(
                    buffer.payload.components[0].height, buffer.payload.components[0].width)
                img_copy = img.copy()
                img_copy = cv2.cvtColor(img, cv2.COLOR_BayerRG2RGB)

                if i == 0:
                    first = img_copy.copy()

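                # np.allclose(first, img_copy, 3) is True when the new frame is
                # (nearly) identical to the previous one, so "not change" below
                # means the image content has actually changed.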
                change = np.allclose(first, img_copy, 3)

                #print(change)
                if not change:

                    # cv2.namedWindow("window", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
                    # cv2.imshow("window", img_copy)
                    #cv2.imwrite(f'./images/image_{i}.png', img_copy)
                    
                    outputFrame = img_copy.copy()
                    print(outputFrame)

                first = img_copy.copy()

                if cv2.waitKey(10) == ord('q'):
                    fps = ia.statistics.fps
                    print("FPS: ", fps)
                    done = True
                    print('break')
                i = i + 1
                if i == 500:
                    break
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
    finally:
        ia.stop_image_acquisition()
        ia.destroy()
        cv2.destroyAllWindows()
        print('fin')
        h.reset()
Example #7
class HarvestersSource(Node, Observable):
    def __init__(self):
        Node.__init__(self, "Harvesters")
        Observable.__init__(self, self._on_subscribe)
        self.cfg = self.config[SECTION]
        self.harvester = Harvester()
        if self.cfg["disableInternalLogger"]:
            self.harvester._logger.setLevel(100)

        self.cti_file = self.cfg["ctiFile"]
        self.harvester.add_file(self.cti_file)
        self.logger.info(f"Loaded harvester cti file {self.cti_file}")
        self.harvester.update()
        self.logger.info(
            f"Found {len(self.harvester.device_info_list)} devices.")

    def _on_subscribe(self, observer: Observer, scheduler=None):
        self.acquirer = self.harvester.create_image_acquirer(list_index=0)
        self.reload_camera_driver()
        self.acquirer.add_callback(
            self.acquirer.Events.NEW_BUFFER_AVAILABLE,
            CallbackOnNewBuffer(self.acquirer, observer))
        self.acquirer.start_image_acquisition(run_in_background=True)
        self.logger.info("Started acquisition in background")

        def dispose():
            def _async_dispose(*args):
                # prevent join in the same thread.
                self.logger.info("Stopping image acquisition")
                self.acquirer.stop_image_acquisition()
                self.acquirer.destroy()
                self.logger.info("Stopped image acquisition")

            (scheduler or NewThreadScheduler()).schedule(_async_dispose)

        return Disposable(dispose)

    def reload_camera_driver(self):
        node = self.acquirer.device.node_map
        node.LineSelector.value = self.cfg["lineSelector"]
        node.LineMode.value = self.cfg["lineMode"]
        node.LineInverter.value = self.cfg["lineInverter"]
        node.LineSource.value = "ExposureActive"
        node.ExposureTime.value = self.cfg["exposureTime"]
        if node.has_node("acquisitionFrameRateMode"):
            # Allied Vision Camera
            node.AcquisitionFrameRateMode.value = "Basic"
        node.AcquisitionFrameRate.value = self.cfg["acquisitionFrameRate"]

        if self.cfg["extraNodes"]:
            extra_nodes: dict = self.cfg["extraNodes"]
            for k, v in extra_nodes.items():
                node.get_node(k).value = v
Example #8
def run_camera():
    # Create a Harvester object:
    h = Harvester()

    # Load a GenTL Producer; you can load many more if you want to:   ##find producer
    h.add_file(
        r'C:\Program Files\Allied Vision\Vimba_4.2\VimbaGigETL\Bin\Win64\VimbaGigETL.cti'
    )

    # Enumerate the available devices that GenTL Producers can handle:
    h.update()
    print(h.device_info_list)

    # Select a target device and create an ImageAcquirer object that
    # controls the device:
    ia = h.create_image_acquirer(0)

    # Configure the target device; it looks very small but this is just
    # for demonstration:
    # ia.remote_device.node_map.Width.value = 1936
    # ia.remote_device.node_map.Height.value = 1216
    # ia.remote_device.node_map.PixelFormat.value = 'RGB8Packed'

    # Allow the ImageAcquirer object to start image acquisition:
    ia.start_acquisition()

    # We are going to fetch a buffer filled up with an image.
    # Note that you'll have to queue the buffer back to the
    # ImageAcquirer object once you have consumed it; the
    # with statement takes care of that for you:
    with ia.fetch_buffer() as buffer:
        # Let's create an alias of the 2D image component; it can be
        # lengthy which is not good for typing. In addition, note that
        # a 3D camera can give you 2 or more components:
        component = buffer.payload.components[0]

        # Reshape the NumPy array into a 2D array:
        image_rgb = component.data.reshape(component.height, component.width,
                                           3)

        image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)

        # img_inference runs both models
        processing_time, boxes, scores, labels = img_inference(image_bgr)

    # Stop the ImageAcquirer object acquiring images:
    ia.stop_acquisition()

    # We're going to leave here shortly:
    ia.destroy()
    h.reset()
Example #9
 def run(self):
     h = Harvester()
     h.add_file(self._cti_file_path)
     h.update()
     try:
         ia = h.create_image_acquirer(0)
     except:
         # Transfer the exception anyway:
         self._message_queue.put(sys.exc_info())
     else:
         ia.start_acquisition()
         ia.stop_acquisition()
         ia.destroy()
         h.reset()
Example #10
def create(observer, sc=None):
    h = Harvester()
    h.add_cti_file(
        "C:\\Program Files\\MATRIX VISION\\mvIMPACT Acquire\\bin\\x64\\mvGenTLProducer.cti"
    )
    h.update_device_info_list()
    acquirer = h.create_image_acquirer(0)
    print("create")

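    # Called by the acquirer whenever a new buffer arrives: fetch it, hand it
    # to the observer, then queue it back so the driver can reuse it.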
    def on_new_buffer_arrival():
        buffer = acquirer.fetch_buffer()
        print("buffer")
        observer.on_next(buffer)
        buffer.queue()

    def dispose():
        # scheduler.NewThreadScheduler().schedule(lambda *args: acquirer.stop_image_acquisition())
        acquirer.stop_image_acquisition()

    acquirer.on_new_buffer_arrival = on_new_buffer_arrival
    acquirer.start_image_acquisition()
    return Disposable(dispose)
Example #11
class Camera(QtCore.QObject):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.harvester = Harvester()
        self.acquirer = None

    def load_cti_from_env(self):
        if arch.is_64bits:
            env_name = "GENICAM_GENTL64_PATH"
        else:
            env_name = "GENICAM_GENTL32_PATH"

        genicam_paths = os.environ[env_name].split(os.pathsep)
        cti_files = []
        for genicam_path in genicam_paths:
            cti_files.extend(glob.glob(os.path.join(genicam_path, "*.cti")))

        for cti in cti_files:
            self.harvester.add_cti_file(cti)

    def reload_device(self):
        self.harvester.update_device_info_list()

    def list_devices(self, reload=False) -> DeviceInfoList:
        if reload:
            self.reload_device()
        return self.harvester.device_info_list

    def use_device(self, cam_id, cfg_file):
        devices = self.list_devices()

        self.acquirer = self.harvester.create_image_acquirer(id_=cam_id)

        parser = configparser.ConfigParser()
        parser.read(cfg_file)
        config: dict = parser["DEVICE_CONFIG"]
        for k, v in config.items():
            node_map: NodeMap = self.acquirer.device.node_map
Example #12
import png



h = Harvester()

# h.add_file('/opt/cvb-13.02.002/drivers/genicam/libGevTL.cti')
# h.add_file('/opt/baumer-gapi-sdk/lib/libbgapi2_gige.cti')
h.add_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')

h.update()

print(h.device_info_list)

#activates cam, light green
ia = h.create_image_acquirer(0)
#print(dir(ia.remote_device.node_map))

# ia.remote_device.node_map.load_xml_from_string('/home/gus/GigE-V/xml/Teledyne DALSA/TeledyneDALSA_Nano-IMX267-304_Mono_9M-12M_40769e92_ECA18.0014.xml')

ia.remote_device.node_map.acquisitionFrameRateControlMode.value = 'Programmable'
ia.remote_device.node_map.AcquisitionMode.value = 'Continuous'
ia.remote_device.node_map.ExposureMode.value = 'Timed'
ia.remote_device.node_map.ExposureTime.value = 200024
ia.remote_device.node_map.AcquisitionFrameRate.value = 0.06
ia.remote_device.node_map.exposureAlignment.value = 'Synchronous'
ia.remote_device.node_map.Width.value = 4112
ia.remote_device.node_map.Height.value = 3008
ia.remote_device.node_map.autoBrightnessMode.value = 'Off'
ia.remote_device.node_map.devicePacketResendBufferSize.value = 11
ia.remote_device.node_map.DeviceLinkThroughputLimitMode.value = 'On'
Example #13
def main(unused_argv):

    use_s3 = flags.FLAGS.s3_bucket_name is not None

    if use_s3:
        if not s3_bucket_exists(flags.FLAGS.s3_bucket_name):
            use_s3 = False
            print(
                "Bucket: %s either does not exist or you do not have access to it"
                % flags.FLAGS.s3_bucket_name
            )
        else:
            print(
                "Bucket: %s exists and you have access to it"
                % flags.FLAGS.s3_bucket_name
            )

    if use_s3:
        # Get the newest model
        s3_download_highest_numbered_file(
            flags.FLAGS.s3_bucket_name,
            "/".join([flags.FLAGS.s3_data_dir, MODEL_STATE_DIR_NAME]),
            os.path.join(flags.FLAGS.local_data_dir, MODEL_STATE_DIR_NAME),
            MODEL_STATE_FILE_TYPE,
            flags.FLAGS.network,
        )

    label_file_path = os.path.join(flags.FLAGS.local_data_dir, LABEL_FILE_NAME)
    if not os.path.isfile(label_file_path):
        print("Missing file %s" % label_file_path)
        return

    # read in the category labels
    with open(label_file_path) as label_file:
        labels = label_file.read().splitlines()

    if len(labels) == 0:
        print("No label categories found in %s" % label_file_path)
        return

    # Add the background as the first class
    labels.insert(0, "background")

    print("Labels found:")
    print(labels)

    saved_model_file_path = (
        flags.FLAGS.model_path
        if flags.FLAGS.model_path is not None
        else get_newest_saved_model_path(
            os.path.join(flags.FLAGS.local_data_dir, MODEL_STATE_DIR_NAME),
            flags.FLAGS.network,
        )
    )

    if saved_model_file_path is None:
        print("No saved model state found")
        return

    h = Harvester()
    h.add_cti_file(flags.FLAGS.gentl_producer_path)
    if len(h.cti_files) == 0:
        print("No valid cti file found at %s" % flags.FLAGS.gentl_producer_path)
        h.reset()
        return
    print("Currently available genTL Producer CTI files: ", h.cti_files)

    h.update_device_info_list()
    if len(h.device_info_list) == 0:
        print("No compatible devices detected.")
        h.reset()
        return

    print("Available devices List: ", h.device_info_list)
    print("Using device: ", h.device_info_list[0])

    cam = h.create_image_acquirer(list_index=0)

    apply_camera_settings(cam)

    display_images(cam, labels, saved_model_file_path)

    # clean up
    cam.destroy()
    h.reset()

    print("Exiting.")
Example #14
class HarvestersSource(ImageSource):
    """
    This source disregards analyzer back pressure. The user has to control the frame rate.
    """
    config_prefix = "Harvesters_Source"

    def __init__(self):
        from harvesters.core import Harvester

        super().__init__()
        self.config = config.SettingAccessor(self.config_prefix)
        self.logger = logging.getLogger("console")
        self._stop = subject.Subject()
        self.driver = Harvester()
        self.driver.add_cti_file(self.config["cti_path"])
        self.acquirer = None
        self.simex_instance = None
        self.running = False
        self.scheduler = scheduler.NewThreadScheduler()
        self.scheduler.schedule(
            lambda sc, state: self.driver.update_device_info_list())

    def is_running(self):
        return self.running

    @staticmethod
    @config.DefaultSettingRegistration(config_prefix)
    def defaultSettings(configPrefix):
        config.default_settings(configPrefix, [
            config.SettingRegistry("cti_path",
                                   "/tmp/TL.cti",
                                   type="str",
                                   title="Path to .cti file"),
            config.SettingRegistry("gain", 0, type="float"),
            config.SettingRegistry(
                "invert_polarity", False, type="bool",
                title="Invert polarity"),
            config.SettingRegistry(
                "id", "", type="str", title="Camera Id (use camera_id.py)"),
            config.SettingRegistry(
                "fps", 5, type="float", title="frame per second")
        ])

    def _read_buffer(self):
        try:
            buffer: Buffer = self.acquirer.fetch_buffer(timeout=0.1)
            payload = buffer.payload
            component = payload.components[0]
            width = component.width
            height = component.height
            content = component.data.reshape(height, width)
            time = buffer.timestamp_ns
            self.next_image(
                AcquiredImage(content.copy(), time / 1e9, f"{time}.jpg"))
            buffer.queue()
        except TimeoutException as ex:
            pass
        except Exception as ex:
            self.logger.error(ex)

    def reload_camera_driver(self):
        id_ = self.config["id"]
        if not id_:
            self.acquirer = self.driver.create_image_acquirer(list_index=0)
        else:
            self.acquirer = self.driver.create_image_acquirer(id_=id_)

        self.acquirer.on_new_buffer_arrival = self._read_buffer
        node = self.acquirer.device.node_map
        node.LineSelector.value = 'Line1'
        node.LineMode.value = 'Output'
        node.LineInverter.value = True
        node.LineSource.value = "ExposureActive"
        node.ExposureTime.value = 45.0
        node.AcquisitionFrameRateMode.value = "Basic"
        node.AcquisitionFrameRate.value = self.config["fps"]

    def start(self):
        super().start()

        self.reload_camera_driver()

        if self.acquirer:
            self.acquirer.start_image_acquisition()

        self.running = True

    def stop(self):
        if self.acquirer:
            self.acquirer.stop_image_acquisition()
            self.acquirer.destroy()
        self._stop.on_next(True)
        self.running = False
        super().stop()
Example #15
class ImageAcquirer:
    # Storage for camera modules
    GigE = []

    # Maximum fetch tries until reconnect and abort
    fetchTimeout = None
    fetchSoftReboot = None
    fetchAbort = None

    cv2 = None
    np = None

    # Function runs when initializing class
    def __init__(self,
                 SetCameraLighting,
                 Config_module=None,
                 Warnings_module=None):

        # Misc
        from time import sleep
        import sys

        self.sleep = sleep
        self.warnings = Warnings_module
        self.sys = sys

        # Store custom module
        self.FileConfig = Config_module

        # Lights function
        self.SetCameraLighting = SetCameraLighting

        # GenICam helper
        from harvesters.core import Harvester, TimeoutException

        # Init harvester
        self.harvester = Harvester()
        self.TimeoutException = TimeoutException

        # Temperature
        self.criticalTemp = self.FileConfig.Get(
            "Cameras")["Generic"]["Temperature"]["Critical"]
        self.warningTemp = self.FileConfig.Get(
            "Cameras")["Generic"]["Temperature"]["Warning"]

        # Storage for camera modules
        self.n_camera = self.FileConfig.Get("QuickSettings")["ActiveCameras"]

        self.ImportCTI()  # import cti file
        self.Scan()  # check if producer is available
        self.Create()  # define image Acquirer objects from discovered devices
        self.Config()  # configure image acquirer objects

        self.ImportOpenCV()  # Create opencv module

    # Import cti file from GenTL producer
    def ImportCTI(self):
        # path to GenTL producer
        CTIPath = self.FileConfig.Get("Cameras")["Generic"]["CTIPath"]

        from os import path
        if path.isfile(CTIPath):
            self.harvester.add_file(CTIPath)
        else:
            print(
                "\nCould not find the GenTL producer for GigE\nCheck the file path given in VQuIT_config.json>Cameras>Generic>CTIPath"
            )
            self.sys.exit(1)

    def ImportOpenCV(self):
        if self.cv2 is None:
            print("Importing OpenCV")
            import cv2
            self.cv2 = cv2
        return self.cv2

    def ImportNumpy(self):
        if self.np is None:
            print("Importing Numpy")
            import numpy as np
            self.np = np
        return self.np

    # Scan for available producers
    def Scan(self):
        tries = 100
        for i in range(0, tries):
            self.harvester.update()
            foundDevices = len(self.harvester.device_info_list)
            print('Scanning for available cameras... ' + str(foundDevices) +
                  " of " + str(self.n_camera) + " (Attempt " + str(i + 1) +
                  " of " + str(tries) + ")",
                  end='\r')
            if foundDevices >= self.n_camera:
                break
            self.sleep(1)

        if len(self.harvester.device_info_list) < self.n_camera:
            print("Error: Found ", len(self.harvester.device_info_list),
                  " of ", self.n_camera, "requested producers in network")
            self.sys.exit(1)
        # print(self.harvester.device_info_list)     # Show details of connected devices

    # Create image acquirer objects
    def Create(self):
        cameraInfo = self.FileConfig.Get("Cameras")["Advanced"]
        for i in range(0, self.n_camera):
            try:
                # Create camera instances in order written in VQuIT_Config.json>Cameras>Advanced
                newIA = self.harvester.create_image_acquirer(
                    id_=cameraInfo[i]["ID"])
                self.GigE.append(newIA)
            except:
                print(
                    "Error: ID '" + str(cameraInfo[i]["ID"]) +
                    "' not found\nMake sure no other instances are connected to the cameras"
                )
                exit()

    # Configure image acquirer objects
    def Config(self):
        # Load configuration file (Use ["Description"] instead of ["Value"] to get a description of said parameter)
        qs = self.FileConfig.Get("QuickSettings")
        c = self.FileConfig.Get("Cameras")

        cameraInfo = c["Advanced"]
        imgFormat = c["Generic"]["ImageFormatControl"]
        acquisition = c["Generic"]["AcquisitionControl"]
        transport = c["Generic"]["TransportLayerControl"]
        trigger = c["Generic"]["TimedTriggered_Parameters"]
        fetchError = c["Generic"]["FetchError"]

        # Maximum fetch tries per camera
        self.fetchTimeout = fetchError["Timeout"]
        self.fetchSoftReboot = fetchError["SoftReboot"]
        self.fetchAbort = fetchError["Abort"]

        # Jumbo packets
        jumboPackets = qs["JumboPackets"]
        if jumboPackets:
            print("Jumbo packets Active\n")
            packetSize = transport["GevSCPSPacketSize"]["Value"][0]
        else:
            print("\r")
            self.warnings.warn(
                "Running script without jumbo packets can cause quality and reliability issues"
            )
            self.sleep(0.2)
            packetSize = transport["GevSCPSPacketSize"]["Value"][1]

        # Binning
        binning = qs["Binning"]
        if binning:
            print("Binning Active")
            imgWidth = int(imgFormat["Resolution"]["Width"] / 4)
            imgHeight = int(imgFormat["Resolution"]["Height"] / 4)
            binningType = imgFormat["BinningType"]["Value"][1]
        else:
            imgWidth = imgFormat["Resolution"]["Width"]
            imgHeight = imgFormat["Resolution"]["Height"]
            binningType = imgFormat["BinningType"]["Value"][0]

        # Set standard camera parameters
        for cameraID in range(0, len(self.GigE)):
            print("Setting up camera " + cameraInfo[cameraID]["Camera"] +
                  "...",
                  end="\r")
            # ImageFormatControl
            self.GigE[
                cameraID].remote_device.node_map.PixelFormat.value = imgFormat[
                    "PixelFormat"]["Value"][0]
            self.GigE[
                cameraID].remote_device.node_map.Binning.value = binningType
            self.GigE[cameraID].remote_device.node_map.ReverseX.value = False
            self.GigE[cameraID].remote_device.node_map.ReverseY.value = False

            # AcquisitionControl
            self.GigE[
                cameraID].remote_device.node_map.ExposureMode.value = acquisition[
                    "ExposureMode"]["Value"][0]

            # TransportLayerControl
            self.GigE[
                cameraID].remote_device.node_map.GevSCPSPacketSize.value = packetSize  # Stock: 1060 | recommended 8228

            # TimedTriggered parameters
            self.GigE[
                cameraID].remote_device.node_map.FrameAverage.value = trigger[
                    "FrameAverage"]["Value"]
            self.GigE[
                cameraID].remote_device.node_map.MultiExposureNumber.value = trigger[
                    "MultiExposureNumber"]["Value"]
            self.GigE[cameraID].remote_device.node_map.MultiExposureInactiveRaw.value = \
                trigger["MultiExposureInactive"][
                    "Value"]

            # Not in use
            # AcquisitionPeriod (Integration time - irrelevant when using TimedTriggered)
            # value: microseconds (min: 102775 µs @4096 x 3008 - BayerRG8 - Binning Disabled (Max frame rate 9.73 Hz) , max: 60s)

        # Set resolution
        self.SetROI(imgHeight, imgWidth)

    # Start image acquisition
    def Start(self):
        print("\nStart image acquisition\n")
        for i in range(0, len(self.GigE)):
            self.GigE[i].start_acquisition()

    # Set Region Of Interest resolution and center resulting image (very experimental)
    def SetROI(self, height, width, disableAcquisition=None):
        # Check if height and width are valid
        heightMax = 3000  # Absolute max 3008
        widthMax = 3072  # Absolute max 4096

        if height <= heightMax and width <= widthMax:
            # Increment height and width until a valid combination (for which the pixel area is divisible by 4096) is found
            # (not foolproof, but works with most decent aspect ratios that increment by 100)
            self.warnings.warn(
                "Dynamic ROI is still an experimental feature and can cause errors"
            )
            rightValue = False
            heightMaxReached = False
            while not rightValue:
                # Try height and width
                if ((height * width) % 4096) == 0:
                    rightValue = True
                else:
                    # Try height + 1 and width
                    height += 1
                    if height > heightMax:
                        heightMaxReached = True
                        height = heightMax

                    if ((height * width) % 4096) == 0:
                        rightValue = True
                    else:
                        # Try height and width + 1
                        if heightMaxReached is False:
                            height -= 1

                        width += 1
                        if width > widthMax:
                            width = widthMax

                        if ((height * width) % 4096) == 0:
                            rightValue = True
                        else:
                            # Set height + 1 and width + 1 and run loop again
                            height += 1
                            if height > heightMax:
                                heightMaxReached = True
                                height = heightMax
            print("Dynamic ROI calculator result: " + str(width) + "x" +
                  str(height))

        # Change settings for all available cameras
        for cameraID in range(0, len(self.GigE)):

            # Check if requested resolution does not exceed the max for each camera
            widthMax = self.GigE[
                cameraID].remote_device.node_map.WidthMax.value
            heightMax = self.GigE[
                cameraID].remote_device.node_map.HeightMax.value
            widthMin = 512
            heightMin = 512

            # Check boundaries
            if width in range(widthMin, (widthMax + 1)) and height in range(
                    heightMin, (heightMax + 1)):

                # Image acquisition cannot be on when changing this setting
                if disableAcquisition is True:
                    self.GigE[cameraID].stop_acquisition()

                # Set width and height
                self.GigE[cameraID].remote_device.node_map.Width.value = width
                self.GigE[
                    cameraID].remote_device.node_map.Height.value = height

                # Set offsets
                offsetX = round((widthMax - width) / 2)
                offsetY = round((heightMax - height) / 2)
                self.GigE[
                    cameraID].remote_device.node_map.OffsetX.value = offsetX
                self.GigE[
                    cameraID].remote_device.node_map.OffsetY.value = offsetY

                # Turn image acquisition back on
                if disableAcquisition is True:
                    self.GigE[cameraID].start_acquisition()
            else:
                raise ValueError("Requested ROI (" + str(width) + "x" +
                                 str(height) + ") must lie between " +
                                 str(widthMin) + "x" + str(heightMin) +
                                 " and " + str(widthMax) + "x" +
                                 str(heightMax) + " for camera " +
                                 str(cameraID))

    def SetCameraConfig(self, productInfo):

        # Stop image acquisition to make changes
        self.Stop()

        # Set configuration for all cameras based on acode of product
        for cameraID in range(0, len(self.GigE)):
            if (cameraID % 2) == 0:
                # Camera number is even -> bottom camera
                cameraPosition = "BottomCameras"
            else:
                # Top camera
                cameraPosition = "TopCameras"
            cameraConfig = productInfo["Configuration"][cameraPosition]

            self.camConfig(cameraID,
                           exposure=cameraConfig["ExposureTime"],
                           gain=cameraConfig["Gain"],
                           blackLevel=cameraConfig["BlackLevel"])

        # Set ROI
        self.SetROI(2560, 2560)

        # Restart image acquisition after changes are made
        self.Start()

        # self.data_Top_Lighting = []
        # for lights in cameraConfig["Lighting"]["U"]:
        #     self.data_Top_Lighting.append(lights)
        # for lights in cameraConfig["Lighting"]["D"]:
        #     self.data_Top_Lighting.append(lights)

    # Tweak camera settings on the go
    def camConfig(self, camNr, exposure=None, gain=None, blackLevel=None):
        if exposure:
            self.GigE[
                camNr].remote_device.node_map.ExposureTimeRaw.value = exposure
        if gain:
            self.GigE[camNr].remote_device.node_map.GainRaw.value = gain
        if blackLevel:
            self.GigE[
                camNr].remote_device.node_map.BlackLevelRaw.value = blackLevel

    # Retrieve camera data
    def RequestFrame(self, camNr):
        cv2 = self.ImportOpenCV()

        # Loop process until successful
        loop = 0
        fetchImage = True
        while fetchImage:
            loop += 1
            try:
                if loop > 1 and (loop % 2) != 0:
                    # Wait before sending new trigger every odd try that is not the first
                    self.sleep(0.5)

                # Turn on lights
                self.SetCameraLighting(camNr, 1)

                # Trigger camera
                self.GigE[
                    camNr].remote_device.node_map.TriggerSoftware.execute()

                # Wait for buffer until timeout
                print("Camera " + str(camNr) + ": Fetch buffer (try " +
                      str(loop) + ")...",
                      end='\r')
                with self.GigE[camNr].fetch_buffer(
                        timeout=self.fetchTimeout) as buffer:
                    print("Camera " + str(camNr) + ": Fetched (try " +
                          str(loop) + ")",
                          end='\r')
                    # access the image payload
                    component = buffer.payload.components[0]

                    if component is not None:
                        image = component.data.reshape(component.height,
                                                       component.width)

                        # Turn off lights
                        self.SetCameraLighting(camNr, 0)

                        # BayerRG -> RGB (does not work properly when the image is already scaled down)
                        image = cv2.cvtColor(image, cv2.COLOR_BayerRG2RGB)

                        # Transpose + flip to rotate fetched images by +-90 deg
                        image = cv2.transpose(image)
                        if camNr % 2 == 0:
                            # Flip x to rotate bottom cameras -90 deg
                            flipCode = 0
                        else:
                            # Flip y to rotate top cameras +90 deg
                            flipCode = 1
                        image = cv2.flip(image, flipCode=flipCode)

                        return image

            except self.TimeoutException:
                print("Camera " + str(camNr) + ": Fetch timeout (try " +
                      str(loop) + ")")
            except KeyboardInterrupt:
                print("Camera " + str(camNr) +
                      ": Fetch interrupted by user (try " + str(loop) + ")")
            # except:
            #     print("Camera " + str(camNr) + ": Unexpected error (try " + str(loop) + ")")

            if loop >= self.fetchSoftReboot:
                print("Camera" + str(camNr) +
                      ": Failed...trying soft reboot (try " + str(loop) + ")")
                self.SoftReboot()

            if loop >= self.fetchAbort:
                print("Check camera" + str(camNr) + ": Too manny tries (try " +
                      str(loop) + " of " + str(self.fetchAbort) + ")")
                fetchImage = False

        # Something went wrong
        return False

    # Get camera temperature
    def getTemperature(self, camNr):
        return float(
            self.GigE[camNr].remote_device.node_map.DeviceTemperatureRaw.value
            / 100)

    # Return thermal performance of the camera
    def thermalCondition(self):
        for i in range(0, self.n_camera):
            temp = self.getTemperature(i)
            if temp > self.criticalTemp:
                self.warnings.warn("Camera temperature critical")
                return "Critical"
            elif temp > self.warningTemp:
                self.warnings.warn("Camera temperature above " +
                                   str(self.warningTemp))
                return "Warning"
        return "Normal"

    # Get camera features
    def getCameraAttributes(self):
        return dir(self.GigE[0].remote_device.node_map)

    # Stop image acquisition
    def Stop(self):
        print("Stop image acquisition")
        for i in range(0, len(self.GigE)):
            self.GigE[i].stop_acquisition()

    # Destroy image acquirer objects
    def Destroy(self):
        print("Destroy image acquirer objects")
        for i in range(0, len(self.GigE)):
            self.GigE[i].destroy()

    # Reset harvester
    def Reset(self):
        self.harvester.reset()

    # Soft reboot
    def SoftReboot(self):
        self.Stop()
        self.Start()
Example #16
#And now you have to update the list of remote devices; it fills up your device information list and you'll select a remote device to control from the list:

h.update()
#The following code will let you know the remote devices that you can control:

h.device_info_list
#Our friendly GenTL Producer, the so-called TLSimu, gives you the following information:

# [(unique_id='TLSimuMono', vendor='EMVA_D', model='TLSimuMono', tl_type='Custom', user_defined_name='Center', serial_number='SN_InterfaceA_0', version='1.2.3'),
#  (unique_id='TLSimuColor', vendor='EMVA_D', model='TLSimuColor', tl_type='Custom', user_defined_name='Center', serial_number='SN_InterfaceA_1', version='1.2.3'),
#  (unique_id='TLSimuMono', vendor='EMVA_D', model='TLSimuMono', tl_type='Custom', user_defined_name='Center', serial_number='SN_InterfaceB_0', version='1.2.3'),
#  (unique_id='TLSimuColor', vendor='EMVA_D', model='TLSimuColor', tl_type='Custom', user_defined_name='Center', serial_number='SN_InterfaceB_1', version='1.2.3')]
# And you create an image acquirer object specifying a target remote device. The image acquirer does the image acquisition task for you. In the following example it's trying to create an acquirer object of the first candidate remote device in the device information list:

ia = h.create_image_acquirer(0)
#Or equivalently:

ia = h.create_image_acquirer(list_index=0)
#You can connect to the same remote device by passing more unique information to the method. In the following case, we specify the serial number of the target remote device:

ia = h.create_image_acquirer(serial_number='SN_InterfaceA_0')
#You can specify a target remote device using properties that are provided through the device_info_list property of the Harvester class object. Note that it is invalid if the specifiers give you two or more remote devices. Please specify sufficient information so that the combination gives you a unique target remote device.

#We named the image acquirer object ia in the above example, but in practice you may give it a purpose-oriented name like ia_face_detection. Note that a camera itself does NOT acquire/receive images; it just transmits them. In a machine vision application there are at least two roles: one transmits images and the other acquires them. The ImageAcquirer class objects play the latter role, and each holds a camera as its remote_device object, the source of images.

#Anyway, now we start image acquisition:

ia.start_acquisition()
#Once you have started image acquisition, you will definitely want to get an image. Images are delivered to buffers that the acquirer has allocated. To fetch a buffer that has been filled up with an image, you have 2 options; the first option is to use the with statement:
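#The text above stops before showing that first option, so here is a minimal sketch of it (an assumption on my part, mirroring the other examples on this page: ia delivers a single 2D image component). The with statement queues the buffer back to the acquirer when the block ends:

with ia.fetch_buffer() as buffer:
    # The buffer is only valid inside this block, so copy the data if you
    # need it afterwards:
    component = buffer.payload.components[0]
    image = component.data.reshape(component.height, component.width).copy()
    print(image.shape)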
Example #17
import time

from harvesters.core import Harvester


def process(ia):
    ia.fetch_buffer().queue()
    print("buffer")


if __name__ == '__main__':
    h = Harvester()
    h.add_cti_file("/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti")
    h.update_device_info_list()

    for i in range(10):
        ia = h.create_image_acquirer(id_="VID1AB2_PID0001_671090012")
        print(f"{i}:Created")
        ia.on_new_buffer_arrival = lambda: process(ia)
        ia.start_image_acquisition()
        print(f"{i}:started")

        time.sleep(2)

        ia.stop_image_acquisition()
        print(f"{i}:stopped")

        ia.destroy()
        print(f"{i}:Destroyed")
Example #18
class TestTutorials2(unittest.TestCase):
    def setUp(self) -> None:
        # The following block is just for administrative purpose;
        # you should not include it in your code:
        self.cti_file_path = get_cti_file_path()
        if 'TLSimu.cti' not in self.cti_file_path:
            self.skipTest('The target is not TLSimu.')

        # Create a Harvester object:
        self.harvester = Harvester()

    def tearDown(self) -> None:
        #
        self.harvester.reset()

    def test_traversable_tutorial(self):
        # Add a CTI file path:
        self.harvester.add_file(self.cti_file_path)
        self.harvester.update()

        # Connect to the first camera in the list:
        ia = self.harvester.create_image_acquirer(0)

        #
        num_images_to_acquire = 0

        # Then start image acquisition:
        ia.start_acquisition()

        while num_images_to_acquire < 100:
            #
            with ia.fetch_buffer() as buffer:
                # self.do_something(buffer)
                pass

            num_images_to_acquire += 1

        # We don't need the ImageAcquirer object. Destroy it:
        ia.destroy()

    def test_ticket_127(self):
        #
        self.harvester.add_cti_file(self.cti_file_path)
        self.harvester.remove_cti_file(self.cti_file_path)

        #
        self.harvester.add_cti_file(self.cti_file_path)
        self.harvester.remove_cti_files()

        #
        self.harvester.add_cti_file(self.cti_file_path)
        self.assertIsNotNone(self.harvester.cti_files)

        #
        self.harvester.update_device_info_list()

        # Connect to the first camera in the list:
        ia = self.harvester.create_image_acquirer(0)

        #
        ia.start_image_acquisition()
        self.assertTrue(ia.is_acquiring_images())
        ia.stop_image_acquisition()
        self.assertFalse(ia.is_acquiring_images())
Example #19
def generate(previewName):
    """Video streaming generator function."""
    if previewName == 'harvesters':

        h = Harvester()
        h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
        h.update_device_info_list()
        ia = h.create_image_acquirer(0)
        ia.remote_device.node_map.ExposureTimeRaw.value = 20_000
        #ia.remote_device.node_map.PixelFormat.value = 'BayerRG8'
        #ia.remote_device.node_map.TestPattern = 'HorizontalColorBar'
        time.sleep(1)
        try:
            ia.start_image_acquisition()
            i = 0
            done = False

            while not done:
                with ia.fetch_buffer() as buffer:
                    img = buffer.payload.components[0].data
                    img = img.reshape(buffer.payload.components[0].height,
                                      buffer.payload.components[0].width)
                    img_copy = img.copy()
                    img_copy = cv2.cvtColor(img, cv2.COLOR_BayerRG2RGB)

                    if i == 0:
                        first = img_copy.copy()

                    is_change = np.allclose(first, img_copy, 3)
                    #print(is_change)
                    if not is_change:

                        # cv2.namedWindow("window", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
                        # cv2.imshow("window", img_copy)
                        # cv2.imwrite(f'./images/image_{i}.png', img_copy)
                        img_copy_ = cv2.resize(img_copy, (640, 480))

                        frame = cv2.imencode('.jpg', img_copy_)[1].tobytes()
                        yield (b'--frame\r\n'
                               b'Content-Type: image/jpeg\r\n\r\n' + frame +
                               b'\r\n')

                    first = img_copy.copy()

                    if cv2.waitKey(10) == ord('q'):
                        fps = ia.statistics.fps
                        print("FPS: ", fps)
                        done = True
                        print('break')
                    i = i + 1
                    # if i == 200:
                    #     break
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
        finally:
            ia.stop_image_acquisition()
            ia.destroy()
            print('fin')
            h.reset()

    else:

        cap = cv2.VideoCapture(0)

        # Read until video is completed
        while (cap.isOpened()):
            # Capture frame-by-frame
            ret, img = cap.read()
            if ret == True:
                img = cv2.resize(img, (0, 0), fx=1, fy=1)
                frame = cv2.imencode('.jpg', img)[1].tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
                time.sleep(0.001)
            else:
                break
Example #20
class Camera:
    def __init__(self):
        self.connection_exceptions = ()
        self.__harv = Harvester()
        self.__img_cap = None
        pass

    def connect(self):
        try:
            self.__harv.add_file('/usr/lib/ids/cti/ids_gevgentl.cti')
            self.__harv.update()
        except Exception as e:
            print(e)
        pass

    def disconnect(self):
        self.__img_cap.destroy()
        self.__harv.reset()
        pass

    def initialize(self):
        try:
            self.__img_cap = self.__harv.create_image_acquirer(list_index=0)
            self.__img_cap.remote_device.node_map.PixelFormat.value = 'Mono8'
        except Exception as e:
            print(e)
        pass

    def capture(self) -> str:
        try:
            # Start acquisition
            self.__img_cap.start_acquisition()

            with self.__img_cap.fetch_buffer() as buffer:
                component = buffer.payload.components[0]
                # reshape to a 2D array
                _2d = component.data.reshape(component.height, component.width)

            # Stop acquisition
            self.__img_cap.stop_acquisition()

            # SERIALIZE _2d
            _2d_bytes = pickle.dumps(_2d)
            encoded = base64.b64encode(_2d_bytes)

            print(_2d)
            print(_2d_bytes)
            print(encoded)

            # MUST RETURN A JSON STRING AS:
            image = {
                "filename": "my_image{}".format(randrange(2000)),
                "image": encoded.decode('ascii')
            }

            # To consume
            #decoded = base64.b64decode(image['image'])
            #_2d_loaded = pickle.loads(decoded)

            #print(decoded)
            #print(_2d_loaded)

            return json.dumps(image)
        except Exception as e:
            print(e)
            return None

    def configure(self, parameters):
        pass
Example #21
class Camera_harvester(Camera_template):
    def __init__(self, producer_paths=None):
        super(Camera_harvester, self).__init__()

        self.name = "Harvester"

        ##Harvester object used to communicate with Harvester module
        self.h = Harvester()

        ##paths to GenTL producers
        self.paths = []
        if (producer_paths):
            for path in producer_paths:
                self.add_gentl_producer(path)

        ##Image acquirer object used by Harvester
        self.ia = None

    def get_camera_list(self, ):
        """!@brief Connected camera discovery
        @details Uses Harvester object to discover all connected cameras
        @return List of dictionaries containing information about the cameras
        """

        self.h.update()
        self.devices_info = []
        for device in self.h.device_info_list:
            d = {
                'id_': device.id_,
                'model': device.model,
                'vendor': device.vendor
            }
            self.devices_info.append(d)
        return self.devices_info

    def select_camera(self, selected_device):
        """!@brief choose camera to connect to
        @details Select camera you will be using and set Camera object accordingly
        @param[in] selected_device ID of a camera you want to connect to
        """

        #translate selected device to index in harvester's device info list
        for index, camera in enumerate(self.devices_info):
            if camera['id_'] == selected_device:
                harvester_index = index
                break

        self.active_camera = harvester_index
        self.ia = self.h.create_image_acquirer(harvester_index)

        try:
            self.ia.remote_device.node_map.GevSCPSPacketSize.value = 1500
        except:
            pass

    def get_parameters(self,
                       feature_queue,
                       flag,
                       visibility=Config_level.Unknown):
        """!@brief Read parameters from camera
        @details Loads all available camera parameters
        @param[in] feature_queue each parameter's dictionary is put into 
            this queue
        @param[in] flag used to signal that the method finished (threading object)
        @param[in] visibility Defines level of parameters that should be put in
            the queue
        @return True if success else False
        """

        features = dir(self.ia.remote_device.node_map)

        for feature in features:
            if (feature.startswith('_')):
                continue
            try:
                feature_obj = getattr(self.ia.remote_device.node_map,
                                      feature).node
                feature = getattr(self.ia.remote_device.node_map, feature)
                #Some information is accessible through harvester feature,
                #for some information we need to go deeper into Genapi itself
                feat_acc = feature_obj.get_access_mode()
            except:
                continue

            #according to genicam standard
            #0 - not implemented
            #1 - not available
            #3 - write only
            #4 - read only
            #5 - read and write
            if (feat_acc == 0 or feat_acc == 1):
                continue

            feat_vis = feature_obj.visibility
            if (feat_vis < visibility):
                features_out = {}
                features_out['name'] = feature_obj.name
                #disp_name = feature.get_display_name()
                features_out['attr_name'] = feature_obj.display_name

                #Set feature's write mode
                try:
                    if (feat_acc == 5 or feat_acc == 3):
                        attr = False
                    else:
                        attr = True
                except:
                    attr = None

                features_out['attr_enabled'] = attr

                #Get feature's type if it exists
                #intfIValue = 0       #: IValue interface
                #intfIBase = 1        #: IBase interface
                #intfIInteger = 2     #: IInteger interface
                #intfIBoolean = 3     #: IBoolean interface
                #intfICommand = 4     #: ICommand interface
                #intfIFloat = 5       #: IFloat interface
                #intfIString = 6      #: IString interface
                #intfIRegister = 7    #: IRegister interface
                #intfICategory = 8    #: ICategory interface
                #intfIEnumeration = 9 #: IEnumeration interface
                #intfIEnumEntry = 10  #: IEnumEntry interface
                #intfIPort = 11       #: IPort interface

                try:
                    attr = feature_obj.principal_interface_type
                    if (attr == 2):
                        attr = "IntFeature"
                    elif (attr == 3):
                        attr = "BoolFeature"
                    elif (attr == 4):
                        attr = "CommandFeature"
                    elif (attr == 5):
                        attr = "FloatFeature"
                    elif (attr == 6):
                        attr = "StringFeature"
                    elif (attr == 9):
                        attr = "EnumFeature"
                    else:
                        attr = None

                except:
                    attr = None

                features_out['attr_type'] = attr
                features_out['attr_enums'] = None
                features_out['attr_cat'] = None

                #Get feature's value if it exists
                try:
                    attr = feature.value
                except:
                    attr = None

                features_out['attr_value'] = attr

                #Get feature's range if it exists
                try:
                    attr = [feature.min, feature.max]
                except:
                    attr = None

                features_out['attr_range'] = attr

                #Get feature's increment if it exists
                try:
                    attr = feature.inc
                except:
                    attr = None

                features_out['attr_increment'] = attr

                #Get feature's max length if it exists
                try:
                    attr = feature.max_length
                except:
                    attr = None

                features_out['attr_max_length'] = attr

                try:
                    attr = feature_obj.tooltip
                except:
                    attr = None

                features_out['attr_tooltip'] = attr

                feature_queue.put(features_out)
        flag.set()
        return
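
    # Usage sketch for the feature enumeration ending above (a hedged
    # example, not part of the original class): the caller supplies the
    # queue, the event and a visibility level, then drains the queue once
    # the event is set, as save_config below does. 'cam' is assumed to be
    # an already connected instance of this class.
    #
    #   import queue
    #   import threading
    #   params = queue.Queue()
    #   done = threading.Event()
    #   cam.get_parameters(params, done, Config_level.Invisible)
    #   done.wait()
    #   while not params.empty():
    #       p = params.get_nowait()
    #       print(p['name'], p['attr_type'], p['attr_value'])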

    def read_param_value(self, param_name):
        """!@brief Used to get value of one parameter based on its name
        @param[in] param_name Name of the parameter whose value we want to read
        @return A value of the selected parameter
        """

        try:
            val = getattr(self.ia.remote_device.node_map, param_name).value
            return val
        except:
            return None
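
    # A minimal usage sketch (assumes 'cam' is a connected instance of this
    # class; 'ExposureTime' is a common GenICam feature name and may differ
    # per camera):
    #
    #   exposure = cam.read_param_value('ExposureTime')
    #   if exposure is None:
    #       print('ExposureTime could not be read')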

    def set_parameter(self, parameter_name, new_value):
        """!@brief Method for setting camera's parameters
        @details Sets parameter to value defined by new_value
        @param[in] parameter_name A name of the parameter to be changed
        @param[in] new_value New value compatible with the parameter's type
        @return True if success else False
        """
        try:
            getattr(self.ia.remote_device.node_map,
                    parameter_name).value = new_value
            return True
        except:
            return False
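
    # A minimal usage sketch (assumes 'cam' is a connected instance of this
    # class; 'ExposureTime' and 'PixelFormat' are common GenICam feature
    # names and may differ per camera):
    #
    #   if not cam.set_parameter('ExposureTime', 10000.0):
    #       print('Failed to set ExposureTime')
    #   cam.set_parameter('PixelFormat', 'Mono8')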

    def execute_command(self, command_feature):
        """@brief Execute command feature type
        @param[in] command_feature Name of the selected command feature
        """
        try:
            getattr(self.ia.remote_device.node_map, command_feature).execute()
        except:
            pass
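
    # A minimal usage sketch (assumes 'cam' is a connected instance of this
    # class; 'TriggerSoftware' is a typical GenICam command feature and may
    # not be available on every camera):
    #
    #   cam.execute_command('TriggerSoftware')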

    def get_single_frame(self):
        """!@brief Grab a single frame from the camera
        @return Unmodified frame data from the camera and its pixel format
        """
        self.ia.start_acquisition()

        with self.ia.fetch_buffer() as buffer:
            frame = buffer.payload.components[0]
            pixel_format = self.ia.remote_device.node_map.PixelFormat.value
            #Copy the data: the underlying buffer is requeued and may be
            #overwritten as soon as the with block exits
            frame_data = frame.data.copy()

        self.ia.stop_acquisition()
        return [frame_data, pixel_format]
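
    # Usage sketch (hedged): the frame data is returned as a flat numpy
    # array, so displaying it requires reshaping with the image dimensions.
    # 'Width' and 'Height' are standard GenICam feature names; packed or
    # Bayer pixel formats would need an extra conversion step.
    #
    #   data, pixel_format = cam.get_single_frame()
    #   if pixel_format == 'Mono8':
    #       height = cam.read_param_value('Height')
    #       width = cam.read_param_value('Width')
    #       image = data.reshape(height, width)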

    def load_config(self, path):
        """!@brief Load existing camera configuration
        @param[in] path Defines a path and a name of the file containing the
            configuration of the camera
        @return True if success else False
        """
        param = {}
        val = None
        attr_type = None
        with open(path, 'r') as config:
            #Skip blank lines
            config_dense = (line for line in config if line.strip())
            for line in config_dense:
                line = line.rstrip('\n')
                if line.startswith('attr_value') and param:
                    val = line.split('=')

                    if (attr_type[1] == 'IntFeature'):
                        self.set_parameter(param['name'], int(val[1]))
                    elif (attr_type[1] == 'FloatFeature'):
                        self.set_parameter(param['name'], float(val[1]))
                    elif (attr_type[1] == 'EnumFeature'):
                        self.set_parameter(param['name'], val[1])
                    elif (attr_type[1] == 'BoolFeature'):
                        self.set_parameter(param['name'], val[1] == 'True')
                    elif (attr_type[1] == 'StringFeature'):
                        self.set_parameter(param['name'], val[1])

                    val = None
                    param.clear()
                    attr_type = None
                elif line.startswith('attr_type') and param:
                    attr_type = line.split('=')

                elif line.startswith('name'):
                    param['name'] = line.split('=')[1]
                    val = None

            return True
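
    # The parser above expects the simplified plain-text format written by
    # save_config below: one 'key=value' line per attribute and a blank
    # line between features, with 'name' appearing before 'attr_type' and
    # 'attr_value'. A shortened, illustrative example:
    #
    #   name=ExposureTime
    #   attr_name=Exposure Time
    #   attr_type=FloatFeature
    #   attr_value=10000.0
    #
    #   name=PixelFormat
    #   attr_type=EnumFeature
    #   attr_value=Mono8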

    def save_config(self, path):
        """!@brief Saves configuration of a camera to .xml file
        @param[in] path A path where the file will be saved
        """
        #At the time of writing this code not Harvester nor genapi for Python
        #supported saving .xml config, so the format of saved data created
        #here is nonstandard and simplified for now.
        #More here https://github.com/genicam/harvesters/issues/152

        parameters = queue.Queue()
        tmp_flag = threading.Event()
        self.get_parameters(parameters, tmp_flag, Config_level.Invisible)

        with open(path, 'w') as config:
            while not parameters.empty():
                param = parameters.get_nowait()
                for key, val in param.items():
                    config.write(key + "=" + str(val) + '\n')
                config.write('\n')
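
    # Round-trip usage sketch (assumes 'cam' is a connected instance of
    # this class; the file name is illustrative):
    #
    #   cam.save_config('camera_settings.cfg')
    #   cam.load_config('camera_settings.cfg')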

    def _frame_producer(self):
        """!@brief Gets frames from camera while continuous acquisition is active
        @details Loads frames from camera as they come and stores them
            in a frame queue for consumer thread to process. The thread 
            runs until stream_stop_switch is set
        """
        self.ia.start_acquisition()

        while (not self._stream_stop_switch.is_set()):
            with self.ia.fetch_buffer() as buffer:
                frame = buffer.payload.components[0]
                pixel_format = self.ia.remote_device.node_map.PixelFormat.value
                global_queue.frame_queue[self.cam_id].put_nowait(
                    [frame.data.copy(), pixel_format])
                #data should contain a numpy array which should be compatible
                #with an opencv image; if not, do some conversion here
        self.ia.stop_acquisition()
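
    # Consumer-side sketch (hedged): _frame_producer is meant to run in its
    # own thread and pushes [frame, pixel_format] pairs into
    # global_queue.frame_queue[self.cam_id]; 'streaming' and 'process' are
    # placeholders for the application's own loop condition and handler.
    #
    #   while streaming:
    #       try:
    #           frame, pixel_format = \
    #               global_queue.frame_queue[cam.cam_id].get(timeout=1)
    #       except queue.Empty:
    #           continue
    #       process(frame, pixel_format)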

    def disconnect_camera(self):
        """!@brief Disconnect camera and restores the object to its initial state"""

        self.stop_recording()
        self.disconnect_harvester()
        global_queue.remove_frame_queue(self.cam_id)
        self.__init__()

#______________Unique methods___________________

    def disconnect_harvester(self):
        """!@brief Destroys harvester object so other APIs can access cameras
        """
        self.h.reset()

    def add_gentl_producer(self, producer_path):
        """!@brief Add a new frame producer to the harvester object
        @details Adds .cti file specified by producer_path to the harvester object
        @param[in] producer_path Path to a .cti file
        @return list of all active producers
        """
        if (producer_path not in self.paths
                and producer_path.endswith(".cti")):
            self.h.add_file(producer_path)
            self.paths.append(producer_path)

        return self.paths

    def remove_gentl_producer(self, producer_path):
        """!@brief Remove existing frame producer from the harvester object
        @details Removes .cti file specified by producer_path from the harvester object
        @param[in] producer_path Path to a .cti file
        @return Tuple of the list of remaining GenTL producer paths and a boolean value signaling whether the removal was successful
        """
        if (producer_path in self.paths):
            self.paths.remove(producer_path)
            self.h.remove_file(producer_path)
            return (self.paths, True)
        else:
            return (None, False)

    def get_gentl_producers(self):
        """!@brief Used to get a list of all path used by Harvesters in a
        present moment
        @return List of defined cti file paths
        """
        return self.paths
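
    # Usage sketch for the GenTL producer management above (the .cti path
    # is illustrative; use whichever producer your camera SDK ships):
    #
    #   cam.add_gentl_producer('/opt/sdk/lib/TLSimu.cti')
    #   print(cam.get_gentl_producers())
    #   paths, removed = cam.remove_gentl_producer('/opt/sdk/lib/TLSimu.cti')
    #   cam.disconnect_harvester()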