Example No. 1
    def acquisition_oneshot(self):
        """Concrete implementation of :meth:`pymanip.video.Camera.acquisition_oneshot` for the AVT camera.
        """
        self.camera.AcquisitionMode = "SingleFrame"
        self.frame = self.camera.getFrame()
        self.frame.announceFrame()
        self.camera.startCapture()
        try:
            self.frame.queueFrameCapture()
            self.camera.runFeatureCommand("AcquisitionStart")
            self.camera.runFeatureCommand("AcquisitionStop")
            self.frame.waitFrameCapture()
            # print('timestamp =', self.frame.timestamp)
            # print('pixel_bytes =', self.frame.pixel_bytes)
            if self.frame.pixel_bytes == 1:
                dt = np.uint8
            elif self.frame.pixel_bytes == 2:
                dt = np.uint16
            else:
                raise NotImplementedError
            img = MetadataArray(
                np.ndarray(
                    buffer=self.frame.getBufferByteData(),
                    dtype=dt,
                    shape=(self.frame.height, self.frame.width),
                ).copy(),
                metadata={"timestamp": self.frame.timestamp * 1e-7},
            )

        finally:
            self.camera.endCapture()
            self.camera.revokeAllFrames()

        return img
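A minimal usage sketch for this one-shot method (not part of the example above): the returned MetadataArray behaves like a numpy array and carries an extra metadata dict. The import path and the context-manager usage are assumptions about how the camera class is exposed.

from pymanip.video.avt import AVT_Camera  # assumed import path

# Grab a single frame and read back its acquisition timestamp.
with AVT_Camera() as cam:                 # assumes the class is usable as a context manager
    img = cam.acquisition_oneshot()
    print("shape:", img.shape, "dtype:", img.dtype)
    print("timestamp (s):", img.metadata["timestamp"])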
Example No. 2
    def acquisition_oneshot(self, timeout=1.0):
        """
        Simple one shot image grabbing.
        Returns an autonomous numpy array
        """

        # Make sure no acquisition is running & flush
        if self.CameraAcquiring.getValue():
            self.AcquisitionStop()
        SDK3.Flush(self.handle)
        self.MetadataEnable.setValue(True)
        pc_clock = time.time()
        timestamp_clock = self.TimestampClock.getValue()
        timestamp_frequency = self.TimestampClockFrequency.getValue()

        # Init buffer & queue
        bufSize = self.ImageSizeBytes.getValue()
        buf = np.empty(bufSize, "uint8")
        SDK3.QueueBuffer(self.handle,
                         buf.ctypes.data_as(SDK3.POINTER(SDK3.AT_U8)),
                         buf.nbytes)

        # Start acquisition
        self.AcquisitionStart()
        print("Start acquisition at framerate:", self.FrameRate.getValue())

        try:
            # Wait for buffer
            exposure_ms = self.ExposureTime.getValue() * 1000
            framerate_ms = 1000 / self.FrameRate.getValue()
            timeout_ms = int(max((2 * exposure_ms, 2 * framerate_ms, 1000)))

            pData, lData = SDK3.WaitBuffer(self.handle, timeout_ms)

            # Convert buffer into numpy image
            rbuf, cbuf = self.AOIWidth.getValue(), self.AOIHeight.getValue()
            img = np.empty((rbuf, cbuf), np.uint16)
            xs, ys = img.shape[:2]
            a_s = self.AOIStride.getValue()
            dt = self.PixelEncoding.getString()
            ticks = parse_metadata(buf)
            ts = (ticks - timestamp_clock) / timestamp_frequency + pc_clock
            SDK3.ConvertBuffer(
                buf.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8)),
                img.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8)),
                xs,
                ys,
                a_s,
                dt,
                "Mono16",
            )
        finally:
            self.AcquisitionStop()

        return MetadataArray(img.reshape((cbuf, rbuf), order="C"),
                             metadata={"timestamp": ts})
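The PC-time reconstruction above maps camera clock ticks to the host clock as ts = (ticks - timestamp_clock) / timestamp_frequency + pc_clock. A small self-contained sketch of that arithmetic, using made-up values in place of the SDK3 readings:

# Made-up values standing in for the SDK3 readings used above.
pc_clock = 1_700_000_000.0         # time.time() sampled just before acquisition
timestamp_clock = 1_000_000        # TimestampClock.getValue() at the same moment
timestamp_frequency = 100_000_000  # TimestampClockFrequency.getValue(), in ticks per second

ticks = 1_051_000_000              # tick count parsed from the frame metadata
ts = (ticks - timestamp_clock) / timestamp_frequency + pc_clock
print(ts - pc_clock)               # 10.5: the frame arrived 10.5 s after pc_clock was sampled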
Example No. 3
    async def acquisition_async(
        self,
        num=np.inf,
        timeout=None,
        raw=False,
        initialising_cams=None,
        raise_on_timeout=True,
    ):
        """Concrete implementation of :meth:`pymanip.video.Camera.acquisition_async` for the Ximea camera.

        timeout in milliseconds.
        """
        loop = asyncio.get_event_loop()
        if timeout is None:
            timeout = max((5000, 5 * self.get_exposure_time() * 1000))
        img = xiapi.Image()
        self.cam.start_acquisition()
        try:
            count = 0
            while count < num:
                if (count == 0 and initialising_cams is not None
                        and self in initialising_cams):
                    initialising_cams.remove(self)
                try:
                    await self.get_image(loop, img, timeout)
                except CameraTimeout:
                    if raise_on_timeout:
                        raise
                    else:
                        stop_signal = yield None
                        if stop_signal:
                            break
                        else:
                            continue
                stop_signal = yield MetadataArray(
                    img.get_image_data_numpy(),  # no copy
                    metadata={
                        "counter": count,
                        "timestamp": img.tsSec + img.tsUSec * 1e-6,
                    },
                )
                count += 1
                if stop_signal:
                    break
        finally:
            self.cam.stop_acquisition()
        if stop_signal:
            yield True
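These acquisition_async methods are asynchronous generators: they yield MetadataArray frames (or None after a swallowed timeout) and receive a stop signal through the yield expression, acknowledging it with a final True. The consumer below is a sketch of one way a caller might drive that protocol with asend; it is not part of pymanip, and the camera object is assumed to be already opened.

import asyncio


async def collect_frames(cam, n=10):
    """Collect n frames from cam.acquisition_async(), then ask it to stop."""
    gen = cam.acquisition_async(raise_on_timeout=False)
    frames = []
    img = await gen.asend(None)            # prime the generator: first frame, or None on timeout
    while True:
        if img is not None:
            frames.append(img)
        stop = len(frames) >= n
        try:
            img = await gen.asend(stop)    # the value sent here becomes stop_signal in the generator
        except StopAsyncIteration:
            break
        if img is True:                    # the generator acknowledges the stop signal with True
            break
    await gen.aclose()
    return frames

# e.g. frames = asyncio.run(collect_frames(cam, n=10)) with an already-opened camera object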
Example No. 4
    async def acquisition_async(
        self,
        num=np.inf,
        timeout=1000,
        raw=False,
        framerate=None,
        external_trigger=False,
        initialising_cams=None,
        raise_on_timeout=True,
    ):
        """Concrete implementation of :meth:`pymanip.video.Camera.acquisition_async` for the AVT camera.
        """
        self.camera.AcquisitionMode = "Continuous"
        if framerate is not None:
            # Not usable if HighSNRIImages > 0, or if external triggering or
            # IIDCPacketSizeAuto is active
            self.camera.AcquisitionFrameRate = framerate
        if external_trigger:
            self.camera.TriggerMode = "On"
            self.camera.TriggerSource = "InputLines"
        self.frame = self.camera.getFrame()
        self.frame.announceFrame()
        self.camera.startCapture()
        self.camera.runFeatureCommand("AcquisitionStart")
        self.buffer_queued = False
        try:
            count = 0
            while count < num:
                if not self.buffer_queued:
                    self.frame.queueFrameCapture()
                    self.buffer_queued = True
                if (count == 0 and initialising_cams is not None
                        and self in initialising_cams):
                    initialising_cams.remove(self)
                errorCode = await self.frame.waitFrameCapture_async(
                    int(timeout))
                if errorCode == -12:
                    if raise_on_timeout:
                        raise CameraTimeout("cam" + str(self.num) + " timeout")
                    else:
                        stop_signal = yield None
                        if stop_signal:
                            break
                        else:
                            continue
                elif errorCode != 0:
                    raise VimbaException(errorCode)
                if self.frame.pixel_bytes == 1:
                    dt = np.uint8
                elif self.frame.pixel_bytes == 2:
                    dt = np.uint16
                else:
                    raise NotImplementedError
                self.buffer_queued = False
                stop_signal = yield MetadataArray(
                    np.ndarray(
                        buffer=self.frame.getBufferByteData(),
                        dtype=dt,
                        shape=(self.frame.height, self.frame.width),
                    ),
                    metadata={
                        "counter": count,
                        "timestamp": self.frame.timestamp * 1e-7,
                    },
                )
                count += 1
                if stop_signal:
                    break

        finally:
            self.camera.runFeatureCommand("AcquisitionStop")
            self.camera.endCapture()
            self.camera.revokeAllFrames()
        if stop_signal:
            yield True
Example No. 5
    async def acquisition_async(
        self,
        num=np.inf,
        timeout=None,
        raw=False,
        initialising_cams=None,
        raise_on_timeout=True,
    ):
        """Concrete implementation of :meth:`pymanip.video.Camera.acquisition_async` for the Andor camera.

        .. todo::
            add support for initialising_cams
        """

        loop = asyncio.get_event_loop()

        # Make sure no acquisition is running & flush
        if self.CameraAcquiring.getValue():
            self.AcquisitionStop()
        SDK3.Flush(self.handle)
        self.buffer_queued = False

        # Set acquisition mode
        self.CycleMode.setString("Continuous")
        # self.FrameRate.setValue(float(framerate))
        self.MetadataEnable.setValue(True)
        pc_clock = time.time()
        timestamp_clock = self.TimestampClock.getValue()
        timestamp_frequency = self.TimestampClockFrequency.getValue()
        print("ts clock =", timestamp_clock)
        print("ts freq =", timestamp_frequency)

        # Init buffers
        bufSize = self.ImageSizeBytes.getValue()
        buf = np.empty(bufSize, "uint8")
        rbuf, cbuf = self.AOIWidth.getValue(), self.AOIHeight.getValue()
        img = np.empty((rbuf, cbuf), np.uint16)
        xs, ys = img.shape[:2]
        a_s = self.AOIStride.getValue()
        dt = self.PixelEncoding.getString()
        print("Original pixel encoding:", dt)

        # Start acquisition
        self.AcquisitionStart()
        print("Started acquisition at framerate:", self.FrameRate.getValue())
        print("Exposure time is {:.1f} ms".format(self.ExposureTime.getValue() * 1000))
        if timeout is None:
            exposure_ms = self.ExposureTime.getValue() * 1000
            framerate_ms = 1000 / self.FrameRate.getValue()
            timeout_ms = int(max((2 * exposure_ms, 2 * framerate_ms, 1000)))
            timeout = timeout_ms

        try:
            count = 0
            while count < num:
                if not self.buffer_queued:
                    SDK3.QueueBuffer(
                        self.handle,
                        buf.ctypes.data_as(SDK3.POINTER(SDK3.AT_U8)),
                        buf.nbytes,
                    )
                    self.buffer_queued = True
                try:
                    pData, lData = await loop.run_in_executor(
                        None, SDK3.WaitBuffer, self.handle, timeout
                    )
                except Exception:
                    if raise_on_timeout:
                        raise CameraTimeout()
                    else:
                        stop_signal = yield None
                        if stop_signal:
                            break
                        else:
                            continue
                # Convert buffer and yield image
                SDK3.ConvertBuffer(
                    buf.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8)),
                    img.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8)),
                    xs,
                    ys,
                    a_s,
                    dt,
                    "Mono16",
                )
                ticks = parse_metadata(buf)
                ts = (ticks - timestamp_clock) / timestamp_frequency + pc_clock
                # if count == 5:
                #    print('image min max:', np.min(img), np.max(img))
                # if count < 10:
                #    print('FPGA ticks =', ticks)
                #    print('Timestamp =', ts)
                self.buffer_queued = False
                stop_signal = yield MetadataArray(
                    img.reshape((cbuf, rbuf), order="C"),
                    metadata={"counter": count, "timestamp": ts},
                )
                count = count + 1
                if stop_signal:
                    break

        finally:
            self.AcquisitionStop()
        if stop_signal:
            yield True
Example No. 6
    async def acquisition_async(
        self,
        num=np.inf,
        timeout=None,
        raw=False,
        initialising_cams=None,
        raise_on_timeout=True,
    ):
        """
        Multiple image acquisition
        yields a shared memory numpy array valid only
        before generator object cleanup.

        timeout in milliseconds
        """

        loop = asyncio.get_event_loop()

        if timeout is None:
            delay, exposure = self.current_delay_exposure_time()
            timeout = int(max((2000 * exposure, 1000)))

        # Arm camera
        if pf.PCO_GetRecordingState(self.handle):
            pf.PCO_SetRecordingState(self.handle, False)
        pf.PCO_ArmCamera(self.handle)
        warn, err, status = self.health_status()
        if err != 0:
            raise RuntimeError("Camera has error status!")
        XResAct, YResAct, XResMax, YResMax = pf.PCO_GetSizes(self.handle)

        with PCO_Buffer(self.handle, XResAct, YResAct) as buf1, PCO_Buffer(
                self.handle, XResAct, YResAct) as buf2, PCO_Buffer(
                    self.handle, XResAct,
                    YResAct) as buf3, PCO_Buffer(self.handle, XResAct,
                                                 YResAct) as buf4:

            buffers = (buf1, buf2, buf3, buf4)
            try:
                pf.PCO_SetImageParameters(
                    self.handle,
                    XResAct,
                    YResAct,
                    pf.IMAGEPARAMETERS_READ_WHILE_RECORDING,
                )
                pf.PCO_SetRecordingState(self.handle, True)
                for buffer in buffers:
                    pf.PCO_AddBufferEx(self.handle, 0, 0, buffer.bufNr,
                                       XResAct, YResAct, 16)
                count = 0
                buffer_ring = itertools.cycle(buffers)
                while count < num:
                    if (count == 0 and initialising_cams is not None
                            and self in initialising_cams):
                        initialising_cams.remove(self)

                    waitstat = await loop.run_in_executor(
                        None,
                        win32event.WaitForMultipleObjects,
                        [buffer.event_handle for buffer in buffers],
                        0,
                        timeout,
                    )
                    if waitstat == win32event.WAIT_TIMEOUT:
                        if raise_on_timeout:
                            raise CameraTimeout(f"Timeout ({timeout:})")
                        else:
                            stop_signal = yield None
                            if not stop_signal:
                                continue
                            else:
                                break
                    for ii, buffer in zip(range(4), buffer_ring):
                        waitstat = await loop.run_in_executor(
                            None, win32event.WaitForSingleObject,
                            buffer.event_handle, 0)
                        if waitstat == win32event.WAIT_OBJECT_0:
                            win32event.ResetEvent(buffer.event_handle)
                            statusDLL, statusDrv = pf.PCO_GetBufferStatus(
                                self.handle, buffer.bufNr)
                            if statusDrv != 0:
                                raise RuntimeError(
                                    "buffer {:} error status {:}".format(
                                        buffer.bufNr, statusDrv))
                            if raw:
                                data = {"buffer": buffer.bytes()}
                                if self.timestamp_mode:
                                    counter, dt = PCO_get_binary_timestamp(
                                        buffer.bufPtr[:14])
                                    data["counter"] = counter
                                    data["timestamp"] = dt
                                stop_signal = yield data
                            else:
                                if self.metadata_mode:
                                    metadata = pf.PCO_GetMetaData(
                                        self.handle, buffer.bufNr)
                                    stop_signal = yield MetadataArray(
                                        buffer.as_array(), metadata=metadata)
                                elif self.timestamp_mode:
                                    counter, dt = PCO_get_binary_timestamp(
                                        buffer.bufPtr[:14])
                                    stop_signal = yield MetadataArray(
                                        buffer.as_array(),
                                        metadata={
                                            "counter": counter,
                                            "timestamp": dt
                                        },
                                    )
                                else:
                                    stop_signal = yield buffer.as_array()
                            count += 1
                            pf.PCO_AddBufferEx(self.handle, 0, 0, buffer.bufNr,
                                               XResAct, YResAct, 16)
                        else:
                            break
                        if stop_signal:
                            break
                    if stop_signal:
                        break
            finally:
                pf.PCO_SetRecordingState(self.handle, False)
                pf.PCO_CancelImages(self.handle)
        if stop_signal:
            yield True
Example No. 7
    async def acquisition_async(
        self,
        num=np.inf,
        timeout=1000,
        raw=False,
        initialising_cams=None,
        raise_on_timeout=True,
    ):
        """Concrete implementation
        """
        loop = asyncio.get_event_loop()

        nRet = ueye.is_CaptureVideo(self.hCam, ueye.IS_DONT_WAIT)
        if nRet != ueye.IS_SUCCESS:
            raise RuntimeError("is_CaptureVideo ERROR")

        try:
            nRet = ueye.is_InquireImageMem(
                self.hCam,
                self.pcImageMemory,
                self.MemID,
                self.width,
                self.height,
                self.nBitsPerPixel,
                self.pitch,
            )
            image_info = ueye.UEYEIMAGEINFO()
            if nRet != ueye.IS_SUCCESS:
                raise RuntimeError("is_InquireImageMem ERROR")

            count = 0
            while count < num:
                nRet = ueye.is_EnableEvent(self.hCam, ueye.IS_SET_EVENT_FRAME)
                if nRet != ueye.IS_SUCCESS:
                    raise RuntimeError("is_EnableEvent ERROR")
                nRet = await loop.run_in_executor(None, ueye.is_WaitEvent,
                                                  self.hCam,
                                                  ueye.IS_SET_EVENT_FRAME,
                                                  timeout)
                if nRet == ueye.IS_TIMED_OUT:
                    if raise_on_timeout:
                        raise RuntimeError("Timeout")
                    else:
                        stop_signal = yield None
                        if stop_signal:
                            break
                        else:
                            continue
                elif nRet != ueye.IS_SUCCESS:
                    raise RuntimeError("is_WaitEvent ERROR")
                array = ueye.get_data(
                    self.pcImageMemory,
                    self.width,
                    self.height,
                    self.nBitsPerPixel,
                    self.pitch,
                    copy=False,
                )

                nRet = ueye.is_GetImageInfo(self.hCam, self.MemID, image_info,
                                            ctypes.sizeof(image_info))
                if nRet != ueye.IS_SUCCESS:
                    raise RuntimeError("is_GetImageInfo ERROR")

                count = count + 1
                ts = datetime(
                    image_info.TimestampSystem.wYear.value,
                    image_info.TimestampSystem.wMonth.value,
                    image_info.TimestampSystem.wDay.value,
                    image_info.TimestampSystem.wHour.value,
                    image_info.TimestampSystem.wMinute.value,
                    image_info.TimestampSystem.wSecond.value,
                    image_info.TimestampSystem.wMilliseconds.value * 1000,
                )
                stop_signal = yield MetadataArray(
                    array.reshape((self.height.value, self.width.value)),
                    metadata={
                        "counter": count,
                        "timestamp": ts.timestamp()
                    },
                )
                if stop_signal:
                    break

        finally:
            nRet = ueye.is_StopLiveVideo(self.hCam, ueye.IS_DONT_WAIT)
            if nRet != ueye.IS_SUCCESS:
                raise RuntimeError("is_StopLiveVideo ERROR")
            nRet = ueye.is_DisableEvent(self.hCam, ueye.IS_SET_EVENT_FRAME)
            if nRet != ueye.IS_SUCCESS:
                raise RuntimeError("is_DisableEvent ERROR")
        if stop_signal:
            yield True
Example No. 8
    async def main(
        self,
        keep_in_RAM=False,
        additionnal_trig=0,
        live=False,
        unprocessed=False,
        delay_save=False,
        no_save=False,
    ):
        """Main entry point for acquisition tasks. This asynchronous task can be called
        with :func:`asyncio.run`, or combined with other user-defined tasks.

        :param keep_in_RAM: do not save to disk, but keep images in a list
        :type keep_in_RAM: bool
        :param additionnal_trig: additional number of pulses sent to the camera
        :type additionnal_trig: int
        :param live: toggle live preview
        :type live: bool
        :param unprocessed: do not call the :meth:`process_image` method.
        :type unprocessed: bool
        :param delay_save: postpone saving until after the acquisition has finished
        :type delay_save: bool
        :param no_save: skip saving the images
        :type no_save: bool
        :return: camera_timestamps, camera_counter
        :rtype: :class:`numpy.ndarray`, :class:`numpy.ndarray`
        """
        if self.trigger_gbf is not None:
            with self.trigger_gbf:
                if live or self.nframes < 2 or not self.burst_mode:
                    self.trigger_gbf.configure_square(0.0,
                                                      5.0,
                                                      freq=self.framerate)
                else:
                    self.trigger_gbf.configure_burst(
                        self.framerate, self.nframes + additionnal_trig)
                    self.save_parameter(fps=self.framerate,
                                        num_imgs=self.nframes)

        if delay_save and all([
                hasattr(self.camera_list[cam_no], "fast_acquisition_to_ram")
                for cam_no in range(len(self.camera_list))
        ]):
            if live:
                raise ValueError("delay_save and live are not compatible")
            print("Acquisition in fast mode to RAM")

            # Acquisition to RAM
            total_timeout_s = 5 + (self.nframes / self.framerate)
            acquisition_tasks = [
                self._fast_acquisition_to_ram(cam_no, total_timeout_s)
                for cam_no in range(len(self.camera_list))
            ]
            dt_start = datetime.now()
            dt_end = dt_start + timedelta(seconds=total_timeout_s)
            print("Acquisition start time:",
                  dt_start.strftime("%d-%m-%Y %H:%M:%S"))
            print("              end time:",
                  dt_end.strftime("%d-%m-%Y %H:%M:%S"))
            results = await asyncio.gather(*acquisition_tasks,
                                           self._start_clock())
            self.running = False

            # Convert from lists to queues (no data copy involved)
            for cam_no, (ts_all, count_all,
                         images_all) in zip(range(len(self.camera_list)),
                                            results):
                for ts, count, image in zip(ts_all, count_all, images_all):
                    self.image_queues[cam_no].put(
                        MetadataArray(
                            image,
                            metadata={
                                "counter": count,
                                "timestamp": ts,
                            },
                        ))
        else:
            acquisition_tasks = [
                self._acquire_images(cam_no)
                for cam_no in range(len(self.camera_list))
            ]
            if live:
                save_tasks = [self._live_preview(unprocessed)]
            elif self.output_format == "mp4":
                save_tasks = [self._start_clock()]
                if not delay_save:
                    save_tasks = save_tasks + [
                        self._save_video(cam_no, unprocessed=unprocessed)
                        for cam_no in range(len(self.camera_list))
                    ]

            else:
                if self.trigger_gbf is not None and self.burst_mode:
                    save_tasks = [self._start_clock()]
                else:
                    save_tasks = []
                if not delay_save:
                    save_tasks = save_tasks + [
                        self._save_images(keep_in_RAM, unprocessed)
                    ]

            await self.monitor(*acquisition_tasks,
                               *save_tasks,
                               server_port=None)

        # Post-acquisition save if the save tasks were not included above (fast mode, or regular mode with delay_save)
        if delay_save:
            if self.output_format == "mp4":
                for cam_no in range(len(self.camera_list)):
                    await self._save_video(cam_no, unprocessed=unprocessed)
            else:
                await self._save_images(keep_in_RAM, unprocessed, no_save)

        # Post-acquisition information
        _, camera_timestamps = self.logged_variable("ts")
        _, camera_counter = self.logged_variable("count")

        if isinstance(camera_timestamps[0], str):
            camera_timestamps = np.array([
                datetime.fromisoformat(ts).timestamp()
                for ts in camera_timestamps
            ])

        if camera_timestamps.size > 2:
            dt = camera_timestamps[1:] - camera_timestamps[:-1]
            mean_dt = np.mean(dt)
            mean_fps = 1.0 / mean_dt
            min_dt = np.min(dt)
            max_dt = np.max(dt)
            min_fps = 1.0 / max_dt
            max_fps = 1.0 / min_dt
            print(
                f"fps = {mean_fps:.3f} (between {min_fps:.3f} and {max_fps:.3f})"
            )

        return camera_timestamps, camera_counter
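As the docstring notes, main() is meant to be driven with asyncio.run. A minimal sketch of that entry point, assuming session is an instance of the acquisition class that defines main() above (the surrounding setup and the helper name run_acquisition are not pymanip API):

import asyncio

import numpy as np


def run_acquisition(session):
    """Run the acquisition and summarize frame timing from the returned timestamps."""
    camera_timestamps, camera_counter = asyncio.run(session.main(live=False))
    if camera_timestamps.size > 2:
        dt = np.diff(camera_timestamps)
        print(f"mean fps = {1.0 / dt.mean():.3f}")
    return camera_timestamps, camera_counter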
Example No. 9
    async def acquisition_async(
        self,
        num=np.inf,
        timeout=None,
        raw=False,
        initialising_cams=None,
        raise_on_timeout=True,
    ):
        """Concrete implementation of :meth:`pymanip.video.Camera.acquisition_async` for the Photometrics camera.

        timeout in milliseconds.
        """
        loop = asyncio.get_event_loop()
        if timeout is None:
            timeout = max((5000, 5 * self.get_exposure_time() * 1000))
        try:
            count = 0
            """
            if np.isfinite(num):
                self.cam.start_seq(num_frames=num)
            else:
                self.cam.start_live()
            """
            self.cam.start_live()
            while count < num:
                if (count == 0 and initialising_cams is not None
                        and self in initialising_cams):
                    initialising_cams.remove(self)
                try:
                    frame, fps, frame_count = await self.get_image(
                        loop, timeout / 1000)
                except CameraTimeout:
                    print("Camera timeout")
                    if raise_on_timeout:
                        raise
                    else:
                        stop_signal = yield None
                        if stop_signal:
                            break
                        else:
                            continue
                # According to the documentation, timestampBOF*timestampResN is in nanoseconds,
                # but there is no timestampResN, and it actually seems to be in picoseconds?
                stop_signal = yield MetadataArray(
                    frame["pixel_data"],  # no copy
                    metadata={
                        "counter":
                        frame_count,
                        "timestamp":
                        frame["meta_data"]["frame_header"]["timestampBOF"] /
                        1e12,
                    },
                )
                if count == 0:
                    for k, v in frame["meta_data"]["frame_header"].items():
                        print(k, v)
                    print("roi_headers", frame["meta_data"]["roi_headers"])
                count += 1
                if stop_signal:
                    break
        finally:
            self.cam.finish()
        if stop_signal:
            yield True