Example no. 1
class Recorder:
    def __init__(self, resize_width=300, resize_height=300):
        self.cam = Camera()

        with Camera() as cam:  # Needed so that the color format can be set
            if 'Bayer' in cam.PixelFormat:
                cam.PixelFormat = "RGB8"
            cam.start()

        self.cam.init()
        self.cam.start()
        self.frame = cv2.cvtColor(cv2.flip(self.cam.get_array(), 0), cv2.COLOR_BGR2RGB)
        self.frame_resized = []
        self.started = False
        self.read_lock = threading.Lock()
        self.resize_width = resize_width
        self.resize_height = resize_height

    def start(self):
        if self.started:
            print('[!] Asynchronous video capturing has already been started.')
            return None
        self.started = True
        self.thread = threading.Thread(target=self.update, args=())
        self.thread.start()
        return self

    def update(self):

        while self.started:
            frame = cv2.cvtColor(cv2.flip(self.cam.get_array(), 0), cv2.COLOR_BGR2RGB)
            frame_cropped = frame[:, 160:160+960]
            with self.read_lock:
                self.frame = frame

                image = cv2.resize(frame_cropped, (self.resize_width, self.resize_height))

                image = image / 127.5
                image = image - 1.

                image = numpy.expand_dims(image, axis=0)

                self.frame_resized = image

    def read(self):
        with self.read_lock:
            #frame = self.frame.copy()
            frame_resized = self.frame_resized.copy()
        return frame_resized

    def stop(self):
        self.started = False
        self.thread.join()

    def __exit__(self, exec_type, exc_value, traceback):
        self.cam.stop()
        self.cam.close()
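
A brief usage sketch, not part of the original example, showing how the Recorder above is typically driven; it assumes the cv2, numpy, threading and simple_pyspin imports the class relies on are available:

import time

rec = Recorder(resize_width=300, resize_height=300)
rec.start()                      # spawn the background capture thread
try:
    time.sleep(0.5)              # give the thread time to produce a first frame
    batch = rec.read()           # (1, 300, 300, 3) array scaled to [-1, 1]
    print(batch.shape)
finally:
    rec.stop()                   # stop the loop and join the thread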
Example no. 2
class FrescoCamera:
    def __init__(self):
        self.cam = Camera()
        self.cam.init()
        self.cam.start()
        self.cam.cam.AcquisitionFrameRateAuto = 'Off'
        self.cam.cam.AcquisitionFrameRateEnabled = True

    def __del__(self):
        # destructor: release the camera when the FrescoCamera object is garbage-collected
        self.cam.stop()
        self.cam.close()

    def get_current_image(self):
        # grab and discard 19 buffered frames so the returned image is the freshest one
        images = [self.cam.get_array() for n in range(20)]
        return images[-1]
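
get_current_image grabs 20 frames and keeps only the last one, which flushes any stale frames buffered by the driver. A minimal usage sketch (assuming the simple_pyspin Camera import is in place):

fc = FrescoCamera()
frame = fc.get_current_image()   # freshest frame after discarding the buffered ones
print(frame.shape, frame.dtype)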
Example no. 3
class Camera(Instrument):
    # ? https://pypi.org/project/simple-pyspin/

    _config = configuration.from_yml(
        r'W:\Test Data Backup\instruments\config\camera.yml')
    display_name = _config.field(str)
    EXPOSURE_TIME_US = _config.field(float)
    TX_WAIT_S = 0.

    def _instrument_cleanup(self) -> None:
        self.interface.close()

    def _instrument_setup(self) -> None:
        # TODO: gain settings? AOI settings? neither are used in labview version
        self.interface = Spinnaker()
        self.interface.init()
        self.interface.PixelFormat = 'RGB8'
        self.interface.ExposureAuto = 'Off'
        self.interface.ExposureMode = 'Timed'
        self.interface.ExposureTime = self.EXPOSURE_TIME_US

    @proxy.exposed
    def capture(self):
        """
        takes 212ms including start and stop
        this camera model is specced as 7.5FPS
        """
        self.interface.start()
        img = self.interface.get_array()
        self.interface.stop()
        return img

    def _instrument_check(self) -> None:
        _ = self.interface.initialized

    def _instrument_debug(self) -> None:
        import cv2
        cv2.imshow('debug image', self.capture())
        cv2.waitKey(0)
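
The capture() docstring quotes roughly 212 ms per frame including start and stop. A quick way to check such a figure with plain simple_pyspin, outside the Instrument framework (a sketch, not part of the original class):

import time
from simple_pyspin import Camera

with Camera() as cam:            # init() on enter, close() on exit
    t0 = time.perf_counter()
    cam.start()
    img = cam.get_array()
    cam.stop()
    print('capture took %.0f ms' % ((time.perf_counter() - t0) * 1000))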
Example no. 4
                plt.ylabel('Intensity (counts)')
                plt.title('Coherent Backscatter Fit')
                fig.canvas.draw()

        fig.canvas.flush_events()

    if 'webcam' in SHOW_CAMERAS:
        _, data = webcam_socket.grab_json_data()
        if data is not None:
            frame = from_png(data['annotated'], color=True)
            cv2.imshow('Webcam', frame)

            print(timestamp, 'webcam')

    if 'plume' in SHOW_CAMERAS:
        plume_image = plume_camera.get_array()

        # Extract plume location
        cx, cy, intensity, _ = fit_image(plume_image)
        height, width = plume_image.shape
        cx = round(cx.n * width / 100)
        cy = round(cy.n * height / 100)
        plume_image = plume_image[max(cy - plume_size, 0):cy + plume_size,
                                  max(cx - plume_size, 0):cx + plume_size]
        cv2.imshow('Plume', plume_image)

        print(timestamp, 'plume')

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
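
from_png is not defined in this excerpt; a minimal sketch of such a helper, assuming data['annotated'] carries a base64-encoded PNG (an assumption, not the original implementation):

import base64
import cv2
import numpy as np

def from_png(encoded, color=True):
    # decode a base64-encoded PNG string into an OpenCV image array
    buf = np.frombuffer(base64.b64decode(encoded), dtype=np.uint8)
    flag = cv2.IMREAD_COLOR if color else cv2.IMREAD_GRAYSCALE
    return cv2.imdecode(buf, flag)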
Example no. 5
class FlirDevice(object):
    '''
    ScopeFoundry-compatible class to run a FLIR camera with the Spinnaker software.
    For the Point Grey Grasshopper, the bit depth is 16-bit or 8-bit, specified in the PixelFormat attribute;
    simple_pyspin is not compatible with 12-bit readout.
    '''
    def __init__(self, debug=False, dummy=False):
        self.debug = debug
        self.dummy = dummy

        if not self.dummy:
            self.cam = Camera()
            self.cam.init()
            self.cam.ExposureAuto = 'Off'
            self.cam.ExposureMode = 'Timed'
            self.cam.GainAuto = 'Off'
            self.cam.AcquisitionFrameRateEnabled = True
            self.cam.AcquisitionFrameRateAuto = 'Off'
            self.cam.AutoFunctionAOIsControl = 'Off'
            self.cam.AcquisitionMode = 'Continuous'
            self.cam.ChunkEnable = False
            self.cam.EventNotification = 'Off'
            self.cam.GammaEnabled = False
            self.cam.PixelFormat = 'Mono16'  # specify here the bit depth
            self.cam.BlackLevel = 1.0
            self.cam.TriggerMode = 'Off'
            self.cam.OnBoardColorProcessEnabled = False

    def set_acquisitionmode(self, mode):
        self.cam.AcquisitionMode = mode

    def get_acquisitionmode(self):
        mode = self.cam.AcquisitionMode
        return mode

    def close(self):
        self.cam.close()

    def set_framenum(self, Nframes):
        if self.cam.AcquisitionMode == 'MultiFrame':
            self.cam.AcquisitionFrameCount = Nframes

    def read_temperature(self):
        resp = self.cam.DeviceTemperature
        return resp

    def get_width(self):
        w = self.cam.Width
        return w

    def get_height(self):
        h = self.cam.Height
        return h

    def acq_start(self):
        self.cam.start()

    def get_nparray(self):
        return self.cam.get_array()

    def acq_stop(self):
        self.cam.stop()

    def get_exposure(self):
        "get the exposure time in ms"
        return self.cam.ExposureTime / 1000

    def set_exposure(self, desired_time):
        "set the exposure time in ms"
        maxexp = self.cam.get_info('ExposureTime')['max']
        minexp = self.cam.get_info('ExposureTime')['min']
        exptime = min(maxexp, max(desired_time * 1000, minexp))
        self.cam.ExposureTime = exptime

    def get_rate(self):
        return self.cam.AcquisitionFrameRate

    def set_rate(self, desired_framerate):
        """ Set the framerate in Hz
            FLIR Grasshopper runs at:
            163.57 fps at 8bit, full frame
            87.08 fps at 12 bit, not supported by simple_pyspin
            82.47 fps at 16bit is used here
        """
        maxfr = self.cam.get_info('AcquisitionFrameRate')['max']
        minfr = self.cam.get_info('AcquisitionFrameRate')['min']
        framerate = max(minfr, min(desired_framerate, maxfr))
        self.cam.AcquisitionFrameRate = framerate

    def get_gain(self):
        return self.cam.Gain

    def set_gain(self, desired_gain):
        "set the gain in dB"
        maxgain = self.cam.get_info('Gain')['max']
        self.cam.Gain = min(desired_gain, maxgain)

    def get_idname(self):
        cam_name = self.cam.DeviceVendorName.strip() + ' ' + self.cam.DeviceModelName.strip()
        return cam_name
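
A short usage sketch for FlirDevice; the exposure, frame-rate and gain values are placeholders, and the acquisition order follows the methods above:

dev = FlirDevice()
dev.set_exposure(5.0)            # ms, clamped to the camera's ExposureTime limits
dev.set_rate(82.0)               # Hz, clamped to the AcquisitionFrameRate limits
dev.set_gain(0.0)                # dB, clamped to the maximum gain
print(dev.get_width(), dev.get_height(), dev.read_temperature())

dev.acq_start()
frame = dev.get_nparray()        # one Mono16 frame as a numpy array
dev.acq_stop()
dev.close()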
Example no. 6
    input = numpy.expand_dims(input, axis=0)
    input = imagenet_utils.preprocess_input(input, mode='tf')
    output = model.predict(input)
    # rescale prediction

    return (int(output[0, 0] * 960 + 160), int(output[0, 1] * 960))
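
Only the tail of predict() appears above; a hypothetical reconstruction of the omitted part, assuming the same 160-pixel crop and 300x300 model input used in Example no. 1 (the actual code may differ):

def predict(frame, model):
    cropped = frame[:, 160:160 + 960]                # assumed: same central crop as Example no. 1
    input = cv2.resize(cropped, (300, 300))          # assumed model input size
    input = numpy.expand_dims(input, axis=0)
    input = imagenet_utils.preprocess_input(input, mode='tf')
    output = model.predict(input)
    # rescale the normalized prediction back to pixel coordinates in the full frame
    return (int(output[0, 0] * 960 + 160), int(output[0, 1] * 960))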


## PROCESSING VIDEO STREAM ########
if __name__ == '__main__':

    model = keras.models.load_model('best.hdf5')

    camera = Camera()
    camera.init()
    if 'Bayer' in camera.PixelFormat:
        camera.PixelFormat = "RGB8"
    camera.start()

    while True:
        frame = camera.get_array()
        frame = cv2.cvtColor(cv2.flip(frame, -1), cv2.COLOR_BGR2RGB)
        predicted = predict(frame, model)
        cv2.circle(frame, predicted, 5, (0, 0, 255), -1)
        cv2.imshow('prediction', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    camera.stop()
    camera.close()  # simple_pyspin cameras are released with close(), not release()
Example no. 7
class Acquisition():
    """
    Class for acquiring images from the camera
    """

    def __init__(self):
        pass

    def initCamera(self):
        """ 
        Detects and initializes the camera
        """

        print('init Camera...')
        self.cam = Camera()
        self.cam.init()  
        self.configCamera()

    def configCamera(self):
        """
        Configures some camera parameters
        """

        print('config camera...')
        self.cam.Width = self.cam.SensorWidth # 1440
        self.cam.Height = self.cam.SensorHeight # 900
        self.cam.PixelFormat = "RGB8"   
        print(self.cam.PixelFormat)

    def getRgbImage(self):
        """
        Gets the RGB image from the camera

        return:

            rgbImage: cv2 image
        """

        self.cam.start()
        frame = self.cam.get_array()
        self.cam.stop()
        self.rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGRA2RGB)
        #self.rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return self.rgbImage

    def captureRgbImage(self):
        """
        Captures the RGB image

        return:

            rgbImageCaptured: cv2 image
        """

        self.rgbImageCaptured = self.rgbImage
        return self.rgbImageCaptured

    def saveRgbImage(self, pathImage):         
        """
        Saves the captured image

        parameter:

                pathImage: path where the image is saved
        """

        cv2.imwrite(pathImage, self.rgbImageCaptured)
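
A brief usage sketch for the Acquisition class; the output path is a placeholder, and the camera is released directly through self.cam since the class does not wrap close():

acq = Acquisition()
acq.initCamera()                 # init + configCamera (full sensor, RGB8)
rgb = acq.getRgbImage()          # start, grab one frame, stop
acq.captureRgbImage()            # latch the current frame for saving
acq.saveRgbImage('capture.png')  # placeholder path
acq.cam.close()                  # release the camera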