Code example #1
    def Set_Background(self):
        #self.bg_raw = ueye.get_data(self.mem_ptr, self.width, self.height, self.bitspixel, self.lineinc, copy=False)
        #time.sleep(0.5)
        #self.bg = np.reshape(self.bg_raw, (self.height, self.width, 1)).astype(np.float64)

        # Average Img_Sum frames into the background image using a running mean.
        kk = 0
        while kk < self.Img_Sum:
            kk += 1

            if kk == 1:
                self.bg = (ueye.get_data(self.mem_ptr,
                                         self.width,
                                         self.height,
                                         self.bitspixel,
                                         self.lineinc,
                                         copy=True)).astype(np.float64)
            else:
                self.bg0 = (ueye.get_data(self.mem_ptr,
                                          self.width,
                                          self.height,
                                          self.bitspixel,
                                          self.lineinc,
                                          copy=True)).astype(np.float64)
                self.bg = self.bg * (kk - 1) / kk + self.bg0 / kk
                del self.bg0

            time.sleep(1 / self.fps)

        self.bg = np.reshape(self.bg,
                             (self.height, self.width, 1)).astype(np.float64)
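The loop in Set_Background() builds the background as a running mean, bg_k = bg_(k-1)*(k-1)/k + frame_k/k. A minimal sketch, with grab_frame() as a hypothetical stand-in for the ueye.get_data() call, showing that this update reproduces the plain arithmetic mean of the captured frames:

import numpy as np

def grab_frame(shape=(4, 6)):
    # Hypothetical frame source standing in for ueye.get_data(); any float64 array works.
    return np.random.randint(0, 256, size=shape).astype(np.float64)

n_frames = 10
frames = [grab_frame() for _ in range(n_frames)]

bg = frames[0].copy()
for k in range(2, n_frames + 1):
    bg = bg * (k - 1) / k + frames[k - 1] / k  # same update rule as in Set_Background()

assert np.allclose(bg, np.mean(frames, axis=0))  # running mean equals the arithmetic mean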
Code example #2
def cameraNewFrame(frame, hCam):
    """Retrieve a new frame from the camera"""
    # poll until a new frame is available, giving up after 100 attempts
    ret = 1
    cnt = 0
    while ret and cnt <= 100:
        cnt += 1
        ret = ueye.is_WaitForNextImage(hCam,
                                       timeOutMS,
                                       mBuff.mem_ptr,
                                       mBuff.mem_id)
        rr = ueye.is_GetActSeqBuf(hCam, buffCurrent.mem_id, buffCurrent.mem_ptr, buffLast.mem_ptr)
    if (not ret):
        logger.debug(f"ret = {ret}, copying data over to numpy array")
        fwidth, fheight = frame.shape[1::-1]

        array = ueye.get_data(mBuff.mem_ptr, fwidth, fheight, bpp, pitch, copy=True)
        arrayrs = np.reshape(array, (len(array) // fwidth, fwidth))
        cwidth, cheight = arrayrs.shape[1::-1]
        ueye.is_UnlockSeqBuf(hCam, mBuff.mem_id, mBuff.mem_ptr)

        # bytes_per_pixel = int(nBitsPerPixel / 8)
        # ...reshape it in an numpy array...
        # Fill existing buffer with new data
        # frame = np.reshape(array,(height, width, bytes_per_pixel))

        # frame[:] = np.reshape(array,(height, width))
        frame[:fheight, :fwidth] = arrayrs[:fheight, :fwidth]

        return ret, frame
    else:
        logger.error("Reading error with new frame ")
        return ret, frame
Code example #3
    def read(self):
        # Continuous image display
        if self.nRet == ueye.IS_SUCCESS:

            # In order to display the image in an OpenCV window we need to...
            # ...extract the data of our image memory
            array = ueye.get_data(self.pcImageMemory,
                                  self.width,
                                  self.height,
                                  self.nBitsPerPixel,
                                  self.pitch,
                                  copy=False)

            # self.bytes_per_pixel = int(self.nBitsPerPixel / 8)

            # ...reshape it into a numpy array...
            frame = np.reshape(
                array,
                (self.height.value, self.width.value, self.bytes_per_pixel))

            # ...resize the image by a half
            # frame = cv2.resize( frame, (0, 0), fx=0.5, fy=0.5 )
            return True, frame
        else:
            return False, None
Code example #4
    def getCurrentFrame(self):
        nRet = ueye.is_FreezeVideo(self.hCam, ueye.IS_WAIT)
        if nRet != ueye.IS_SUCCESS:
            print("is_GetCameraInfo ERROR")
        # Enables the queue mode for existing image memory sequences
        nRet = ueye.is_InquireImageMem(self.hCam, self.pcImageMemory,
                                       self.MemID, self.width, self.height,
                                       self.nBitsPerPixel, self.pitch)

        if nRet != ueye.IS_SUCCESS:
            print("is_InquireImageMem ERROR")

        print("getting image")
        array = ueye.get_data(self.pcImageMemory,
                              self.width,
                              self.height,
                              self.nBitsPerPixel,
                              self.pitch,
                              copy=False)
        frame = np.reshape(
            array, (self.height.value, self.width.value, self.bytes_per_pixel))
        print(frame.shape)

        coadd = np.zeros(frame.shape[0:2]).astype('float')
        #coadd[:,:] = (frame[:,:,1]*255).astype('float')+frame[:,:,0].astype('float')
        coadd[:, :] = frame[:, :, 1].astype('float') * 255 + frame[:, :, 0].astype('float')
        return coadd
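Code examples #4 and #6 both rebuild a wider-than-8-bit pixel value from two bytes of the frame; #6 weights the high byte by 256 while #4 uses 255. A minimal, hedged sketch of the usual byte recombination with a synthetic placeholder frame (casting to uint16 first avoids uint8 overflow):

import numpy as np

frame = np.random.randint(0, 256, size=(4, 6, 2), dtype=np.uint8)  # placeholder 2-bytes-per-pixel frame
low = frame[:, :, 0].astype(np.uint16)
high = frame[:, :, 1].astype(np.uint16)
raw = high * 256 + low  # values up to 1023 for a 10-bit sensor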
Code example #5
    def acquire(self):

        # while(nRet == ueye.IS_SUCCESS):

        # In order to display the image in an OpenCV window we need to extract the
        # data of our image memory, reshape it as a numpy array and define it as a
        # cv2 object.
        # -----------

        array = ueye.get_data(self.pcImageMemory,
                              self.width,
                              self.height,
                              self.nBitsPerPixel,
                              self.pitch,
                              copy=False)

        # bytes_per_pixel = int(nBitsPerPixel / 8)

        frame = np.reshape(
            array, (self.height.value, self.width.value, self.bytes_per_pixel))
        # ...resize the image by a half
        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

        #---------------------------------------------------------------------------------------------------------------------------------------
        #Include image data processing here
        #---------------------------------------------------------------------------------------------------------------------------------------

        frame = qtg.QImage(frame, frame.shape[1], frame.shape[0],
                           qtg.QImage.Format_RGB888)
        frame = qtg.QPixmap.fromImage(frame)
        self.cam_acquiring.emit(frame)
        print(frame)
Code example #6
    def piped_acquisition(self, pipe):
        self.status = 'RUN'
        self.pipe = pipe
        print("Acquisition started!")

        while self.status == 'RUN':

            if ueye.is_WaitEvent(self.cam, ueye.IS_SET_EVENT_FRAME,
                                 5000) == ueye.IS_SUCCESS:
                data = ueye.get_data(self.image_memory, self.width,
                                     self.height, self.bits_per_pixel,
                                     self.pitch, False)
                frame = np.reshape(data, (self.height.value, self.width.value,
                                          self.bytes_per_pixel))
                raw_frame = frame[:, :, 1] * 256 + frame[:, :, 0]  # raw image in 10 bits
                self.pipe.send(raw_frame)

            else:
                self.status = 'IDLE'

        print("Getting out")

        self.__disable_events()
        self.__exit()
Code example #7
    def show_image(self):
        nRet = ueye.is_InitCamera(self.h_cam, None)
        nRet = ueye.is_SetDisplayMode(self.h_cam, ueye.IS_SET_DM_DIB)
        nRet = ueye.is_AOI(self.h_cam, ueye.IS_AOI_IMAGE_GET_AOI, self.rectAOI,
                           ueye.sizeof(self.rectAOI))

        self.width = self.rectAOI.s32Width
        self.height = self.rectAOI.s32Height

        nRet = ueye.is_AllocImageMem(self.h_cam, self.width, self.height,
                                     self.nBitsPerPixel, self.pcImageMemory,
                                     self.MemID)
        nRet = ueye.is_SetImageMem(self.h_cam, self.pcImageMemory, self.MemID)
        nRet = ueye.is_SetColorMode(self.h_cam, self.ColorMode)
        nRet = ueye.is_CaptureVideo(self.h_cam, ueye.IS_DONT_WAIT)
        nRet = ueye.is_InquireImageMem(self.h_cam, self.pcImageMemory,
                                       self.MemID, self.width, self.height,
                                       self.nBitsPerPixel, self.pitch)

        while nRet == ueye.IS_SUCCESS:
            array = ueye.get_data(self.pcImageMemory,
                                  self.width,
                                  self.height,
                                  self.nBitsPerPixel,
                                  self.pitch,
                                  copy=False)
            frame = np.reshape(
                array,
                (self.height.value, self.width.value, self.bytes_per_pixel))
            frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
            size = (self.height, self.width)
            new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
                self.camera_matrix, self.dist_coeff, size, 1, size)
            dst = cv2.undistort(frame, self.camera_matrix, self.dist_coeff,
                                None, new_camera_matrix)
            x, y, w, h = roi
            self.dst = dst[y:y + h, x:x + w]

            self.detect_colors()

            self.extrinsic_calibration()

            cv2.imshow("camera", self.dst)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            elif cv2.waitKey(1) & 0xFF == ord('t'):
                cv2.imwrite("/home/lennart/dorna/camera/images/gps.bmp",
                            self.dst)

            elif cv2.waitKey(100) & 0xFF == ord('l'):
                self.found_container = False
                self.container_world_position.clear()
                print("Behälterposition zurückgesetzt")

        ueye.is_FreeImageMem(self.h_cam, self.pcImageMemory, self.MemID)
        ueye.is_ExitCamera(self.h_cam)
        cv2.destroyAllWindows()
Code example #8
File: find_markers.py Project: igor-ccu/robot-arm
def cap():
    frame = ueye.get_data(mem_ptr,
                          width,
                          height,
                          bitspixel,
                          lineinc,
                          copy=True)
    return calib(np.reshape(frame, (height, width, 3)))
Code example #9
    def get_image(self):
        # Extract data from our image memory...
        array = ueye.get_data(self.pcImageMemory, self.rectAOI.s32Width, self.rectAOI.s32Height,
                              self.nBitsPerPixel, self.pitch, copy=False)

        # ...and reshape it into a numpy array
        frame = np.reshape(array, (self.rectAOI.s32Height.value, self.rectAOI.s32Width.value, self.bytes_per_pixel))

        return frame
Code example #10
File: cueye.py Project: attokdz/ueye_ioc
	def ContinousGrabbing(self):
		while self.grabbing:
			if self.GetNextBuffer():
				return True
			self.LastImage=ueye.get_data(self.LastSeqBuf1, self.imgWidth, self.imgHeight, self.bitsPixel, self.LineInc, True).astype(np.int16)
			if self.bg:
				self.LastImage-=self._bgImage
				self.LastImage=self.LastImage.clip(min=0)
			self.grabbingCB()
Code example #11
 def __init__(self, h_cam, img_buff):
     self.h_cam = h_cam
     self.img_buff = img_buff
     self.mem_info = MemoryInfo(h_cam, img_buff)
     self.color_mode = ueye.is_SetColorMode(h_cam, ueye.IS_GET_COLOR_MODE)
     self.bits_per_pixel = get_bits_per_pixel(self.color_mode)
     self.array = ueye.get_data(self.img_buff.mem_ptr, self.mem_info.width,
                                self.mem_info.height, self.mem_info.bits,
                                self.mem_info.pitch, True)
Code example #12
def main_loop():
    color_trackbars()

    while (CameraApi.nRet == ueye.IS_SUCCESS):

        # In order to display the image in an OpenCV window we need to...
        # ...extract the data of our image memory
        array = ueye.get_data(CameraApi.pcImageMemory,
                              CameraApi.width,
                              CameraApi.height,
                              CameraApi.nBitsPerPixel,
                              CameraApi.pitch,
                              copy=False)

        # ...reshape it into a numpy array...
        frame = np.reshape(array,
                           (CameraApi.height.value, CameraApi.width.value,
                            CameraApi.bytes_per_pixel))

        # ...resize the image by a half
        # frame = cv2.resize(frame,(0,0), fx=0.5, fy=0.5)

        #---------------------------------------------------------------------------------------------------------------------------------------
        #Convert camera feed from BGRA to BGR
        frame_to_bgr = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)

        #Apply a Gaussian blur with a 5x5 kernel to the BGR frame
        frame_to_bgr = cv2.GaussianBlur(frame_to_bgr, (5, 5), 0)

        cv2.imshow("BGR Frame", frame_to_bgr)

        #Convert camera feed from BGR color space to HSV color space
        hsv_frame = cv2.cvtColor(frame_to_bgr, cv2.COLOR_BGR2HSV)

        frame_threshold(frame_to_bgr, hsv_frame)

        #---------------------------------------------------------------------------------------------------------------------------------------

        # Press q if you want to end the loop
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    #---------------------------------------------------------------------------------------------------------------------------------------

    # Releases an image memory that was allocated using is_AllocImageMem() and removes it from the driver management
    ueye.is_FreeImageMem(CameraApi.hCam, CameraApi.pcImageMemory,
                         CameraApi.MemID)

    # Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
    ueye.is_ExitCamera(CameraApi.hCam)

    # Destroys the OpenCV windows
    cv2.destroyAllWindows()

    print()
    print("END")
Code example #13
 def acquire(self):
     """
     Acquire a single frame from the camera.
     
     """
     
     array = ueye.get_data(self.ppcImgMem, self.width, self.height, self.bitspixel, self.pitch, copy=False)
     frame = np.reshape(array,(self.height.value, self.width.value, self.bytesppixel))
     frame = frame[:,:,0]
     copiedFrame = np.copy(frame)
     
     return copiedFrame
Code example #14
 def get_data(self):
     array = ueye.get_data(self.pcImageMemory,
                           self.width,
                           self.height,
                           self.nBitsPerPixel,
                           self.pitch,
                           copy=False)
     frame = np.reshape(
         array, (self.height.value, self.width.value, self.bytes_per_pixel))
     #img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     #frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
     return frame
Code example #15
 def get_data(self):
     array = ueye.get_data(self.pcImageMemory,
                           self.width,
                           self.height,
                           self.nBitsPerPixel,
                           self.pitch,
                           copy=False)
     self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
      # ...reshape it into a numpy array...
     self.data = np.reshape(
         array, (self.height.value, self.width.value, self.bytes_per_pixel))
     return self.data
Code example #16
    def getdata(self, img_buff):

        if(self.mem_info is None) or (self.color_mode is None) or (self.bits_per_pixel is None):
            self.mem_info = MemoryInfo(self.h_cam, img_buff)
            self.color_mode = ueye.is_SetColorMode(self.h_cam, ueye.IS_GET_COLOR_MODE)
            self.bits_per_pixel = get_bits_per_pixel(self.color_mode)

        self.array = ueye.get_data(self.img_buff.mem_ptrlast,
                                   self.mem_info.width,
                                   self.mem_info.height,
                                   self.mem_info.bits,
                                   self.mem_info.pitch,
                                   True)
Code example #17
 def get_video_frame(self):
     array = ueye.get_data(
         self._ppc_img_mem,
         self._width,
         self._height,
         self._nBitsPerPixel,
         self._pitch,
         copy=False,
     )
     frame = np.reshape(
         array,
         (int(self._height), int(self._width), int(self._bytes_per_pixel)))
     return frame
Code example #18
    def get_next_image(self):
        # In order to display the image in an OpenCV window we need to...
        # ...extract the data of our image memory
        array = ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)

        # bytes_per_pixel = int(nBitsPerPixel / 8)        

        # ...reshape it into a numpy array...
        frame = np.reshape(array,(self.height.value, self.width.value, self.bytes_per_pixel))

        # ...resize the image by a half
        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)  # TODO maybe remove

        return frame
Code example #19
File: idscam.py Project: featureNull/pscout_desktop
 def grab(self):
     time.sleep(1 / self.framerate)
     array = ueye.get_data(self.membuf,
                           self.imgwidth,
                           self.imgheight,
                           bits_per_pixel,
                           self.pitch,
                           copy=False)
     bytes_per_pixel = int(bits_per_pixel / 8)
     frame = np.reshape(array,
                        (self.imgheight, self.imgwidth, bytes_per_pixel))
     # this is bgra so convert it in qt compatible rgb
     b, g, r, a = cv2.split(frame)
     rgbframe = cv2.merge((r, g, b))
     return rgbframe
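The split()/merge() above simply drops the alpha channel; OpenCV can do the same BGRA-to-RGB conversion in a single call. A small sketch with a placeholder frame:

import cv2
import numpy as np

frame = np.zeros((480, 640, 4), dtype=np.uint8)  # placeholder BGRA frame
rgbframe = cv2.cvtColor(frame, cv2.COLOR_BGRA2RGB)  # split/merge minus the alpha channel in one call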
Code example #20
    def run(self):
        photo_dir = "/home/pi/swim4all/Tirocinio/Photo/" + str(self.x.day) + "-" + str(self.x.month) + "-" + str(self.x.year) + "/"
        if not os.path.isdir(photo_dir):
            os.makedirs(photo_dir)
        
        tempo_conteggio = time.time()

        count = 0

        while self.isRunning:
            t_old = time.time()
            img_buffer = ImageBuffer()
            self.cam.nRet = ueye.is_WaitForNextImage(self.cam.cam, self.timeout, img_buffer.mem_ptr, img_buffer.mem_id)

            if self.cam.nRet == ueye.IS_SUCCESS:
                mem_info = MemoryInfo(self.cam.cam, img_buffer)
                array = ueye.get_data(img_buffer.mem_ptr, mem_info.width, mem_info.height, mem_info.bits, mem_info.pitch, copy=True)
                self.cam.unlock_seq(img_buffer.mem_id, img_buffer.mem_ptr)
                
                if self.cam.mode_filename == 1:
                    filename = photo_dir + str(self.cam.camID) + "-" + str(time.time()) + ".png"
                else:
                    filename = photo_dir + str(time.time()) + "-" + str(self.cam.camID) + ".png"
                
                self.file_param.pwchFileName = filename
                self.file_param.nFiletype = ueye.IS_IMG_PNG
                self.file_param.ppcImageMem = None
                self.file_param.pnImageId = None
                nRet = ueye.is_ImageFile(self.cam.cam, ueye.IS_IMAGE_FILE_CMD_SAVE, self.file_param, ueye.sizeof(self.file_param))
                if nRet != ueye.IS_SUCCESS:
                    error_log(nRet, "is_ImageFile")
                    if not self.file.closed:
                        self.file.write("FPS: " + "Salvataggio non riuscito" + "\n") 
                else:
                    t = time.time()
                    self.FPS = 1 / (t - t_old)
                    if not self.file.closed:
                        self.file.write("FPS: " + str(self.FPS) + "\n")
            else:
                error_log(self.cam.nRet, "is_WaitForNextImage")
                if not self.file.closed:
                    self.file.write("FPS: " + "frame lost" + "\n")
                
            
            count += 1
            if count == 100:
                tempo_fine_conteggio = time.time()
                print("FPS: ", count / (tempo_fine_conteggio - tempo_conteggio))
                print()
Code example #21
def give_da_stream():

    hCam, sInfo, cInfo, pcImageMemory, MemID, rectAOI, pitch, nBitsPerPixel, channels, m_nColorMode, bytes_per_pixel, height, width = init_camera(
    )

    nRet = ueye.is_CaptureVideo(hCam, ueye.IS_DONT_WAIT)
    nRet = ueye.is_InquireImageMem(hCam, pcImageMemory, MemID, width, height,
                                   nBitsPerPixel, pitch)

    try:

        while (nRet == ueye.IS_SUCCESS):

            # In order to display the image in an OpenCV window we need to...
            # ...extract the data of our image memory
            array = ueye.get_data(pcImageMemory,
                                  width,
                                  height,
                                  nBitsPerPixel,
                                  pitch,
                                  copy=False)

            # bytes_per_pixel = int(nBitsPerPixel / 8)

            # ...reshape it into a numpy array...
            frame = np.reshape(array,
                               (height.value, width.value, bytes_per_pixel))

            # ...resize the image by a half
            # frame = cv2.resize(frame,(0,0),fx=0.5, fy=0.5)

            yield frame
            # ---------------------------------------------------------------------------------------------------------------------------------------
            # Include image data processing here

            # ---------------------------------------------------------------------------------------------------------------------------------------

    finally:
        # Releases an image memory that was allocated using is_AllocImageMem() and removes it from the driver management
        ueye.is_FreeImageMem(hCam, pcImageMemory, MemID)

        # Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
        ueye.is_ExitCamera(hCam)
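A hedged usage sketch for the generator above; it assumes give_da_stream() is importable and a camera is connected:

import cv2

for frame in give_da_stream():
    cv2.imshow("uEye stream", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()

Leaving the loop (or dropping the generator) eventually triggers the generator's finally block, which frees the image memory and closes the camera.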
Code example #22
File: camera.py Project: narn01101110/APE-1
    def get_video_frame(self):
        if not self._video_capture:
            return None

        # Note: pue.IS_SUCCESS is 0, so it cannot serve as a truth value; extract
        # the image data directly once video capture is active.
        array = pue.get_data(
            self._ppc_img_mem,
            self._c_width,
            self._c_height,
            self._c_pixel_bits,
            self._pitch,
            copy=False,
        )
        frame = np.reshape(
            array, (self._height, self._width, self._bytes_per_pixel))
        return frame
Code example #23
    def read(self):
        nRet = ueye.is_WaitEvent(self.hCam, ueye.IS_SET_EVENT_FRAME, 1000)
        if (nRet != ueye.IS_SUCCESS):
            print("pic capture failed")

        #extract image data from memory
        array = ueye.get_data(self.pcImageMemory,
                              self.width,
                              self.height,
                              self.nBitsPerPixel,
                              self.pitch,
                              copy=False)

        # reshape into numpy array
        gray = np.reshape(
            array, (self.height.value, self.width.value, self.bytes_per_pixel))

        return gray
Code example #24
def take_picture(hCam, pcImageMemory, MemID, pitch, nBitsPerPixel,
                 bytes_per_pixel, height, width):

    array = ueye.get_data(pcImageMemory,
                          width,
                          height,
                          nBitsPerPixel,
                          pitch,
                          copy=False)
    frame = np.reshape(array, (height.value, width.value, bytes_per_pixel))

    ueye.is_FreeImageMem(hCam, pcImageMemory, MemID)

    # Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
    ueye.is_ExitCamera(hCam)

    # Destroys the OpenCV windows
    cv2.destroyAllWindows()
    return frame
Code example #25
    def captureVideo(self):

        count = 0
        prevCaptureTime = 0
        imageInfo = ueye.UEYEIMAGEINFO()
        while (True):

            array = ueye.get_data(self.pcImageMemory,
                                  self.width,
                                  self.height,
                                  self.nBitsPerPixel,
                                  self.pitch,
                                  copy=False)
            frame = np.reshape(
                array,
                (self.height.value, self.width.value, self.bytes_per_pixel))

            nRet = ueye.is_GetImageInfo(self.hCam, self.MemID, imageInfo,
                                        ueye.sizeof(imageInfo))
            if nRet != ueye.IS_SUCCESS:
                print("GET IMAGE INFO ERROR")

            captureTime = imageInfo.u64TimestampDevice
            if ((captureTime > prevCaptureTime) and (captureTime != 0)):

                exposureTime = ueye.double()
                retVal = ueye.is_Exposure(self.hCam,
                                          ueye.IS_EXPOSURE_CMD_GET_EXPOSURE,
                                          exposureTime, 8)

                self.timeStampsFile.write(
                    str(count).zfill(5) + " " + str(captureTime - 0) + " " +
                    str(exposureTime.value) + "\n")
                cv2.imwrite("images/" + str(count).zfill(5) + ".jpg", frame)

                count = count + 1
                prevCaptureTime = captureTime - 0
                cv2.imshow("captureVideo", frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cv2.destroyAllWindows()
Code example #26
 def getImageData(self):
     # --- set AOI --- #
     rect_aoi = ueye.IS_RECT()
     hasWorked = ueye.is_AOI(self.cam, ueye.IS_AOI_IMAGE_GET_AOI, rect_aoi,
                             ueye.sizeof(rect_aoi))
     self.check(hasWorked, 'getImageData')
     # ---  --- #
     x = ueye.int()
     y = ueye.int()
     bits = ueye.int()
     pitch = ueye.int()
     self.frame_width = rect_aoi.s32Width.value
     self.frame_height = rect_aoi.s32Height.value
     hasWorked = ueye.is_InquireImageMem(self.cam, self.img_buffer.mem_ptr,
                                         self.img_buffer.mem_id, x, y, bits,
                                         pitch)
     self.check(hasWorked, 'getImageData')
     self.imgdata = ueye.get_data(self.img_buffer.mem_ptr, self.frame_width,
                                  self.frame_height, bits, pitch, True)
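The flat buffer returned by get_data() covers whole memory lines, which can be wider (pitch bytes) than width * bytes_per_pixel when the driver pads each line. A hedged sketch of reshaping with the pitch and cropping the padding, using placeholder values:

import numpy as np

width, height = 640, 480  # placeholder AOI size
bits, pitch = 8, 648      # placeholder: 8-bit mono with 8 padding bytes per line
bytes_per_pixel = bits // 8

buf = np.zeros(height * pitch, dtype=np.uint8)  # stand-in for the get_data() result
rows = np.reshape(buf, (height, pitch))         # one row per image line, pitch bytes each
frame = rows[:, :width * bytes_per_pixel]       # drop the line padding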
Code example #27
    def measure(self):
        print('Creating figure')
        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_subplot(111)
        ax.set_title('colorMap')
        ax.set_aspect('equal')
        plt.imshow(np.zeros((10, 10)), cmap='viridis')
        plt.show()
        if (self.nRet != ueye.IS_SUCCESS):
            return
        # Grab and display a couple of frames
        for i in range(2):
            print('Starting loop')
            # In order to display the image in an OpenCV window we need to...
            # ...extract the data of our image memory
            array = ueye.get_data(self.ppcImgMem,
                                  self.width,
                                  self.height,
                                  self.bitspixel,
                                  self.pitch,
                                  copy=False)
            # bytes_per_pixel = int(nBitsPerPixel / 8)

            # ...reshape it into a numpy array...
            frame = np.reshape(
                array, (self.height.value, self.width.value, self.bytesppixel))
            plt.imshow(frame[:, :, 0], cmap='viridis')
            # ...resize the image by a half
            #frame = cv2.resize(frame,(0,0),fx=0.5, fy=0.5)

            #-----------------------------------------------------------------------------------------------------------
            #Include image data processing here

            #------------------------------------------------------------------------------------------------------------

            #...and finally display it
            #cv2.imshow("SimpleLive_Python_uEye_OpenCV", frame)

            # Press q if you want to end the loop
            #if cv2.waitKey(1) & 0xFF == ord('q'):
            #    break
            plt.show()
            time.sleep(1)
Code example #28
File: ids.py Project: jsalort/pymanip
    def acquisition_oneshot(self, timeout_ms=1000):
        nRet = ueye.is_EnableEvent(self.hCam, ueye.IS_SET_EVENT_FRAME)
        if nRet != ueye.IS_SUCCESS:
            raise RuntimeError("is_EnableEvent ERROR")

        nRet = ueye.is_FreezeVideo(self.hCam, ueye.IS_DONT_WAIT)
        if nRet != ueye.IS_SUCCESS:
            raise RuntimeError("is_CaptureVideo ERROR")
        nRet = ueye.is_WaitEvent(self.hCam, ueye.IS_SET_EVENT_FRAME,
                                 timeout_ms)
        if nRet != ueye.IS_SUCCESS:
            raise RuntimeError("is_WaitEvent ERROR")

        nRet = ueye.is_InquireImageMem(
            self.hCam,
            self.pcImageMemory,
            self.MemID,
            self.width,
            self.height,
            self.nBitsPerPixel,
            self.pitch,
        )
        if nRet != ueye.IS_SUCCESS:
            raise RuntimeError("is_InquireImageMem ERROR")
        array = ueye.get_data(
            self.pcImageMemory,
            self.width,
            self.height,
            self.nBitsPerPixel,
            self.pitch,
            copy=True,
        )

        nRet = ueye.is_DisableEvent(self.hCam, ueye.IS_SET_EVENT_FRAME)
        if nRet != ueye.IS_SUCCESS:
            raise RuntimeError("is_DisableEvent ERROR")

        return array.reshape((self.height.value, self.width.value))
Code example #29
    def piped_acquisition(self, pipe):
        self.status = 'IDLE'
        self.pipe = pipe
        t = threading.Thread(target=self.control_pipe)
        t.start()

        print("Acquisition started!")

        while True:
            if self.status == 'RUN':
                if ueye.is_WaitEvent(self.cam, ueye.IS_SET_EVENT_FRAME, 5000) == ueye.IS_SUCCESS:
                    data = ueye.get_data(self.image_memory, self.width, self.height, self.bits_per_pixel, self.pitch,
                                         False)
                    frame = np.reshape(data, (self.height.value, self.width.value, self.bytes_per_pixel))
                    # self.pipe.send(frame)  # send raw image
                    # raw_frame = frame[:, :, 1] * 256 + frame[:, :, 0]  # Image raw in 10bits
                    # self.pipe.send(raw_frame)
                    cube = hypercube(frame, 426, 339, 3, 3)
                    self.pipe.send(cube)  # send MS image
            elif self.status == 'IDLE':
                time.sleep(0.25)
            else:
                break
Code example #30
thing=ueye.is_Exposure(hCam,ueye.IS_EXPOSURE_CMD_SET_EXPOSURE, a,b )
thing=ueye.is_Exposure(hCam,ueye.IS_EXPOSURE_CMD_GET_EXPOSURE, c,b )
print(c)
gain=ueye.UINT(80)
thingy=ueye.is_SetHardwareGain(hCam,gain, ueye.IS_IGNORE_PARAMETER, ueye.IS_IGNORE_PARAMETER, ueye.IS_IGNORE_PARAMETER)
#---------------------------------------------------------------------------------------------------------------------------------------
nRet = ueye.is_FreezeVideo(hCam, ueye.IS_WAIT)
Iinfo=ueye.UEYEIMAGEINFO()
n=1

while(nRet == ueye.IS_SUCCESS):

    
    # In order to display the image in an OpenCV window we need to...
    # ...extract the data of our image memory
    array = ueye.get_data(pcImageMemory, width, height, nBitsPerPixel, pitch, copy=False)
    

    # bytes_per_pixel = int(nBitsPerPixel / 8)

    # ...reshape it into a numpy array...
    frame = np.reshape(array,(height.value, width.value, bytes_per_pixel))

    # ...resize the image by a half
    frame2 = cv2.resize(frame,(0,0),fx=0.3, fy=0.3)
    
#---------------------------------------------------------------------------------------------------------------------------------------
    #Include image data processing here

#---------------------------------------------------------------------------------------------------------------------------------------
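Taken together, the examples all follow the same sequence: initialise the camera, allocate and activate an image memory, capture, query the memory layout, then call get_data() and np.reshape() per frame. A condensed, hedged sketch of that shared pattern (mono-8 assumed, return codes left unchecked for brevity; check them as in the examples above):

import numpy as np
from pyueye import ueye

hCam = ueye.HIDS(0)
ueye.is_InitCamera(hCam, None)
ueye.is_SetColorMode(hCam, ueye.IS_CM_MONO8)

# Query the current AOI to learn the frame size.
rectAOI = ueye.IS_RECT()
ueye.is_AOI(hCam, ueye.IS_AOI_IMAGE_GET_AOI, rectAOI, ueye.sizeof(rectAOI))
width, height = rectAOI.s32Width, rectAOI.s32Height

# Allocate and activate an image memory, then grab one frame synchronously.
nBitsPerPixel = ueye.INT(8)
pcImageMemory = ueye.c_mem_p()
MemID = ueye.int()
pitch = ueye.INT()
ueye.is_AllocImageMem(hCam, width, height, nBitsPerPixel, pcImageMemory, MemID)
ueye.is_SetImageMem(hCam, pcImageMemory, MemID)
ueye.is_FreezeVideo(hCam, ueye.IS_WAIT)
ueye.is_InquireImageMem(hCam, pcImageMemory, MemID, width, height, nBitsPerPixel, pitch)

# Copy the buffer out and reshape it (assumes pitch == width, i.e. no line padding).
array = ueye.get_data(pcImageMemory, width, height, nBitsPerPixel, pitch, copy=True)
frame = np.reshape(array, (height.value, width.value, 1))

ueye.is_FreeImageMem(hCam, pcImageMemory, MemID)
ueye.is_ExitCamera(hCam)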