def display_image(self):
    fps = ueye.DOUBLE()
    ueye.is_GetFramesPerSecond(self.hCam, fps)
    # Allow up to five frame periods (in ms) before a wait for a new frame times out.
    # Assumes live capture is already running, so fps is non-zero.
    timeout = int((5 / fps.value) * 1000)
    h_event = None
    if platform.system() == 'Windows':
        h_event = win32event.CreateEvent(None, False, False, None)
        self.event = ueye.HANDLE(int(h_event))
        ueye.is_InitEvent(self.hCam, self.event, self.frame_event_id)
    ueye.is_EnableEvent(self.hCam, self.frame_event_id)
    while True:
        ret = None
        if not self.capturing:
            break
        if platform.system() == 'Windows':
            ret = win32event.WaitForSingleObject(h_event, timeout)
        elif platform.system() == 'Linux':
            ret = ueye.is_WaitEvent(self.hCam, self.frame_event_id, timeout)
        if ret == 0:
            # A new frame arrived: copy it out of the driver memory and display it
            converted_image_data = self.convert_image_data()
            self.image_data_copy = (
                ueye.CHAR * int(self.img_data.width * self.img_data.height * 3))()
            ueye.is_CopyImageMem(
                hCam=self.hCam,
                pcSource=converted_image_data.memory_pointer,
                nID=converted_image_data.memory_id,
                pcDest=self.image_data_copy)
            bytes_per_pixel = 3
            self.image_data_copy = numpy.reshape(
                self.image_data_copy,
                (int(self.img_data.height), int(self.img_data.width), bytes_per_pixel))
            self.image_data_copy = self.image_data_copy.view(numpy.uint8)
            self.pil_image = Image.fromarray(self.image_data_copy)
            self.graphics_scene.clear()
            self.width, self.height = self.pil_image.size
            self.qt_image = ImageQt.ImageQt(self.pil_image)
            self.pix_map = QPixmap.fromImage(self.qt_image)
            self.graphics_scene.addPixmap(self.pix_map)
            self.graphics_view.fitInView(
                QRectF(0, 0, self.width, self.height), Qt.KeepAspectRatio)
            self.graphics_scene.update()
            app.processEvents()
def get_framerate(self):
    """Get the frame rate. Only valid in free-run mode."""
    new_fps = ueye.c_double(self.current_fps)
    self.nRet = ueye.is_GetFramesPerSecond(self.cam, new_fps)
    if self.nRet != ueye.IS_SUCCESS:
        error_log(self.nRet, "is_GetFramesPerSecond")
    print(new_fps.value)
def GetFPS(self):
    """
    Get the camera frame rate in frames per second (float).

    Returns
    -------
    float
        Camera frame rate (frames per second).
    """
    fps = ueye.double()
    self.check_success(ueye.is_GetFramesPerSecond(self.h, fps))
    return fps.value
def get_fps(self):
    """
    Get the current fps.

    Returns
    =======
    fps: number
        Current fps.
    """
    if self.current_fps is not None:
        return self.current_fps
    fps = ueye.c_double()
    check(ueye.is_GetFramesPerSecond(self.h_cam, fps))
    return fps.value
def __init__(self, cam_id, name):
    self._cam = ueye.HIDS(cam_id)
    self._cam_name = name
    self._sInfo = ueye.SENSORINFO()
    self._sFPS = ueye.DOUBLE()
    self._connect()

    # Query additional information about the sensor type used in the camera
    err = ueye.is_GetSensorInfo(self._cam, self._sInfo)
    if err != ueye.IS_SUCCESS:
        raise CameraException(self._cam, 'ueye>close>GetSensorInfo>', err)

    # Reset camera to default settings
    err = ueye.is_ResetToDefault(self._cam)
    if err != ueye.IS_SUCCESS:
        raise CameraException(self._cam, 'ueye>close>ResetToDefault>', err)

    # Set display mode to DIB
    err = ueye.is_SetDisplayMode(self._cam, ueye.IS_SET_DM_DIB)
    if err != ueye.IS_SUCCESS:
        raise CameraException(self._cam, 'ueye>close>SetDisplayMode>', err)

    # Core camera variables
    self._width = ueye.INT(self._sInfo.nMaxWidth.value)
    self._height = ueye.INT(self._sInfo.nMaxHeight.value)
    self._pitch = ueye.INT()
    self._ppc_img_mem = ueye.c_mem_p()
    self._mem_id = ueye.INT()
    self._nBitsPerPixel = ueye.INT()
    self._m_nColorMode = ueye.INT()
    self._bytes_per_pixel = ueye.INT()
    self._video_capture = False
    self._done_saving = True

    # Allocate memory for frames
    self._allocate_memory()

    # Start collection of frames
    self.start_video_capture()

    # Get frames per second
    err = ueye.is_GetFramesPerSecond(self._cam, self._sFPS)
    if err != ueye.IS_SUCCESS:
        raise CameraException(self._cam, 'ueye>close>GetFramesPerSecond>', err)

    # Start new thread to save frames
    threading.Thread(target=self._update).start()
def get_fps(self):
    fps = ueye.double()
    ueye.is_GetFramesPerSecond(self.h_cam, fps)
    return fps.value
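# A minimal usage sketch (not taken from any snippet above), assuming
# `from pyueye import ueye` and that `h_cam` is an already-initialized camera
# handle with image memory allocated and set (as in the allocation snippet
# below). is_GetFramesPerSecond only reports a measured value while the camera
# is capturing in free-run (live) mode.
nRet = ueye.is_CaptureVideo(h_cam, ueye.IS_DONT_WAIT)  # start free-run capture
if nRet != ueye.IS_SUCCESS:
    print("is_CaptureVideo ERROR:\t", nRet)

fps = ueye.c_double()
nRet = ueye.is_GetFramesPerSecond(h_cam, fps)
if nRet == ueye.IS_SUCCESS:
    print("Measured frame rate:\t%.2f fps" % fps.value)
else:
    print("is_GetFramesPerSecond ERROR:\t", nRet)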
# Allocates an image memory for an image having its dimensions defined by
# width and height and its color depth defined by nBitsPerPixel
nRet = ueye.is_AllocImageMem(hCam, width, height, nBitsPerPixel, pcImageMemory, MemID)
if nRet != ueye.IS_SUCCESS:
    print("is_AllocImageMem ERROR")
else:
    print("Mem Allocation OK")

# Makes the specified image memory the active memory
nRet = ueye.is_SetImageMem(hCam, pcImageMemory, MemID)
if nRet != ueye.IS_SUCCESS:
    print("is_SetImageMem ERROR")
else:
    # Set the desired color mode
    nRet = ueye.is_SetColorMode(hCam, m_nColorMode)

# Read back the current frame rate before changing it
nRet = ueye.is_GetFramesPerSecond(hCam, OldFrameRate)
if nRet != ueye.IS_SUCCESS:
    print("is_GetFramesPerSecond ERROR")
else:
    print("FramesPerSecond:\t", OldFrameRate.value)

# Set FRAME RATE (the discarded c_double() would receive the rate actually set)
nRet = ueye.is_SetFrameRate(hCam, myFrameRate, c_double())
if nRet != ueye.IS_SUCCESS:
    print("is_SetFrameRate ERROR:\t", nRet)
else:
    print("FramesPerSecond:\t", myFrameRate)

# Set EXPOSURE TIME
# nRet = is_Exposure(m_hCam, IS_EXPOSURE_CMD_SET_EXPOSURE, (void*)&m_ExposureTime, sizeof(m_ExposureTime));  # IS_EXPOSURE_CMD_SET_EXPOSURE = 12
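# A minimal sketch of the exposure call that appears above only as a
# commented-out C line, assuming `hCam` is the handle used above and that
# 10.0 ms is an arbitrary example exposure time. pyueye's is_Exposure passes
# the parameter by reference, so the driver writes back the value it actually
# applied (clamped to the valid range).
m_ExposureTime = ueye.c_double(10.0)                    # requested exposure time in ms
nRet = ueye.is_Exposure(hCam, ueye.IS_EXPOSURE_CMD_SET_EXPOSURE,
                        m_ExposureTime, 8)              # 8 = sizeof(double)
if nRet != ueye.IS_SUCCESS:
    print("is_Exposure ERROR:\t", nRet)
else:
    print("ExposureTime:\t", m_ExposureTime.value)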
def Movie(self):
    self.Movie_Switch = -1 * self.Movie_Switch
    if self.Movie_Switch == 1:
        self.Movie_Text_Browser.setText("On")
        self.Movie_Text_Browser.setAlignment(QtCore.Qt.AlignCenter)
    elif self.Movie_Switch == -1:
        self.Movie_Text_Browser.setText("Off")
        self.Movie_Text_Browser.setAlignment(QtCore.Qt.AlignCenter)

    while self.Movie_Switch == 1:
        # Query and display the current frame rate and exposure time
        self.fps = ueye.c_double()
        self.nRet = ueye.is_GetFramesPerSecond(self.hcam, self.fps)
        self.FPS_Text_Browser.setText(str("%.2f" % self.fps.value).zfill(5))
        self.FPS_Text_Browser.setAlignment(QtCore.Qt.AlignRight)

        self.exposure = ueye.c_double()
        self.nRet = ueye.is_Exposure(self.hcam,
                                     ueye.IS_EXPOSURE_CMD_GET_EXPOSURE,
                                     self.exposure, 8)
        self.Exp_Text_Browser.setText(str("%.2f" % self.exposure.value).zfill(5))
        self.Exp_Text_Browser.setAlignment(QtCore.Qt.AlignRight)

        # kk = 0
        # while kk <= self.Img_Sum:
        #     kk = kk + 1
        #
        #     if kk == 1:
        #         self.img_raw = (ueye.get_data(self.mem_ptr, self.width, self.height,
        #                                       self.bitspixel, self.lineinc,
        #                                       copy=True)).astype(np.float64)
        #     else:
        #         self.img_raw0 = (ueye.get_data(self.mem_ptr, self.width, self.height,
        #                                        self.bitspixel, self.lineinc,
        #                                        copy=True)).astype(np.float64)
        #         self.img_raw = self.img_raw * (kk - 1) / kk + self.img_raw0 / kk
        #         del self.img_raw0
        #
        #     time.sleep(1 / self.fps)

        # Grab the current frame from the camera memory
        self.img_raw = ueye.get_data(self.mem_ptr, self.width, self.height,
                                     self.bitspixel, self.lineinc, copy=True)

        if self.Diff_Switch == -1:
            self.img = np.reshape(self.img_raw,
                                  (self.height, self.width, 1)).astype(np.uint16)
        elif self.Diff_Switch == 1:
            # Background-subtracted (difference) image, rescaled into the 12-bit range
            self.img0 = np.reshape(self.img_raw,
                                   (self.height, self.width, 1)).astype(np.float64)
            self.img = np.round((self.img0 - self.bg + 4095) / 2).astype(np.uint16)

        if self.Contrast_Switch == 1:
            self.img = np.reshape(self.img, (self.height, self.width))

            # Contrast stretching
            self.pL, self.pH = np.percentile(self.img, (2, 98))
            self.img = exposure.rescale_intensity(self.img,
                                                  in_range=(self.pL, self.pH),
                                                  out_range=(0, 4095))

            # Equalization
            # self.img = img_as_ubyte(exposure.equalize_hist(self.img))

            # Adaptive Equalization
            # self.img = img_as_ubyte(exposure.equalize_adapthist(self.img, clip_limit=0.5))

            self.img = np.reshape(self.img, (self.height, self.width, 1))

        self.FinalImage = self.img

        if self.Rec_Switch == 1:
            if 'vid' in locals():
                vid = np.append(vid, self.FinalImage, axis=2)
                if vid.shape[2] > 10000:
                    vid = np.delete(vid, 0, axis=2)
                self.Time_1 = time.time()
                self.Time = np.append(self.Time, self.Time_1 - self.Time_0)
                self.Time_0 = self.Time_1
            else:
                vid = np.zeros([self.height, self.width])
                vid = self.FinalImage
                self.Time_0 = time.time()
                self.Time = 0

        if 'vid' in locals() and self.Rec_Switch == -1:
            # Select a file path for the recorded video
            filePath, _ = QFileDialog.getSaveFileName(
                self, "Save Video", "", "avi(*.avi);;All Files(*.*) ")
            # If the file path is blank, discard the recording
            if filePath == "":
                del vid
                del self.Time
                # return
            else:
                self.fps = np.round(1 / np.median(self.Time))
                out = cv2.VideoWriter(filePath,
                                      cv2.VideoWriter_fourcc(*'DIVX'),
                                      self.fps,
                                      (self.width, self.height),
                                      isColor=False)
                for i in range(0, vid.shape[2]):
                    img = (np.round(vid[:, :, i] / 4095 * 255) - 1).astype(np.uint8)
                    out.write(img)
                out.release()
                del vid
                del self.Time

        # Downscale the 12-bit frame to an 8-bit QImage for display
        self.img_resize = cv2.resize(self.FinalImage, (0, 0), fx=0.5, fy=0.5)
        self.img_resize = (self.img_resize / 4095 * 255).astype(np.uint8)
        # self.img_resize = QtGui.QImage(self.img_resize, self.img_resize.shape[1],
        #                                self.img_resize.shape[0],
        #                                QtGui.QImage.Format_Indexed8)
        self.img_resize = QtGui.QImage(self.img_resize,
                                       self.img_resize.shape[1],
                                       self.img_resize.shape[0],
                                       QtGui.QImage.Format_Indexed8)
        self.Movie_Frame.setPixmap(QtGui.QPixmap.fromImage(self.img_resize))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break