def __init__(self, parent=None):
    super(MainWindow, self).__init__(parent)
    self.setupUi(self)
    self.videoOn = False
    self.videoToggleButton.clicked.connect(self.videoToggleUI)
    self.shotButton.clicked.connect(self.singleCaptureUI)
    self.actionOpen_Image.triggered.connect(self.openImage)
    self.actionOpen_Circle_Dictionary.triggered.connect(self.circleDictUpload)
    self.actionOpenData.triggered.connect(self.dataFileOpen)
    self.actionExit.triggered.connect(self.exitProgram)
    self.actionGainUp.triggered.connect(self.vidGainUp)
    self.actionGainDown.triggered.connect(self.vidGainDown)
    self.actionon.triggered.connect(self.autoOn)
    self.actionoff.triggered.connect(self.autoOff)
    self.actionNewSlide.triggered.connect(self.startNewSlide)
    self.arrayCount = 1
    self.slideNumber = 1
    self.plotting_widget.setLayout(QVBoxLayout())
    self.im_widget = pg.ImageView(self)
    self.im_widget.ui.histogram.hide()
    self.im_widget.ui.roiBtn.hide()
    self.im_widget.ui.menuBtn.hide()
    self.plotting_widget.layout().addWidget(self.im_widget)
    self.im_widget.show()

    # automatically load the encoded standard image pattern
    patternFile = "/home/pi/Desktop/code/standard_image-batch7-Ebov.json"
    # patternFile = "/home/pi/Desktop/code/standard_image-batch9-farcorners.json"
    self.patternDict = {}
    with open(patternFile) as json_file:
        self.patternDict = json.load(json_file)
    print("Pattern Dictionary Active")
    self.circleDictUploaded = True
    self.autoCircles = False

    # set up video buffer-to-image converter for 8-bit output
    self.vidConverter = pylon.ImageFormatConverter()
    self.vidConverter.OutputPixelFormat = pylon.PixelType_Mono8
    self.vidConverter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    # set up single-capture buffer-to-image converter for 12-bit output
    self.singleConverter = pylon.ImageFormatConverter()
    self.singleConverter.OutputPixelFormat = pylon.PixelType_Mono16
    self.singleConverter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    # set up laser pin control
    self.laser = pinMode(26)
def PylonImageToCVImage_perferences(self):
    """Configure the converter to output OpenCV-compatible BGR images."""
    self.converter = pylon.ImageFormatConverter()
    self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
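# A minimal sketch of how the converter configured above might be used on a grab
# result; the method name and the grabResult argument are hypothetical and only
# illustrate the intended Convert()/GetArray() call pattern.
def PylonImageToCVImage_example(self, grabResult):
    if grabResult.GrabSucceeded():
        frame = self.converter.Convert(grabResult).GetArray()  # BGR numpy array
        return frame
    return None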
def __init__(self, sources='streams.txt', img_size=640):
    self.mode = 'images'
    self.img_size = img_size
    sources = [sources]
    n = len(sources)
    self.imgs = [None] * n
    self.sources = sources

    # converter for OpenCV BGR format
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    for i, s in enumerate(sources):
        # Start a thread to read frames from the video stream
        print('%g/%g: %s... ' % (i + 1, n, s), end='')
        # connect to the first available camera
        camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
        # grab continuously (video) with minimal delay
        camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
        if grabResult.GrabSucceeded():
            self.imgs[i] = converter.Convert(grabResult).GetArray()
        thread = Thread(target=self.update, args=(i, camera), daemon=True)
        thread.start()
        print('')  # newline

    # check for common shapes
    s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
    self.rect = np.unique(s, axis=0).shape[0] == 1  # rectangular inference if all shapes are equal
    if not self.rect:
        print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def __init__(self):
    super(Ui, self).__init__()
    # uic.loadUi('./gui/Logger.ui', self)
    ui_path = os.path.dirname(os.path.realpath(__file__)) + '/gui/Logger.ui'
    uic.loadUi(ui_path, self)
    self.setFixedSize(WIN_WIDTH, WIN_HEIGHT)
    self.show()

    root = tk.Tk()
    root.withdraw()
    self.save_dir = os.getcwd()

    # Signals
    self.dirButton.clicked.connect(self.saveDir)
    self.saveButton.clicked.connect(self.saveImage)
    self.camButton.clicked.connect(self.camSet)

    # Camera
    self.camera = None
    self.isCon = False
    self.isSaving = False
    self.converter = pylon.ImageFormatConverter()
    self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    # Thread
    self.closing = False
    self.thread = Thread(target=self.displayThread)
    self.thread.start()
def __init__(self, parent=None):
    """Initialization.

    - self.connect: connection status
    - self.running: reading (grab loop) status
    """
    # initialize the parent class
    super().__init__(parent)
    # camera state flags
    self.isCatch = False
    self.connect = True
    self.running = False
    # check whether the camera is connected properly
    try:
        # connect to the first available camera
        self.cam = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
        # grab continuously (video) with minimal delay
        self.cam.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        # converter for OpenCV BGR format
        self.converter = pylon.ImageFormatConverter()
        self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
        self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    except Exception:
        self.connect = False
        print('Check pylon IP status.')
def Cam():
    # connect to the first available camera
    camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
    # grab continuously (video) with minimal delay
    camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
    # converter for OpenCV BGR format
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    while camera.IsGrabbing():
        grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
        if grabResult.GrabSucceeded():
            # access the image data
            image = converter.Convert(grabResult)
            img = image.GetArray()
            cv2.namedWindow('Title', cv2.WINDOW_NORMAL)
            cv2.imshow('Title', img)
            k = cv2.waitKey(1)
            if k == 27:  # Esc key
                break
        grabResult.Release()

    # release the resource
    camera.StopGrabbing()
    cv2.destroyAllWindows()
def save_image(self, filename):
    """Save a grabbed image, or the impro function's return value if one is specified.

    Parameters
    ----------
    filename : str
        Filename of the grabbed image

    Returns
    -------
    None
    """
    if self._camera is None or not self._camera.IsOpen():
        raise ValueError("Camera object {} is closed.".format(self._camera))
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    grab_result = self._camera.GrabOne(5000)
    image = converter.Convert(grab_result)
    img = image.GetArray()
    if self._impro_function:
        img = self._impro_function(img)
    cv2.imwrite(filename, img)
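# A hedged usage sketch for save_image(), assuming a hypothetical wrapper class
# that owns self._camera (an opened pylon.InstantCamera) and an optional
# self._impro_function; the class name below is illustrative, not from the source.
# cam = BaslerCameraWrapper()          # hypothetical wrapper exposing save_image()
# cam.save_image("frame_001.png")      # grabs one frame and writes it via cv2.imwrite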
def acquireOneImage(self):
    if self.camType == 'imgSource':
        self.cam0.reset_frame_ready()
        self.cam0.start_live(show_display=False)
        self.cam0.enable_trigger(True)
        if not self.cam0.callback_registered:
            self.cam0.register_frame_ready_callback()
        self.cam0.reset_frame_ready()
        self.itrig = 1
        if self.itrig == 0:
            # if the camera is not in external trigger mode, send a software trigger
            self.cam0.send_trigger()
            # print('trigg')
        self.cam0.wait_til_frame_ready(2000)
        data1 = self.cam0.get_image_data()
        data1 = np.array(data1)  # , dtype=np.double)
        data1.squeeze()
        data = data1[:, :, 0]
        self.dataAlign = np.rot90(data, 1)
        self.cam0.stop_live()

    if self.camType == 'basler':
        self.converter = pylon.ImageFormatConverter()
        data = self.cam0.GrabOne(200000)
        data1 = self.converter.Convert(data)
        data1 = data1.GetArray()  # , dtype=np.double)
        data1.squeeze()
        # data = data1[:, :, 0]
        self.dataAlign = np.rot90(data1, 1)
        self.cam.Display(self.dataAlign)
def buffer2image(buffer):
    # convert a pylon grab buffer into an 8-bit numpy image
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_Mono8
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    img = converter.Convert(buffer)
    image = img.GetArray()
    return image
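# A minimal, self-contained usage sketch for buffer2image(), assuming a single
# Basler camera is attached; the 1000 ms GrabOne timeout is an arbitrary choice
# for this example, not a value from the source.
from pypylon import pylon

def demo_buffer2image():
    camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
    camera.Open()
    grab_result = camera.GrabOne(1000)  # grab a single buffer
    if grab_result.GrabSucceeded():
        frame = buffer2image(grab_result)  # 8-bit grayscale numpy array
        print("Captured frame with shape:", frame.shape)
    grab_result.Release()
    camera.Close()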
def __init__(self):
    """Initialize the camera configuration."""
    try:
        self.camera = pylon.InstantCamera(
            pylon.TlFactory.GetInstance().CreateFirstDevice())
        self.camera.Open()
        self.camera.PixelFormat = "RGB8"
        self.camera.BslColorSpaceMode.SetValue("sRGB")
        self.camera.SensorShutterMode.SetValue("Rolling")
        self.camera.GainAuto.SetValue("Off")
        self.camera.ExposureAuto.SetValue("Off")
        self.camera.BalanceWhiteAuto.SetValue("Off")
        self.camera.BslContrastMode.SetValue("Linear")
        self.camera.MaxNumBuffer = 500
        self.originSetting()
        self.converter = pylon.ImageFormatConverter()
        self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
        self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    except Exception as e:
        print('An exception occurred: ' + str(e))
        # GetDescription() only exists on pylon/genicam exceptions
        if hasattr(e, 'GetDescription'):
            print(e.GetDescription())
        raise
def grapImage(self):
    camera.StopGrabbing()
    camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    grabResult = camera.RetrieveResult(
        5000, pylon.TimeoutHandling_ThrowException)
    if grabResult.GrabSucceeded():
        self.image = converter.Convert(grabResult)
        self.image = self.image.GetArray()
        # self.image = cv2.resize(self.image, (1500, 1051))
        # convert the frame back to RGB so the colors are displayed correctly
        self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        height, width, channel = self.image.shape
        self.image = cv2.resize(self.image,
                                (int(self.var_scaleFactor * width),
                                 int(self.var_scaleFactor * height)),
                                interpolation=cv2.INTER_CUBIC)
        height, width, channel = self.image.shape
        step = channel * width
        # convert the captured frame data into a QImage
        self.showImage = QtGui.QImage(
            self.image.data, self.image.shape[1], self.image.shape[0],
            QtGui.QImage.Format_RGB888)
        # display the QImage in the video display label
        self.var_Ui_DlogAddImage.lab_ShowImage.setPixmap(
            QtGui.QPixmap.fromImage(self.showImage))
    camera.StopGrabbing()
def func_viewImage(self):
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    grabResult = camera.RetrieveResult(
        5000, pylon.TimeoutHandling_ThrowException)
    if grabResult.GrabSucceeded():
        self.image = converter.Convert(grabResult)
        self.image = self.image.GetArray()
        # self.image = cv2.resize(self.image, (1500, 1051))
        # convert the frame back to RGB so the colors are displayed correctly
        self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        height, width, channel = self.image.shape
        self.image = cv2.resize(self.image,
                                (int(self.var_scaleFactor * width),
                                 int(self.var_scaleFactor * height)),
                                interpolation=cv2.INTER_CUBIC)
        self.func_translationImage()
        self.func_rotationImage()
        height, width, channel = self.image.shape
        step = channel * width
        # convert the captured frame data into a QImage
        self.showImage = QtGui.QImage(self.image.data, self.image.shape[1],
                                      self.image.shape[0],
                                      QtGui.QImage.Format_RGB888)
        # display the QImage in the video display label
        self.var_Ui_DlogAddImage.lab_ShowImage.setPixmap(
            QtGui.QPixmap.fromImage(self.showImage))
def init_camera(time_for_capture):
    # connect to the first available camera
    camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
    try:
        camera.RegisterConfiguration(PylonViewer(), pylon.RegistrationMode_Append,
                                     pylon.Cleanup_Delete)
    except Exception as e:
        print("Failed to register PylonViewer configuration:", e)
    camera.MaxNumBuffer = 5000
    # grab continuously (video) with minimal delay
    camera.StartGrabbing(pylon.GrabStrategy_OneByOne)
    converter = pylon.ImageFormatConverter()
    camera.Gamma.SetValue(1.0)
    camera.ExposureTime.SetValue(5739)  # (7840)  # 32670
    camera.BalanceWhiteAuto.SetValue('Off')
    # print(dir(camera))
    camera.AcquisitionFrameRateEnable.SetValue(True)
    camera.AcquisitionFrameRate.SetValue(30)
    # print(camera.AcquisitionFrameRate.GetValue(), camera.ResultingFrameRate.GetValue())
    # converter for OpenCV BGR format
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    return camera, converter
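# A hedged sketch of a grab loop built on the (camera, converter) pair returned by
# init_camera(); the loop body, window name, and the time_for_capture value are
# illustrative, and the pylon/cv2 imports are assumed from the surrounding module.
def demo_init_camera_loop():
    camera, converter = init_camera(time_for_capture=10)
    while camera.IsGrabbing():
        grab_result = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
        if grab_result.GrabSucceeded():
            frame = converter.Convert(grab_result).GetArray()  # BGR numpy array
            cv2.imshow('preview', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        grab_result.Release()
    camera.StopGrabbing()
    cv2.destroyAllWindows()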
def __init__(self):
    # initialize camera: create an instance of the first available device
    self.camera = pylon.InstantCamera(
        pylon.TlFactory.GetInstance().CreateFirstDevice())
    # open camera
    self.camera.Open()
    print("Using device:", self.camera.GetDeviceInfo().GetModelName())

    self.converter = pylon.ImageFormatConverter()
    self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    # set auto-gain and auto-exposure to OFF
    self.camera.ExposureAuto.SetValue("Off")
    self.camera.GainAuto.SetValue("Off")
    self.camera.Gain.GetValue()
    self.camera.ExposureTime.GetValue()
    self.camera.Gamma.GetValue()

    # create an image window
    self.imageWindow = pylon.PylonImageWindow()
    self.imageWindow.Create(1)

    # change save directory
    os.chdir("C:\\Users\\Wyss User\\Pictures\\Basler Test")
    print("Current working directory (save location):", os.getcwd())
    print("Camera initialized.")
def camera_show(self):
    # if both timers are stopped
    if not self.timer_camera_big.isActive() and not self.timer_camera_small.isActive():
        # start timers (timeout interval = default = 0 msec)
        self.timer_camera_big.start()
        self.timer_camera_small.start()
        # connect to the first available camera and load its features
        self.camera = pylon.InstantCamera(
            pylon.TlFactory.GetInstance().CreateFirstDevice())
        self.camera.Open()
        nodeFile = "daA1920-30uc_22901961.pfs"
        pylon.FeaturePersistence.Load(nodeFile, self.camera.GetNodeMap(), True)
        self.camera.Close()
        # grab continuously (video) with minimal delay
        self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        # converter for OpenCV-compatible RGB format
        self.converter_RGB = pylon.ImageFormatConverter()
        self.converter_RGB.OutputPixelFormat = pylon.PixelType_RGB8packed
        self.converter_RGB.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
        self.cap_signal.emit("Capture Done!")
    else:
        self.cap_signal.emit("Camera is already in capture mode!")
def __init__(self, parameters=None):
    self.camera = pylon.InstantCamera(
        pylon.TlFactory.GetInstance().CreateFirstDevice())
    self.info = self.camera.GetDeviceInfo()
    self.camera.MaxNumBuffer = 10  # default is 10
    self.imageWindow = pylon.PylonImageWindow()
    self.imageWindow.Create(1)

    if parameters is None:
        # use defaults
        self.parameters = {
            'output_file': 'output.mp4',
            'width': 160,
            'height': 160,
            'fps': 500.0,
            'exposure_time': 1000.0
        }
    else:
        self.parameters = parameters

    # self.parameters['ffmpeg_param_out'] = {
    #     '-vcodec': 'libx264',
    #     '-preset': 'ultrafast',
    #     '-crf': '28',
    #     '-framerate': str(self.parameters['fps']),
    #     # '-r': str(self.parameters['fps'])
    #     # '-g': str(self.parameters['fps']),
    #     # '-keyint_min': str(self.parameters['fps']),
    #     # '-sc_threshold': '0'
    # }
    # self.parameters['ffmpeg_param_in'] = {
    #     '-r': str(self.parameters['fps'])
    # }
    self.parameters['ffmpeg_param_out'] = {
        '-vcodec': 'libx264',
        '-preset': 'ultrafast',
        '-crf': '8'
        # '-r': str(self.parameters['fps'])
    }
    self.parameters['record'] = True

    file = Path(self.parameters['output_file'])
    self.parameters['output_csv'] = file.parent / (file.stem + "_recording.csv")
    self.data_out = []
    self.counter = 0
    self.first_timestamp = 0  # first timestamp is used for calculating the start point as 0

    # set up converter
    self.converter = pylon.ImageFormatConverter()
    # convert to RGB8 to support skvideo
    self.converter.OutputPixelFormat = pylon.PixelType_RGB8packed
    # convert to BGR8 to support OpenCV
    # self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
def __init__(self):
    t = time.time()
    self.img0 = []
    nodeFile = "NodeMap.pfs"
    self.windowName = 'title'
    self.temp_exp = 100.0
    try:
        # create an instant camera object with the first camera device found
        self.camera = pylon.InstantCamera(
            pylon.TlFactory.GetInstance().CreateFirstDevice())
        self.camera.Open()  # open communication with the camera
        self.Model = self.camera.GetDeviceInfo().GetModelName()
        if self.Model == "acA1920-40uc":
            self.camera.PixelFormat.SetValue('Mono8')
            self.tps_exp_min = 50
            self.pixel_size = 5.86  # microns (square pixels on the Basler cameras)
            self.pixel_max = 255
        elif self.Model == "acA5472-17um":
            self.camera.PixelFormat.SetValue('Mono12')
            self.tps_exp_min = 50
            self.pixel_size = 2.4  # microns (square pixels on the Basler cameras)
            self.pixel_max = 4095
        else:
            print("Camera not recognized")

        self.width = self.camera.Width.GetValue()
        self.height = self.camera.Height.GetValue()
        self.ratio = float(self.width / self.height)
        pylon.FeaturePersistence.Save(nodeFile, self.camera.GetNodeMap())

        # print the model name of the camera
        print("Using device ", self.camera.GetDeviceInfo().GetModelName())
        # print("Exposure time ", self.camera.ExposureTime.GetValue())
        # print("Pixel formats:", self.camera.PixelFormat.Symbolics)

        self.auto_exposure()  # this line HAS TO STAY HERE :')

        # converter for 16-bit mono output
        self.converter = pylon.ImageFormatConverter()
        self.converter.OutputPixelFormat = pylon.PixelType_Mono16
        self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

        # According to their default configuration, the cameras are
        # set up for free-running continuous acquisition.
        # Grab continuously (video) with minimal delay:
        # self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
    except genicam.GenericException as e:
        # error handling
        print("An exception occurred.", e.GetDescription())
        exitCode = 1

    temps = time.time() - t
    print("Camera acquisition time: ", temps)
def __init__(self):
    self._manager_name = "Basler Pylon"
    self._factory = pylon.TlFactory.GetInstance()
    self._enabled_devices = {}
    self._resolution = None
    self._converter = pylon.ImageFormatConverter()
    self._converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self._converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
def open(self):
    self.camera = pylon.InstantCamera(
        pylon.TlFactory.GetInstance().CreateFirstDevice())
    self.camera.Open()
    # converter for OpenCV BGR format
    self.converter = pylon.ImageFormatConverter()
    self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
def __init__(self, config: PylonCameraConfig):
    self._camera = pylon.InstantCamera(
        pylon.TlFactory.GetInstance().CreateFirstDevice())
    self._config = config
    self._image_lock = threading.Lock()
    self.converter = pylon.ImageFormatConverter()
    self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
def __init__(self, parent=None):
    super(ThreadRunAcq, self).__init__(parent)
    self.parent = parent
    self.cam0 = self.parent.cam0
    self.stopRunAcq = False
    self.itrig = self.parent.itrig
    self.converter = pylon.ImageFormatConverter()
def __init__(self, backend, camera_index):
    super().__init__()
    self.backend = backend
    self.camera_index = camera_index
    self.camera = None
    # set up converter to OpenCV BGR format
    self.converter = pylon.ImageFormatConverter()
    self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
def __init__(self, parent):
    super(ThreadOneAcq, self).__init__()
    self.parent = parent
    self.cam0 = parent.cam0
    self.stopRunAcq = False
    self.itrig = parent.itrig
    self.LineTrigger = parent.LineTrigger
    self.converter = pylon.ImageFormatConverter()
def _run_continuous_shot(self, grab_strategy=pylon.GrabStrategy_LatestImageOnly,
                         window_size=None, image_folder='.'):
    self._camera.StopGrabbing()

    # converter for OpenCV BGR format
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    if not self._impro_own_window:
        cv2.namedWindow('camera_image', cv2.WINDOW_NORMAL | cv2.WINDOW_GUI_NORMAL)
        if window_size is not None:
            cv2.resizeWindow('camera_image', window_size[0], window_size[1])

    self._camera.StartGrabbing(grab_strategy)
    try:
        while self._camera.IsGrabbing():
            grab_result = self._camera.RetrieveResult(
                5000, pylon.TimeoutHandling_ThrowException)
            if grab_result.GrabSucceeded():
                # access the image data
                image = converter.Convert(grab_result)
                img = image.GetArray()
                if self._impro_own_window:
                    self._impro_function(img)
                elif self._impro_function is not None:
                    img = self._impro_function(img)
                    if not isinstance(img, np.ndarray):
                        cv2.destroyAllWindows()
                        raise ValueError(
                            "The given impro_function must return a numpy array "
                            "when own_window=False")
                    cv2.imshow('camera_image', img)
                else:
                    cv2.imshow('camera_image', img)
                k = cv2.waitKey(1) & 0xFF
                if k == ord('s') and self._impro_own_window is False:
                    path = os.path.join(
                        image_folder,
                        'BaslerGrabbedImage-'
                        + str(int(datetime.datetime.now().timestamp())) + '.png')
                    cv2.imwrite(path, img)
                    self._interact_action_widgets["StatusLabel"].value = \
                        f"Status: Grabbed image was saved to {path}"
                elif k == ord('q'):
                    break
            grab_result.Release()
    finally:
        cv2.destroyAllWindows()
        self._camera.StopGrabbing()
def func_viewImage(self):
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    grabResult = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
    if grabResult.GrabSucceeded():
        image = converter.Convert(grabResult)
        image = image.GetArray()
        # convert the frame back to RGB so the colors are displayed correctly
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image
def grab_one(self):
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_Mono8
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    frames = []
    for i, cam in enumerate(self._cameras):
        grab_result = cam.GrabOne(100)
        image = converter.Convert(grab_result)
        frames.append(image.GetArray())
    return frames[0], frames[1]
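# A hedged usage sketch for grab_one(), assuming a hypothetical rig object whose
# self._cameras holds exactly two opened pylon.InstantCamera instances (as the
# return statement implies); the variable names are illustrative only.
# left_frame, right_frame = rig.grab_one()
# print(left_frame.shape, right_frame.shape)  # two Mono8 numpy arrays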
def __init__(self, camera, label, parent=None):
    super(pylonWorker, self).__init__(parent)
    self.videoStart = False
    self._camera = camera
    self._label = label
    self.grab_strategy = pylon.GrabStrategy_LatestImageOnly
    # converter for OpenCV BGR format
    self.converter = pylon.ImageFormatConverter()
    self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
def Threshold_viewCam(self):
    global flag_Camera_Open
    flag_Camera_Open = True

    # remember the most recent ROI selection, if one was drawn
    if (self.img_thresold_label.x0 and self.img_thresold_label.x1
            and self.img_thresold_label.y0 and self.img_thresold_label.y1):
        self.x00 = self.img_thresold_label.x0
        self.x10 = self.img_thresold_label.x1
        self.y00 = self.img_thresold_label.y0
        self.y10 = self.img_thresold_label.y1

    # grab one frame and show it as a grayscale preview (identical in both
    # branches of the original, so the grab is done once here)
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
    if grabResult.GrabSucceeded():
        image = converter.Convert(grabResult)
        self.img = image.GetArray()
        self.image_1 = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        self.image_2 = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        height, width = self.image_1.shape
        step = width
        qImg = QImage(self.image_1.data, width, height, step, QImage.Format_Grayscale8)
        self.img_thresold_label.setPixmap(QPixmap.fromImage(qImg))
def converter(self):
    """Set up a converter that outputs RGB8-packed images.

    NOTE: used by __init__

    Returns
    -------
    The converter
    """
    cvt = pylon.ImageFormatConverter()
    cvt.OutputPixelFormat = pylon.PixelType_RGB8packed
    cvt.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    return cvt
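# A minimal sketch of how the converter() helper might be wired up in __init__,
# as the NOTE in the docstring suggests; the attribute name self._converter and
# the grab_result variable are assumptions for illustration.
# self._converter = self.converter()
# frame = self._converter.Convert(grab_result).GetArray()  # RGB numpy array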
def capture_raw(self):
    self.cam.Open()
    self.cam.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
    grabResult = self.cam.RetrieveResult(
        5000, pylon.TimeoutHandling_ThrowException)
    converter = pylon.ImageFormatConverter()
    converter.OutputPixelFormat = pylon.PixelType_BGR8packed
    converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
    image = converter.Convert(grabResult)
    img = image.GetArray()
    self.cam.StopGrabbing()
    self.cam.Close()
    return img