Example #1
	def run(self):
		"""Camera worker-thread main loop.

		Outer loop: idles until a camera device is selected, then connects to it
		via UvcCapture and configures it (frame mode, saved settings).
		Inner loop: grabs frames as long as the selected device stays the same,
		optionally runs CF/blob detection (if cv_do_color / cv_do_blob are set),
		overlays the detection mask, converts the frame to a QPixmap and emits
		sig_new_img so the UI refreshes. Emits sig_update_cam_ui on (dis)connect
		and sig_error after repeated frame-grab failures.
		"""
		while not self.done:
			self.sleep(1)  # Outer while loop just waits until a device is selected

			self.cap = UvcCapture(self.dev_selected_name, self.dev_selected_vend_id, self.dev_selected_prod_id, self.dev_selected_serial_num)
			if self.cap is None:  # If we didn't find the desired cam, don't continue
				self.qPix = QPixmap(1, 1)  # Default to a black pixel
				self.cv_evaluate_px_value = np.array([0, 0, 0], dtype=np.uint8)
				logging.info("No compatible cameras found or chosen camera name not available :(")
				self.sig_update_cam_ui.emit()  # Make sure camera-related UI is updated (eg: don't show exposure time control if no camera is selected...)
				continue

			# If we made it here, we successfully connected to the selected camera. Print camera info and select default values
			for c in self.cap.controls:
				logging.info("\t{} = {} {} (def:{}, min:{}, max:{})".format(c.display_name, str(c.value), str(c.unit), str(c.def_val), str(c.min_val), str(c.max_val)))
			self.cap.select_best_frame_mode(self.DEFAULT_FPS)
			logging.info(self.cap.name + " has the following available modes:\n\t" + str([tuple(x) for x in self.cap.sorted_available_modes()]) + "\nSelected mode: " + str(self.cap.frame_mode))
			logging.debug("LOADING SETTINGS returned {}".format(self.cap.load_settings(self.cv.CAMERA_SETTINGS_FILE)))
			self.sig_update_cam_ui.emit()  # Update camera-related UI (eg: add controls for exposure time, white balance, etc.)

			# Resize qPix to fit the new image size.
			# BUGFIX: QImage.Format_RGB888 expects a contiguous uint8 (h, w, 3) buffer;
			# the previous np.zeros(frame_size) produced a float64 2-D array with the
			# wrong byte layout. NOTE(review): assumes frame_size is (width, height) — confirm in UvcCapture.
			img = np.zeros((self.cap.frame_size[1], self.cap.frame_size[0], 3), dtype=np.uint8)
			self.qPix = QPixmap.fromImage(QImage(img.data, img.shape[1], img.shape[0], QImage.Format_RGB888))

			self.actual_fps = self.cap.frame_rate
			tt = datetime.now()  # Initialize tt (used to estimate actual frame rate) to prevent an error on the first loop iteration
			while self.cap.name == self.dev_selected_name and self.cap.vend_id == self.dev_selected_vend_id and self.cap.prod_id == self.dev_selected_prod_id and self.cap.serial_num == self.dev_selected_serial_num:  # Run an inner while loop to capture frames as long as the selected device is kept constant
				ok = False
				t = datetime.now()
				for a in range(3):
					try:
						frame = self.cap.get_frame_robust()
						self.index = frame.index
						ok = True
						break
					except Exception as e:
						# BUGFIX: use str(e) instead of e.message (removed in Python 3, deprecated since PEP 352)
						logging.error('DEBUG - Could not get Frame. Error: "{}". Tried {} time{}.'.format(str(e), a+1, "s" if a > 0 else ""))
				if not ok:
					self.sig_error.emit("Couldn't get camera frame after 3 tries, reconnecting...")
					break  # Exit inner loop (force an iteration on the outer loop) after 3 consecutive failed attempts to grab a frame

				# We successfully grabbed a frame, let's store the value of the pixel cv_evaluate_px_at for debugging purposes
				try:
					self.cv_evaluate_px_value = np.array(frame.bgr[self.cv_evaluate_px_at[1], self.cv_evaluate_px_at[0]])  # Create a new np.array so that the value at the pixel is not cached, but copied (ie: if frame.bgr is modified, cv_evaluate_px_value is not). Also note that images should be indexed [y,x]!
				except Exception:  # BUGFIX: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
					self.cv_evaluate_px_at = np.array([0, 0])  # Out-of-bounds/invalid debug pixel -> reset to a safe default

				# Now we can run the CF detection algorithm on it (if grpColor is checked) and modify frame.bgr if we want
				t2 = datetime.now()
				if self.cv_do_color:
					cf_curr_pos = self.cv.detect_cf_in_camera(frame.bgr, self.cv_do_blob)[0]  # Run CF detection (and blob detection if checked)

					# Now overlay the mask over the camera frame
					t3 = datetime.now()
					if cf_curr_pos is not None and self.cv_do_blob:
						cv2.circle(self.cv.cv_cam_frame, tuple(cf_curr_pos[0:2].astype(int)), int(cf_curr_pos[2]+5), [0, 255, 0], -1)
					mask = cv2.cvtColor(self.cv.cv_filtered_HSV_mask, cv2.COLOR_GRAY2BGR).astype(bool)
					np.putmask(self.cv.cv_cam_frame, mask, np.array([255, 0, 255], dtype=np.uint8))  # Paint masked pixels magenta
				else:
					t3 = datetime.now()  # For consistency, save current time as t3 (even if we didn't do anything)

				# And finally convert the resulting image to qPix and emit the appropriate signal so the UI can refresh it
				t4 = datetime.now()
				img = cv2.cvtColor(frame.bgr, cv2.COLOR_BGR2RGB)  # QImage expects RGB byte order
				self.qPix.convertFromImage(QImage(img.data, img.shape[1], img.shape[0], QImage.Format_RGB888))
				self.sig_new_img.emit()

				t5 = datetime.now()
				# Exponential moving average of the measured frame rate
				self.actual_fps = self.FPS_ESTIMATION_ALPHA*self.actual_fps + (1-self.FPS_ESTIMATION_ALPHA)/(t5-tt).total_seconds()
				logging.debug("At t={} picture #{:03d} was taken; deltaTtotal={:6.2f}ms [{:6.2f}ms] (capture_frame->{:6.2f}ms + detect->{:6.2f}ms + blob->{:6.2f}ms + putmask->{:6.2f}ms + toQPixmap->{:6.2f}ms) -> Estimated FPS: {:.2f}".format(t2, frame.index, (t5-tt).total_seconds()*1000, (t5-t).total_seconds()*1000, (t2-t).total_seconds()*1000, (self.cv.t_events[-2]-t2).total_seconds()*1000 if self.cv_do_color else 0, (self.cv.t_events[-1]-self.cv.t_events[-2]).total_seconds()*1000 if self.cv_do_blob else 0, (t4-t3).total_seconds()*1000, (t5-t4).total_seconds()*1000, self.actual_fps))
				tt = datetime.now()

			self.cap = None  # Set cap to None to automatically release & disconnect from the camera