# NOTE(review): this def appears to be a stray, module-level duplicate of
# CamFrameGrabberThread.__init__ defined inside the class below (bodies are
# identical). At module level it merely binds the name `__init__` to a function
# that is never called this way — most likely a copy-paste leftover; confirm
# and remove.
def __init__(self):
		QThread.__init__(self)
		self.done = False  # Flag that tells the thread's main loop to stop iterating
		self.dev_selected_name = self.DEFAULT_CAM  # Name of the currently selected capture device
		self.cap = None  # Capture device handle (None = not connected)
		self.qPix = QPixmap(1, 1)  # Pixmap shown in the UI; starts as a 1x1 placeholder
		self.cv = DroneController()  # Computer-vision pipeline
		self.cv.load_color_thresh_settings()
		self.cv.load_blob_detector_settings()
		self.cv_do_color = True  # Whether to run the HSV color-thresholding step
		self.cv_do_blob = True   # Whether to run the blob-detection step
		self.cv_evaluate_px_at = np.array([0, 0])  # (x, y) of a debug pixel whose value is sampled every frame
		self.cv_evaluate_px_value = np.array([0, 0, 0], dtype=np.uint8)  # Last sampled BGR value of that pixel
		self.index = 0  # Index of the most recently grabbed frame
		self.actual_fps = 0  # Exponentially-smoothed estimate of the achieved frame rate
class CamFrameGrabberThread(QThread):
	"""
	This class implements a background task that constantly grabs new frames from the selected camera
	"""

	DEFAULT_CAM = "USB 2.0 Camera"  # Device name selected by default on startup
	DEFAULT_FPS = 60                # Preferred frame rate passed to select_best_frame_mode()
	FPS_ESTIMATION_ALPHA = 0.98     # Exponential-moving-average weight used to smooth the actual-fps estimate
	FRAME_SIZE_STR_SPLITTER = " x " # Separator used in frame-size strings, eg: "640 x 480"

	sig_update_cam_ui = pyqtSignal()        # Signal that's emitted whenever the UI associated to the camera controls needs to be refreshed (eg: new frame size selected -> need to resize window, error grabbing camera frame -> disconnect camera...)
	sig_new_dev_selected = pyqtSignal(str)  # Signal that's emitted when a device is selected (could be the device that was already selected). It's emitted either from the UI when the user changes the selected item in drpInput, or from the code when sig_new_dev is emitted (as a new device is connected or an old one disconnected)
	sig_new_img = pyqtSignal()              # Signal that's emitted every time a new camera frame becomes available
	sig_error = pyqtSignal(str)             # Signal that's emitted every time an error occurred

	def __init__(self):
		"""Initialize thread state, the CV pipeline and the UI-facing defaults."""
		QThread.__init__(self)

		# Thread / capture-device bookkeeping
		self.done = False                          # Set by stop_thread() to end the main loop
		self.dev_selected_name = self.DEFAULT_CAM  # Currently selected capture device name
		self.cap = None                            # Capture handle (None = disconnected)
		self.index = 0                             # Index of the latest grabbed frame
		self.actual_fps = 0                        # Smoothed estimate of the achieved frame rate

		# Image handed to the UI (1x1 placeholder until the first frame arrives)
		self.qPix = QPixmap(1, 1)

		# Computer-vision pipeline: load persisted settings, enable both steps by default
		self.cv = DroneController()
		self.cv.load_color_thresh_settings()
		self.cv.load_blob_detector_settings()
		self.cv_do_color = True
		self.cv_do_blob = True

		# Debug pixel: position sampled every frame, and its last sampled BGR value
		self.cv_evaluate_px_at = np.array([0, 0])
		self.cv_evaluate_px_value = np.array([0, 0, 0], dtype=np.uint8)

	def __del__(self):
		# Block until run() has returned, so the QThread isn't destroyed while still executing
		self.wait()

	@pyqtSlot()
	def stop_thread(self):
		"""Externally request this thread to terminate, waiting up to 5s for a clean exit."""
		self.done = True  # Tell the outer while loop in run() to finish
		# Select a non-existent device so the inner frame-grabbing loop exits after grabbing at most one frame
		self.change_selected_device("EXIT! :P")
		# Clean exit means wait() returned True within 5s AND run() released the capture device
		exited_cleanly = (self.wait(5000) is True) and (self.cap is None)
		if exited_cleanly:
			logging.info("CamFrameGrabberThread exited cleanly :)")
		else:
			logging.warning("Uh-ooohh, CamFrameGrabberThread didn't terminate even after waiting for 5sec. Force quitting :S")
			self.cap = None
			self.terminate()

	@pyqtSlot(str)
	def change_selected_device(self, dev_selected):
		"""Slot: remember the newly selected capture device (may be the one already selected)."""
		if self.dev_selected_name != dev_selected:
			logging.info("New device selected: {}".format(dev_selected))
		self.dev_selected_name = str(dev_selected)

	@pyqtSlot(str, bool)
	def change_cam_frame_size(self, new_frame_size, emit_signal):
		"""Slot: apply a new capture frame size given as a string, eg: "640 x 480"."""
		# Sanity check: capture device may have just been closed, or drpSize may be mid-repopulation
		if self.cap is None or window.drpSize.count() == 0:
			return
		logging.info("Changing frame size to {}".format(new_frame_size))
		dims = (int(d) for d in new_frame_size.split(self.FRAME_SIZE_STR_SPLITTER))
		self.cap.frame_size = tuple(dims)
		if emit_signal:
			# Only when the user clicked the combobox item (not a programmatic change):
			# different frame sizes may offer different fps, so drpFPS may need updating
			self.sig_update_cam_ui.emit()

	@pyqtSlot(str)
	def change_cam_frame_rate(self, new_frame_rate):
		"""Slot: apply a new capture frame rate given as a string, eg: "60 fps"."""
		# Sanity check: capture device may have just been closed, or drpFPS may be mid-repopulation
		if self.cap is None or window.drpFPS.count() == 0:
			return
		logging.info("Changing frame rate to {}".format(new_frame_rate))
		digits_only = "".join(ch for ch in new_frame_rate if ch.isdigit())  # Drop the " fps" suffix, if present
		self.cap.frame_rate = int(digits_only)

	@pyqtSlot(str, int, bool)
	def change_cam_control_setting(self, ctrl_name, new_value, is_bool):
		"""
		Process a request (from the UI) to change a particular camera setting.
		:param ctrl_name: Particular camera setting that wants to be modified (eg: exposure time, white balance...)
		:param new_value: New value desired for the camera setting
		:param is_bool:  True if the UI control is a CheckBox, False if it's a SpinBox (ie: TextBox with up&down arrows)
		"""
		if self.cap is None:  # Sanity check (in case capture device was just closed)
			return
		for c in self.cap.controls:
			if c.display_name != ctrl_name:
				continue
			try:
				if is_bool:
					# CheckBoxes map unchecked -> control's min value and checked -> its max value
					c.set_value(c.min_val if (new_value == Qt.Unchecked) else c.max_val)
				else:
					c.set_value(new_value)
					# NOTE: only SpinBox changes are logged here (matches original behavior)
					logging.info("'{}' changed to {}".format(c.display_name, c.value))
			except Exception:  # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit aren't swallowed
				self.sig_error.emit("Unable to change '{}' property to '{}'! Make sure the value is valid (within bounds, not disabled by other setting like 'Auto' modes, etc).".format(ctrl_name, new_value))
			return  # Matching control found and handled -> done

	@pyqtSlot(bool)
	def set_cv_do_color(self, checked):
		"""Slot: enable/disable the HSV color-thresholding step (and its dependent blob step)."""
		self.cv_do_color = checked
		window.grpBlob.setEnabled(checked)  # Blob detection is only meaningful when color filtering runs
		# Blob detection follows its checkbox, but is forced off while color filtering is off
		self.set_cv_do_blob(window.grpBlob.isChecked() if checked else False)

	@pyqtSlot(bool)
	def set_cv_do_blob(self, checked):
		# Slot: enable/disable the blob-detection step of the CV pipeline
		self.cv_do_blob = checked

	@pyqtSlot(int, str, bool)
	def change_HSV_setting(self, new_value, letter, is_max):
		"""
		Updates the corresponding field (H, S or V variable, min or max) of the HSV color thresholding filter.
		:param new_value: New value to set the corresponding threshold to
		:param letter: either 'H', 'S', or 'V' (case insensitive), depending on which threshold should be modified
		:param is_max: True to change the upper threshold; False to change the lower one
		"""
		# Map the (case-insensitive) channel letter to its array index; anything other than h/s falls back to V
		channel = {'h': 0, 's': 1}.get(letter.lower(), 2)

		# Pick the upper or lower threshold array, then mutate the chosen channel in place
		thresh = self.cv.cv_HSV_thresh_max if is_max else self.cv.cv_HSV_thresh_min
		thresh[channel] = new_value

	@pyqtSlot(int, str, bool)
	def change_blob_setting(self, new_value, letter, is_max):
		"""
		Updates the corresponding setting (area, circularity, convexity or inertia, min or max) of the blob detector.
		:param new_value: If int: New value to set the corresponding threshold to; if bool: activate/deactivate filter
		:param letter: either 'A', 'C', 'V', or 'I' (case insensitive), depending on which setting should be modified
		:param is_max: True to change the upper threshold; False to change the lower one [ignored if new_value is bool]
		"""
		letter = letter.lower()
		# Map the setting letter to the attribute-name fragment; anything other than a/c/v falls back to Inertia
		param = {'a': "Area", 'c': "Circularity", 'v': "Convexity"}.get(letter, "Inertia")

		if isinstance(new_value, bool):
			attr_name = "filterBy{}".format(param)  # eg: filterByArea
		else:
			# eg: minArea / maxCircularity... (inertia attributes carry a "Ratio" suffix: minInertiaRatio)
			attr_name = "{}{}{}".format("max" if is_max else "min", param, "Ratio" if letter == 'i' else "")
		setattr(self.cv.cv_blob_detect_params, attr_name, new_value)

	def run(self):
		"""
		Main thread loop: repeatedly connects to the selected camera, grabs frames,
		runs the CV pipeline on them, and emits signals so the UI can refresh.
		Exits when stop_thread() sets self.done.
		"""
		while not self.done:
			self.sleep(1)  # Outer while loop just waits until a device is selected

			self.cap = UvcCapture(self.dev_selected_name)
			if self.cap is None:  # If we didn't find the desired cam, don't continue
				self.qPix = QPixmap(1, 1)  # Default to a black pixel
				self.cv_evaluate_px_value = np.array([0, 0, 0], dtype=np.uint8)
				logging.info("No compatible cameras found or chosen camera name not available :(")
				self.sig_update_cam_ui.emit()  # Make sure camera-related UI is updated (eg: don't show exposure time control if no camera is selected...)
				continue

			# If we made it here, we successfully connected to the selected camera. Print camera info and select default values
			for c in self.cap.controls:
				logging.info("\t{} = {} {} (def:{}, min:{}, max:{})".format(c.display_name, str(c.value), str(c.unit), str(c.def_val), str(c.min_val), str(c.max_val)))
			self.cap.select_best_frame_mode(self.DEFAULT_FPS)
			logging.info(self.cap.name + " has the following available modes:\n\t" + str([tuple(x) for x in self.cap.sorted_available_modes()]) + "\nSelected mode: " + str(self.cap.frame_mode))
			self.cap.print_info()
			logging.debug("LOADING SETTINGS returned {}".format(self.cap.load_settings(self.cv.CAMERA_SETTINGS_FILE)))
			self.sig_update_cam_ui.emit()  # Update camera-related UI (eg: add controls for exposure time, white balance, etc.)

			# NOTE(review): np.zeros(self.cap.frame_size) creates a float64 array (presumably
			# shaped (w, h)) yet it's passed to QImage as Format_RGB888 — it only pre-sizes
			# qPix before the first real frame, but the dtype/channel mismatch looks
			# suspicious; confirm frame_size ordering and expected channel count.
			img = np.zeros(self.cap.frame_size)  # Resize qPix to fit new image size
			self.qPix = QPixmap.fromImage(QImage(img.data, img.shape[1], img.shape[0], QImage.Format_RGB888))

			self.actual_fps = self.cap.frame_rate  # Seed the fps estimate with the nominal frame rate
			tt = datetime.now()  # Initialize tt (used to estimate actual frame rate) to prevent an error on the first loop iteration
			while self.cap.name == self.dev_selected_name:  # Run an inner while loop to capture frames as long as the selected device is kept constant
				ok = False
				t = datetime.now()
				for a in range(5):  # Retry up to 5 times to grab a frame before giving up on this camera
					try:
						frame = self.cap.get_frame_robust()
						self.index = frame.index
						# logging.debug("DEBUG - Successfully got frame {:d} after {:d} tries".format(frame.index, a+1))
						ok = True
						break
					except uvc.CaptureError as e:
						# NOTE(review): e.message is a Python 2-only attribute — on Python 3 this
						# logging line itself would raise AttributeError; confirm target interpreter
						logging.error('DEBUG - Could not get Frame. Error: "{}". Tried {} time{}.'.format(e.message, a+1, "s" if a > 0 else ""))
				if not ok:
					self.sig_error.emit("Couldn't get camera frame after 5 tries, reconnecting...")
					break  # Exit inner loop (force an iteration on the outer loop) after 5 consecutive failed attempts to grab a frame

				# We successfully grabbed a frame, let's store the value of the pixel cv_evaluate_px_at for debugging purposes
				try:
					self.cv_evaluate_px_value = np.array(frame.bgr[self.cv_evaluate_px_at[1], self.cv_evaluate_px_at[0]])  # Create a new np.array so that the value at the pixel is not cached, but copied (ie: if frame.bgr is modified, cv_evaluate_px_value is not). Also note that images should be indexed [y,x]!
				except:
					# NOTE(review): bare except — an out-of-bounds debug pixel silently resets to (0, 0);
					# consider narrowing to IndexError
					self.cv_evaluate_px_at = np.array([0, 0])

				# Now we can run the CF detection algorithm on it (if grpColor is checked) and modify frame.bgr if we want
				t2 = datetime.now()
				if self.cv_do_color:
					self.cv.detect_cf_in_camera(frame.bgr, self.cv_do_blob)  # Run CF detection (and blob detection if checked)

					# Now overlay the mask over the camera frame
					t3 = datetime.now()
					if self.cv.cf_pos_tracked and self.cv_do_blob:
						# Draw a filled green circle (slightly larger than the detected blob) at the tracked position
						cv2.circle(self.cv.cv_cam_frame, tuple(self.cv.cf_curr_pos[0:2].astype(int)), int(self.cv.cf_curr_pos[2]+5), [0, 255, 0], -1)
					# np.putmask(self.cv.cv_cam_frame[:,:,0], self.cv.cv_filtered_HSV_mask, 255)
					# np.putmask(self.cv.cv_cam_frame[:,:,1], self.cv.cv_filtered_HSV_mask, 0)
					# np.putmask(self.cv.cv_cam_frame[:,:,2], self.cv.cv_filtered_HSV_mask, 255)
					# Paint every masked pixel magenta (BGR [255, 0, 255]) on top of the camera frame
					mask = cv2.cvtColor(self.cv.cv_filtered_HSV_mask, cv2.COLOR_GRAY2BGR).astype(bool)
					np.putmask(self.cv.cv_cam_frame, mask, np.array([255, 0, 255], dtype=np.uint8))
				else:
					t3 = datetime.now()  # For consistency, save current time as t3 (even if we didn't do anything)

				# And finally convert the resulting image to qPix and emit the appropriate signal so the UI can refresh it
				t4 = datetime.now()
				img = cv2.cvtColor(frame.bgr, cv2.COLOR_BGR2RGB)
				self.qPix.convertFromImage(QImage(img.data, img.shape[1], img.shape[0], QImage.Format_RGB888))
				self.sig_new_img.emit()

				t5 = datetime.now()
				# Exponential moving average of the instantaneous fps (1/frame-to-frame interval)
				self.actual_fps = self.FPS_ESTIMATION_ALPHA*self.actual_fps + (1-self.FPS_ESTIMATION_ALPHA)/(t5-tt).total_seconds()
				logging.debug("At t={} picture #{:03d} was taken; deltaTtotal={:6.2f}ms [{:6.2f}ms] (capture_frame->{:6.2f}ms + detect->{:6.2f}ms + blob->{:6.2f}ms + putmask->{:6.2f}ms + toQPixmap->{:6.2f}ms) -> Estimated FPS: {:.2f}".format(t2, frame.index, (t5-tt).total_seconds()*1000, (t5-t).total_seconds()*1000, (t2-t).total_seconds()*1000, (self.cv.t_events[-2]-t2).total_seconds()*1000 if self.cv_do_color else 0, (self.cv.t_events[-1]-self.cv.t_events[-2]).total_seconds()*1000 if self.cv_do_blob else 0, (t4-t3).total_seconds()*1000, (t5-t4).total_seconds()*1000, self.actual_fps))
				tt = datetime.now()

			self.cap = None  # Set cap to None to automatically release & disconnect from the camera