def update(self):
    """Refresh the scene: replace the old map pixmap item and show the view."""
    # Remove the stale item before adding the new one.
    if self.mapitem:
        self.scene.removeItem(self.mapitem)
    pixmap = QPixmap.fromImage(self.map)
    self.mapitem = self.scene.addPixmap(pixmap)
    self.mirror(self.mapitem)
    self.show()
def put_image_into_scene(self):
    """Resize the held image to the view (minus a 2px border) and display it."""
    if not self._image:
        return
    view_size = self._image_view.size()
    target = (view_size.width() - 2, view_size.height() - 2)
    scaled = self._image.resize(target, self.quality)
    pixmap = QPixmap.fromImage(ImageQt(scaled))
    self._scene.clear()
    self._scene.addPixmap(pixmap)
def put_image_into_scene(self):
    """Display the current image in the scene.

    Without an active capture the image is resized to fill the view;
    with a capture, a CaptureImage widget wrapping the raw image is
    embedded in the scene instead.
    """
    if self._image:
        if self.capture is None:
            # BUG FIX: `self.quality` was previously passed *inside* the size
            # tuple (a 3-tuple), which PIL's Image.resize() rejects; it must
            # be the separate resample argument, as in the sibling methods.
            resized_image = self._image.resize(
                (self._image_view.size().width() - 2,
                 self._image_view.size().height() - 2),
                self.quality)
            QtImage = ImageQt(resized_image)
            pixmap = QPixmap.fromImage(QtImage)
            self._scene.clear()
            self._scene.addPixmap(pixmap)
        else:
            # With a capture active, wrap the unscaled image in a clickable
            # CaptureImage widget instead of a plain pixmap.
            QtImage = ImageQt(self._image)
            pixmap = QPixmap.fromImage(QtImage)
            self.label = CaptureImage(
                self._timeline, self._topic, self._image_msg, self.capture)
            self.label.setPixmap(pixmap)
            self._scene.addWidget(self.label)
def put_image_into_scene(self):
    """Fit the held image into the graphics view and show it."""
    if self._image:
        # Keep a 2px allowance for the view frame.
        width = self._image_view.size().width() - 2
        height = self._image_view.size().height() - 2
        qt_image = ImageQt(self._image.resize((width, height), self.quality))
        self._scene.clear()
        self._scene.addPixmap(QPixmap.fromImage(qt_image))
def _update(self):
    """Replace the scene's map pixmap with the current map image and recenter."""
    if self._map_item:
        self._scene.removeItem(self._map_item)
    self._map_item = self._scene.addPixmap(QPixmap.fromImage(self._map))
    self.centerOn(self._map_item)
    self.show()
def _update(self):
    """Redraw the map: drop the stale item, add a fresh pixmap, recenter."""
    stale = self._map_item
    if stale:
        self._scene.removeItem(stale)
    fresh = QPixmap.fromImage(self._map)
    self._map_item = self._scene.addPixmap(fresh)
    self.centerOn(self._map_item)
    self.show()
def _update(self):
    """Refresh the mirrored map pixmap and center the view on it."""
    if self._map_item:
        self._scene.removeItem(self._map_item)
    map_pixmap = QPixmap.fromImage(self._map)
    self._map_item = self._scene.addPixmap(map_pixmap)
    # Everything in the scene must be mirrored to match the map frame.
    self._mirror(self._map_item)
    self.centerOn(self._map_item)
    self.show()
def fome_raw(self, data):
    """Render an incoming raw camera image message into the raw-image label.

    :param data: sensor_msgs/Image to convert and display.
    """
    try:
        cvImage = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        # BUG FIX: previously execution fell through after a failed
        # conversion and crashed with a NameError on the undefined cvImage.
        print(e)
        return
    height, width, byteValue = cvImage.shape
    byteValue = byteValue * width  # bytes per line = channels * width
    cv2.cvtColor(cvImage, cv2.COLOR_BGR2RGB, cvImage)  # in-place BGR -> RGB
    r_image = QImage(cvImage, width, height, byteValue, QImage.Format_RGB888)
    r_image = QPixmap.fromImage(r_image)
    r_image = r_image.scaledToHeight(300)
    self.r_label.setPixmap(r_image)
def msg_to_pixmap(self, msg):
    """Convert a sensor_msgs/Image (RGB or mono) into a 320x240 QPixmap."""
    cv_img = self.bridge.imgmsg_to_cv2(msg)
    shape = cv_img.shape
    h, w = shape[0], shape[1]
    # Pick stride/format from the channel layout.
    if len(shape) == 3 and shape[2] == 3:
        stride, img_format = 3 * w, QImage.Format_RGB888
    else:
        stride, img_format = w, QImage.Format_Grayscale8
    q_img = QImage(cv_img.data, w, h, stride, img_format)
    return QPixmap.fromImage(q_img).scaled(320, 240)
def updateCupMask(self, img):
    """Display a single-channel cup-mask image in the cup graphics view.

    :param img: sensor_msgs/Image containing the mono mask.
    """
    try:
        image = self.__cvBridge.imgmsg_to_cv2(img)  # Convert ROS' sensor_msgs/Image to cv2 image
        # QImage only supports colour images or 1-bit mono (which opencv does
        # not), so replicate the single channel into three.
        image = cv2.merge((image, image, image))
        height, width = image.shape[:2]
        # NOTE(review): no bytesPerLine is passed; this assumes the merged
        # buffer is contiguous with stride 3*width — confirm.
        frame = QImage(image.data, width, height, QImage.Format_RGB888)
        self.__cupImageScene.clear()
        self.__cupImageScene.addPixmap(QPixmap.fromImage(frame))
        self.__cupImageScene.update()
        self._widget.cupGraphicsView.setScene(self.__cupImageScene)
        self._widget.cupGraphicsView.ensureVisible(self.__cupImageScene.sceneRect());
        self._widget.cupGraphicsView.fitInView(self.__cupImageScene.sceneRect(), Qt.KeepAspectRatio);
    except Exception as e:
        rospy.logerr("update updatecupMask: %s", e)
def put_image_into_scene(self):
    """Show the image scaled — aspect ratio preserved — to fit inside the view."""
    if self._image:
        img_w, img_h = self._image.size
        # Largest uniform scale keeping the image within the view,
        # with a 2px border allowance on each axis.
        scale_factor = min(
            float(self._image_view.size().width() - 2) / img_w,
            float(self._image_view.size().height() - 2) / img_h)
        new_size = (int(scale_factor * img_w), int(scale_factor * img_h))
        resized = self._image.resize(new_size, self.quality)
        self._scene.clear()
        self._scene.addPixmap(QPixmap.fromImage(ImageQt(resized)))
def _update(self):
    """Rebuild the mirrored, drag-and-drop-enabled map item."""
    if self._map_item:
        self._scene.removeItem(self._map_item)
    self._map_item = self._scene.addPixmap(QPixmap.fromImage(self._map))
    # Everything must be mirrored to match the map's coordinate frame.
    self._mirror(self._map_item)
    # Allow poses/goals to be dragged onto the map.
    self.add_dragdrop(self._map_item)
    self.centerOn(self._map_item)
    self.show()
def _update(self):
    """Swap in a fresh map pixmap, mirror it, and re-enable drag & drop."""
    previous = self._map_item
    if previous:
        self._scene.removeItem(previous)
    self._map_item = self._scene.addPixmap(QPixmap.fromImage(self._map))
    item = self._map_item
    self._mirror(item)       # everything must be mirrored
    self.add_dragdrop(item)  # restore drag-and-drop onto the map
    self.centerOn(item)
    self.show()
def obj_det_callback(self, data):
    """Show an object-detection frame unless the view is hidden or frozen."""
    if self.view != OBD:
        return
    if self.frozen:
        return
    frame = self.brd.imgmsg_to_cv2(data, 'rgb8')
    frame = cv2.resize(frame, dsize=(1280, 720), interpolation=cv2.INTER_CUBIC)
    rows, cols = frame.shape[0], frame.shape[1]
    q_img = QImage(frame.data, cols, rows, 3 * cols, QImage.Format_RGB888)
    self.wdg_img.setFixedWidth(cols)
    self.wdg_img.setFixedHeight(rows)
    self.wdg_img.setPixmap(QPixmap.fromImage(q_img))
def rgb_msg_to_pixmap(self, msg):
    """Convert an image message to a 320x240 QPixmap.

    The message's encoding field is forced to "bgr8" before conversion so
    that cv_bridge performs the channel swap into RGB order for us.
    """
    msg.encoding = "bgr8"
    cv_img = self.bridge.imgmsg_to_cv2(msg, "rgb8")
    shape = cv_img.shape
    h, w = shape[0], shape[1]
    if len(shape) == 3 and shape[2] == 3:
        stride = 3 * w
        fmt = QImage.Format_RGB888
    else:
        stride = w
        fmt = QImage.Format_Grayscale8
    q_img = QImage(cv_img.data, w, h, stride, fmt)
    return QPixmap.fromImage(q_img).scaled(320, 240)
def updateImage(self, img):
    """Display an incoming HSV image in the light-corrected graphics view.

    Keeps a copy of the raw HSV frame for later pixel lookups, converts it
    to RGB for display, and fits it into the view.

    :param img: sensor_msgs/Image whose payload is actually HSV data.
    """
    try:
        img.encoding = "bgr8"  # no, it's not but cv_bridge can't handle hsv
        image = self.__cvBridge.imgmsg_to_cv2(img)  # Convert ROS' sensor_msgs/Image to cv2 image
        # Keep the untouched HSV frame for later use.
        self.__lastHSVimage = image.copy()
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        height, width = image.shape[:2]
        # NOTE(review): no bytesPerLine argument — assumes a contiguous
        # buffer with stride 3*width; confirm.
        frame = QImage(image.data, width, height, QImage.Format_RGB888)
        self.__colorImageScene.clear()
        self.__colorImageScene.addPixmap(QPixmap.fromImage(frame))
        self.__colorImageScene.update()
        self._widget.lightCorrectedGraphicsView.setScene(self.__colorImageScene)
        self._widget.lightCorrectedGraphicsView.ensureVisible(self.__colorImageScene.sceneRect());
        self._widget.lightCorrectedGraphicsView.fitInView(self.__colorImageScene.sceneRect(), Qt.KeepAspectRatio);
    except Exception as e:
        rospy.logerr("update image: %s", e)
def luv_callback(self, data):
    """Render a LUV-view frame into the image widget."""
    if self.view != LUV:
        return
    frame = self.brd.imgmsg_to_cv2(data, 'rgb8')
    frame = cv2.resize(frame, dsize=(1280, 720), interpolation=cv2.INTER_CUBIC)
    rows, cols = frame.shape[0], frame.shape[1]
    image = QImage(frame.data, cols, rows, 3 * cols, QImage.Format_RGB888)
    self.wdg_img.setPixmap(QPixmap.fromImage(image))
def img_callback(self, data):
    """Display an RGB camera frame, remembering its native resolution."""
    if self.view != RGB:
        return
    frame = self.brd.imgmsg_to_cv2(data, 'rgb8')
    # Record the camera's native size before rescaling for display.
    self.orig_h = frame.shape[0]
    self.orig_w = frame.shape[1]
    frame = cv2.resize(frame, dsize=(1280, 720), interpolation=cv2.INTER_CUBIC)
    rows, cols = frame.shape[0], frame.shape[1]
    q_img = QImage(frame.data, cols, rows, 3 * cols, QImage.Format_RGB888)
    self.wdg_img.setFixedWidth(1280)
    self.wdg_img.setFixedHeight(720)
    self.wdg_img.setPixmap(QPixmap.fromImage(q_img))
def __init__(self, name, img, coords):
    """Create a named, rotatable pixmap item centred on the given coordinates.

    :param name: identifier for this item.
    :param img: QImage to display.
    :param coords: dict with 'x', 'y' and 'theta' keys.
    """
    self.name = name
    self.cur_coords = {}
    self.visible = True
    self.pixmap_item = QGraphicsPixmapItem(QPixmap.fromImage(img))
    bounds = self.pixmap_item.boundingRect()
    # Rotate about the pixmap's centre rather than its top-left corner.
    self.pixmap_item.setTransformOriginPoint(bounds.center())
    self.cur_coords['theta'] = coords['theta']
    self.pixmap_item.setRotation(coords['theta'])
    self.cur_coords['xy'] = (coords['x'], coords['y'])
    # Position the item so that (x, y) lands on its centre.
    self.pixmap_item.setPos(coords['x'] - bounds.width() / 2,
                            coords['y'] - bounds.height() / 2)
    self.img = img
def _update_map(self):
    """
    Method used in the rqt_nav_view plugin to read the image from the map server.
    """
    # Occupancy values arrive as a flat list; view them as uint8 so the
    # unknown value -1 wraps to 255.
    a = numpy.array(self.map.data, dtype=numpy.uint8, copy=False, order='C')
    a = a.reshape((self.map.info.height, self.map.info.width))
    # QImage rows must be 32-bit aligned: pad the width up to a multiple of 4.
    if self.map.info.width % 4:
        e = numpy.empty(
            (self.map.info.height, 4 - self.map.info.width % 4),
            dtype=a.dtype, order='C')
        a = numpy.append(a, e, axis=1)
    image = QImage(a.reshape(
        (a.shape[0] * a.shape[1])), self.map.info.width,
        self.map.info.height, QImage.Format_Indexed8)
    # Map occupancy 0..100 onto a white-to-black gradient.
    for i in reversed(range(101)):
        image.setColor(100 - i, qRgb(i * 2.55, i * 2.55, i * 2.55))
    image.setColor(101, qRgb(255, 0, 0))  # not used indices
    image.setColor(255, qRgb(200, 200, 200))  # color for unknown value -1
    self._map = image
    self.graphics_view.setSceneRect(0, 0, self.map.info.width, self.map.info.height)
    pixmap = QPixmap.fromImage(self._map)
    self._map_item = self._scene.addPixmap(pixmap)
    # Everything must be mirrored
    # NOTE(review): QGraphicsItem.scale()/translate() were removed in Qt 5 —
    # confirm this code runs under Qt 4 bindings.
    self._map_item.scale(-1, 1)
    self._map_item.translate(-1 * self.map.info.width, 0)
    # Add drag and drop functionality
    # self.add_dragdrop(self._map_item)
    self.graphics_view.centerOn(self._map_item)
    self.graphics_view.show()
def _set_image(self):
    """Load a fixed test image, resize it to 640x480 and put it in the scene."""
    print("_set_image")
    self._scene.clear()
    bgr = cv2.imread("/home/gachiemchiep/Pictures/aaa.png", 1)
    resized = cv2.resize(bgr, (640, 480))
    # https://stackoverflow.com/questions/34232632/convert-python-opencv-image-numpy-array-to-pyqt-qpixmap-image
    height, width, channel = resized.shape
    print(resized.shape)
    stride = 3 * width
    # rgbSwapped() turns the BGR buffer into RGB for display.
    image = QImage(resized.data, width, height, stride,
                   QImage.Format_RGB888).rgbSwapped()
    # Rendering the scene through a QPainter did not work here;
    # adding the pixmap directly does.
    self._scene.addPixmap(QPixmap.fromImage(image))
    self._scene.setSceneRect(0, 0, 640, 480)
def both_callback(self, luv, hsv):
    """Show the HSV and LUV debug images side by side in one widget.

    :param luv: sensor_msgs/Image for the LUV view.
    :param hsv: sensor_msgs/Image for the HSV view.
    """
    if self.view != BOTH:
        self.label_hsv.hide()
        self.label_lab.hide()
        return
    img_luv = self.brd.imgmsg_to_cv2(luv)
    img_hsv = self.brd.imgmsg_to_cv2(hsv)
    h, w, c = img_luv.shape
    # NOTE(review): `img` is filled below but only `both` is ever displayed —
    # this padding buffer looks like dead code.
    img = np.zeros([h, w, c])
    # Halve each image and place them next to each other (HSV left, LUV right).
    luv_2 = cv2.resize(img_luv, (0, 0), fx=0.5, fy=0.5)
    hsv_2 = cv2.resize(img_hsv, (0, 0), fx=0.5, fy=0.5)
    both = np.hstack((hsv_2, luv_2))
    dif = (img.shape[0] - both.shape[0]) // 2
    img[dif:img.shape[0] - dif, 0:img.shape[1]] = both
    q_img = QImage(both.data, both.shape[1], both.shape[0],
                   3 * both.shape[1], QImage.Format_RGB888)
    q = QPixmap.fromImage(q_img)
    self.wdg_img.setFixedWidth(both.shape[1])
    self.wdg_img.setFixedHeight(both.shape[0])
    self.wdg_img.setPixmap(q)
def draw_hist_lab(self, histRGB):
    """Scale a histogram image to 400x400 and draw it in the LUV hist panel."""
    scaled = cv2.resize(histRGB.astype('uint8'), dsize=(400, 400),
                        interpolation=cv2.INTER_CUBIC)
    rospy.loginfo('new_h shape {}'.format(scaled.shape))
    rows, cols, channels = scaled.shape
    # Derive the row stride from the buffer size so it works for 3 or 4 channels.
    stride = int(scaled.nbytes / rows)
    if channels == 3:
        image = QImage(scaled.data, cols, rows, stride, QImage.Format_RGB888)
    elif channels == 4:
        image = QImage(scaled.data, cols, rows, stride, QImage.Format_RGBA8888)
    self.inner_luv_hist.setFixedWidth(400)
    self.inner_luv_hist.setFixedHeight(400)
    self.inner_luv_hist.setPixmap(QPixmap.fromImage(image))
def draw_timeline_segment(self, painter, topic, stamp_start, stamp_end, x, y, width, height):
    """
    draws a stream of images for the topic
    :param painter: painter object, ''QPainter''
    :param topic: topic to draw, ''str''
    :param stamp_start: stamp to start drawing, ''rospy.Time''
    :param stamp_end: stamp to end drawing, ''rospy.Time''
    :param x: x to draw images at, ''int''
    :param y: y to draw images at, ''int''
    :param width: width in pixels of the timeline area, ''int''
    :param height: height in pixels of the timeline area, ''int''
    """
    # Clip the drawing region to the visible part of the timeline.
    if x < self.timeline._history_left:
        width += x - self.timeline._history_left
        x = self.timeline._history_left
    # Minimum stamp spacing between thumbnails (never below 0.1 s).
    max_interval_thumbnail = self.timeline.map_dx_to_dstamp(self.thumbnail_combine_px)
    max_interval_thumbnail = max(0.1, max_interval_thumbnail)
    thumbnail_gap = 6
    thumbnail_x, thumbnail_y, thumbnail_height = x + 1, y + 1, height - 2 - thumbnail_gap  # leave 1px border
    # set color to white draw rectangle over messages
    painter.setBrush(QBrush(Qt.white))
    painter.drawRect(x, y, width, height - thumbnail_gap)
    thumbnail_width = None
    # Walk left-to-right, painting one thumbnail per slot until out of room.
    while True:
        available_width = (x + width) - thumbnail_x
        # Check for enough remaining to draw thumbnail
        if width > 1 and available_width < self.min_thumbnail_width:
            break
        # Try to display the thumbnail, if its right edge is to the right of the timeline's left side
        if not thumbnail_width or thumbnail_x + thumbnail_width >= self.timeline._history_left:
            stamp = self.timeline.map_x_to_stamp(thumbnail_x, clamp_to_visible=False)
            thumbnail_bitmap = self.thumbnail_cache.get_item(topic, stamp, max_interval_thumbnail)
            # Cache miss: queue the thumbnail for background rendering.
            if not thumbnail_bitmap:
                thumbnail_details = (thumbnail_height,)
                self.thumbnail_cache.enqueue((topic, stamp, max_interval_thumbnail, thumbnail_details))
                if not thumbnail_width:
                    break
            else:
                thumbnail_width, _ = thumbnail_bitmap.size
                if width > 1:
                    if available_width < thumbnail_width:
                        thumbnail_width = available_width - 1
                QtImage = ImageQt(thumbnail_bitmap)
                pixmap = QPixmap.fromImage(QtImage)
                painter.drawPixmap(thumbnail_x, thumbnail_y, thumbnail_width, thumbnail_height, pixmap)
        thumbnail_x += thumbnail_width
        if width == 1:
            break
    # Outline the painted band.
    painter.setPen(QPen(Qt.black))
    painter.setBrush(QBrush(Qt.transparent))
    if width == 1:
        painter.drawRect(x, y, thumbnail_x - x, height - thumbnail_gap - 1)
    else:
        painter.drawRect(x, y, width, height - thumbnail_gap - 1)
    return True
def put_image_into_scene(self):
    """Display the current image in the scene at its natural size."""
    if self._image:
        pixmap = QPixmap.fromImage(ImageQt(self._image))
        self._scene.clear()
        self._scene.addPixmap(pixmap)
def converter(rosimg):
    """Turn a ROS image message into a QPixmap via an OpenCV image."""
    cv_image = BagPlayer.rosimg2cvimg(rosimg)
    return QPixmap.fromImage(BagPlayer.cvimg2qimg(cv_image))
def tf_connect(self):
    """(Re)create the handeye TF service client and colour the action icon.

    Green means the service answered within 0.5 s; red means it did not.
    """
    self.client_tf = self.node.create_client(HandeyeTF, 'handeye_tf_service')
    if self.client_tf.wait_for_service(timeout_sec=0.5):
        icon_path = self.path_green_icon
    else:
        icon_path = self.path_red_icon
    self.tf_action.setIcon(QIcon(QPixmap.fromImage(QImage(icon_path))))
def parameter_connect(self):
    """Create the parameter-server client and show its reachability as an icon."""
    self.client_param = self.node.create_client(
        SetParameters, '/grasp_modbus_server/set_parameters')
    reachable = self.client_param.wait_for_service(timeout_sec=0.5)
    icon_path = self.path_green_icon if reachable else self.path_red_icon
    self.parameter_action.setIcon(QIcon(QPixmap.fromImage(QImage(icon_path))))
def msg_to_pixmap(self, msg):
    """Convert a 3-channel image message into a 320x240 QPixmap."""
    cv_img = self.bridge.imgmsg_to_cv2(msg)
    rows, cols, _ = cv_img.shape
    image = QImage(cv_img.data, cols, rows, 3 * cols, QImage.Format_RGB888)
    return QPixmap.fromImage(image).scaled(320, 240)
def __init__(self, context):
    """Build the hand-eye calibration dashboard UI and its ROS clients.

    :param context: qt_gui plugin context the widget is registered with.
    """
    super(HandEyeCalibration, self).__init__(context)
    self.context = context
    self.node = context.node
    self.widget = QWidget()
    self.widget.setObjectName(self.PLUGIN_TITLE)
    self.widget.setWindowTitle(self.PLUGIN_TITLE)
    # Data: collected transform samples
    self.Tsamples = []
    # Toolbar: snapshot / calibrate / clear / execute actions
    _, path_pkg = get_resource('packages', 'handeye_dashboard')
    print("{}".format(path_pkg))
    self.snapshot_action = QAction(QIcon.fromTheme('camera-photo'),
                                   'Take a snapshot', self.widget)
    path = path_pkg + '/share/handeye_dashboard/images/capture.png'
    self.calibrate_action = QAction(QIcon(QPixmap.fromImage(QImage(path))),
                                    'Get the camera/robot transform', self.widget)
    self.clear_action = QAction(QIcon.fromTheme('edit-clear'),
                                'Clear the record data.', self.widget)
    path = path_pkg + '/share/handeye_dashboard/images/UR5.png'
    # NOTE(review): tooltip 'EStart the publishing the TF.' looks like a typo
    # for 'Start publishing the TF.' — left unchanged here.
    self.execut_action = QAction(QIcon(QPixmap.fromImage(QImage(path))),
                                 'EStart the publishing the TF.', self.widget)
    self.toolbar = QToolBar()
    self.toolbar.addAction(self.snapshot_action)
    self.toolbar.addAction(self.calibrate_action)
    self.toolbar.addAction(self.clear_action)
    self.toolbar.addAction(self.execut_action)
    # Toolbar0: camera mount type selector
    self.l0 = QLabel(self.widget)
    self.l0.setText("Camera-Mount-Type: ")
    self.l0.setFixedWidth(150)
    self.l0.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.combobox = QComboBox(self.widget)
    self.combobox.addItem('attached on robot')
    self.combobox.addItem('fixed beside robot')
    self.toolbar0 = QToolBar()
    self.toolbar0.addWidget(self.l0)
    self.toolbar0.addWidget(self.combobox)
    # Toolbar1: camera frame name
    self.l1 = QLabel(self.widget)
    self.l1.setText("Camera-Frame: ")
    self.l1.setFixedWidth(150)
    self.l1.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.camera_frame = QLineEdit(self.widget)
    self.camera_frame.setText("camera_link")
    self.toolbar1 = QToolBar()
    self.toolbar1.addWidget(self.l1)
    self.toolbar1.addWidget(self.camera_frame)
    # Toolbar2: object (calibration board) frame name
    self.l2 = QLabel(self.widget)
    self.l2.setText("Object-Frame: ")
    self.l2.setFixedWidth(150)
    self.l2.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.object_frame = QLineEdit(self.widget)
    self.object_frame.setText("calib_board")
    self.toolbar2 = QToolBar()
    self.toolbar2.addWidget(self.l2)
    self.toolbar2.addWidget(self.object_frame)
    # Toolbar3: robot base frame name
    self.l3 = QLabel(self.widget)
    self.l3.setText("Robot-Base-Frame: ")
    self.l3.setFixedWidth(150)
    self.l3.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.base_frame = QLineEdit(self.widget)
    self.base_frame.setText("base")
    self.toolbar3 = QToolBar()
    self.toolbar3.addWidget(self.l3)
    self.toolbar3.addWidget(self.base_frame)
    # Toolbar4: end-effector frame name
    self.l4 = QLabel(self.widget)
    self.l4.setText("End-Effector-Frame: ")
    self.l4.setFixedWidth(150)
    self.l4.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.endeffector_frame = QLineEdit(self.widget)
    self.endeffector_frame.setText("tool0")
    self.toolbar4 = QToolBar()
    self.toolbar4.addWidget(self.l4)
    self.toolbar4.addWidget(self.endeffector_frame)
    # Toolbar5: number of samples to record (read-only field)
    self.l5 = QLabel(self.widget)
    self.l5.setText("Sample-Number: ")
    self.l5.setFixedWidth(150)
    self.l5.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.le5 = QLineEdit(self.widget)
    self.le5.setValidator(QIntValidator())
    self.le5.setText('10')
    self.le5.setReadOnly(True)
    self.toolbar5 = QToolBar()
    self.toolbar5.addWidget(self.l5)
    self.toolbar5.addWidget(self.le5)
    # TreeView: displays the recorded samples
    self.treeview = QTreeView()
    self.treeview.setAlternatingRowColors(True)
    self.model = QStandardItemModel(self.treeview)
    self.treeview.setModel(self.model)
    self.treeview.setHeaderHidden(True)
    # TextEdit: read-only log/output area
    self.textedit = QTextEdit(self.widget)
    self.textedit.setReadOnly(True)
    # Layout
    self.layout = QVBoxLayout()
    self.layout.addWidget(self.toolbar0)
    self.layout.addWidget(self.toolbar1)
    self.layout.addWidget(self.toolbar2)
    self.layout.addWidget(self.toolbar3)
    self.layout.addWidget(self.toolbar4)
    self.layout.addWidget(self.toolbar5)
    self.layout.addWidget(self.toolbar)
    self.layoutH = QHBoxLayout()
    self.layoutH.addWidget(self.treeview)
    self.layoutH.addWidget(self.textedit)
    self.layout.addLayout(self.layoutH)
    self.widget.setLayout(self.layout)
    # Add the widget to the user interface; number the title when several
    # instances of the plugin are open.
    if context.serial_number() > 1:
        self.widget.setWindowTitle(self.widget.windowTitle() +
                                   (' (%d)' % context.serial_number()))
    context.add_widget(self.widget)
    # Make the connections
    self.snapshot_action.triggered.connect(self.take_snapshot)
    self.calibrate_action.triggered.connect(self.calibration)
    self.clear_action.triggered.connect(self.clear)
    self.execut_action.triggered.connect(self.execution)
    # Package path
    self.path_pkg = path_pkg
    # Set up TF: block until the handeye TF service is reachable.
    self.cli = self.node.create_client(HandeyeTF, 'handeye_tf_service')
    while not self.cli.wait_for_service(timeout_sec=1.0):
        self.node.get_logger().info(
            'service not available, waiting again...')
    self.req = HandeyeTF.Request()
def __init__(self, context):
    """Build the FOME GUI plugin: image panes, parameter sliders, pub/subs.

    :param context: qt_gui plugin context used to register the widget.
    """
    super(MyPlugin, self).__init__(context)
    # Placeholder frame shown in every pane until real images arrive.
    # NOTE(review): QImage does not copy the numpy buffer — `Black` must stay
    # alive; it does here only because the QImages are built before it goes
    # out of scope. Confirm the bindings keep a reference.
    Black = np.zeros((300, 400, 3), np.uint8)
    # Flags toggled by the "auto" checkboxes; sliders publish only when 0.
    self.is_decay_auto = 0
    self.is_frame_auto = 0
    self.is_segsz_auto = 0
    self.bridge = CvBridge()
    self.d_image_sub = rospy.Subscriber("/fome/direction", Image, self.fome_dir)
    self.m_image_sub = rospy.Subscriber("/fome/magnitude", Image, self.fome_mag)
    self.c_image_sub = rospy.Subscriber("/fome/cluster", Image, self.fome_cls)
    # self.r_image_sub = rospy.Subscriber("/dvs_rendering",Image,self.fome_raw)
    self.r_image_sub = rospy.Subscriber("/dvs/image_raw", Image, self.fome_raw)
    self.decay_pub = rospy.Publisher("/fome/decay_rate", String, queue_size=1)
    self.frame_pub = rospy.Publisher("/fome/frame_rate", String, queue_size=1)
    self.segsz_pub = rospy.Publisher("/fome/segment_size", String, queue_size=1)
    self.setObjectName('fome_gui')
    rp = rospkg.RosPack()
    self._widget = QWidget()
    ui_file = os.path.join(rp.get_path('fome_gui'), 'resource', 'MyPlugin.ui')
    loadUi(ui_file, self._widget)
    self._widget.setObjectName('fome_gui')
    height, width, byteValue = Black.shape
    byteValue = byteValue * width  # bytes per line = channels * width
    self.d_image = QImage(Black, width, height, byteValue, QImage.Format_RGB888)
    self.m_image = QImage(Black, width, height, byteValue, QImage.Format_RGB888)
    self.r_image = QImage(Black, width, height, byteValue, QImage.Format_RGB888)
    self.c_image = QImage(Black, width, height, byteValue, QImage.Format_RGB888)
    # One QLabel per graphics view: direction / magnitude / raw / cluster.
    self.d_label = QLabel(self._widget.graphicsView_1)
    self.d_label.setPixmap(QPixmap.fromImage(self.d_image))
    self.m_label = QLabel(self._widget.graphicsView_2)
    self.m_label.setPixmap(QPixmap.fromImage(self.m_image))
    self.r_label = QLabel(self._widget.graphicsView_3)
    self.r_label.setPixmap(QPixmap.fromImage(self.r_image))
    self.c_label = QLabel(self._widget.graphicsView_4)
    self.c_label.setPixmap(QPixmap.fromImage(self.c_image))

    @QtCore.Slot(int)
    def decay_moved(r):
        # Publish slider value only while "auto" is unchecked.
        if self.is_decay_auto == 0:
            self.decay_pub.publish(str(r))

    self.decay_rate = SliderWithValue(QtCore.Qt.Horizontal, 1)
    self.decay_rate.slidervalue.connect(decay_moved)
    self.decay_rate.setMinimum(-20)
    self.decay_rate.setMaximum(30)
    self.decay_rate.setTickInterval(1)
    self.decay_rate.setSingleStep(1)
    self.decay_rate.setPageStep(1)
    self.decay_rate.setTickPosition(QSlider.TicksBelow)
    self.decay_rate.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
    self.decay_rate.setValue(-10)
    # BUG FIX: previously published str(self.decay_rate.slidervalue) — the
    # repr of the bound Qt signal — instead of the slider's current value.
    self.decay_pub.publish(str(self.decay_rate.value()))
    self._widget.decay_rate.addWidget(self.decay_rate)

    @QtCore.Slot(int)
    def decay_auto(args):
        self.is_decay_auto = args
        if args == 2:  # Qt.Checked
            self.decay_pub.publish("auto")
            # BUG FIX: unregister was referenced without parentheses, so the
            # publisher was never actually shut down.
            self.decay_pub.unregister()
            del self.decay_pub
        if args == 0:  # Qt.Unchecked
            self.decay_pub = rospy.Publisher("/fome/decay_rate", String, queue_size=1)

    self._widget.decay_auto.stateChanged.connect(decay_auto)

    @QtCore.Slot(int)
    def frame_moved(r):
        if self.is_frame_auto == 0:
            self.frame_pub.publish(str(r))

    self.frame_rate = SliderWithValue(QtCore.Qt.Horizontal, 1)
    self.frame_rate.slidervalue.connect(frame_moved)
    self.frame_rate.setMinimum(10)
    self.frame_rate.setMaximum(2000)
    self.frame_rate.setTickInterval(1)
    self.frame_rate.setSingleStep(1)
    self.frame_rate.setPageStep(1)
    self.frame_rate.setTickPosition(QSlider.TicksBelow)
    self.frame_rate.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
    self.frame_rate.setValue(100)
    self._widget.frame_rate.addWidget(self.frame_rate)

    @QtCore.Slot(int)
    def frame_auto(args):
        self.is_frame_auto = args
        if args == 2:
            self.frame_pub.publish("auto")
            # BUG FIX: missing call parentheses (see decay_auto).
            self.frame_pub.unregister()
            del self.frame_pub
        if args == 0:
            self.frame_pub = rospy.Publisher("/fome/frame_rate", String, queue_size=1)

    self._widget.frame_auto.stateChanged.connect(frame_auto)

    @QtCore.Slot(int)
    def segsz_moved(r):
        if self.is_segsz_auto == 0:
            self.segsz_pub.publish(str(r))

    self.segsz_rate = SliderWithValue(QtCore.Qt.Horizontal, 1)
    self.segsz_rate.slidervalue.connect(segsz_moved)
    self.segsz_rate.setMinimum(100)
    self.segsz_rate.setMaximum(10000)
    self.segsz_rate.setTickInterval(10)
    self.segsz_rate.setSingleStep(10)
    self.segsz_rate.setPageStep(10)
    self.segsz_rate.setTickPosition(QSlider.TicksBelow)
    self.segsz_rate.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
    self.segsz_rate.setValue(4320)
    self._widget.segsz_rate.addWidget(self.segsz_rate)

    @QtCore.Slot(int)
    def segsz_auto(args):
        self.is_segsz_auto = args
        if args == 2:
            self.segsz_pub.publish("auto")
            # BUG FIX: missing call parentheses (see decay_auto).
            self.segsz_pub.unregister()
            del self.segsz_pub
        if args == 0:
            # BUG FIX: the publisher was recreated on "/fome/segsz_rate",
            # while every other use of this publisher targets
            # "/fome/segment_size" — unified on the original topic.
            self.segsz_pub = rospy.Publisher("/fome/segment_size", String, queue_size=1)

    self._widget.segsz_auto.stateChanged.connect(segsz_auto)

    if context.serial_number() > 1:
        # NOTE(review): other plugins append ' (%d)' % serial_number here;
        # this one replaces the title entirely — confirm intent.
        self._widget.setWindowTitle('Estimated direction from events')
    context.add_widget(self._widget)
def draw_timeline_segment(self, painter, topic, stamp_start, stamp_end, x, y, width, height):
    """
    draws a stream of images for the topic
    :param painter: painter object, ''QPainter''
    :param topic: topic to draw, ''str''
    :param stamp_start: stamp to start drawing, ''rospy.Time''
    :param stamp_end: stamp to end drawing, ''rospy.Time''
    :param x: x to draw images at, ''int''
    :param y: y to draw images at, ''int''
    :param width: width in pixels of the timeline area, ''int''
    :param height: height in pixels of the timeline area, ''int''
    """
    # Clip the drawing region to the visible part of the timeline.
    if x < self.timeline._history_left:
        width += x - self.timeline._history_left
        x = self.timeline._history_left
    # Minimum stamp spacing between thumbnails (never below 0.1 s).
    max_interval_thumbnail = self.timeline.map_dx_to_dstamp(self.thumbnail_combine_px)
    max_interval_thumbnail = max(0.1, max_interval_thumbnail)
    thumbnail_gap = 6
    thumbnail_x, thumbnail_y, thumbnail_height = x + 1, y + 1, height - 2 - thumbnail_gap  # leave 1px border
    # set color to white draw rectangle over messages
    painter.setBrush(QBrush(Qt.white))
    painter.drawRect(x, y, width, height - thumbnail_gap)
    thumbnail_width = None
    # Walk left-to-right, painting one thumbnail per slot until out of room.
    while True:
        available_width = (x + width) - thumbnail_x
        # Check for enough remaining to draw thumbnail
        if width > 1 and available_width < self.min_thumbnail_width:
            break
        # Try to display the thumbnail, if its right edge is to the right of the timeline's left side
        if not thumbnail_width or thumbnail_x + thumbnail_width >= self.timeline._history_left:
            stamp = self.timeline.map_x_to_stamp(thumbnail_x, clamp_to_visible=False)
            thumbnail_bitmap = self.thumbnail_cache.get_item(topic, stamp, max_interval_thumbnail)
            # Cache miss: queue the thumbnail for background rendering.
            if not thumbnail_bitmap:
                thumbnail_details = (thumbnail_height,)
                self.thumbnail_cache.enqueue((topic, stamp, max_interval_thumbnail, thumbnail_details))
                if not thumbnail_width:
                    break
            else:
                thumbnail_width, _ = thumbnail_bitmap.size
                if width > 1:
                    if available_width < thumbnail_width:
                        thumbnail_width = available_width - 1
                QtImage = ImageQt(thumbnail_bitmap)
                pixmap = QPixmap.fromImage(QtImage)
                painter.drawPixmap(thumbnail_x, thumbnail_y, thumbnail_width, thumbnail_height, pixmap)
        thumbnail_x += thumbnail_width
        if width == 1:
            break
    # Outline the painted band.
    painter.setPen(QPen(QBrush(Qt.black)))
    painter.setBrush(QBrush(Qt.transparent))
    if width == 1:
        painter.drawRect(x, y, thumbnail_x - x, height - thumbnail_gap - 1)
    else:
        painter.drawRect(x, y, width, height - thumbnail_gap - 1)
    return True