Example #1
0
 def mouseMoveEvent(self, event):
     """
     Mouse move event handler. Interactively updates the geometric
     transformation applied to the layer according to the option selected
     in the tool's form ('Free', 'Rotation' or 'Translation'), then
     reapplies the layer stack and repaints.

     Ctrl+Alt+move relocates the tool's reference position instead; this is
     refused (with a warning dialog) while a transformation is in progress.

     @param event: mouse move event
     @type event: QMouseEvent -- presumably; TODO confirm against caller
     """
     modifiers = QApplication.keyboardModifiers()
     # mouse coordinates, relative to parent widget
     pos = self.mapToParent(event.pos())
     img = self.tool.layer.parentImage
     r = self.tool.resizingCoeff
     self.tool.targetQuad_old = self.tool.getTargetQuad(
     )  # TODO added 15/05/18 validate
     # mouse position expressed in full-size image coordinates
     self.posRelImg = (pos - QPoint(img.xOffset, img.yOffset)) / r
     if modifiers == Qt.ControlModifier | Qt.AltModifier:
         if self.tool.isModified():
             dlgWarn("A transformation is in progress", "Reset first")
             return
         # update the new starting  position
         self.posRelImg_ori = self.posRelImg  # (pos - QPoint(img.xOffset, img.yOffset)) / r
         self.posRelImg_frozen = self.posRelImg
         self.tool.moveRotatingTool()
         self.tool.parent().repaint()
         return
     curimg = self.tool.layer.getCurrentImage()
     w, h = curimg.width(), curimg.height()
     # scaling factor between current and full-size images
     # NOTE(review): s is computed but not used below -- confirm intent
     s = w / self.tool.img.width()
     form = self.tool.getForm()
     if form.options['Free']:
         pass
     elif form.options['Rotation']:
         # rotate the frozen quad around the center of the target bounding rect
         center = self.tool.getTargetQuad().boundingRect().center()
         v = QPointF(self.posRelImg.x() - center.x(),
                     self.posRelImg.y() - center.y())
         v0 = QPointF(self.posRelImg_frozen.x() - center.x(),
                      self.posRelImg_frozen.y() - center.y())
         # signed angle (degrees) swept from v0 (frozen position) to v (current)
         theta = (np.arctan2(v.y(), v.x()) -
                  np.arctan2(v0.y(), v0.x())) * 180.0 / np.pi
         T = QTransform()  # self.tool.geoTrans_ori)
         T.translate(center.x(), center.y()).rotate(theta).translate(
             -center.x(), -center.y())
         q = T.map(self.tool.getFrozenQuad())
         # move the four corner buttons onto the rotated quad vertices
         for i, role in enumerate(
             ['topLeft', 'topRight', 'bottomRight', 'bottomLeft']):
             self.tool.btnDict[role].posRelImg = q.at(i)
     elif form.options['Translation']:
         # translation vector (coordinates are relative to the full size image)
         p = QPointF(self.posRelImg) - QPointF(self.posRelImg_frozen)
         T = QTransform()
         T.translate(p.x(), p.y())
         q = T.map(self.tool.getFrozenQuad())
         # move the four corner buttons onto the translated quad vertices
         for i, role in enumerate(
             ['topLeft', 'topRight', 'bottomRight', 'bottomLeft']):
             self.tool.btnDict[role].posRelImg = q.at(i)
     self.tool.moveRotatingTool()
     self.tool.modified = True
     self.tool.layer.applyToStack()
     self.parent().repaint()
Example #2
0
 def testMap(self):
     """QTransform.map(x, y) on an identity transform must return the
     input coordinates unchanged, as a pair of floats."""
     x_in, y_in = 10.0, 20.0
     identity = QTransform()
     mapped = identity.map(x_in, y_in)
     for coordinate in mapped:
         self.assertTrue(isinstance(coordinate, float))
     self.assertEqual(mapped, (x_in, y_in))
Example #3
0
    def keyPressEvent(self, event):
        """Handles deleting and rotating the selected
        item when dedicated keys are pressed.

        Delete removes the item from the project; R rotates it by 90
        degrees around the center of its scene bounding rectangle and
        refreshes the geometry of every attached link.

        Args:
            event (QKeyEvent): Key event
        """
        key = event.key()
        if key == Qt.Key_Delete and self.isSelected():
            self._project_item._project.remove_item(self.name())
            event.accept()
        elif key == Qt.Key_R and self.isSelected():
            # TODO:
            # 1. Change name item text direction when rotating
            # 2. Save rotation into project file
            scene_rect = self.mapToScene(self.boundingRect()).boundingRect()
            pivot = scene_rect.center()
            rotation = QTransform()
            rotation.translate(pivot.x(), pivot.y())
            rotation.rotate(90)
            rotation.translate(-pivot.x(), -pivot.y())
            self.setPos(rotation.map(self.pos()))
            self.setRotation(self.rotation() + 90)
            # each link may be attached to several connectors; update once
            affected_links = {lnk for conn in self.connectors.values()
                              for lnk in conn.links}
            for link in affected_links:
                link.update_geometry()
            event.accept()
        else:
            super().keyPressEvent(event)
Example #4
0
    def getObjectInteraction(self, persons, objects, interaction, d):
        """
        Build the personal-space polygon of every object and detect which
        polygons contain a person.

        For each object a trapezoidal space polygon is built in front of it
        (width 1 m at the object, flaring by sp/4 on each side at depth sp),
        then rotated by the object's orientation and translated to the
        object's position. Every person position is tested against every
        polygon.

        @param persons: iterable of detections with x, z (mm) and angle attributes
        @param objects: iterable of detections with x, z (mm), angle and space attributes
        @param interaction: if truthy, return only the polygons containing a
            person; otherwise return the polygons of all objects
        @param d: debug flag; when truthy, draw spaces and persons with matplotlib
        @return: list of polylines, each a list of [x, z] pairs (meters)
        """
        # print("getObjectInteration")
        plt.close('all')

        polylines_object = []
        polylines_interacting = []

        for o in objects:
            # convert mm -> m for x and z
            obj = Object(o.x / 1000., o.z / 1000., o.angle, o.space)
            # print("OBJETO")
            ## draw the object as a square with a heading line (debug only)
            if d:
                plt.figure('ObjectSpace')
                rect = plt.Rectangle((obj.x - 0.25, obj.y - 0.25),
                                     0.5,
                                     0.5,
                                     fill=False)

                plt.gca().add_patch(rect)
                x_aux = obj.x + 0.25 * cos(pi / 2 - obj.th)
                y_aux = obj.y + 0.25 * sin(pi / 2 - obj.th)
                heading = plt.Line2D((obj.x, x_aux), (obj.y, y_aux),
                                     lw=1,
                                     color='k')
                plt.gca().add_line(heading)

            w = 1.0
            # print (obj.x,obj.y)
            ## base rectangle of the object space: width w, depth obj.sp
            s = QRectF(QPointF(0, 0), QSizeF(w, obj.sp))

            # if (d):
            #     plt.plot (s.bottomLeft().x(),s.bottomLeft().y(),"go")
            #     plt.plot(s.bottomRight().x(), s.bottomRight().y(), "ro")
            #     plt.plot(s.topRight().x(), s.topRight().y(), "yo")
            #     plt.plot(s.topLeft().x(), s.topLeft().y(), "bo")

            # trapezoid: rectangle widened by sp/4 on each side at the far edge
            space = QPolygonF()
            space.append(s.topLeft())
            space.append(s.topRight())
            space.append(
                QPointF(s.bottomRight().x() + obj.sp / 4,
                        s.bottomRight().y()))
            space.append(
                QPointF(s.bottomLeft().x() - obj.sp / 4,
                        s.bottomLeft().y()))

            # center the polygon horizontally on the object origin
            t = QTransform()
            t.translate(-w / 2, 0)
            space = t.map(space)
            # rotate by the object's orientation
            t = QTransform()
            t.rotateRadians(-obj.th)
            space = t.map(space)

            # move the polygon to the object's position
            t = QTransform()
            t.translate(obj.x, obj.y)
            space = t.map(space)

            # points = []
            # for x in xrange(space.count()-1):
            #     point = space.value(x)
            #     print ("valor", point)
            #     points.append([point.x(),point.y()])
            #     plt.plot(point.x(),point.y(),"go")

            polyline = []

            # flatten the polygon vertices into [x, z] pairs
            for x in range(space.count()):
                point = space.value(x)
                if (d):
                    plt.plot(point.x(), point.y(), "go")

                p = SNGPoint2D()
                p.x = point.x()
                p.z = point.y()
                polyline.append([p.x, p.z])

            polylines_object.append(polyline)

            for p in persons:
                pn = Person(p.x, p.z, p.angle)
                # print("PERSONA", persons.index(p)+1)
                ## draw the person as a circle with a heading line (debug only)
                if d:
                    body = plt.Circle((pn.x, pn.y), radius=0.3, fill=False)
                    plt.gca().add_patch(body)

                    x_aux = pn.x + 0.30 * cos(pi / 2 - pn.th)
                    y_aux = pn.y + 0.30 * sin(pi / 2 - pn.th)
                    heading = plt.Line2D((pn.x, x_aux), (pn.y, y_aux),
                                         lw=1,
                                         color='k')
                    plt.gca().add_line(heading)
                    plt.axis('equal')

                ## CHECKING THE ORIENTATION
                # NOTE(review): checkangle is computed but the containsPoint
                # test below has the "and checkangle" clause commented out,
                # so orientation currently does not affect the result.
                print("obj.angle", obj.th, "person.angle", pn.th)
                a = abs(obj.th - abs(pn.th - math.pi))
                if a < math.radians(45):
                    checkangle = True
                else:
                    checkangle = False

                ## CHECKING IF THE PERSON IS INSIDE THE POLYGON
                if space.containsPoint(QPointF(pn.x, pn.y),
                                       Qt.OddEvenFill):  # and checkangle:
                    print("DENTROOOOO Y MIRANDO")
                    if not polyline in polylines_interacting:
                        polylines_interacting.append(polyline)

        if d:
            for ps in polylines_interacting:
                #  plt.figure()
                for p in ps:
                    plt.plot(p.x, p.z, "ro")
                    plt.axis('equal')
                    plt.xlabel('X')
                    plt.ylabel('Y')
            plt.show()
        # NOTE(review): this second show() runs even when d is False (and a
        # second time after the one above when d is True) -- confirm intent.
        plt.show()

        if (interaction):
            return polylines_interacting
        else:
            return polylines_object
Example #5
0
class OutlinePaintEngine(QPaintEngine):
    """
    Used internally by OutlinePaintDevice. Accumulates stroke-drawing commands
    and records the pixel-coordinates of these line segments and colours used.
    Fetch the accumulated lines using getOutlines().
    """
    def __init__(self, paint_device):
        # NB: AllFeatures passed since doing otherwise results in unsupported
        # features being turned into rasters (which is not a useful fallback
        # here).
        super().__init__(QPaintEngine.PaintEngineFeature.AllFeatures)

        # Current transform and pen, kept up to date by updateState().
        self._transform = QTransform()
        self._pen = QPen()

        # [((r, g, b, a) or None, width, [(x, y), ...]), ...]
        #
        # Colours are None or tuples of 0.0 to 1.0 floats. Line widths are
        # given in pixels. Line coordinates are given in pixels.
        self._outlines = []

    def getOutlines(self):
        """
        See OutlinePaintDevice.getOutlines(), except the line widths and
        coordinates are given in pixels.
        """
        return self._outlines

    def begin(self, paint_device):
        # No per-device setup needed; report success.
        return True

    def end(self):
        # No teardown needed; report success.
        return True

    def updateState(self, new_state):
        """
        Track transform/pen changes and reject state this engine cannot
        honour (clipping, non-SourceOver composition).

        :raises NotImplementedError: on unsupported clip or composition mode.
        """
        dirty_flags = new_state.state()
        if dirty_flags & QPaintEngine.DirtyTransform:
            self._transform = new_state.transform()
        if dirty_flags & QPaintEngine.DirtyPen:
            self._pen = new_state.pen()
        if (dirty_flags & QPaintEngine.DirtyClipEnabled
                or dirty_flags & QPaintEngine.DirtyClipRegion
                or dirty_flags & QPaintEngine.DirtyClipPath):
            # Clipping seems to be done by the QtSVG library's own renderer so
            # some time can be saved here!
            if new_state.clipOperation() != Qt.ClipOperation.NoClip:
                raise NotImplementedError(
                    "Clipping mode {} not supported".format(
                        new_state.clipOperation()))
        if dirty_flags & QPaintEngine.DirtyCompositionMode:
            # Other modes not expected (not available in SVG)
            if new_state.compositionMode() != \
                    QPainter.CompositionMode.SourceOver:
                raise NotImplementedError(
                    "CompositionMode {} not supported".format(
                        new_state.compositionMode()))

    def drawImage(self, r, pm, sr, flags):
        # Draw image outline...
        self.drawRects(r, 1)

    def drawPixmap(self, r, pm, sr):
        # Draw pixmap outline...
        self.drawRects(r, 1)

    def drawPolygon(self, points, count, mode):
        # Just draw the polygon using drawPath...
        # NB: A bug prevents a useful implementation of this function being
        # written. Fortunately the QtSVG renderer only ever uses QPainterPath
        # objects for drawing.
        raise NotImplementedError("Qt for Python bug PYSIDE-891 "
                                  "prevents drawPolygon being implemented")

        # Implementation should look something like:
        #
        #     from PySide2.QtGui import QPainterPath
        #     path = QPainterPath()
        #     for i, point in enumerate(points):
        #         if i == 0:
        #             path.moveTo(point)
        #         else:
        #             path.lineTo(point)
        #     self.drawPath(path)

    def drawPath(self, path):
        """
        Record the stroked outline of *path* (flattened to straight line
        segments, with the pen's dash pattern applied) into self._outlines.
        """
        # Nothing to do if not drawing the outline
        if (self._pen.style() == Qt.PenStyle.NoPen
                or self._pen.brush().style() == Qt.BrushStyle.NoBrush):
            return

        # Determine colour
        if self._pen.brush().style() == Qt.BrushStyle.SolidPattern:
            rgba = self._pen.brush().color().getRgbF()
        else:
            rgba = None

        # Determine dash style (Qt dash patterns are in units of pen width)
        pen_width = self._pen.widthF() or 1.0
        dash_pattern = [v * pen_width for v in self._pen.dashPattern()]
        dash_offset = self._pen.dashOffset() * pen_width

        # When applying the dash style, perform this on a version of the line
        # prior to the current transform (to achieve correct dash spacing)
        transform = self._transform
        inverse_transform, invertable = self._transform.inverted()
        if not invertable:
            transform = inverse_transform = QTransform()
            if dash_pattern:
                # Fix: the transform here is *singular* (not invertible), so
                # the previous message ("non-singular") described the
                # opposite, supported case.
                warnings.warn(
                    "Dashed lines transformed by singular (non-invertible) "
                    "matrices are not supported and the dash pattern will be "
                    "incorrectly scaled.")

        # Approximate the scaling factor applied by the current transform as
        # being the scale applied to a diagonal line. This won't work if the
        # line happens to be an eigenvector but for non-uniform scalings, the
        # concept of a scaled line width is not especially well defined
        # anyway.
        #
        # (test_line has length 1)
        test_line = QLineF(0, 0, 2**0.5 / 2.0, 2**0.5 / 2.0)
        scaled_pen_width = pen_width * self._transform.map(test_line).length()

        # Don't scale the points for dashing when in cosmetic mode
        if self._pen.isCosmetic():
            transform = inverse_transform = QTransform()
            scaled_pen_width = pen_width

        # Convert to simple straight line segments. The conversion of Text,
        # Bezier curves, arcs, ellipses etc. into to chains of simple straight
        # line is implemented by QPainterPath.toSubpathPolygons. Note that the
        # transform being supplied here is important to ensure bezier-to-line
        # segmentation occurs at the correct resolution.
        for poly in path.toSubpathPolygons(self._transform):
            # Apply dash style. The coordinates must be scaled back to their
            # native size for this process since the spacing for dashes is
            # based on  the line width and aspect ratio used.
            line = [p.toTuple() for p in inverse_transform.map(poly)]
            sub_lines = dash_line(line, dash_pattern, dash_offset)

            # Transform the coordinates back to pixels once more and add colour
            # information. (Loop variable renamed so it no longer shadows
            # `line` above.)
            self._outlines.extend(
                (rgba, scaled_pen_width, [transform.map(*p) for p in sub])
                for sub in sub_lines)
	def draw_rectangle_on_roi(self, object_found):
		"""
		Draw a rectangle and a distance/angle label around every detected
		object, publish the annotated image, and create RViz markers
		according to the object class.

		@param object_found: detection message whose ``data`` field is a flat
			array of 12 floats per object:
			[id, width, height, h11 .. h33 (3x3 homography)].
		"""
		data = object_found.data
		# Pinhole-camera parameters (640 px wide image)
		focal_length_mm = 3.04
		sensor_width_mm = 3.68
		#image_width_px = 1920
		image_width_px = 640
		focal_length_px = (focal_length_mm / sensor_width_mm) * image_width_px

		# Linear mapping from image column to camera angle (degrees)
		#camera_angle_conv = 0.0334
		camera_angle_conv = 0.0971
		camera_min_angle = -31.1
		camera_max_angle = 31.1

		# Assumed real-world object height in meters -- TODO confirm
		object_size = 1
		cv_image = self.bridge.imgmsg_to_cv2(self.actual_image, "bgr8")

		if(len(object_found.data) != 0):
			for i in range(0, len(data), 12):

				# unpack the detection record (renamed from `id`, which
				# shadowed the builtin)
				object_id = data[i]
				object_width = data[i+1]
				object_height = data[i+2]

				# The homography maps object-space corners to image pixels;
				# use it to locate the four corners of the detection.
				qtHomography = QTransform(data[i+3], data[i+4], data[i+5], data[i+6], data[i+7], data[i+8], data[i+9], data[i+10], data[i+11])
				qtTopLeft = qtHomography.map(QPointF(0,0))
				qtTopRight = qtHomography.map(QPointF(object_width,0))
				qtBottomLeft = qtHomography.map(QPointF(0,object_height))
				qtBottomRight = qtHomography.map(QPointF(object_width,object_height))
				# Apparent height in pixels -> distance via the pinhole model
				height = abs(qtBottomRight.y() - qtTopRight.y())
				ximage = qtBottomRight.x() - (qtBottomRight.x() - qtBottomLeft.x()) / 2
				distance = (focal_length_px * object_size) / height
				self.object_distance = distance
				angle_deg = int(camera_min_angle + camera_angle_conv * ximage)
				self.object_angle = angle_deg
				# Draw the rectangle and the distance/angle label
				font = cv2.FONT_HERSHEY_SIMPLEX
				cv2.rectangle(cv_image, (int(qtTopLeft.x()), int(qtTopLeft.y())), (int(qtBottomRight.x()), int(qtBottomRight.y())), (0, 255, 0), 2)
				cv2.putText(cv_image, "("+str(round(distance, 2)) + "m," + str(round(angle_deg, 2)) + "deg" + ")", (int(qtTopLeft.x()), int(qtTopLeft.y())), font, 1, (255, 0, 255), 2)
				image_message = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
				print(distance)

				# Classify the object and publish the matching markers.
				# Fix: this was a series of independent `if` statements whose
				# final `else` belonged only to the image_dead test, so
				# "No object found" was printed for every object that was not
				# image_dead even when an earlier branch had matched. An
				# if/elif chain restores the intended "no match" fallback.
				if object_id == self.image_person:
					# Add a color detector with openCV
					state = self.detect_people_state()
					if(state == self.alive):
						print('Alive person found')
						self.createMarker(Shape.SPHERE, 0.0, 1.0, 0.0)
						self.createMeanMarker(Shape.SPHERE, 0.0, 1.0, 0.0, object_id)
					else:
						print('Injured person found')
						self.createMarker(Shape.SPHERE, 1.0, 0.0, 0.0)
						self.createMeanMarker(Shape.CUBE, 1.0, 0.0, 0.0, object_id)
					self.person_state = state
				elif object_id == self.image_toxic:
					print('Toxic area found')
					self.createMarker(Shape.SPHERE, 0.0, 0.0, 0.0)
					self.createMeanMarker(Shape.SPHERE, 0.0, 0.0, 0.0, object_id)
				elif object_id == self.image_warning:
					# NOTE(review): message typo -- probably "Warning area found"
					print('Warning are found')
					self.createMarker(Shape.CUBE, 1.0, 0.0, 1.0)
					self.createMeanMarker(Shape.CUBE, 1.0, 0.0, 0.0, object_id)
				elif object_id == self.image_fire:
					print('Fire found')
					self.createMarker(Shape.CYLINDER, 1.0, 0.5, 0.0)
					self.createMeanMarker(Shape.CYLINDER, 1.0, 0.5, 0.0, object_id)
				elif object_id == self.image_no_smoke:
					# NOTE(review): message looks copy-pasted; expected
					# something like "No smoking area found" -- confirm
					print('Radioactive area found')
					self.createMarker(Shape.CUBE, 0.5, 0.5, 0.5)
					self.createMeanMarker(Shape.CUBE, 0.5, 0.5, 0.5, object_id)
				elif object_id == self.image_radioactive:
					print('Radioactive area found')
					self.createMarker(Shape.CUBE, 1.0, 1.0, 0.0)
					self.createMeanMarker(Shape.CUBE, 1.0, 1.0, 0.0, object_id)
				elif object_id == self.image_dead:
					# NOTE(review): message looks copy-pasted; expected
					# something like "Dead person found" -- confirm
					print('Radioactive area found')
					self.createMarker(Shape.SPHERE, 0.0, 0.0, 0.0)
					self.createMeanMarker(Shape.SPHERE, 0.0, 0.0, 0.0, object_id)
				else:
					print("No object found")
			self.img.publish(image_message)
		else:
			print("Nothing detected")