def calculateAffinityMatrixAndDraw(self, bestImage, inliersDataBase, inliersWebCam, imgout):
        # The affinity matrix A
        A = cv2.estimateRigidTransform(inliersDataBase, inliersWebCam, fullAffine=True)
        if A is None:  # estimation can fail with too few or degenerate points
            return
        A = np.vstack((A, [0, 0, 1]))  # promote the 2x3 affine to a 3x3 homogeneous matrix

        # Calculate the points of the rectangle occupied by the recognized object
        a = np.array([0, 0, 1], np.float64)
        b = np.array([bestImage.shape[1], 0, 1], np.float64)
        c = np.array([bestImage.shape[1], bestImage.shape[0], 1], np.float64)
        d = np.array([0, bestImage.shape[0], 1], np.float64)
        centro = np.array([float(bestImage.shape[1]) / 2,
                           float(bestImage.shape[0]) / 2, 1], np.float64)  # (width/2, height/2)

        # Apply A to the model-space points to map them into real image points
        a = np.dot(A, a)
        b = np.dot(A, b)
        c = np.dot(A, c)
        d = np.dot(A, d)
        centro = np.dot(A, centro)

        # The points are dehomogenized
        areal = (int(a[0] / a[2]), int(a[1] / a[2]))
        breal = (int(b[0] / b[2]), int(b[1] / b[2]))
        creal = (int(c[0] / c[2]), int(c[1] / c[2]))
        dreal = (int(d[0] / d[2]), int(d[1] / d[2]))
        centroreal = (int(centro[0] / centro[2]), int(centro[1] / centro[2]))

        # Draw the polygon, with the image file name at the polygon's center
        points = np.array([areal, breal, creal, dreal], np.int32)
        cv2.polylines(imgout, np.int32([points]), 1, (255, 255, 255), thickness=2)
        utilscv.draw_str(imgout, centroreal, bestImage.nameFile.upper())
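
    # A hedged alternative (not part of the original class): on OpenCV 4 the
    # deprecated cv2.estimateRigidTransform used above is gone, and
    # cv2.estimateAffine2D is the closest replacement. The inlier arrays are
    # assumed to be Nx2 (or Nx1x2) point sets.
    def estimateAffineMatrix(self, inliersDataBase, inliersWebCam):
        src = np.asarray(inliersDataBase, dtype=np.float32).reshape(-1, 1, 2)
        dst = np.asarray(inliersWebCam, dtype=np.float32).reshape(-1, 1, 2)
        A, inlier_mask = cv2.estimateAffine2D(src, dst)  # A is 2x3, or None on failure
        if A is None:
            return None
        return np.vstack((A, [0, 0, 1]))  # promote to homogeneous 3x3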
    def p4_object_recog(self, frame):
        try:
            t1 = time.time()

            image_in = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            image_out = frame.copy()

            kp, desc = self.detector.detectAndCompute(image_in, None)

            selectedDataBase = self.dataBase[self.method_str]
            if len(selectedDataBase) > 0:
                # We perform the mutual matching
                imgsMatchingMutuos = self.find_matching_mutuos_optimum(self.method_str, desc, kp)

                minInliers = int(cv2.getTrackbarPos('inliers', 'Features'))
                projer = float(cv2.getTrackbarPos('projer', 'Features'))
                LOG.info("Line: %s :: minInliers: %s, projer: %s", print_frame(), minInliers, projer)

                # The best image is chosen by inlier count: the one with the
                # most inliers, provided it exceeds the minimum set by the
                # 'inliers' trackbar
                bestImage, inliersWebCam, inliersDataBase = self.calculate_best_image_by_num_inliers(self.method_str,
                                                                                                     projer, minInliers)

                if bestImage is not None:
                    # If we find a good image, we calculate the affinity matrix
                    # and paint the recognized object on the screen.
                    self.calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam, image_out)

            # Descriptor dimension of each feature (-1 when there are none):
            dim = -1
            if desc is not None and len(desc) > 0:
                dim = len(desc[0])
            # Draw the features and write informative text on the image;
            # features are only drawn if the slider says so
            if int(cv2.getTrackbarPos('drawKP', 'Features')) > 0:
                cv2.drawKeypoints(image_out, kp, image_out,
                                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

            t1 = 1000 * (time.time() - t1)

            utilscv.draw_str(image_out, (20, 20),
                             "Method {0}, {1} features found, desc. dim. = {2} ".
                             format(self.method_str, len(kp), dim))

            utilscv.draw_str(image_out, (20, 40), "Time (ms): {0}".format(str(t1)))

            # Show results and check keys:
            # self.display_image('Features', image_out, resize_max=640, wait_time=1)
            return image_out
        except Exception as e:
            LOG.info("* ** p4_object_recog Error: %s\n" % str(e))
            traceback.print_exc()
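
# A possible window/trackbar setup that p4_object_recog assumes (a sketch;
# only the trackbar and window names match the getTrackbarPos reads above,
# the initial values and ranges are illustrative guesses):
import cv2

cv2.namedWindow('Features')
cv2.createTrackbar('inliers', 'Features', 20, 100, lambda v: None)  # minimum inlier count
cv2.createTrackbar('projer', 'Features', 5, 20, lambda v: None)     # reprojection error (px)
cv2.createTrackbar('drawKP', 'Features', 1, 1, lambda v: None)      # toggle keypoint drawing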
Example 3
def calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam,
                                   imgout):
    # Compute the affinity matrix A
    A = cv2.estimateRigidTransform(inliersDataBase,
                                   inliersWebCam,
                                   fullAffine=True)
    if A is None:  # estimation can fail with too few or degenerate points
        return
    A = np.vstack((A, [0, 0, 1]))

    # Compute the corner points of the rectangle occupied by the recognized object
    a = np.array([0, 0, 1], np.float64)
    b = np.array([bestImage.shape[1], 0, 1], np.float64)
    c = np.array([bestImage.shape[1], bestImage.shape[0], 1], np.float64)
    d = np.array([0, bestImage.shape[0], 1], np.float64)
    centro = np.array(
        [float(bestImage.shape[1]) / 2,
         float(bestImage.shape[0]) / 2, 1], np.float64)  # (width/2, height/2)

    # Apply A to the model-space points to map them into real image points
    a = np.dot(A, a)
    b = np.dot(A, b)
    c = np.dot(A, c)
    d = np.dot(A, d)
    centro = np.dot(A, centro)

    # Dehomogenize the points
    areal = (int(a[0] / a[2]), int(a[1] / a[2]))
    breal = (int(b[0] / b[2]), int(b[1] / b[2]))
    creal = (int(c[0] / c[2]), int(c[1] / c[2]))
    dreal = (int(d[0] / d[2]), int(d[1] / d[2]))
    centroreal = (int(centro[0] / centro[2]), int(centro[1] / centro[2]))

    # Draw the polygon, with the image file name at the polygon's center
    points = np.array([areal, breal, creal, dreal], np.int32)
    cv2.polylines(imgout, np.int32([points]), 1, (255, 255, 255), thickness=2)
    utilscv.draw_str(imgout, centroreal, bestImage.nameFile.upper())
    # Show the detected object in a separate window
    cv2.imshow('ImageDetector', bestImage.imageBinary)
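
# A vectorized equivalent of the per-corner np.dot calls above (a sketch; it
# assumes the 3x3 matrix A built in the function). Because A is affine, the
# homogeneous w component is always 1 and the dehomogenizing division is a
# no-op; it would only matter for a full homography.
import cv2
import numpy as np

def project_corners(A, shape):
    h, w = shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(corners, A).reshape(-1, 2).astype(int)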
Example 4
    def callback(self, data):
        try:
            frame = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return  # no usable frame; without this, 'frame' would be unbound below

        if (not self.paused) and (frame is not None):
            # self.curFrame = 0
            # convert color to gray
            imgin = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # output image for display
            imgout = frame.copy()

            t1 = time.time()
            kp, desc = self.detector.detectAndCompute(imgin, None)
            selectedDataBase = self.dataBaseDictionary[self.methodstr]
            if len(selectedDataBase) > 0:
                # Mutual Matching
                imgsMatchingMutuos = orec.findMatchingMutuosOptimizado(selectedDataBase, desc, kp)
                minInliers = 20
                projer = 5.0
                # Calculating the best image according to the inliers numbers
                # The best image is the one with the greater inliers number,
                # Always taking into account that it should exceed the minimum indicated in
                # the trackbar as 'minInliers' 
                bestImage, inliersWebCam, inliersDataBase = orec.calculateBestImageByNumInliers(
                    selectedDataBase, projer, minInliers)
                rate = rospy.Rate(10)
                motion = Twist()
                if bestImage is not None:
                    # If we found a good image, compute the affinity matrix and mark the recognized object on screen
                    self.depthObj, center = orec.calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam, imgout)


                    self.msg_recog.variance = 1
                    self.msg_recog.temperature = self.depthObj
                    self.msg_recog.header.frame_id = bestImage.nameFile.upper().split('.')[0]
                    self.msg_recog.header.stamp = rospy.get_rostime()

                    xc = 160  # assumed horizontal image center in px
                    dx = xc - center[0]

                    motion.angular.z = dx / 500.0  # steer toward the object

                    if self.depthObj > 0.4:
                        motion.linear.x = 0.3   # approach when far away
                    if self.depthObj < 0.2:
                        motion.linear.x = -0.3  # back up when too close

                    self.motionPub.publish(motion)
                else:
                    motion.linear.x = 0
                    self.motionPub.publish(motion)
                    rate.sleep()
                    self.depthObj = 0
                    self.msg_recog.variance = 0
                    self.msg_recog.temperature = 0.00
                    self.msg_recog.header.frame_id = 'None'
                    self.msg_recog.header.stamp = rospy.get_rostime()


            t1 = 1000 * (time.time() - t1)  # Time in milliseconds
            # Descriptor dimension of each feature:
            if desc is not None:
                if len(desc) > 0:
                    dim = len(desc[0])
                else:
                    dim = -1
            
            imgout = cv2.resize(imgout, (0, 0), fx=1.45, fy=1.45)
            # Draw the features and write informative text on the image;
            # features are only drawn if the slider says so
            if self.drawKP > 0:
                cv2.drawKeypoints(imgout, kp, imgout, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            utilscv.draw_str(imgout, (20, 20),
                             "Method {0}, {1} features found ".
                             format(self.methodstr, len(kp)))
            utilscv.draw_str(imgout, (20, 40), "Time (ms): {0}".format(str(t1)))

            if self.depthObj == 0:
                utilscv.draw_str(imgout, (20, 60),
                                 "Depth to the object: None")
            else:
                utilscv.draw_str(imgout, (20, 60),
                                 "Depth to the object: {0} m".
                                 format(np.around(self.depthObj, decimals=3)))

            # Show results and check keys:
            cv2.imshow('Pepper Object Recognition', imgout)
            ch = cv2.waitKey(5) & 0xFF
            if ch == ord(' '):  # space bar toggles pause
                self.paused = not self.paused
        self.recog_pub.publish(self.msg_recog)    
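
# A hedged sketch (not from the source) of the node wiring the callback above
# assumes. Attribute names match the uses in the callback; the topic names,
# detector, and database initialisation are illustrative guesses. The
# recognition message is sensor_msgs/Temperature, inferred from the
# .variance / .temperature / .header fields set in the callback.
import rospy
import cv2
from cv_bridge import CvBridge
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image, Temperature

class PepperRecognizer(object):
    def __init__(self):
        self.bridge = CvBridge()
        self.detector = cv2.ORB_create()       # any detector with detectAndCompute
        self.methodstr = 'ORB'                 # key into the descriptor database
        self.dataBaseDictionary = {'ORB': []}  # filled elsewhere with model images
        self.paused = False
        self.drawKP = 1
        self.depthObj = 0
        self.msg_recog = Temperature()
        self.motionPub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.recog_pub = rospy.Publisher('/object_recog', Temperature, queue_size=1)
        rospy.Subscriber('/camera/image_raw', Image, self.callback)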
                # If we found a good image, compute the affinity matrix and paint the recognized object on screen
                orec.calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam, imgout)

        t1 = 1000 * (time.time() - t1)  # time in milliseconds
        # Get the descriptor dimension of each feature:
        if desc is not None:
            if len(desc) > 0:
                dim = len(desc[0])
            else:
                dim = -1
        # Draw the features and write informative text on the image;
        # features are only drawn if the slider says so
        if int(cv2.getTrackbarPos('drawKP', 'Features')) > 0:
            cv2.drawKeypoints(imgout, kp, imgout,
                              flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        utilscv.draw_str(imgout, (20, 20),
                         "Method {0}, {1} features found, desc. dim. = {2} ".
                         format(methodstr, len(kp), dim))
        utilscv.draw_str(imgout, (20, 40), "Time (ms): {0}".format(str(t1)))
        # Show results and check keys:
        cv2.imshow('Features', imgout)
        ch = cv2.waitKey(5) & 0xFF
        if ch == 27:  # escape quits
            break
        elif ch == ord(' '):  # space bar toggles pause
            paused = not paused
        elif ch == ord('.'):  # dot advances a single frame
            paused = True
            frame = videoinput.read()

    # Close window(s) and video source(s):
    videoinput.close()
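
# The mutual ("mutuos") matching used above can be expressed with OpenCV's
# built-in cross-check; a minimal sketch (orec.findMatchingMutuosOptimizado
# itself is not shown in the source, so this is only the idea): a match
# survives only if each descriptor is the other's nearest neighbour.
import cv2

def mutual_matches(descriptors_db, descriptors_cam, norm=cv2.NORM_L2):
    # use cv2.NORM_HAMMING instead for binary descriptors such as ORB
    matcher = cv2.BFMatcher(norm, crossCheck=True)  # keep only mutual best matches
    return matcher.match(descriptors_db, descriptors_cam)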
def calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam, imgout):
    # Compute the affinity matrix A
    A = cv2.estimateRigidTransform(inliersDataBase, inliersWebCam, fullAffine=True)
    if A is None:  # estimation can fail with too few or degenerate points
        return
    A = np.vstack((A, [0, 0, 1]))
    
    # Compute the corner points of the rectangle occupied by the recognized object
    a = np.array([0, 0, 1], np.float64)
    b = np.array([bestImage.shape[1], 0, 1], np.float64)
    c = np.array([bestImage.shape[1], bestImage.shape[0], 1], np.float64)
    d = np.array([0, bestImage.shape[0], 1], np.float64)
    centro = np.array([float(bestImage.shape[1]) / 2,
                       float(bestImage.shape[0]) / 2, 1], np.float64)  # (width/2, height/2)
       
    # Apply A to the model-space points to map them into real image points
    a = np.dot(A, a)
    b = np.dot(A, b)
    c = np.dot(A, c)
    d = np.dot(A, d)
    centro = np.dot(A, centro)
    
    # Dehomogenize the points
    areal = (int(a[0]/a[2]), int(a[1]/a[2]))
    breal = (int(b[0]/b[2]), int(b[1]/b[2]))
    creal = (int(c[0]/c[2]), int(c[1]/c[2]))
    dreal = (int(d[0]/d[2]), int(d[1]/d[2]))
    centroreal = (int(centro[0]/centro[2]), int(centro[1]/centro[2]))
    

    # Commented-out experiment: clamp the projected corners to the frame and
    # crop the detected region (kept for reference, never enabled):
    # height = imgout.shape[0]
    # width = imgout.shape[1]
    # if areal[0] < 0:
    #     areal = (0, areal[1])
    # elif areal[0] > width:
    #     areal = (width, areal[1])
    # if creal[0] < 0:
    #     creal = (0, creal[1])
    # elif creal[0] > width:
    #     creal = (width, creal[1])
    # if areal[1] < 0:
    #     areal = (areal[0], 0)
    # elif areal[1] > height:
    #     areal = (areal[0], height)
    # if creal[1] < 0:
    #     creal = (creal[0], 0)
    # elif creal[1] > height:
    #     creal = (creal[0], height)
    # print(areal, breal, creal, dreal)
    # if areal[1] >= creal[1] and areal[0] >= creal[0]:
    #     imgshow = imgout[creal[1]:areal[1], creal[0]:areal[0]]
    # if areal[1] <= creal[1] and areal[0] > creal[0]:
    #     imgshow = imgout[areal[1]:creal[1], creal[0]:areal[0]]
    # if areal[1] < creal[1] and areal[0] <= creal[0]:
    #     imgshow = imgout[areal[1]:creal[1], areal[0]:creal[0]]
    # if areal[1] > creal[1] and areal[0] < creal[0]:
    #     imgshow = imgout[creal[1]:areal[1], areal[0]:creal[0]]
    # cv2.imshow('Test', imgshow)
    # cv2.imwrite("modelos/CRYSIS" + str(areal[1]) + ".jpg", imgshow)

    # Draw the polygon, with the image file name at the polygon's center
    points = np.array([areal, breal, creal, dreal], np.int32)
    cv2.polylines(imgout, np.int32([points]), 1, (0, 255, 255), thickness=2)
    utilscv.draw_str(imgout, centroreal, bestImage.nameFile.upper())
    
    
    # Show the detected object in a separate window
    cv2.imshow('ImageDetector', bestImage.imageBinary)
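
# A small side note (my sketch, not the author's code): fullAffine=True
# estimates all 6 degrees of freedom. The translation sits in the last
# column of A, and an SVD of the 2x2 linear part separates the rotation
# from the two scale factors.
import numpy as np

def describe_affine(A):
    t = A[:2, 2]                                   # translation (tx, ty)
    U, s, Vt = np.linalg.svd(A[:2, :2])
    R = U.dot(Vt)                                  # closest rotation (up to reflection)
    angle = np.degrees(np.arctan2(R[1, 0], R[0, 0]))
    return t, angle, s                             # s: singular values = scale factors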
                # If we found a good image, compute the affinity matrix and paint the recognized object on screen
                orec.calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam, imgout)

        t1 = 1000 * (time.time() - t1)  # Time in milliseconds
        # Get dimension of descriptors for each feature:
        if desc is not None:
            if len(desc) > 0:
                dim = len(desc[0])
            else:
                dim = -1
        # Draw the features and write informative text on the image;
        # features are only drawn if the slider says so
        if int(cv2.getTrackbarPos("drawKP", "Features")) > 0:
            cv2.drawKeypoints(imgout, kp, imgout, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        utilscv.draw_str(
            imgout, (20, 20), "Method {0}, {1} features found, desc. dim. = {2} ".format(methodstr, len(kp), dim)
        )
        utilscv.draw_str(imgout, (20, 40), "Time (ms): {0}".format(str(t1)))
        # Show results and check keys:
        cv2.imshow("Features", imgout)
        ch = cv2.waitKey(5) & 0xFF
        if ch == 27:  # escape quits
            break
        elif ch == ord(" "):  # space bar toggles pause
            paused = not paused
        elif ch == ord("."):  # dot advances a single frame
            paused = True
            ret, frame = cap.read()

    # Close windows and video sources:
    cap.release()
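
# One plausible shape for calculateBestImageByNumInliers (a sketch under
# assumptions; the real helper is not shown in the source): fit a RANSAC
# model per candidate image, count the inlier mask, and keep the candidate
# whose count exceeds minInliers, using projer as the reprojection threshold.
import cv2
import numpy as np

def best_by_inliers(candidates, projer, minInliers):
    # candidates: iterable of (image, cam_points Nx2, db_points Nx2) -- assumed layout
    best = (None, None, None)
    best_count = minInliers
    for image, pts_cam, pts_db in candidates:
        if len(pts_cam) < 4:
            continue
        H, mask = cv2.findHomography(np.float32(pts_db), np.float32(pts_cam),
                                     cv2.RANSAC, projer)
        if mask is None:
            continue
        count = int(mask.sum())
        if count > best_count:
            best_count = count
            keep = mask.ravel().astype(bool)
            best = (image, np.float32(pts_cam)[keep], np.float32(pts_db)[keep])
    return best  # (bestImage, inliersWebCam, inliersDataBase)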
Example 8
def calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam,
                                   imgout):
    # Compute the affinity matrix A
    A = cv2.estimateRigidTransform(inliersDataBase,
                                   inliersWebCam,
                                   fullAffine=True)
    if A is None:  # estimation can fail with too few or degenerate points
        return
    A = np.vstack((A, [0, 0, 1]))

    # Compute the corner points of the rectangle occupied by the recognized object
    a = np.array([0, 0, 1], np.float64)
    b = np.array([bestImage.shape[1], 0, 1], np.float64)
    c = np.array([bestImage.shape[1], bestImage.shape[0], 1], np.float64)
    d = np.array([0, bestImage.shape[0], 1], np.float64)
    centro = np.array(
        [float(bestImage.shape[1]) / 2,
         float(bestImage.shape[0]) / 2, 1], np.float64)  # (width/2, height/2)

    # Apply A to the model-space points to map them into real image points
    a = np.dot(A, a)
    b = np.dot(A, b)
    c = np.dot(A, c)
    d = np.dot(A, d)
    centro = np.dot(A, centro)

    # Dehomogenize the points
    areal = (int(a[0] / a[2]), int(a[1] / a[2]))
    breal = (int(b[0] / b[2]), int(b[1] / b[2]))
    creal = (int(c[0] / c[2]), int(c[1] / c[2]))
    dreal = (int(d[0] / d[2]), int(d[1] / d[2]))
    centroreal = (int(centro[0] / centro[2]), int(centro[1] / centro[2]))

    # Commented-out experiment: clamp the projected corners to the frame and
    # crop the detected region (kept for reference, never enabled):
    # height = imgout.shape[0]
    # width = imgout.shape[1]
    # if areal[0] < 0:
    #     areal = (0, areal[1])
    # elif areal[0] > width:
    #     areal = (width, areal[1])
    # if creal[0] < 0:
    #     creal = (0, creal[1])
    # elif creal[0] > width:
    #     creal = (width, creal[1])
    # if areal[1] < 0:
    #     areal = (areal[0], 0)
    # elif areal[1] > height:
    #     areal = (areal[0], height)
    # if creal[1] < 0:
    #     creal = (creal[0], 0)
    # elif creal[1] > height:
    #     creal = (creal[0], height)
    # print(areal, breal, creal, dreal)
    # if areal[1] >= creal[1] and areal[0] >= creal[0]:
    #     imgshow = imgout[creal[1]:areal[1], creal[0]:areal[0]]
    # if areal[1] <= creal[1] and areal[0] > creal[0]:
    #     imgshow = imgout[areal[1]:creal[1], creal[0]:areal[0]]
    # if areal[1] < creal[1] and areal[0] <= creal[0]:
    #     imgshow = imgout[areal[1]:creal[1], areal[0]:creal[0]]
    # if areal[1] > creal[1] and areal[0] < creal[0]:
    #     imgshow = imgout[creal[1]:areal[1], areal[0]:creal[0]]
    # cv2.imshow('Test', imgshow)
    # cv2.imwrite("modelos/CRYSIS" + str(areal[1]) + ".jpg", imgshow)

    # Draw the polygon, with the image file name at the polygon's center
    points = np.array([areal, breal, creal, dreal], np.int32)
    cv2.polylines(imgout, np.int32([points]), 1, (0, 255, 255), thickness=2)
    utilscv.draw_str(imgout, centroreal, bestImage.nameFile.upper())

    # Show the detected object in a separate window
    cv2.imshow('ImageDetector', bestImage.imageBinary)
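
# The commented-out experiment above clamps two opposite corners to the
# frame and crops the detection. A compact working version of that idea
# (my reconstruction, not the author's code):
import numpy as np

def crop_detection(imgout, areal, creal):
    h, w = imgout.shape[:2]
    x0, x1 = sorted((int(np.clip(areal[0], 0, w)), int(np.clip(creal[0], 0, w))))
    y0, y1 = sorted((int(np.clip(areal[1], 0, h)), int(np.clip(creal[1], 0, h))))
    return imgout[y0:y1, x0:x1]  # may be empty if the corners coincide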