# Example #1
def get_markers_in_image(cv_image, crop=False):
    """Detect ArUco markers (DICT_4X4_250) in *cv_image*.

    Parameters
    ----------
    cv_image : numpy.ndarray
        BGR camera image to search for markers.
    crop : bool, optional
        If True, only the left half of the image is searched.

    Returns
    -------
    list
        One ``Marker(id, confidence, corners, rvec, tvec)`` per detected
        marker; empty list when nothing is detected.
    """
    markers = []

    if crop:
        height, width, channel = cv_image.shape
        # Integer division: a float slice index (width / 2 under Python 3)
        # raises TypeError when used to slice the ndarray.
        cv_image = cv_image[0:height, 0:width // 2]

    aruco_dict = cv2.aruco.getPredefinedDictionary(aruco.DICT_4X4_250)
    parameters = aruco.DetectorParameters_create()

    corners, ids, rejected_points = aruco.detectMarkers(cv_image,
                                                        aruco_dict,
                                                        parameters=parameters)
    cv_image = aruco.drawDetectedMarkers(cv_image,
                                         corners,
                                         borderColor=(0, 255, 0))

    marker_length = 0.2  # meter

    # Newer OpenCV versions return (rvecs, tvecs, objPoints); older ones
    # return only (rvecs, tvecs). A 3-way unpack of a 2-tuple raises
    # ValueError, so catch exactly that instead of a bare except.
    try:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, marker_length, zed_parameters.cameraMatrix,
            zed_parameters.distCoeffs)
    except ValueError:
        rvecs, tvecs = aruco.estimatePoseSingleMarkers(
            corners, marker_length, zed_parameters.cameraMatrix,
            zed_parameters.distCoeffs)

    # Identity comparison: rvecs may be a numpy array, for which `!= None`
    # would broadcast element-wise instead of testing for "no detections".
    if rvecs is not None:
        for i in range(len(rvecs)):
            marker = Marker(ids[i][0], 1.0, corners[i], rvecs[i], tvecs[i])
            markers.append(marker)

    return markers
# Example #2
def get_boundary_angle_min_distance(cv_image,
                                    crop=False,
                                    max_distance_boundary=2.0):
    '''
    Returns the average angle of the boundary next to the vehicle
    within the distance, defined by max_distance_boundary (in meter),
    plus the minimum marker distance seen and the list of all detected
    Markers: (averageAngle, min_distance, markers).
    averageAngle is None when no marker lies within the boundary distance.
    '''
    markers = []

    if crop:
        height, width, channel = cv_image.shape
        # Integer division: a float slice index raises TypeError in Python 3.
        cv_image = cv_image[0:height, 0:width // 2]

    aruco_dict = cv2.aruco.getPredefinedDictionary(aruco.DICT_4X4_250)
    parameters = aruco.DetectorParameters_create()

    corners, ids, rejected_points = aruco.detectMarkers(cv_image,
                                                        aruco_dict,
                                                        parameters=parameters)
    cv_image = aruco.drawDetectedMarkers(cv_image,
                                         corners,
                                         borderColor=(0, 255, 0))

    marker_length = 0.2  # meter

    averageAngle = None
    min_distance = 99.0  # sentinel "no marker seen yet" distance

    # Newer OpenCV returns (rvecs, tvecs, objPoints); older returns
    # (rvecs, tvecs) — the unpack mismatch raises ValueError.
    try:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, marker_length, zed_parameters.cameraMatrix,
            zed_parameters.distCoeffs)
    except ValueError:
        rvecs, tvecs = aruco.estimatePoseSingleMarkers(
            corners, marker_length, zed_parameters.cameraMatrix,
            zed_parameters.distCoeffs)

    # Identity test: rvecs may be a numpy array, where `!= None` broadcasts.
    if rvecs is not None:

        sum_sinuses = 0.0
        sum_cosinuses = 0.0
        distCounter = 0

        for i in range(len(rvecs)):

            rvec = rvecs[i]
            tvec = tvecs[i]

            R, _ = cv2.Rodrigues(rvec)
            # cameraRotationVector,_ = cv2.Rodrigues(cv2.transpose(R))
            # Maybe needed later
            cameraTranslationVector = np.dot(cv2.transpose(-R),
                                             cv2.transpose(tvec))

            angle = np.arctan2(cameraTranslationVector[2],
                               cameraTranslationVector[0])

            top_left_corner = tuple(corners[i][0][0].astype(int))
            bottom_left_corner = tuple(corners[i][0][3].astype(int))

            # Corner 0 is the top left corner and corner 3 the bottom left.
            # This can be used to detect flipped markers and change the
            # angle accordingly.
            if top_left_corner[1] > bottom_left_corner[1]:
                angle = np.pi - angle

            # Shift so the angle lies within reasonable values.
            angle = angle - np.pi / 2.0

            # Now get the distance, to weight according to distance.
            distance_marker = get_distance_of_line(0.2, top_left_corner,
                                                   bottom_left_corner,
                                                   zed_parameters.cameraMatrix,
                                                   zed_parameters.distCoeffs)

            # Averaging angles directly is wrong around the 0 / 2*pi wrap,
            # so accumulate sin/cos components instead.
            if distance_marker < max_distance_boundary:
                distCounter = distCounter + 1.0
                # Linear weight: 1 at distance 0, 0 at the boundary.
                distance_norm = (max_distance_boundary -
                                 distance_marker) / max_distance_boundary
                min_distance = min(min_distance, distance_marker)
                # Bug fix: the original multiplied the whole running sum by
                # distance_norm, rescaling earlier markers' contributions on
                # every iteration. Only the new term must be weighted.
                sum_sinuses = sum_sinuses + np.sin(angle) * distance_norm
                sum_cosinuses = sum_cosinuses + np.cos(angle) * distance_norm

            # confidence, corners_xy, angle_to_top_left
            marker = Marker(ids[i], 1.0, corners, angle, distance_marker)
            markers.append(marker)

        # With no marker inside the boundary both sums are plain floats, so
        # 0.0 / 0.0 raises ZeroDivisionError and averageAngle stays None.
        try:
            averageAngle = np.arctan(sum_sinuses / sum_cosinuses)[0]
        except ZeroDivisionError:
            pass

    return averageAngle, min_distance, markers
# Example #3
def get_boundary_angles_distances(cv_image, crop=False):
    '''
    Returns the average angle of the boundary next to the vehicle
    within the distance, defined by max_distance_boundary (in meter).

    NOTE: currently disabled — a change in the Marker code made this
    method unusable, so it unconditionally raises NotImplementedError.
    '''
    # In the original code the docstring sat *after* this raise, turning it
    # into an unreachable string literal; it is now a real docstring.
    raise NotImplementedError(
        "Change in the Marker code made this method unusable for now")

    # ------------------------------------------------------------------
    # Everything below is unreachable until the raise above is removed.
    # It is kept (with the same fixes as the sibling functions) so the
    # method can be revived once the Marker API settles.
    # ------------------------------------------------------------------
    markers = []

    if crop:
        height, width, channel = cv_image.shape
        # Integer division: a float slice index raises TypeError in Python 3.
        cv_image = cv_image[0:height, 0:width // 2]

    aruco_dict = cv2.aruco.getPredefinedDictionary(aruco.DICT_4X4_250)
    parameters = aruco.DetectorParameters_create()

    corners, ids, rejected_points = aruco.detectMarkers(cv_image,
                                                        aruco_dict,
                                                        parameters=parameters)
    cv_image = aruco.drawDetectedMarkers(cv_image,
                                         corners,
                                         borderColor=(0, 255, 0))

    marker_length = 0.2  # meter

    # Newer OpenCV returns (rvecs, tvecs, objPoints); older returns
    # (rvecs, tvecs) — the unpack mismatch raises ValueError.
    try:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, marker_length, zed_parameters.cameraMatrix,
            zed_parameters.distCoeffs)
    except ValueError:
        rvecs, tvecs = aruco.estimatePoseSingleMarkers(
            corners, marker_length, zed_parameters.cameraMatrix,
            zed_parameters.distCoeffs)

    # Identity test: rvecs may be a numpy array, where `!= None` broadcasts.
    if rvecs is not None:

        for i in range(len(rvecs)):

            rvec = rvecs[i]
            tvec = tvecs[i]

            R, _ = cv2.Rodrigues(rvec)
            # cameraRotationVector,_ = cv2.Rodrigues(cv2.transpose(R))
            # Maybe needed later
            cameraTranslationVector = np.dot(cv2.transpose(-R),
                                             cv2.transpose(tvec))
            angle = np.arctan2(cameraTranslationVector[2],
                               cameraTranslationVector[0])

            top_left_corner = tuple(corners[i][0][0].astype(int))
            bottom_left_corner = tuple(corners[i][0][3].astype(int))

            # Corner 0 is the top left corner and corner 3 the bottom left.
            # This can be used to detect flipped markers and change the
            # angle accordingly.
            if top_left_corner[1] > bottom_left_corner[1]:
                angle = np.pi - angle

            # Shift so the angle lies within reasonable values.
            angle = angle - np.pi / 2.0

            # Now get the distance.
            distance_marker = get_distance_of_line(0.2, top_left_corner,
                                                   bottom_left_corner,
                                                   zed_parameters.cameraMatrix,
                                                   zed_parameters.distCoeffs)

            # add information as Marker
            marker = Marker(ids[i], 1.0, corners, angle[0], distance_marker)
            markers.append(marker)

    return markers
    def mark_next_image(self, cv_image, ar_params, crop=False):
        """Detect markers in *cv_image*, draw them, and derive safe commands.

        Parameters
        ----------
        cv_image : numpy.ndarray
            BGR camera frame.
        ar_params :
            Parameters forwarded to ``get_safe_commands`` (live mode).
        crop : bool, optional
            If True, only the left half of the frame is processed.

        Returns
        -------
        tuple
            ``(gray, markers, safe_motor, safe_steer, evasion_needed)``
            where ``gray`` is the annotated frame and ``safe_motor`` /
            ``safe_steer`` are None unless an evasion was computed.
        """
        frame = cv_image

        if crop:
            height, width, channel = cv_image.shape
            # Integer division: a float slice index raises TypeError in
            # Python 3.
            frame = frame[0:height, 0:width // 2]

        aruco_dict = self.board.get_dictionary()
        parameters = aruco.DetectorParameters_create()

        res = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)

        corners = res[0]
        ids = res[1]
        gray = frame
        markers = []
        safety_distance = 0.2  # meter; closer than this triggers evasion
        evasion_needed = False
        safe_steer = None
        safe_motor = None

        if len(corners) > 0:
            gray = aruco.drawDetectedMarkers(frame, corners)

            # Quick fix to stay compatible with newer OpenCV versions:
            # they return (rvec, tvec, objPoints), older ones (rvec, tvec);
            # the unpack mismatch raises ValueError.
            try:
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners, 0.20, self.zed_parameters.cameraMatrix,
                    self.zed_parameters.distCoeffs)
            except ValueError:
                rvec, tvec = aruco.estimatePoseSingleMarkers(
                    corners, 0.20, self.zed_parameters.cameraMatrix,
                    self.zed_parameters.distCoeffs)

            critical_dist_angle_pairs = []

            for i in range(len(rvec)):

                # Two dictionaries with xy positions of the corners of one
                # marker, plus distance and angle to them.
                center_line_xy, center_line_dist_ang = \
                    self.get_marker_from_image(
                        gray, rvec[i][0], tvec[i][0],
                        self.zed_parameters.cameraMatrix,
                        self.zed_parameters.distCoeffs)

                # They are drawn onto the current image.
                self.drawPointAtSingleMarker(gray, center_line_xy,
                                             center_line_dist_ang)

                if center_line_dist_ang['distance'] < safety_distance:
                    critical_dist_angle_pairs.append(center_line_dist_ang)
                    evasion_needed = True

                # Finally they are filled in the marker data object.
                marker = Marker(ids[i], confidence=1.0,
                                center_line_xy=center_line_xy,
                                center_line_dist_ang=center_line_dist_ang)
                markers.append(marker)

            # Live mode (no bagfile): compute and overlay safe commands.
            if evasion_needed and self.bagfile_handler is None:
                safe_motor, safe_steer = self.get_safe_commands(
                    critical_dist_angle_pairs, ar_params)
                cv2.putText(gray,
                            str(np.round(safe_motor, 2)) + "," +
                            str(safe_steer),
                            (10, 300), cv2.FONT_HERSHEY_SIMPLEX, 3,
                            (0, 0, 255), 4)

            # Bagfile mode: additionally record the evasion data.
            # NOTE(review): this branch calls get_safe_commands WITHOUT
            # ar_params, unlike the live branch above — confirm whether
            # that is intentional.
            if evasion_needed and self.bagfile_handler is not None:
                safe_motor, safe_steer = self.get_safe_commands(
                    critical_dist_angle_pairs)
                self.bagfile_handler.evasion_data.append(
                    {'timestamp': self.bagfile_handler.timestamp,
                     'motor_command': safe_motor,
                     'steering_command': safe_steer})

                cv2.putText(gray,
                            str(np.round(safe_motor, 2)) + "," +
                            str(safe_steer),
                            (10, 300), cv2.FONT_HERSHEY_SIMPLEX, 3,
                            (0, 0, 255), 4)

        return gray, markers, safe_motor, safe_steer, evasion_needed