Example #1
0
File: Model.py  Project: ArcticSaru/SMAUG
    def read_data(self):
        """Load detector descriptions and measurement data from ``self.datadir``.

        Every JSON file in the data directory except ``data.json`` describes one
        detector and is stored in ``self.detectors`` keyed by its ID; ``data.json``
        holds the measurements and becomes ``self.data``.  Finally the row matching
        ``self.coneID`` is wrapped in a ``Cone`` and registered in ``self.cones``.
        """
        detector_files = os.listdir(self.datadir)
        detector_files.remove('data.json')  # data.json is the measurements, not a detector

        for filename in detector_files:
            frame = pd.read_json(op.join(self.datadir, filename))
            self.detectors[frame.at['ID', 'value']] = frame

        self.data = pd.read_json(op.join(self.datadir, 'data.json'))
        # The first underscore-separated token of an ID names its detector.
        self.data['Detector'] = self.data['ID'].str.split('_').apply(
            lambda parts: parts[0])

        # Organise data in cones: pick the single row for the configured cone ID.
        cone_row = self.data.loc[self.data['ID'] == self.coneID].squeeze()
        self.cones[cone_row['ID']] = Cone(self, cone_row['ID'], cone_row)
def find_velocity(agent, RVOi, apexes, v_pref, max_vel):
    """Pick a collision-free velocity for *agent* using velocity obstacles.

    Parameters
    ----------
    agent : object exposing ``p`` (position) and ``v`` (current velocity).
    RVOi : list of point lists, one convex obstacle hull per entry.
    apexes : list of velocity-obstacle apexes, parallel to ``RVOi``.
    v_pref : preferred velocity (2-vector).
    max_vel : maximum speed.

    Returns
    -------
    tuple
        ``(velocity, cones, sampled)`` — the chosen velocity (``v_pref`` if it
        is already collision-free, ``(0, 0)`` if no free sample exists), the
        velocity-obstacle cones built, and all candidate velocities examined.
    """
    # Assumes all agents and obstacles are convex.
    cones = []
    for apex, points in zip(apexes, RVOi):
        if not points:
            continue
        # Angles are measured relative to the ray towards the first hull point;
        # the two angular extremes bound the obstacle as seen from the apex.
        ref = [points[0][0] - apex[0], points[0][1] - apex[1]]
        min_angle, max_angle = math.pi, -math.pi
        min_point = max_point = points[0]
        for point in points:
            rel = [point[0] - apex[0], point[1] - apex[1]]
            angle_dif = angle(ref, rel)
            if angle_dif < min_angle:
                min_angle, min_point = angle_dif, point
            if angle_dif > max_angle:
                max_angle, max_point = angle_dif, point
        cones.append(Cone.Cone(apex, min_point, max_point))

    # Preferred velocity already collision-free: keep it.
    if not any(cone.contains(v_pref) for cone in cones):
        return v_pref, cones, []

    # Sample the velocity space: 20 speed rings, 121 rotation steps to each
    # side of v_pref (the +/- directions previously duplicated ~20 lines).
    potential_v_left = []
    potential_v_right = []
    all_v = []
    for i in range(20, 1, -1):
        speed = i * max_vel / 20  # loop-invariant per ring
        for j in range(121):
            for sign, bucket in ((1.0, potential_v_left), (-1.0, potential_v_right)):
                cand = rotate(v_pref, sign * j / 120 * math.pi)
                magnitude = math.sqrt(cand[0] ** 2 + cand[1] ** 2)
                if magnitude == 0:
                    # Zero v_pref cannot be rescaled (was a ZeroDivisionError).
                    continue
                cand = [cand[0] / magnitude * speed, cand[1] / magnitude * speed]
                if not any(cone.contains(cand) for cone in cones):
                    bucket.append(cand)
                all_v.append(cand)

    potential_v = potential_v_left + potential_v_right
    if not potential_v:
        return (0, 0), cones, all_v

    # Among the free samples prefer the one closest to v_pref that does not
    # require turning more than 90 degrees away from the current heading.
    new_v = potential_v[0]
    new_v_dist = dist(new_v, v_pref)
    for v in potential_v:
        v_dist = dist(v, v_pref)
        if v_dist < new_v_dist and abs(angle(agent.v, v)) < 0.5 * math.pi:
            new_v_dist, new_v = v_dist, v
    return new_v, cones, all_v
Example #3
0
def process(img, depth_img, K_RGB):
    """Detect red/green/blue cones in *img* and return them with robot-frame positions.

    Parameters:
        img: BGR image.
        depth_img: depth image aligned with img, indexed as [row, col].
        K_RGB: 3x3 camera intrinsic matrix.

    Returns:
        list of Cone.Cone(color, position_in_robot_frame, standing).
    """
    # The inverse intrinsic matrix is reused by every detection; invert once
    # instead of on each helper call.
    K_inv = np.linalg.inv(K_RGB)

    def distance_from_rgb(x, y, w, h, standing):
        """Estimate depth from the apparent angular size of the bounding box.

        Code from LAR laboratory.  Uses the box's long axis (width for a
        standing cone, height for a fallen one), whose physical half-size is
        assumed to be 0.025 m.
        """
        # Rays through the midpoints of the two opposite box edges.
        # Fixed: the box midpoint coordinate is y + h/2 (resp. x + w/2),
        # not (y + h)/2 — consistent with `mid` computed below.
        if standing:
            u1_homogeneous = np.array([x, y + h / 2, 1])
            u2_homogeneous = np.array([x + w, y + h / 2, 1])
        else:
            u1_homogeneous = np.array([x + w / 2, y, 1])
            u2_homogeneous = np.array([x + w / 2, y + h, 1])
        x1 = np.matmul(K_inv, u1_homogeneous)
        x2 = np.matmul(K_inv, u2_homogeneous)
        cos_alpha = np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))
        alpha = np.arccos(cos_alpha)
        return 0.025 / np.sin(alpha / 2)

    def camera_coord(x, y, z):
        """Back-project pixel (x, y) at depth z into camera coordinates."""
        u_mid_homogeneous = np.array([x, y, 1])
        return np.matmul(K_inv, u_mid_homogeneous * z)

    def convert_to_robot_coord(coordinates):
        """Transform a camera-frame point into the robot frame."""
        T = np.array([[0.0, 0.0, 1.0, -0.087],  # generated by quaternion_to_rot_matrix.py
                      [-1.0, 0.0, 0.0, 0.013],
                      [0.0, -1.0, 0.0, 0.287],
                      [0, 0, 0, 1]])
        coord = np.append(coordinates, [1])
        new_coord = np.matmul(T, coord)
        return Coordinates.Coordinates(new_coord[0], new_coord[1], new_coord[2])

    HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    color_min = [(0, 35, 25), (49, 30, 25), (107, 30, 25)]  # for R, G, B in format (h_min, s_min, v_min)
    color_max = [(15, 255, 255), (77, 255, 255), (135, 255, 255)]  # for R, G, B in format (h_max, s_max, v_max)
    colors = ("Red", "Green", "Blue")
    cones = []

    for i in range(3):
        color = colors[i]
        frame_threshold = cv2.inRange(HSV, color_min[i], color_max[i])
        contours, hierarchy = cv2.findContours(frame_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for cnt in contours:
            if cv2.contourArea(cnt) < 500:  # ignore tiny blobs (noise)
                continue
            x, y, w, h = cv2.boundingRect(cnt)
            approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
            if len(approx) > 4:  # not a square
                continue
            if float(h) / w > 8:  # standing cone
                standing = True
            elif float(w) / h > 8:  # cone on the floor
                standing = False
            else:  # invalid aspect ratio
                continue
            mid = (int(x + w / 2), int(y + h / 2))
            z = depth_img[mid[1], mid[0]]
            # NOTE(review): a numpy depth sample is normally never None —
            # missing depth is usually 0 or NaN; confirm the sentinel used here.
            if z is None:
                z = distance_from_rgb(x, y, w, h, standing)
            else:
                z += 0.025  # convert to distance from the middle of the bannsiter
            pos = camera_coord(mid[0], mid[1], z)  # position in camera coordinates
            pos = convert_to_robot_coord(pos)
            cones.append(Cone.Cone(color, pos, standing))
    return cones