Example #2
def load_config():
    with open('config.json') as json_file:
        data = json.load(json_file)
        Detector.set_threshold(float(data['threshold']))
        Recorder.set_observed_objects(set(data['observed_objects']))
        Recorder.set_record_length(int(data['record_length']))
        Recorder.set_button(int(data['button_pin']))
    # Instantiate the components; the constructors wire them together
    # (e.g. Recorder registers itself with Detector, see Example #9)
    Recorder()
    Camera()
    Detector()
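
The keys read by load_config imply a config.json of the following shape. The key names come straight from the snippet; the values are purely illustrative:

{
    "threshold": 0.5,
    "observed_objects": ["person", "cat"],
    "record_length": 10,
    "button_pin": 17
}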
Example #4
 def test_object_boxes(self):
     camera_raw = Camera('../data/videos/2019-06-01.avi', save=False, num_skip=0, draw=False)
     camera_labeled = Camera(None, save=False, window_name='labeled')
     weight_path = '../model/custom_tiny_yolov3.weights'
     network_config_path = '../cfg/custom-tiny.cfg'
     object_config_path = '../cfg/custom.data'
     detector = Detector(weight_path, network_config_path, object_config_path, auto_id=True)
     while True:
         image = camera_raw.get_image()
         if image is None:
             break
         image = camera_raw.rgb_to_bgr(image)
         object_list = detector.detect_objects(image)
         boxes = camera_raw.draw_boxes(image, object_list)
         camera_labeled.display(boxes)
     del camera_raw
     del camera_labeled
Example #5
def test_drive():
    images = glob.glob("bgr_data/2019-05-09_04-48-50/" + "*.jpg")
    image_num = len(images)
    print(image_num)
    hostname = socket.gethostname()
    run_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    folder_name = "set_" + hostname + "_" + run_time
    set_path = Constant.DATA_SET_PATH + folder_name
    os.makedirs(set_path)
    sys.stdout = Logger(set_path + "/log.txt", sys.stdout)
    de = Detector()
    d = Driver()
    # server = Server()
    # client = Client()
    # d.client = client
    is_upload = True
    # video_stream_thread = threading.Thread(target=server.get_video_stream)
    # video_stream_thread.setDaemon(True)
    # video_stream_thread.start()
    # tl_state_thread = threading.Thread(target=client.get_tl_state)
    # tl_state_thread.setDaemon(True)
    # tl_state_thread.start()
    objects_info_dict = {}
    start = time.time()
    # i = 0
    for image in images:
        print(image.split('/')[-1])
        objects_info, objects_num, image_array = de.detect(cv2.imread(image))
        d.objects_info = objects_info
        d.objects_num = objects_num
        d.image_array = image_array
        cmd = d.drive()
        # server.send_msg(cmd)
        print("commond sent to pi: ", cmd)
        # server.send_msg(cmd.encode(encoding="utf-8"))
        objects_info_dict[ObjInfoKey(image_array)] = objects_info
        print("* " * 50)
        cv2.waitKey(1)
    end = time.time()
    local_path = object_dict_to_csv(objects_info_dict, folder_name)
    print("local_path: ", local_path)
    print(end - start)
    Uploader("server_conf.conf", local_path,
             Constant.SERVER_DATA_PATH).upload()
    cv2.destroyAllWindows()
Example #6
from object_detector import Detector
import cv2

cap = cv2.VideoCapture('../data/videos/2019-06-02.MOV')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('../data/videos/2019-06-02-labeled-latest.avi', fourcc, 30.0, (1024, 768))
WEIGHT_PATH = '../model/custom_tiny_yolov3.weights'
NETWORK_CONFIG_PATH = '../cfg/custom-tiny.cfg'
OBJECT_CONFIG_PATH = '../cfg/custom.data'
detector = Detector(WEIGHT_PATH, NETWORK_CONFIG_PATH, OBJECT_CONFIG_PATH)

colors = {
    'thief': (255, 0, 0),
    'policeman1': (0, 255, 0),
    'policeman2': (0, 0, 255)
}
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
lineType = 2
window_name = 'test'
cv2.namedWindow(window_name)
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    else:
        image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    frame = cv2.resize(frame, (1024, 768))
    object_list = detector.detect_objects(image)
    print(object_list)
    if len(object_list) > 0:
        # Assumed completion -- the original snippet is truncated here.
        # The (name, x, y, w, h) layout of a detection is a guess, as is
        # drawing directly on the resized frame.
        for name, x, y, w, h in object_list:
            color = colors.get(name, (255, 255, 255))
            cv2.rectangle(frame, (int(x), int(y)),
                          (int(x + w), int(y + h)), color, 2)
            cv2.putText(frame, name, (int(x), int(y) - 10), font, fontScale,
                        color, lineType)
    out.write(frame)
    cv2.imshow(window_name, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Example #7
if len(sys.argv) > 1:
    filename = osp.join(root_path, sys.argv[1] + '.txt')
    if osp.isfile(filename):
        # Read the six HSV thresholds (lower/upper hue, saturation, value)
        with open(filename) as f:
            lh = int(f.readline())
            uh = int(f.readline())
            ls = int(f.readline())
            us = int(f.readline())
            lv = int(f.readline())
            uv = int(f.readline())

    if len(sys.argv) == 3:
        mode = sys.argv[2]
        if sys.argv[2] in ['ball', 'basket']:
            detector = Detector(filename, 'Detector', mode)

lower_hsv = np.array([lh, ls, lv])
upper_hsv = np.array([uh, us, uv])

# set up camera pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 60)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 60)
pipeline.start(config)
align_to = rs.stream.color
align = rs.align(align_to)

# creating a window for later use
cv2.namedWindow('result')
Example #8
class Game:
    """
    Each game is an instance of class Game.
    """
    def __init__(self, weight_path, network_config_path, object_config_path,
                 robots_config_path):
        """
        Load necessary modules and files.

        Parameters
        ----------
        weight_path: str
            file path of YOLOv3 network weights
        network_config_path: str
            file path of YOLOv3 network configurations
        object_config_path: str
            file path of object information in YOLOv3 network
        robots_config_path: str
            file path of robots' remote server configuration
        """

        # fix robot movement order
        self.orders = ['thief', 'policeman1']
        # self.orders = ['policeman1', 'policeman2']
        # self.orders = ['thief', 'policeman1', 'policeman2']

        # initialize internal states
        self.graph = None
        self.objects_on_graph = None
        self.instructions = None

        # set up escape nodes
        self.escape_nodes = set()

        # construct the camera system
        self.camera = Camera(1)

        # construct the object detector
        self.detector = Detector(weight_path, network_config_path,
                                 object_config_path)

        # load gaming board image and get centers' coordinates of triangles
        self.gaming_board_image = self.camera.get_image()
        self.centers = self.detector.detect_gaming_board(
            self.gaming_board_image)

        # construct the graph builder
        self.graph_builder = GraphBuilder(self.centers)

        # construct the strategy module
        self.strategy = Strategy(self.orders)

        # construct the control system
        self.controller = Controller(self.detector, self.camera.get_image,
                                     robots_config_path)

        # connect to each robot
        self.controller.connect()

    def is_over(self):
        """
        Check if the game is over.

        Returns
        -------
        game_over: bool
            True if the thief is at the escape point or the policemen have caught the thief, otherwise False.
        """
        game_over = False
        if self.instructions is None or self.objects_on_graph is None or self.graph is None:
            return game_over
        if 'thief' in self.objects_on_graph:
            if self.objects_on_graph['thief'] in self.escape_nodes:
                game_over = True
                logger.info('The thief wins!')
            else:
                for name, instruction in self.instructions.items():
                    if name != 'thief':
                        if self.instructions['thief'][1] == instruction[1]:
                            game_over = True
                            logger.info('The policemen win!')
        return game_over

    def shuffle(self):
        # NOTE: the random draw is generated but never used or returned
        random.randint(5, 10)

    def forward(self):
        """
        Push the game to the next step.
        """
        # get objects' coordinates and categories
        image = self.camera.get_image()
        object_list = self.detector.detect_objects(image)

        # build a graph based on object list
        graph, objects_on_graph = self.graph_builder.build(object_list)
        self.graph = graph
        self.objects_on_graph = objects_on_graph

        # generate instructions based on the graph
        instructions = self.strategy.get_next_steps_shortest_path(
            graph, objects_on_graph)
        self.instructions = instructions
        logger.info('instructions:{}'.format(instructions))

        if self.is_over():
            return
        # move robots until they reach the right positions
        while not self.controller.is_finished(self.centers, object_list,
                                              instructions):
            # obtain feedback from camera
            image = self.camera.get_image()
            object_list = self.detector.detect_objects(image)

            # calculate control signals
            control_signals = self.controller.calculate_control_signals(
                self.centers, object_list, instructions)

            # cut extra signals
            real_signals = []
            for name in self.orders:
                for signal in control_signals:
                    if signal['name'] == name:
                        # if True:
                        real_signals.append(signal)
                if len(real_signals) > 0:
                    break

            # update internal states
            self.controller.update_state(object_list)

            # move robots
            self.controller.move_robots(real_signals)

            # obtain feedback from camera
            image = self.camera.get_image()
            object_list = self.detector.detect_objects(image)

            # update internal states
            self.controller.update_state(object_list)

    def get_report(self):
        """
        Generate a game report (JSON, XML, or plain text).

        Returns
        -------
        game_report: object or str
            a detailed record of the game
        """
        game_report = None
        return game_report
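
A driver for the class is not part of this example. Given the methods above, a typical game loop might look like the following sketch (the file paths are placeholders):

game = Game('model/yolov3.weights', 'cfg/yolov3.cfg', 'cfg/objects.data',
            'cfg/robots.json')
while not game.is_over():
    game.forward()
report = game.get_report()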
Example #9
 def __init__(self):
     Detector.register_observer(self)
     if Recorder.thread is None:
         # Thread.start() returns None, so create and store the Thread
         # object first, then start it
         Recorder.thread = threading.Thread(target=self.listen_button_event)
         Recorder.thread.start()
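
The listener itself is not shown. A minimal sketch of what listen_button_event might do, assuming Recorder keeps the pin from set_button() (Example #2) in a class attribute called button and uses RPi.GPIO -- both assumptions:

 def listen_button_event(self):
     import RPi.GPIO as GPIO
     GPIO.setmode(GPIO.BCM)
     GPIO.setup(Recorder.button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
     while True:
         # Block until the button is pressed, then trigger a recording
         GPIO.wait_for_edge(Recorder.button, GPIO.FALLING)
         self.start_recording()  # hypothetical handler name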
Example #10
class DroneVision:

    EAST_ENTRANCE_MARKER_ID = 39
    NORTH_ENTRANCE_MARKER_ID = 41
    LANDING_PAD_MARKER_ID = 40
    GROUND_ROBOT_MARKER_ID = 1
    RED_VEHICLE_MARKER_ID = 10
    ALL_MARKER_IDS = [
        GROUND_ROBOT_MARKER_ID, EAST_ENTRANCE_MARKER_ID,
        NORTH_ENTRANCE_MARKER_ID, LANDING_PAD_MARKER_ID, RED_VEHICLE_MARKER_ID
    ]

    def __init__(self,
                 drone,
                 vfov=45,
                 hfov=80,
                 expectedDistance=8.0,
                 northToSouth=8.0,
                 eastToWest=8.0):
        self.drone = drone
        self.vertical_fov = vfov
        self.horizontal_fov = hfov
        self.expectedDistance = expectedDistance
        self.northToSouth = northToSouth
        self.eastToWest = eastToWest
        self.maxDistance = max([northToSouth, eastToWest])
        self.detector = Detector(drone, vfov, hfov)
        self.frameTime = 0
        self.width = 0
        self.height = 0
        self.pi = np.radians(180)
        # Ground robot detection parameters
        self.groundRobotVisible = False
        self.groundRobotOrientation = None
        self.groundRobotDistance = None
        self.groundRobotAngle = None
        self.groundFramePosition = None
        self.groundFrameDistance = None
        # East gate
        self.eastGateVisible = False
        self.eastGateOrientation = None
        self.eastGateDistance = None
        self.eastGateAngle = None
        self.eastFramePosition = None
        self.eastFrameDistance = None
        # North gate
        self.northGateVisible = False
        self.northGateOrientation = None
        self.northGateDistance = None
        self.northGateAngle = None
        self.northFramePosition = None
        self.northFrameDistance = None
        # Landing pad
        self.landingPadVisible = False
        self.landingPadOrientation = None
        self.landingPadDistance = None
        self.landingPadAngle = None
        self.landingFramePosition = None
        self.landingFrameDistance = None
        # Bear
        self.bearVisible = False
        self.bearDistance = None
        self.bearAngle = None
        self.bearFramePosition = None
        self.bearFrameDistance = None
        # New frame flag
        self.newFrameProcessed = False
        self.processLine = True
        self.processMarker = False
        self.processBear = False

    def angle_between(self, a, b, c):
        ang = math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(
            a[1] - b[1], a[0] - b[0])

        # Normalize into (-pi, pi]; the raw atan2 difference lies in (-2*pi, 2*pi)
        if ang > self.pi:
            ang = ang - (2 * self.pi)
        elif ang < -1 * self.pi:
            ang = (2 * self.pi) + ang

        return ang

    def calculateFrontalDistance(self, origImg, frameTime, Display=True):

        height, width, _ = origImg.shape

        self.height = height
        self.width = width
        theta = self.drone.camera_tilt
        zeta = np.degrees(self.drone.pitch)

        self.newFrameProcessed = True

        vehicle_found, vehicle_bounding_box = (False, None)
        # On every frame, reset the visibility flags, then recompute them
        self.groundRobotVisible = False
        self.northGateVisible = False
        self.eastGateVisible = False
        self.landingPadVisible = False
        self.bearVisible = False
        droneCentre = np.array([width / 2, height])

        if self.processMarker or self.processBear:
            self.detector.setImage(origImg, frameTime)

        if self.processBear:
            # Find MR York
            bear_found, bear_bounding_box = self.detector.findMrYork(False)
            if bear_found and bear_bounding_box is not None:
                self.bearVisible = True
                self.bearFramePosition = (
                    (bear_bounding_box[0] + bear_bounding_box[2]) / 2,
                    (bear_bounding_box[1] + bear_bounding_box[3]) / 2)
                mPhi = (self.vertical_fov / 2) - ((
                    (height - self.bearFramePosition[1]) / height) *
                                                  self.vertical_fov)
                mAngle = np.radians(90 - (mPhi - theta - zeta))
                self.bearDistance = self.drone.altitude * np.tan(mAngle)
                v2 = np.array(droneCentre) - np.array(self.bearFramePosition)
                self.bearFrameDistance = np.linalg.norm(v2)
                self.bearAngle = (
                    float((width / 2) - self.bearFramePosition[0]) /
                    float(width / 2)) * (float(self.horizontal_fov) / 2)

        if self.processMarker:

            corners, ids = self.detector.getMarkers()
            # initialize frame position to none
            siteFramePosition = None

            # Marker identification
            if ids is not None:
                for i in range(len(ids)):
                    if ids[i][0] not in self.ALL_MARKER_IDS:
                        continue
                    markerCentre = (corners[i][0][:, 0].mean(),
                                    corners[i][0][:, 1].mean())
                    # cv2.circle needs integer pixel coordinates
                    cv2.circle(origImg, (int(markerCentre[0]),
                                         int(markerCentre[1])), 3,
                               (0, 255, 255), -1)
                    markerFront = (corners[i][0][0:2, 0].mean(),
                                   corners[i][0][0:2, 1].mean())
                    cv2.circle(origImg, (int(markerFront[0]),
                                         int(markerFront[1])), 3,
                               (255, 255, 0), -1)
                    markerAngleDeg = (float((width / 2) - markerCentre[0]) /
                                      float(width / 2)) * (
                                          float(self.horizontal_fov) / 2)

                    # v1 = np.array(markerFront) - np.array(markerCentre)
                    v2 = np.array(droneCentre) - np.array(markerCentre)

                    markerOrientation = self.angle_between(
                        markerFront, markerCentre, droneCentre)
                    mPhi = (self.vertical_fov / 2) - ((
                        (height - markerCentre[1]) / height) *
                                                      self.vertical_fov)
                    mAngle = np.radians(90 - (mPhi - theta - zeta))
                    markerDistance = self.drone.altitude * np.tan(mAngle)
                    markerFrameDistance = np.linalg.norm(v2)

                    if ids[i][0] == self.RED_VEHICLE_MARKER_ID:
                        vehicle_found = True
                        vehicle_bounding_box = [
                            corners[i][0][0][0], corners[i][0][0][1],
                            corners[i][0][0][0] + 40, corners[i][0][0][1] + 40
                        ]
                        siteFramePosition = markerCentre
                    elif ids[i][0] == self.GROUND_ROBOT_MARKER_ID:
                        self.groundRobotVisible = True
                        self.groundRobotAngle = self.drone.yaw - np.radians(
                            markerAngleDeg)
                        self.groundRobotDistance = markerDistance
                        self.groundRobotOrientation = markerOrientation
                        self.groundFramePosition = markerCentre
                        self.groundFrameDistance = markerFrameDistance
                    elif ids[i][0] == self.EAST_ENTRANCE_MARKER_ID:
                        self.eastGateVisible = True
                        self.eastGateAngle = self.drone.yaw - np.radians(
                            markerAngleDeg)
                        self.eastGateDistance = markerDistance
                        self.eastGateOrientation = markerOrientation
                        self.eastFramePosition = markerCentre
                        self.eastFrameDistance = markerFrameDistance
                    elif ids[i][0] == self.NORTH_ENTRANCE_MARKER_ID:
                        self.northGateVisible = True
                        self.northGateAngle = self.drone.yaw - np.radians(
                            markerAngleDeg)
                        self.northGateDistance = markerDistance
                        self.northGateOrientation = markerOrientation
                        self.northFramePosition = markerCentre
                        self.northFrameDistance = markerFrameDistance
                    elif ids[i][0] == self.LANDING_PAD_MARKER_ID:
                        self.landingPadVisible = True
                        self.landingPadAngle = self.drone.yaw - np.radians(
                            markerAngleDeg)
                        self.landingPadDistance = markerDistance
                        self.landingPadOrientation = markerOrientation
                        self.landingFramePosition = markerCentre
                        self.landingFrameDistance = markerFrameDistance

            site_found = False
            accident_site_angle = None

            if vehicle_bounding_box is not None:
                site_found = vehicle_found
                accident_site_position = (vehicle_bounding_box[0] +
                                          vehicle_bounding_box[2]) / 2
                accident_site_degree = (
                    float((width / 2) - accident_site_position) /
                    float(width / 2)) * (float(self.horizontal_fov) / 2)
                accident_site_angle = self.drone.yaw - np.radians(
                    accident_site_degree)

                # Site distance
                yvals = np.array([float(vehicle_bounding_box[1])])
                phi = (self.vertical_fov / 2) - ((
                    (height - yvals) / height) * self.vertical_fov)
                angle = np.radians(90 - (phi - theta - zeta))
                distance = self.drone.altitude * np.tan(angle)
                self.drone.siteDistance = np.average(distance) * 0.6

            self.drone.vehicleFound = site_found
            self.drone.siteAngle = accident_site_angle
            self.drone.siteFramePosition = siteFramePosition
        # End of marker and site processing

        if self.processLine:
            # Navigate
            guideLine, guideTheta = self.findFrontGuide(
                origImg, frameTime, False)

            # Always update the guide line here
            self.drone.guideLine = guideLine

            if guideLine is not None:
                # Calculate distance to front guide
                yvals = np.array(
                    [float(guideLine[0][1]),
                     float(guideLine[1][1])])
                phi = (self.vertical_fov / 2) - ((
                    (height - yvals) / height) * self.vertical_fov)
                angle = np.radians(90 - (phi - theta - zeta))
                distance = self.drone.altitude * np.tan(angle)
                averageDistance = np.average(distance)
                if averageDistance < self.maxDistance + 0.5:  # Ignore if the distance is out of bound
                    self.drone.guideLine = guideLine
                    self.drone.guideTheta = guideTheta
                    self.drone.guideAngularError = np.radians(90) - guideTheta
                    self.drone.goodGuide = (self.expectedDistance - 1 <=
                                            averageDistance <=
                                            self.expectedDistance + 1)
                    self.drone.setDistance(averageDistance)

                # Evaluate if guide is good
                # A good guide gives rough estimation with 20 cm error
                # How do we determine if the guide is a good guide

                if self.frameTime > 0 and self.drone.state != self.drone.FLIGHT_STATE_NOT_FLYING:
                    # Dead-reckon: shrink the expected distance by the ground
                    # covered since the previous frame
                    self.expectedDistance = self.expectedDistance - (
                        self.drone.speedX * (frameTime - self.frameTime))
                # Remember this frame's timestamp for the next update
                self.frameTime = frameTime

                if Display:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    line_color = (0, 255, 0)
                    if not self.drone.goodGuide:
                        line_color = (0, 0, 255)
                    else:
                        if averageDistance < self.expectedDistance:
                            self.expectedDistance = averageDistance
                    cv2.line(origImg, guideLine[0], guideLine[1], line_color,
                             2)
                    cv2.putText(
                        origImg, '%.2f : %.2f - %.2f : %.2f - %.2f' %
                        (distance[0], distance[1], averageDistance,
                         self.expectedDistance, self.drone.speedX), (10, 50),
                        font, 1, (255, 255, 255), 2, cv2.LINE_AA)

        if Display:
            cv2.imshow('Distance', origImg)

    def calculateIntersects(self, rho, theta, width, height):
        a = np.cos(theta)
        b = np.sin(theta)

        points = []
        intersects = [None, None, None, None]

        if b != 0:
            # Check x = 0 crossing
            x1 = 0
            y1 = int(rho / b)
            if y1 >= 0 and y1 <= height:
                points.append((x1, y1))
                intersects[0] = 1

        if a != 0:
            # Check y = 0 crossing
            x1 = int(rho / a)
            y1 = 0
            if x1 >= 0 and x1 <= width:
                points.append((x1, y1))
                intersects[1] = 1

        if b != 0:
            # Check x = width
            x1 = width
            y1 = int((rho - a * width) / b)
            if y1 >= 0 and y1 <= height:
                points.append((x1, y1))
                intersects[2] = 1

        if a != 0:
            # Check y = height
            x1 = int((rho - (b * height)) / a)
            y1 = height
            if x1 >= 0 and x1 <= width:
                points.append((x1, y1))
                intersects[3] = 1

        return points, intersects

    def findFrontGuide(self, origImg, frameTime, Display=True):
        # start = time.time()

        im = np.float32(origImg) / 255.0

        # Calculate gradient
        gx = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=1)
        gy = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=1)

        mag, _ = cv2.cartToPolar(gx, gy, angleInDegrees=True)

        _, thresh = cv2.threshold(cv2.cvtColor(mag * 255, cv2.COLOR_BGR2GRAY),
                                  10, 255, cv2.THRESH_BINARY)

        edges = cv2.Canny(np.uint8(thresh), 100, 200, apertureSize=3)

        lines = cv2.HoughLines(edges, 1, np.pi / 180, 210)

        height, width, _ = origImg.shape

        guideLine = None
        guideTheta = None

        halfPi = math.pi / 2.0

        if lines is not None:
            # print("Lines found: ", lines.shape)
            for line in lines:
                for rho, theta in line:

                    if theta < halfPi - 0.2 or theta > halfPi + 0.2:
                        break

                    points, _ = self.calculateIntersects(
                        rho, theta, width, height)

                    if guideLine is None:
                        guideLine = points
                        guideTheta = theta
                    else:
                        if (points[0][1] + points[1][1]
                            ) > guideLine[0][1] + guideLine[1][1]:
                            guideLine = points
                            guideTheta = theta

                    #cv2.line(origImg, points[0], points[1], (0,0,255), 2)
        # No line detected: guideLine and guideTheta remain None

        if guideLine is not None and Display:
            cv2.line(origImg, guideLine[0], guideLine[1], (0, 255, 0), 2)

        if Display:
            cv2.imshow('Front Guide Line', origImg)
            cv2.imshow('Edges', edges)
            cv2.imshow('Magnitude', mag)

        # end = time.time()

        # print(end - start)

        return guideLine, guideTheta
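
Both calculateFrontalDistance and findFrontGuide estimate ground distance from camera geometry: a pixel row maps to a vertical angle phi inside the field of view, corrected by camera tilt theta and pitch zeta, and the distance follows as altitude * tan(radians(90 - (phi - theta - zeta))). A standalone check of that formula (all values illustrative, not from the snippet):

import numpy as np

altitude = 2.0          # metres above ground
vertical_fov = 45.0     # degrees, as in the class default
height = 720            # image height in pixels
y_pixel = 600           # pixel row of the observed feature
theta, zeta = -10.0, 2.0  # camera tilt and pitch, degrees

# phi: vertical angle of the pixel relative to the optical axis
phi = (vertical_fov / 2) - (((height - y_pixel) / height) * vertical_fov)
# angle between the vertical and the ray through the pixel
angle = np.radians(90 - (phi - theta - zeta))
distance = altitude * np.tan(angle)
print(round(float(distance), 2))  # about 4.71 m for these values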
Example #11
def start_training(args):
    model_details = ModelDetails(args)
    detector = Detector(model_details)
    detector.load_data()
    detector.load_model()
    for epoch in range(detector.start_epoch, detector.start_epoch + args.epochs):
        try:
            detector.train(epoch)
            detector.test(epoch)
        except KeyboardInterrupt:
            # Run a final evaluation before bailing out
            detector.test(epoch)
            break
        detector.load_data()  # reload data between epochs
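
A minimal way to invoke this, assuming ModelDetails can be built from the parsed arguments (only --epochs is actually read by the loop above; any other flags are project-specific):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=10)
args = parser.parse_args()
start_training(args)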
Example #12
def object_detection():
    return Response(generate_frames(Detector()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
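
The generate_frames helper is not shown. A minimal sketch of the MJPEG generator the mimetype implies, assuming the detector exposes a method returning annotated BGR frames (get_frame() is a hypothetical name):

import cv2

def generate_frames(detector):
    while True:
        frame = detector.get_frame()  # hypothetical API
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        # Each part is delimited by the boundary declared in the mimetype
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')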
Example #13
def main():

    # ----------------------------------------- #
    # INITIALIZE DETECTOR
    # ----------------------------------------- #
    detector = Detector(path_to_ckpt=DETECTOR_GRAPH_PATH)

    # ----------------------------------------- #
    # INITIALIZE TRACKER
    # ----------------------------------------- #
    mot_tracker = Sort(max_age=DEFAULT_MAX_AGE,
                       min_hits=DEFAULT_MIN_HITS,
                       use_time_since_update=DEFAULT_USE_TIME_SINCE_UPDATE,
                       iou_threshold=DEFAULT_IOU_THRESHOLD,
                       tracker_type=TRACKER_TYPE)

    # ----------------------------------------- #
    # INITIALIZE STREAM
    # ----------------------------------------- #
    cap = cv2.VideoCapture(INPUT_VIDEO_PATH)
    out = None

    # ----------------------------------------- #
    frame_num = 0

    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            break

        frame_time_start = time.time()

        dets = []
        datas = []

        # --- GET DETECTIONS --- #
        boxes, scores, classes = detector.predict(image=image)
        for box, score, clas in zip(boxes, scores, classes):
            dets.append(box)
            datas.append([score, clas])

        # --- GET PREDICTIONS --- #
        tracks, tracks_ids = mot_tracker.update_and_get_tracks(dets, image)

        # --- SHOW IMAGES AND BOXES --- #
        if DISPLAY:
            if SHOW_BBOXES:

                for det, data in zip(dets, datas):
                    xmin, ymin, xmax, ymax = [int(i) for i in det]

                    # Display boxes.
                    cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 255, 255), 5)

                for id, track in zip(tracks_ids, tracks):
                    xmin, ymin, xmax, ymax = [int(i) for i in track]

                    cv2.rectangle(image, pt1=(xmin, ymin), pt2=(xmax, ymax), color=TRACKER_COLORS[id], thickness=2)

            cv2.imshow('MultiTracker', image)
            cv2.waitKey()

        # --- Write to disk --- #
        if WRITE:
            if out is None:
                out_file_name = INPUT_VIDEO_PATH.rsplit('.', 1)[0] + '_out.mp4'
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                out = cv2.VideoWriter(out_file_name, fourcc, 30.0, (image.shape[1], image.shape[0]))

            out.write(image)

        # --- Print Stats ---
        print('FRAME NUMBER: %d. FPS: %f' % (frame_num, 1 / (time.time() - frame_time_start)))

        frame_num += 1

    cap.release()
    if out is not None:
        out.release()
            print("Saved images: Session {}, Image {}".format(
                self.session_idx, self.image_idx))
            self.image_idx += 1


if __name__ == '__main__':
    try:
        cam_proc = RealsenseProcessing()
        cam_proc.run()
        rate = rospy.Rate(RATE)
        i = 0
        while not rospy.is_shutdown():
            cam_proc.get_frame()

            ball_detector = Detector(
                osp.join(COLOR_CONFIG_PATH, "ball_green.txt"), "BallDetector",
                "ball")
            ball_res, cx, cy, contour_area, w, h = ball_detector.detect(
                cam_proc.regular_image, cam_proc.hsv)
            cam_proc.pub_ball.publish(Point(cx, cy, 0))

            basket_detector = Detector(
                osp.join(COLOR_CONFIG_PATH,
                         "basket_{}.txt".format(BASKET_COLOR)),
                "BasketDetector", "basket")
            basket_res, basket_cx, basket_cy, basket_contour_area, basket_w, basket_h = basket_detector.detect(
                cam_proc.regular_image, cam_proc.hsv)
            cam_proc.pub_basket.publish(
                Point(basket_cx, int(round(basket_cy + basket_h / 2)), 0))
            # print("Lower border: " + str(int(round(basket_cy + basket_h/2))))