Example #1
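# Imports this snippet relies on (omitted from the listing); photo_path and
# photo_file are module-level constants assumed to be defined elsewhere.
import time

import cv2

import detectors
import input_output
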
def main():
    img_source = input_output.Source(photo_path + photo_file)
    detector = detectors.Detector(
        '/Users/elijah/Dropbox/Programming/detectors/configs/closest_obstacle.json'
    )

    while (True):
        frame_ = img_source.get_frame()

        #frame = cv2.cvtColor (frame_, cv2.COLOR_RGB2BGR)
        frame = frame_

        cv2.waitKey(1)

        (obstacle_pixels, labels), _ = detector.detect(frame,
                                                       "obstacle detector")

        #draw obstacles on the frame
        result = frame.copy()

        #os.system ("clear")
        #print (obstacle_pixels)

        #for i in range (len (obstacle_pixels)):
        #    x = i
        #    y = obstacle_pixels [i]

        #    type = labels [i]

        #    result = cv2.circle (result, (x, y), 5, (12 + type * 150, 250 - type * 120, 190 + type * 110), thickness = -1)

        stages = detector.get_stages_picts("obstacle detector")

        for i in range(len(stages)):
            cv2.imshow(str(i), stages[i])

        #cv2.imshow ("frame", result)

        time.sleep(0.02)

        keyb = cv2.waitKey(1) & 0xFF

        if (keyb == ord('q')):
            break

    cv2.destroyAllWindows()
Example #2
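    # Method excerpt from a larger class (the enclosing class statement is not part
    # of this listing); it assumes 'import detectors' and 'import trackers' at module level.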
    def __init__(self, stream, writer, settings):
        # 'writer' is used below but was missing from the original signature;
        # it is assumed here to be passed in alongside the capture stream.

        self.__settings = settings.getSettings()
        self.__videocap = stream
        self.__videowriter = writer

        self.__detectors = {'original':detectors.Detector(self.__settings),
                            'colour': detectors.ColourDetector(self.__settings),
                            'colourdiff': detectors.ColourDiffDetector(self.__settings),
                            'difference':detectors.DiffDetector(self.__settings),
                            'background':detectors.BackgroundSubDetector(self.__settings),
                            'watershed': detectors.WatershedDetector(self.__settings),
                            }

        self.__trackers = {'csrt': trackers.CSRTTracker(self.__settings),
                            }
        
        self.__detector = self.__detectors['original']

        self.__isrunning = True
        self.__istracking = False
        self.__isrecording = False
Example #3
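# Imports assumed by this module-level snippet (omitted from the listing);
# Source is presumably input_output.Source, as used in Example #1.
import cv2

import detectors
from input_output import Source
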
#frame = cv2.cvtColor(frame, cv2.COLOR_YCrCb2RGB)
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

cv2.namedWindow('Colorbars')

#source = Source ("../images/2019_08_11_08h00m33s/00014.png")
#source = Source ("../images/00014.png")
#source = Source ("../images/obst_bottom.png")
source = Source("1")

#detector = detectors.Detector ('../configs/multiple_objects1.json')
#detector = detectors.Detector ('../configs/closest_obstacle.json')

low_th = (140, 70, 40)
high_th = (220, 130, 100)
detector = detectors.Detector()

detector.add_filter(detectors.colorspace_to_colorspace("RGB", "HSV"), "a",
                    "colorspace")
detector.add_filter(detectors.inrange(low_th, high_th), "a", "inrange")

#detector.add_filter (detectors.filter_connected_components (10), "a", "filter")

detector.add_filter(detectors.leave_max_area_cc(), "a", "max cc extraction")
detector.add_filter(detectors.bottom_cc_point(), "a",
                    "bottom point extraction")

#detector.add_filter (detectors.bottom_bbox_point (), "a", "desired point extraction")

cv2.createTrackbar("l1", "Colorbars", 0, 255, nothing)
cv2.createTrackbar("h1", "Colorbars", 255, 255, nothing)
Example #4
def main():
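    # Assumed context (not shown in this listing): imports of math, os, time, cv2,
    # detectors, tracker and input_output.Source, plus the photo_path, photo_file
    # and obstacle_file module-level constants.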
    detector = detectors.Detector(
        '/Users/elijah/Dropbox/Programming/detectors/configs/object_tracking.json'
    )

    img_source = Source(photo_path + photo_file, "", True)
    obstacle_source = Source(photo_path + obstacle_file)

    target_tracker = tracker.Tracker()

    img_sh = img_source.shape()
    obst_sh = obstacle_source.shape()

    print(img_sh, obst_sh)

    x_obs_rot = int(img_sh[1] / 2)
    y_obs_rot = int(img_sh[0] / 2)
    obs_radius = int(x_obs_rot / 3)

    #print (x_obs_rot, y_obs_rot, radius)

    angle = 0
    angular_speed = 0.231415234567654

    turn_num = 0

    while (True):
        frame = img_source.get_frame()
        obstacle = obstacle_source.get_frame()

        x_obs = x_obs_rot + int(obs_radius * math.cos(angle))
        y_obs = y_obs_rot + int(obs_radius * math.sin(angle))

        angle += angular_speed

        frame[y_obs:y_obs + obst_sh[0], x_obs:x_obs + obst_sh[1], :] = obstacle

        #frame = frame_

        cv2.waitKey(1)
        os.system('clear')

        (x, y), success = detector.detect(frame, "obstacle detector")
        measurement_time = time.time()

        #print (x, y, measurement_time)

        target_tracker.add_measurement((x, y), measurement_time)

        result = frame.copy()

        if (turn_num > 10):
            target_tracker.calc_cycle_parameters()

            (center_x,
             center_y) = target_tracker.cycle_parameters["circle center"]
            radius = target_tracker.cycle_parameters["radius"]

            lowest_x = int(center_x)
            lowest_y = int(center_y + radius)

            result = cv2.circle(result, (lowest_x, lowest_y),
                                9, (20, 150, 190),
                                thickness=-1)

            time_to_approach = target_tracker.predict_time(
                (lowest_x, lowest_y))

            if (time_to_approach < 0.2):
                result = cv2.circle(result, (lowest_x, lowest_y),
                                    15, (220, 15, 90),
                                    thickness=-1)

            print(time_to_approach)

        turn_num += 1

        #draw circle on the frame
        if (success == True):
            #print ("detected")

            result = cv2.circle(result, (x, y),
                                9, (120, 15, 190),
                                thickness=-1)

        else:
            print("not detected")

        stages = detector.get_stages_picts("obstacle detector")

        for i in range(2):
            cv2.imshow(str(i), stages[i])

        #processing_stages = detector.stages ()
        #resultant_frame = form_images (processing_stages)

        cv2.imshow("frame", result)

        time.sleep(0.02)

        #clear_output (wait=True)

        keyb = cv2.waitKey(1) & 0xFF

        if (keyb == ord('q')):
            break

    #cam.release ()  # no camera is opened in this example

    cv2.destroyAllWindows()
Example #5
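# Imports this snippet needs (omitted from the listing); PHOTO (and the commented-out
# CAMERA/VIDEO constants), photo_path and photo_file are assumed to be defined elsewhere.
import time

import cv2

import detectors
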
def main():
    INPUT_SOURCE = PHOTO

    #cam_num = max (get_available_cameras ())

    #cam = cv2.VideoCapture (cam_num)

    #if (INPUT_SOURCE != CAMERA):
    #    cam.release ()

    #if (INPUT_SOURCE == VIDEO):
    #    cam = cv2.VideoCapture (video_path + video_file)

    if (INPUT_SOURCE == PHOTO):
        img = cv2.imread(photo_path + photo_file)

    #cv2.namedWindow ("frame", cv2.WINDOW_NORMAL)
    #cv2.namedWindow ("frame")

    #cv2.resizeWindow ("frame", (640*2, 480*2))
    #cv2.resizeWindow ("frame", (480, 640))

    str_num = 0

    low_th = (57, 150, 110)
    high_th = (67, 160, 120)

    detector = detectors.Detector()
    detector.add_filter(detectors.inrange(low_th, high_th), "inrange")
    detector.add_filter(detectors.max_area_cc_bbox(), "bbox extraction")

    while (True):
        #if (INPUT_SOURCE == CAMERA or INPUT_SOURCE == VIDEO):
        #    ret, frame_ = cam.read ()

        if (INPUT_SOURCE == PHOTO):
            frame_ = img.copy()

        #frame = cv2.cvtColor (frame_, cv2.COLOR_RGB2BGR)
        frame = frame_

        cv2.waitKey(1)

        #top left, bottom right
        bbox_tl, bbox_br = detector.detect(frame)

        #draw bbox on the frame
        frame_with_bbox = cv2.rectangle(frame.copy(), bbox_tl, bbox_br,
                                        (255, 0, 0), 5)

        stages = detector.get_stages()

        for i in range(2):
            cv2.imshow(str(i), stages[i])

        #processing_stages = detector.stages ()
        #resultant_frame = form_images (processing_stages)

        cv2.imshow("frame", frame_with_bbox)

        time.sleep(0.02)

        #clear_output (wait=True)

        keyb = cv2.waitKey(1) & 0xFF

        if (keyb == ord('q')):
            break

    #cam.release ()  # only needed when the commented-out camera input is used

    cv2.destroyAllWindows()
Example #6
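# Imports this snippet needs (omitted from the listing); PHOTO, photo_path and
# photo_file are assumed to be defined elsewhere in the module.
import os
import time

import cv2

import detectors
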
def main():
    INPUT_SOURCE = PHOTO

    #cam_num = max (get_available_cameras ())

    #cam = cv2.VideoCapture (cam_num)

    #if (INPUT_SOURCE != CAMERA):
    #    cam.release ()

    #if (INPUT_SOURCE == VIDEO):
    #    cam = cv2.VideoCapture (video_path + video_file)

    if (INPUT_SOURCE == PHOTO):
        img = cv2.imread(photo_path + photo_file)

    str_num = 0

    detector = detectors.Detector('multiple_objects.json')

    while (True):
        #if (INPUT_SOURCE == CAMERA or INPUT_SOURCE == VIDEO):
        #    ret, frame_ = cam.read ()

        if (INPUT_SOURCE == PHOTO):
            frame_ = img.copy()

        #frame = cv2.cvtColor (frame_, cv2.COLOR_RGB2BGR)
        frame = frame_

        cv2.waitKey(1)
        os.system('clear')

        (bbox_tl, bbox_br), success = detector.detect(frame, "ball detector")
        result = frame.copy()

        if (success == True):
            print("detected")

            result = cv2.rectangle(frame.copy(), bbox_tl, bbox_br, (255, 0, 0),
                                   5)

        else:
            print("not detected")

        (x, y), success = detector.detect(frame, "obstacle detector")

        #draw circle on the frame
        if (success == True):
            print("detected")

            result = cv2.circle(result, (x, y),
                                9, (120, 15, 190),
                                thickness=-1)

        else:
            print("not detected")

        stages = detector.get_stages()

        for i in range(2):
            cv2.imshow(str(i), stages[i])

        #processing_stages = detector.stages ()
        #resultant_frame = form_images (processing_stages)

        cv2.imshow("frame", result)

        time.sleep(0.02)

        #clear_output (wait=True)

        keyb = cv2.waitKey(1) & 0xFF

        if (keyb == ord('q')):
            break

    #cam.release ()  # only needed when the commented-out camera input is used

    cv2.destroyAllWindows()
Example #7
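# Imports this snippet needs (omitted from the listing); PHOTO, photo_path,
# photo_file and obstacle_file are assumed to be defined elsewhere in the module.
import math
import os
import time

import cv2

import detectors
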
def main():
    INPUT_SOURCE = PHOTO

    #cam_num = max (get_available_cameras ())

    #cam = cv2.VideoCapture (cam_num)

    #if (INPUT_SOURCE != CAMERA):
    #    cam.release ()

    #if (INPUT_SOURCE == VIDEO):
    #    cam = cv2.VideoCapture (video_path + video_file)

    if (INPUT_SOURCE == PHOTO):
        img = cv2.imread(photo_path + photo_file)

    obstacle = cv2.imread(photo_path + obstacle_file)
    obstacle_sh = obstacle.shape

    str_num = 0

    detector = detectors.Detector(
        '/Users/elijah/Dropbox/Programming/detectors/configs/object_tracking.json'
    )

    sh = img.shape
    x_obs_rot = sh[1] // 2
    y_obs_rot = sh[0] // 2
    radius = x_obs_rot // 2
    angle = 0

    while (True):
        #if (INPUT_SOURCE == CAMERA or INPUT_SOURCE == VIDEO):
        #    ret, frame_ = cam.read ()

        if (INPUT_SOURCE == PHOTO):
            frame_ = img.copy()

        x_obs = x_obs_rot + int(radius * math.cos(angle))
        y_obs = y_obs_rot + int(radius * math.sin(angle))

        angle += 0.1

        # paste the obstacle into the frame (rows are y, columns are x)
        frame = frame_
        frame[y_obs:y_obs + obstacle_sh[0],
              x_obs:x_obs + obstacle_sh[1], :] = obstacle

        cv2.waitKey(1)
        os.system('clear')

        (x, y), success = detector.detect(frame, "obstacle detector")
        result = frame.copy()

        #draw circle on the frame
        if (success == True):
            print("detected")

            result = cv2.circle(result, (x, y),
                                9, (120, 15, 190),
                                thickness=-1)

        else:
            print("not detected")

        stages = detector.get_stages()

        for i in range(2):
            cv2.imshow(str(i), stages[i])

        #processing_stages = detector.stages ()
        #resultant_frame = form_images (processing_stages)

        cv2.imshow("frame", result)

        time.sleep(0.02)

        #clear_output (wait=True)

        keyb = cv2.waitKey(1) & 0xFF

        if (keyb == ord('q')):
            break

    #cam.release ()  # only needed when the commented-out camera input is used

    cv2.destroyAllWindows()
Example #8
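# Imports this snippet needs (omitted from the listing).
import time

import cv2

import detectors
import input_output
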
def main():
    #detector = detectors.Detector ('/Users/elijah/Dropbox/Programming/detectors/configs/basketball.json')
    detector = detectors.Detector(
        '/Users/elijah/Dropbox/Programming/detectors/configs/closest_obstacle.json'
    )

    source = input_output.Source(
        "/Users/elijah/Dropbox/Programming/detectors/images/2019_08_11_08h11m07s/"
    )
    #source  = input_output.Source ("/Users/elijah/Dropbox/Programming/detectors/data/output.avi")
    #source  = input_output.Source ("-1")

    #fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
    #a = 0

    while (True):
        frame = source.get_frame()

        #out.write(frame)
        #print (a)
        #a += 1
        #if (a > 49):
        #    break

        (x, y), success = detector.detect(frame, "obstacle detector")

        #result = frame.copy ()

        #draw circle on the frame
        if (success == True):
            print("detected")

            #result = cv2.circle (result, (x, y), 9, (120, 15, 190), thickness = -1)

        else:
            print("not detected")

        stages = detector.get_stages_picts("obstacle detector")

        #for i in range (len (stages)):
        #    cv2.imshow (str (i), stages [i])

        resultant_frame = input_output.form_grid(stages)
        print(resultant_frame.shape)

        #cv2.imshow ("frame", result)
        cv2.imshow("frame", resultant_frame)

        time.sleep(0.02)

        keyb = cv2.waitKey(1) & 0xFF

        if (keyb == ord('q')):
            break

    #out.release ()  # only needed when the commented-out VideoWriter is used

    #cam.release ()

    cv2.destroyAllWindows()