Code Example #1
    def test_waypoint_visualization_with_ngon_generator_predicted_human(self):
        # Set up the waypoint generator, controller, drone state, and predictor
        waypoint_generator = NGonWaypointGenerator(n=6, radius=4)
        gamma = 0.9
        cinematic_controller = CinematicController(
            waypoint_generator=waypoint_generator, bb_filter_gamma=gamma)
        cinematic_controller.update_latest_drone_state(DroneState(x=2, y=-3))
        cinematic_controller.person_predictor = PolynomialPredictor(
            poly_degree=2, weight_decay_factor=1)
        cinematic_controller.update_latest_bbs([BoundingBox((10, 10), (0, 0))])
        time.sleep(.001)  # brief pause so successive updates carry distinct timestamps
        cinematic_controller.update_latest_bbs([BoundingBox((5, 5), (50, 0))])
        time.sleep(.001)
        cinematic_controller.update_latest_bbs([BoundingBox((2, 2), (50, 0))])
        time.sleep(.001)
        cinematic_controller.update_latest_bbs([BoundingBox((1, 1), (50, 0))])

        # Generate the drone waypoints
        waypoints = cinematic_controller.generate_waypoints()

        # Predict the person's future states at the given times
        person_states = cinematic_controller.person_predictor.predict_next_person_state(
            [2, 4, 6, 8, 10, 15, 20, 25, 30])

        # initialize the environment
        environment = Environment()
        environment.add_obstacles([[(6, 0), (4, 2), (6, 2)],
                                   [(8, -4), (9, -3), (10, -3), (10, -5),
                                    (9, -3.5)], [(-6, -2), (-1, -5),
                                                 (-1, -2)]])

        # initialize visualization object and plot
        viz = WaypointVisualization(waypoints, person_states, environment)
        viz.plot('plots/moving_target.png')
Code Example #2
 def test_track_single_bb_over_time_with_noise(self):
     # Check that, to start, the smooth bb is None
     assert self.cinematic_controller.smoothed_bounding_box is None
     previous_smoothed_bb = None
     # The idea is to create a bounding box that moves from bottom left to upper right.
     num_bbs = 25
     x_step_size = 1.0
     slope_of_movement = 0.5
     fixed_dimensions = (10, 10)
     for bb_index in range(num_bbs):
         new_x = bb_index * x_step_size
         new_y = new_x * slope_of_movement
         new_bb = BoundingBox(fixed_dimensions, (new_x, new_y))
         # Now create a "distractor" bb a little bit out of the way and smaller
         distractor_x = new_x + 2.5  # These are arbitrary choices. Feel free to edit.
         distractor_y = new_y - 0.5
         distractor_dimensions = (fixed_dimensions[0] * 0.5,
                                  fixed_dimensions[1] * 0.5)
         distractor_bb = BoundingBox(distractor_dimensions,
                                     (distractor_x, distractor_y))
         # Note how the distractor_bb and new_bb are both passed in.
         self.cinematic_controller.update_latest_bbs(
             [new_bb, distractor_bb])
         if previous_smoothed_bb is not None:
             # Check that the smoothed bb has moved up and to the right, and at the correct angle.
             new_smoothed_bb = self.cinematic_controller.smoothed_bounding_box
             new_smoothed_bb_centroid = new_smoothed_bb.get_centroid()
             x_diff = new_smoothed_bb_centroid[
                 0] - previous_smoothed_bb.get_centroid()[0]
             y_diff = new_smoothed_bb_centroid[
                 1] - previous_smoothed_bb.get_centroid()[1]
             assert x_diff > 0
             assert y_diff > 0
             # Compare against the expected slope with a tolerance rather than exact float equality.
             assert abs(y_diff - x_diff * slope_of_movement) < 1e-6
         # Track the latest smoothed bb so the next iteration has a point of comparison.
         previous_smoothed_bb = self.cinematic_controller.smoothed_bounding_box
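This distractor variant differs from Code Example #3 (below) only in passing a second, smaller, offset box on every update; the assertions still require the smoothed track to follow the main box. The test therefore implicitly exercises whatever box-selection logic update_latest_bbs applies when several boxes arrive at once; that logic is not shown in this excerpt, so this reading is an inference from the test's intent.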
Code Example #3
 def test_track_single_bb_over_time(self):
     # Check that, to start, the smooth bb is None
     assert self.cinematic_controller.smoothed_bounding_box is None
     previous_smoothed_bb = None
     # The idea is to create a bounding box that moves from bottom left to upper right.
     num_bbs = 25
     x_step_size = 1.0
     slope_of_movement = 0.5
     fixed_dimensions = (10, 10)
     for bb_index in range(num_bbs):
         new_x = bb_index * x_step_size
         new_y = new_x * slope_of_movement
         new_bb = BoundingBox(fixed_dimensions, (new_x, new_y))
         self.cinematic_controller.update_latest_bbs([new_bb])
         if previous_smoothed_bb is not None:
             # Check that the smoothed bb has moved up and to the right, and at the correct angle.
             new_smoothed_bb = self.cinematic_controller.smoothed_bounding_box
             new_smoothed_bb_centroid = new_smoothed_bb.get_centroid()
             x_diff = new_smoothed_bb_centroid[
                 0] - previous_smoothed_bb.get_centroid()[0]
             y_diff = new_smoothed_bb_centroid[
                 1] - previous_smoothed_bb.get_centroid()[1]
             assert x_diff > 0
             assert y_diff > 0
             # Compare against the expected slope with a tolerance rather than exact float equality.
             assert abs(y_diff - x_diff * slope_of_movement) < 1e-6
         # Track the latest smoothed bb so the next iteration has a point of comparison.
         previous_smoothed_bb = self.cinematic_controller.smoothed_bounding_box
Code Example #4
def main():
    # create parser instance
    config = ConfigParser()

    # Read detection.cfg
    config.read('config/detection.cfg')
    crop = config.get('blob', 'crop')
    swap_rb = config.get('blob', 'swap_rb')
    scale_factor = config.get('blob', 'scale_factor')
    output_width = config.get('blob', 'output_width')
    output_length = config.get('blob', 'output_length')
    confidence = config.get('blob', 'confidence')
    threshold = config.get('blob', 'threshold')
    yolov_labels = config.get('yolov3', 'yolov3_labels')
    yolov_weights = config.get('yolov3', 'yolov3_test_weights_file')
    yolov_config = config.get('yolov3', 'yolov3_config_file')
    device_id = config.get('camera', 'device_id')
    frame_wait_ms = config.get('camera', 'frame_wait_ms')
    frame_pause_interval = config.get('camera', 'frame_pause_interval')

    bounding_box = BoundingBox(scale_factor, output_length, output_width, swap_rb, crop,
                               yolov_labels, yolov_config, yolov_weights, confidence, threshold)

    capture_stream = CaptureStream(device_id)
    camera = capture_stream.get_camera_stream()

    frame_count = 1
    if camera.isOpened():
        while True:
            # Read each frame
            ret_val, frame = camera.read()
            if not ret_val:
                break

            # Draw new bounding box
            if frame_count == 1 or frame_count % int(frame_pause_interval) == 0:
                input_height, input_width = frame.shape[:2]
                blob = bounding_box.get_blob(frame)
                output_layers = bounding_box.get_output_layers(blob)
                boxes, confidences, class_identifiers = bounding_box.get_bounding_boxes(output_layers, input_height, input_width)
                suppression = bounding_box.suppress_weak_overlapping_boxes(boxes, confidences)
                frame_with_boxes = bounding_box.draw_boxes_labels(frame, boxes, confidences, class_identifiers, suppression)
            # Use existing bounding box
            else:
                frame_with_boxes = bounding_box.draw_boxes_labels(frame, boxes, confidences, class_identifiers, suppression)

            frame_count += 1
            cv.imshow("device: " + str(device_id), frame_with_boxes)

            # End camera capture with escape key
            interrupt_key = cv.waitKey(int(frame_wait_ms))
            if interrupt_key == 27:
                break

        # Cleanup
        capture_stream.close_camera_stream()
        capture_stream.destory_all_windows()
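A note on the config handling above: ConfigParser.get() always returns strings, which is why frame_pause_interval and frame_wait_ms must be wrapped in int() at their use sites, and why BoundingBox receives string-typed numeric parameters here. A minimal sketch of the typed accessors the standard library already provides, assuming the same detection.cfg layout (whether BoundingBox expects strings or parsed numbers is not shown in this excerpt):

    from configparser import ConfigParser

    config = ConfigParser()
    config.read('config/detection.cfg')
    # getint/getfloat/getboolean parse each value once, up front
    frame_wait_ms = config.getint('camera', 'frame_wait_ms')
    scale_factor = config.getfloat('blob', 'scale_factor')
    swap_rb = config.getboolean('blob', 'swap_rb')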
Code Example #5
    def test_generate_square_waypoints_around_prediction(self):
        self.cinematic_controller.person_predictor = PolynomialPredictor(
            weight_decay_factor=2)
        self.cinematic_controller.update_latest_drone_state(origin_drone_state)
        self.cinematic_controller.update_latest_bbs(
            [BoundingBox((10, 10), (0, 0))])
        time.sleep(1)
        self.cinematic_controller.update_latest_bbs(
            [BoundingBox((9, 9), (0, 0))])
        time.sleep(1)
        self.cinematic_controller.update_latest_bbs(
            [BoundingBox((8, 8), (0, 0))])
        time.sleep(1)
        self.cinematic_controller.update_latest_bbs(
            [BoundingBox((7, 7), (0, 0))])
        time.sleep(1)
        self.cinematic_controller.update_latest_bbs(
            [BoundingBox((6, 6), (0, 0))])
        self.cinematic_controller.set_waypoint_generator(
            NGonWaypointGenerator(n=4, radius=1))

        # Now ask for waypoints, which should describe a square around the moving person
        waypoints = self.cinematic_controller.generate_waypoints()
        assert len(waypoints) == 4  # 4 points
        waypoint1, waypoint2, waypoint3, waypoint4 = waypoints
        # Check that the positions are correct
        allowed_error = 0.01
        assert waypoint1.get_position()[0] > 0
        assert abs(waypoint1.get_position()[1] + 1) < allowed_error
        assert waypoint2.get_position()[0] > waypoint1.get_position()[0]
        assert abs(waypoint2.get_position()[1]) < allowed_error
        assert waypoint3.get_position()[0] < waypoint2.get_position()[0]
        assert abs(waypoint3.get_position()[1] - 1) < allowed_error
        assert waypoint4.get_position()[0] < waypoint3.get_position()[0]
        assert abs(waypoint4.get_position()[1]) < allowed_error
        # Check that the drone yaw gets updated, too
        epsilon = 0.01  # How much mathematical error is allowed
        assert abs(waypoint1.get_attitude()[2] - math.pi / 2) < epsilon
        assert abs(waypoint2.get_attitude()[2] - math.pi) < epsilon
        assert abs(waypoint3.get_attitude()[2] - 3 * math.pi / 2) < epsilon
        assert abs(waypoint4.get_attitude()[2] - 2 * math.pi) < epsilon
Code Example #6
    def compute_low_pass_filter_bb(self):
        latest_width, latest_height = self.latest_bounding_box.get_dimensions()
        latest_x, latest_y = self.latest_bounding_box.get_centroid()

        smoothed_width, smoothed_height = self.smoothed_bounding_box.get_dimensions()
        smoothed_x, smoothed_y = self.smoothed_bounding_box.get_centroid()

        new_width = smoothed_width * self.bb_filter_gamma + latest_width * (1 - self.bb_filter_gamma)
        new_height = smoothed_height * self.bb_filter_gamma + latest_height * (1 - self.bb_filter_gamma)
        new_x = smoothed_x * self.bb_filter_gamma + latest_x * (1 - self.bb_filter_gamma)
        new_y = smoothed_y * self.bb_filter_gamma + latest_y * (1 - self.bb_filter_gamma)
        return BoundingBox((new_width, new_height), (new_x, new_y))
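Code Example #6 is a standard first-order low-pass filter (an exponential moving average): each output blends the previous smoothed value with the newest observation, weighted by bb_filter_gamma. A minimal scalar sketch of the same update rule, in plain Python and independent of the BoundingBox class:

    def low_pass(previous, latest, gamma=0.9):
        # gamma near 1 trusts the history; gamma near 0 trusts the newest sample
        return previous * gamma + latest * (1 - gamma)

    smoothed = 0.0
    for observation in [10, 10, 10, 10]:
        smoothed = low_pass(smoothed, observation)
        print(smoothed)  # 1.0, 1.9, 2.71, 3.439 -- creeping toward 10

With gamma = 0.9, as in the tests above, the filter responds slowly and deliberately; that is why Code Examples #2 and #3 assert only the direction and slope of movement rather than exact positions.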
Code Example #7
    def detect_person(self, image_filepath, visualize_bb=False):
        image = cv2.imread(image_filepath)
        # Resize image.
        image = imutils.resize(image, width=min(400, image.shape[1]))
        orig = image.copy()
        # Detect people in the image.
        rects, weights = self.hog.detectMultiScale(image,
                                                   winStride=(4, 4),
                                                   padding=(8, 8),
                                                   scale=1.05)

        # Draw the original bounding boxes
        for (x, y, w, h) in rects:
            cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # apply non-maxima suppression to the bounding boxes using a
        # fairly large overlap threshold to try to maintain overlapping
        # boxes that are still people
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

        # Draw the final bounding boxes
        for (xA, yA, xB, yB) in pick:
            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

        # show some information on the number of bounding boxes
        print("[INFO] {}: {} original boxes, {} after suppression".format(
            image_filepath, len(rects), len(pick)))

        # Show the output images
        if visualize_bb:
            cv2.imshow("Before NMS", orig)
            cv2.imshow("After NMS", image)
            cv2.waitKey(2000)

        # Convert from the rectangle format used in visualization to BoundingBoxes.
        bounding_boxes = []
        for (xA, yA, xB, yB) in pick:
            centroid = (np.mean((xA, xB)), np.mean((yA, yB)))
            width = np.abs(xA - xB)
            height = np.abs(yA - yB)
            bb = BoundingBox((width, height), centroid)
            bounding_boxes.append(bb)

        return bounding_boxes
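Note the format juggling in this example: the HOG detector returns boxes as (x, y, w, h), imutils' non_max_suppression expects corner pairs (xA, yA, xB, yB), and the project's BoundingBox wants (dimensions, centroid). The two conversion loops at the end bridge those three representations.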
Code Example #8
    def test_generate_waypoints_bb_too_low(self):
        target_bb = self.waypoint_generator.get_target_bb()
        target_centroid = target_bb.get_centroid()
        low_bb_centroid = (target_centroid[0], target_centroid[1] - 10)
        low_bb = BoundingBox(target_bb.get_dimensions(), low_bb_centroid)
        self.cinematic_controller.update_latest_bbs([low_bb])
        self.cinematic_controller.update_latest_drone_state(origin_drone_state)
        self.cinematic_controller.set_waypoint_generator(
            FixedBBWaypointGenerator())

        # Now ask for waypoints, which should guide the drone to fly down
        waypoints = self.cinematic_controller.generate_waypoints()
        assert len(waypoints) == 1
        waypoint = waypoints[0]
        assert waypoint.get_position()[0] == 0
        assert waypoint.get_position()[1] == 0
        assert waypoint.get_position()[2] < 0  # Fly down.
        assert waypoint.get_attitude() == (0, 0, 0)
Code Example #9
    def test_generate_waypoints_bb_to_right(self):
        target_bb = self.waypoint_generator.get_target_bb()
        target_centroid = target_bb.get_centroid()
        right_bb_centroid = (target_centroid[0] + 10, target_centroid[1])
        right_bb = BoundingBox(target_bb.get_dimensions(), right_bb_centroid)
        self.cinematic_controller.update_latest_bbs([right_bb])
        self.cinematic_controller.update_latest_drone_state(origin_drone_state)
        self.cinematic_controller.set_waypoint_generator(
            FixedBBWaypointGenerator())

        # Now ask for waypoints, which should guide the drone to turn right to center the bounding box.
        waypoints = self.cinematic_controller.generate_waypoints()
        assert len(waypoints) == 1
        waypoint = waypoints[0]
        assert waypoint.get_position() == (0, 0, 0)
        assert waypoint.get_attitude()[0] == 0
        assert waypoint.get_attitude()[1] == 0
        assert waypoint.get_attitude()[2] < 0
Code Example #10
    def test_generate_yaw_waypoints_left(self):
        yaw_waypoint_generator = YawWaypointGenerator()
        self.cinematic_controller.set_waypoint_generator(
            yaw_waypoint_generator)
        target_x = yaw_waypoint_generator.get_target_centroid_x()
        target_centroid = (target_x, 0)
        left_bb_centroid = (target_centroid[0] - 10, target_centroid[1])
        left_bb = BoundingBox((10, 10), left_bb_centroid)
        self.cinematic_controller.update_latest_bbs([left_bb])
        self.cinematic_controller.update_latest_drone_state(origin_drone_state)

        # Now ask for waypoints, which should guide the drone to turn left to center the bounding box.
        waypoints = self.cinematic_controller.generate_waypoints()
        assert len(waypoints) == 1
        waypoint = waypoints[0]
        assert waypoint.get_position() == (0, 0, 0)
        assert waypoint.get_attitude()[0] == 0
        assert waypoint.get_attitude()[1] == 0
        assert waypoint.get_attitude()[2] > 0
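Taken together, Code Examples #9 and #10 pin down the yaw sign convention these tests assume: a bounding box to the right of the target x yields a negative yaw command, and a box to its left yields a positive one, so positive yaw turns the drone left.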
Code Example #11
def main():
    # create parser instance
    config = ConfigParser()

    # Read detection.cfg
    config.read('config/detection.cfg')
    crop = config.get('blob', 'crop')
    swap_rb = config.get('blob', 'swap_rb')
    scale_factor = config.get('blob', 'scale_factor')
    output_width = config.get('blob', 'output_width')
    output_length = config.get('blob', 'output_length')
    confidence = config.get('blob', 'confidence')
    threshold = config.get('blob', 'threshold')
    yolov_labels = config.get('yolov3', 'yolov3_labels')
    yolov_weights = config.get('yolov3', 'yolov3_test_weights_file')
    yolov_config = config.get('yolov3', 'yolov3_config_file')
    input_image = config.get('image', 'input_image')
    output_image = config.get('image', 'output_image')

    bounding_box = BoundingBox(scale_factor, output_length, output_width,
                               swap_rb, crop, yolov_labels, yolov_config,
                               yolov_weights, confidence, threshold)

    # Read frame
    frame = cv.imread(input_image)
    input_height, input_width = frame.shape[:2]
    blob = bounding_box.get_blob(frame)
    output_layers = bounding_box.get_output_layers(blob)

    # Get and draw bounding boxes
    boxes, confidences, class_identifiers = bounding_box.get_bounding_boxes(
        output_layers, input_height, input_width)
    suppression = bounding_box.suppress_weak_overlapping_boxes(
        boxes, confidences)
    frame_with_boxes = bounding_box.draw_boxes_labels(frame, boxes,
                                                      confidences,
                                                      class_identifiers,
                                                      suppression)

    # Write resulting frame
    cv.imwrite(output_image, frame_with_boxes)
    print("Wrote image to: {}".format(output_image))

    # Cleanup
    cv.destroyAllWindows()
Code Example #12
    def create_bb_from_tf_result(image,
                                 output_dict,
                                 labels,
                                 detect_thresh=0.5):
        imheight, imwidth, _ = image.shape
        for ix, score in enumerate(output_dict['detection_scores']):
            if score > detect_thresh:
                [ymin, xmin, ymax, xmax] = output_dict['detection_boxes'][ix]
                classid = output_dict['detection_classes'][ix]
                classname = labels[classid]

                if classid == 1:  # refers to person

                    dimensions = ((xmax - xmin) * imwidth,
                                  (ymax - ymin) * imheight)
                    centroid = (np.mean([xmax * imwidth, xmin * imwidth]),
                                np.mean([ymax * imheight, ymin * imheight]))
                    bb = BoundingBox(dimensions, centroid)
                    return bb
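Because the loop returns on the first detection that clears detect_thresh and is classed as a person, the function implicitly returns None when no such detection exists, so callers need to handle that case. A hedged usage sketch (image, output_dict, and labels would come from a TensorFlow object-detection pipeline not shown in this excerpt):

    # Hypothetical usage; the inputs are assumptions, not from the source.
    bb = create_bb_from_tf_result(image, output_dict, labels, detect_thresh=0.5)
    if bb is None:
        print("No person detected above the confidence threshold.")
    else:
        print("Person at centroid {} with dimensions {}".format(
            bb.get_centroid(), bb.get_dimensions()))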
Code Example #13
    def test_generate_waypoints_too_close(self):
        # Feed in a current bounding box that is too close (but correctly centered)
        target_bb = self.waypoint_generator.get_target_bb()
        target_dimensions = target_bb.get_dimensions()
        larger_bb_dimensions = (target_dimensions[0] * 2,
                                target_dimensions[1] * 2)
        larger_bb = BoundingBox(larger_bb_dimensions, target_bb.get_centroid())
        self.cinematic_controller.update_latest_bbs([larger_bb])
        self.cinematic_controller.update_latest_drone_state(origin_drone_state)
        self.cinematic_controller.set_waypoint_generator(
            FixedBBWaypointGenerator())

        # Now ask for waypoints, which should guide the drone to back up to make the box smaller.
        waypoints = self.cinematic_controller.generate_waypoints()
        assert len(waypoints) == 1
        waypoint = waypoints[0]
        assert waypoint.get_position()[0] < 0  # X component is now negative
        assert waypoint.get_position()[1] == 0
        assert waypoint.get_position()[2] == 0
        assert waypoint.get_attitude() == (0, 0, 0)
Code Example #14
    def test_generate_waypoints_too_far(self):
        # Feed in a current bounding box that is too far away (but correctly centered)
        target_bb = self.waypoint_generator.get_target_bb()
        target_dimensions = target_bb.get_dimensions()
        smaller_bb_dimensions = (target_dimensions[0] * 0.5,
                                 target_dimensions[1] * 0.5)
        smaller_bb = BoundingBox(smaller_bb_dimensions, target_bb.get_centroid())
        self.cinematic_controller.update_latest_bbs([smaller_bb])
        self.cinematic_controller.update_latest_drone_state(origin_drone_state)
        self.cinematic_controller.set_waypoint_generator(
            FixedBBWaypointGenerator())

        # Now ask for waypoints, which should guide the drone to go forward to make the box bigger
        waypoints = self.cinematic_controller.generate_waypoints()
        assert len(waypoints) == 1
        waypoint = waypoints[0]
        assert waypoint.get_position()[0] > 0  # X is positive: the drone flew forward along x.
        assert waypoint.get_position()[1] == 0
        assert waypoint.get_position()[2] == 0
        assert waypoint.get_attitude() == (0, 0, 0)
Code Example #15
from utils.bounding_box import BoundingBox
from utils.prepare_result import prepareResultFrom
from utils.plotter import Plotter
import matplotlib.pyplot as plt

# params
relativeSize = 0.06
uncommonDimensions = 0
closenessThreshold = 1.5

# obtain the data sets from the csv files
non_time_series_datasets = getDatasetsFromFolder(
    getNonTimeSeriesDatasetsPath())
# data context
dataContext = [
    BoundingBox(minimun=-2, maximun=2),
    BoundingBox(minimun=-2, maximun=2)
]

# iterate over the data sets
for dataset_entry in non_time_series_datasets:
    # new dyclee for each data set
    dyclee = Dyclee(dataContext=dataContext,
                    relativeSize=relativeSize,
                    uncommonDimensions=uncommonDimensions,
                    closenessThreshold=closenessThreshold)
    # start
    X = dataset_entry['dataset']
    dName = dataset_entry['name']
    k = dataset_entry['k']
    baseFolder = getClusteringResultsPath() + dName + '/'
Code Example #16
 def __init__(self, target_bb=None):
     # Avoid a default argument that constructs an object at definition time;
     # build the default target lazily instead.
     if target_bb is None:
         target_bb = BoundingBox((10, 10), (20, 20))
     self.target_bb = target_bb
     # This gain should be very small, since it converts an area error in square
     # pixels into a displacement in meters.
     self.area_gain = 0.0005
     self.z_gain = 0.5
     self.yaw_gain = 0.5
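For a sense of scale on area_gain: with the default 10x10 target (area 100 square pixels), a detected 20x20 box (area 400) leaves an error of 300 square pixels, which a simple proportional law displacement = area_gain * area_error would turn into 0.0005 * 300 = 0.15 m. That proportional form is an assumption consistent with the comment, not something this excerpt shows.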
Code Example #17
    fig.canvas.manager.window.showMaximized()
    fig.suptitle(title, fontweight="bold")
    # show
    if not generate_gif:
        plt.show()  # plot figure to see results; for testing
    return fig


# fetch real data set
dataset = pd.read_csv(getTimeSeriesDatasetsPath() + getRealDatasetName() + ".csv", sep=',')
dataset['to_timestamp'] = dataset['to_timestamp'].astype('datetime64[ns]')
dataset = dataset[dataset['to_timestamp'].apply(validate_timestamp, date=datetime.datetime(2018, 10, 2, 10, 30))]

# data context
dataContext = [  # hard-coded bounding limits for Sweden
    BoundingBox(minimun=11.0273686052, maximun=23.9033785336),
    BoundingBox(minimun=55.3617373725, maximun=69.1062472602),
]
# dyclee
dyclee = Dyclee(dataContext=dataContext, relativeSize=params["relativeSize"], speed=params["speed"], lambd=params["lambd"],
                periodicRemovalAt=params["periodicRemovalAt"], periodicUpdateAt=params["periodicUpdateAt"],
                timeWindow=params["timeWindow"], closenessThreshold=params["closenessThreshold"])

tGlobal = 6000  # following the notebook: sampling roughly every half hour
ac = 0  # number of processed elements

folder = getClusteringResultsPath() + getRealDatasetName() + '/' + getDycleeName() + '/'  # for saving the prepared results
if will_calculate_DBCV:
    resetStorage(folder)

first = True  # first clustering; the 'end' picture will be added at the beginning (to understand the loop)
Code Example #18
from cinematic_waypoints.cinematic_controller import CinematicController
from cinematic_waypoints.waypoint_visualization import WaypointVisualization
from cinematic_waypoints.waypoint_generator.fixed_bb_waypoint_generator import FixedBBWaypointGenerator
from cinematic_waypoints.waypoint_generator.yaw_waypoint_generator import YawWaypointGenerator
from cinematic_waypoints.waypoint_generator.ngon_waypoint_generator import NGonWaypointGenerator
from utils.bounding_box import BoundingBox
from utils.drone_state import DroneState
from utils.environment import Environment
from utils.person_state import PersonState
from person_detection.person_predictor.polynomial_predictor import PolynomialPredictor
import math
import os
import time
import unittest

# Define a few helpful variables common across a few tests
bb_10_10_0_0 = BoundingBox((10, 10), (0, 0))
bb_10_20_0_0 = BoundingBox((10, 20), (0, 0))
bb_20_10_0_0 = BoundingBox((20, 10), (0, 0))
bb_20_20_0_0 = BoundingBox((20, 20), (0, 0))
# Will often use this state as a starting point. Drone is at rest at the origin, facing along the +x direction.
# Tests may have to be updated if the understanding of yaw changes.
origin_drone_state = DroneState(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)


# Class for testing CinematicController class.
# Remember to start each test method with the name "test_..."
class TestCinematicController(unittest.TestCase):
    # Before each test, create a brand new cinematic controller to play with.
    def setUp(self):
        self.waypoint_generator = FixedBBWaypointGenerator()
        self.gamma = 0.9
Code Example #19
from utils.bounding_box import BoundingBox
from utils.clustering_managers.non_time_series_clustering_mngr import NonTimeseriesClusteringMngr

# non time series ----------------------------------
# dataContext
dataContext = [
    BoundingBox(minimun=-3, maximun=3),
    BoundingBox(minimun=-3, maximun=3)
]

print("\n" + "Non time series clustering")
nonTimeSeriesClusMngr = NonTimeseriesClusteringMngr(dataContext)
nonTimeSeriesClusMngr.main()
Code Example #20
from utils.bounding_box import BoundingBox
from utils.clustering_managers.time_series_mngrs.denstream_clustering_manager import DenstreamClusteringManager
from utils.clustering_managers.time_series_mngrs.clustream_clustering_manager import ClustreamClusteringManager
from utils.clustering_managers.time_series_mngrs.dyclee_clustering_manager import DycleeClusteringManager

# time series ----------------------------------
# dataContext
dataContext = [BoundingBox(minimun=-2 , maximun=2),
               BoundingBox(minimun=-2 , maximun=2)]

print("\n" + "DenStream")
denStreamClusMngr = DenstreamClusteringManager(dataContext)
denStreamClusMngr.main()

print("\n" + "CluStream")
cluStreamClusMngr = ClustreamClusteringManager(dataContext)
cluStreamClusMngr.main()

print("\n" + "DyClee")
dyCleeClusMngr = DycleeClusteringManager(dataContext)
dyCleeClusMngr.main()
Code Example #21
def main():
    # create parser instance
    config = ConfigParser()

    # Read detection.cfg
    config.read('config/detection.cfg')
    crop = config.get('blob', 'crop')
    swap_rb = config.get('blob', 'swap_rb')
    scale_factor = config.get('blob', 'scale_factor')
    output_width = config.get('blob', 'output_width')
    output_length = config.get('blob', 'output_length')
    confidence = config.get('blob', 'confidence')
    threshold = config.get('blob', 'threshold')
    yolov_labels = config.get('yolov3', 'yolov3_labels')
    yolov_weights = config.get('yolov3', 'yolov3_test_weights_file')
    yolov_config = config.get('yolov3', 'yolov3_config_file')
    input_video = config.get('video', 'input_video')
    output_video = config.get('video', 'output_video')
    frames_per_second = int(config.get('video', 'frames_per_second'))

    # convert the config string to a boolean
    color = config.get('video', 'color') == 'True'

    bounding_box = BoundingBox(scale_factor, output_length, output_width,
                               swap_rb, crop, yolov_labels, yolov_config,
                               yolov_weights, confidence, threshold)

    video_stream = cv.VideoCapture(input_video)
    writer = None

    # Obtain video frame metadata
    try:
        if imutils.is_cv2():
            prop = cv.cv.CV_CAP_PROP_FRAME_COUNT
        else:
            prop = cv.CAP_PROP_FRAME_COUNT
        total = int(video_stream.get(prop))
        print("Total frames in video: {}".format(total))
    except Exception:
        print("Could not determine # of frames in video")
        total = -1

    frame_count = 1
    while True:
        # Read frame
        ret_val, frame = video_stream.read()
        if not ret_val:
            break
        start_time = time.time()
        input_height, input_width = frame.shape[:2]

        # Get and draw bounding boxes
        blob = bounding_box.get_blob(frame)
        output_layers = bounding_box.get_output_layers(blob)
        boxes, confidences, class_identifiers = bounding_box.get_bounding_boxes(
            output_layers, input_height, input_width)
        suppression = bounding_box.suppress_weak_overlapping_boxes(
            boxes, confidences)
        frame_with_boxes = bounding_box.draw_boxes_labels(
            frame, boxes, confidences, class_identifiers, suppression)
        end_time = time.time()

        # Write frame
        if writer is None:
            four_cc = cv.VideoWriter_fourcc(*"MJPG")
            writer = cv.VideoWriter(output_video, four_cc, frames_per_second,
                                    (frame.shape[1], frame.shape[0]), color)
            if total > 0:
                elapsed_time = (end_time - start_time)
                print(
                    "Single frame took:  {:.4f} seconds".format(elapsed_time))
                print("Estimated total time to finish: {:.4f} seconds".format(
                    elapsed_time * total))

        if frame_count % 10 == 0:
            print("Total frames written: {}".format(frame_count))

        frame_count += 1
        writer.write(frame_with_boxes)

    print("Wrote video to: {}".format(output_video))

    # Cleanup
    video_stream.release()
    cv.destroyAllWindows()
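One design note on this loop: the cv.VideoWriter is created lazily on the first processed frame rather than up front, because the size handed to the writer must match the actual frame dimensions, which are only known after a frame has been read. The per-frame timing estimate is printed at the same moment for the same reason: it needs one fully processed frame to extrapolate a total running time from.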