Example No. 1
    def simple_synchronous():
        """
        this function is the simplest version of the CV pipeline
        - no tracking
        - no multiprocessing/async/multithreading
        """
        for counter in count(start=0, step=1):  # itertools.count: counts up from 0 forever
            # get the latest image from the camera
            frame = get_latest_frame()
            if frame is None:
                print(
                    "assuming the video stream ended because latest frame is None"
                )
                return

            # run the model
            boxes, confidences, classIDs = modeling.get_bounding_boxes(
                frame, confidence, threshold)

            # figure out where to aim
            x, y = aiming.aim(boxes)

            # optional value for debugging/testing
            if on_next_frame is not None:
                on_next_frame(counter, frame, (boxes, confidences), (x, y))

            # send data to embedded
            embedded_communication.send_output(x, y)
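
For context, this loop only depends on a handful of collaborators. Minimal stand-ins (the names come from the snippet, but these bodies are hypothetical stubs, not the project's real implementations) look like:

    from itertools import count  # the loop above relies on itertools.count
    import numpy as np

    def get_latest_frame():
        # hypothetical stub: return the newest camera frame, or None when the stream ends
        return np.zeros((480, 640, 3), dtype=np.uint8)

    class aiming:
        @staticmethod
        def aim(boxes):
            # hypothetical stub: aim at the center of the first box, else the image center
            if len(boxes) > 0:
                x, y, w, h = boxes[0]
                return x + w / 2, y + h / 2
            return 320, 240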
Example No. 2
def debug_each_frame(frame_index, frame, model_output, aiming_output):
    """
    this function is designed to be called every time main() processes a frame
    its only purpose is to bundle all of the debugging output
    """

    print(f'processing frame {frame_index}')

    # extract the output
    boxes, confidences = model_output
    x, y = aiming_output

    # load/show the image
    image = Image(frame)

    for each in boxes:
        image.add_bounding_box(each)

    if PARAMETERS["testing"]["save_frame_to_file"]:
        frames.append(image.img)

    if PARAMETERS["testing"]["open_each_frame"]:
        # NOTE: this currently fails in nix-shell on Mac with error message:
        #     qt.qpa.plugin: Could not find the Qt platform plugin "cocoa" in ""
        #     its fixable, see "qt.qpa.plugin: Could not find the Qt" on https://nixos.wiki/wiki/Qt
        #     but its hard to fix
        image.show("frame")

    # allow the user to quit by pressing q (waitKey returns the pressed key's code;
    # masking with 0xFF keeps only the low byte for comparison with ord('q'))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        exit(0)
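
The Image helper used above isn't shown; judging from the calls, it is a thin wrapper around a cv2/numpy image. A hypothetical sketch of the interface this function assumes:

    import cv2

    class Image:
        def __init__(self, img):
            self.img = img  # raw cv2 image (numpy array)

        def add_bounding_box(self, box):
            # draw one (x, y, width, height) box onto the image
            x, y, w, h = box
            cv2.rectangle(self.img, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2)

        def show(self, window_name):
            cv2.imshow(window_name, self.img)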
Example No. 3
    def update(self, image):
        # attempt to update the tracked object's location in the new frame
        ok, location = self.tracker.update(image)
        print("TRACKER CONTINUE", ok)
        if not ok:
            self.exists = False
        # return the location if the update succeeded, otherwise None
        return location if ok else None
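
Typical usage, assuming this update() lives on the same wrapper class as the init() shown in Example No. 7 (the class and variable names here are placeholders):

    tracker = Tracker()                        # hypothetical name for the class owning init()/update()
    bbox = tracker.init(first_frame, boxes)    # picks the centermost box and starts tracking
    if bbox is not None:
        for frame in later_frames:
            location = tracker.update(frame)
            if location is None:               # tracker lost the target
                break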
Example No. 4
    def __init__(self, port, baudrate):
        if port is None or port == 0:
            self.port = None  # disable the port
            print(
                'Embedded Communication: Port is set to None. No communication will be established.'
            )
        else:
            # serial.Serial from pyserial, with a 3-second read timeout
            self.port = Serial(port, baudrate=baudrate, timeout=3.0)
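
The send_output() called from Example No. 1 isn't shown; a minimal sketch assuming a simple text wire format (the real packet format for the embedded board is not given in this excerpt):

    def send_output(self, x, y):
        # hypothetical wire format: "x,y\n" as ASCII; the real protocol is not shown
        if self.port is None:
            return  # communication disabled (see __init__ above)
        self.port.write(f"{x},{y}\n".encode("ascii"))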
Example No. 5
    def run(self):
        """
        Continuously read and store the latest frame from the camera
        """
        cam = cv2.VideoCapture(self.input_source)
        last_time = time.time()
        while True:
            # grab the next frame from the capture device
            ok, frame = cam.read()
            if not ok:
                break  # stream ended or camera error
            self.frame = frame
            print(self.frame.shape)

            cv2.waitKey(1)
            if self.debug_mode:
                current_time = time.time()
                print('FPS: ', 1 / (current_time - last_time))
                last_time = current_time
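
Since run() blocks forever, it is presumably the target of a background thread; a sketch of that wiring, with the class name (VideoStream) assumed for illustration:

    import threading

    video_stream = VideoStream()   # hypothetical class owning run(), input_source, frame, debug_mode
    reader = threading.Thread(target=video_stream.run, daemon=True)
    reader.start()
    # the main loop can now read video_stream.frame at any time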
Example No. 6
    def modelMulti(frame, confidence, threshold, best_bounding_box, track,
                   model, betweenFrames, collectFrames):
        # run the model; update best_bounding_box to the new detection if one
        # exists, otherwise keep tracking the old bounding box
        boxes, confidences, classIDs, frame = model.get_bounding_boxes(
            frame, confidence, threshold)
        potentialbbox = track.init(frame, boxes)

        for between_frame in betweenFrames:
            if potentialbbox is None:
                break
            potentialbbox = track.update(between_frame)
            print(potentialbbox)

        best_bounding_box[:] = potentialbbox if potentialbbox else [
            -1, -1, -1, -1
        ]
        betweenFrames[:] = []
        collectFrames.value = False
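
The best_bounding_box[:] = ... and collectFrames.value = False assignments suggest these are multiprocessing shared objects. A sketch of how a caller might set them up (the exact types are assumed, not confirmed by the snippet, and track/model would need to be picklable or built inside the worker):

    import multiprocessing

    manager = multiprocessing.Manager()
    best_bounding_box = manager.list([-1, -1, -1, -1])  # shared (x, y, w, h); -1s mean "no box"
    betweenFrames = manager.list()                      # frames that arrived while the model ran
    collectFrames = multiprocessing.Value('b', True)    # flag: keep buffering frames

    worker = multiprocessing.Process(
        target=modelMulti,
        args=(frame, confidence, threshold, best_bounding_box, track,
              model, betweenFrames, collectFrames),
    )
    worker.start()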
Example No. 7
    def init(self, image, bboxes):
        # creates the tracker and returns None if there are no bounding boxes to track
        self.tracker = cv2.TrackerKCF_create()
        print("inside init for KCF")
        print(bboxes)
        if len(bboxes) == 0:
            return None
        # Finds the coordinate for the center of the screen
        center = (image.shape[1] / 2, image.shape[0] / 2)

        # Makes a dictionary of bounding boxes using the bounding box as the key and its distance from the center as the value
        bboxes = {
            tuple(bbox):
            self.distance(center,
                          (bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2))
            for bbox in bboxes
        }

        # Finds the centermost bounding box
        bbox = min(bboxes, key=bboxes.get)

        # Attempts to start the tracker
        self.tracker.init(image, bbox)
        self.exists = True
        print("TRACKER INIT")

        # return the selected bounding box (None was already returned above when
        # there were no boxes to track)
        return bbox
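
self.distance() isn't shown in this excerpt; presumably it is a plain Euclidean distance between two (x, y) points, something like:

    import math

    def distance(self, point_a, point_b):
        # hypothetical implementation: straight-line distance between two points
        return math.hypot(point_a[0] - point_b[0], point_a[1] - point_b[1])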
Example No. 8
def init(image, bboxes, video=[]):  # careful: a mutable default argument is shared across calls
    global tracker
    # creates the tracker and returns None if there are no bounding boxes to track
    tracker = TrackerKCF_create()
    print("inside init for KCF")
    print(bboxes)
    if len(bboxes) == 0:
        return None
    # Finds the coordinate for the center of the screen
    center = (image.shape[1] / 2, image.shape[0] / 2)

    # Makes a dictionary mapping each bounding box (as a tuple) to its distance from the center
    bboxes = {
        tuple(bbox): distance(center, (bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2))
        for bbox in bboxes
    }

    # Finds the centermost bounding box
    bbox = min(bboxes, key=bboxes.get)

    # Attempts to start the tracker (init returns a success flag in older OpenCV
    # releases; newer ones return None, which this check would read as failure)
    ok = tracker.init(image, bbox)
    print(ok)
    
    # returns the tracked bounding box if tracker was successful, otherwise None
    return bbox if ok else None
Example No. 9
    # allow the user to quit by pressing q (waitKey returns the pressed key's code;
    # masking with 0xFF keeps only the low byte for comparison with ord('q'))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        exit(0)


#
# setup main(s)
#
simple_synchronous, synchronous_with_tracker = setup(
    # comment out lines (arguments) below to get closer
    # and closer to realistic output
    get_latest_frame=get_next_video_frame,  # can be swapped with get_latest_video_frame
    on_next_frame=debug_each_frame,
    modeling=test_modeling,
    tracker=test_tracking,
    aiming=test_aiming
    # send_output=simulated_send_output, # this should be commented in once we actually add aiming
)
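
The commented-out simulated_send_output isn't defined in this excerpt; a hypothetical stand-in would simply log instead of writing to the serial port:

    def simulated_send_output(x, y):
        # hypothetical stub: print what would have been sent to the embedded board
        print(f"simulated send_output: x={x}, y={y}")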

#
# run mains (with simulated values)
#
print('Starting synchronous_with_tracker() with simulated IO')
synchronous_with_tracker()

# save all the frames as a video
print("Starting process of saving frames to a video file")
Video.create_from_frames(frames, save_to=PATHS["video_output"])
print(f"video output has been saved to {PATHS['video_output']}")
Example No. 10
#
# setup main(s)
#
simple_synchronous, synchronous_with_tracker, multiprocessing_with_tracker = setup(
    # comment out lines (arguments) below to get closer
    # and closer to realistic output
    get_frame=(get_next_video_frame
               if PARAMETERS['videostream']['testing']['grab_frame'] == 0
               else get_latest_video_frame),  # 0 means grab next frame, 1 means grab latest frame
    on_next_frame=debug_each_frame,
    modeling=test_modeling,
    tracker=test_tracking,
    aiming=test_aiming
    # send_output=simulated_send_output, # this should be commented in once we actually add aiming
)

#
# run mains (with simulated values)
#
main_function = PARAMETERS['testing']['main_function']
if main_function == 0:
    simple_synchronous()
elif main_function == 1:
    synchronous_with_tracker()
else:
    multiprocessing_with_tracker()

# save all the frames as a video
print("Starting process of saving frames to a video file")
Video.create_from_frames(frames, save_to=PATHS["video_output"])
print(f"video output has been saved to {PATHS['video_output']}")
Example No. 11
import cv2
import time
import pyrealsense2 as rs  # required for the rs.pipeline / rs.config calls below
from toolbox.globals import ENVIRONMENT, PATHS, PARAMETERS, print

# parameters
# TODO: put this in the info.yaml
SHOW_IMAGES = False

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 60)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 60)
profile = pipeline.start(config)
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
print(depth_scale)

# Main capture loop (streaming already started with pipeline.start above)
try:
    while True:
        start_t = time.time()
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        # align the depth frame to the color frame's viewpoint
        align = rs.align(rs.stream.color)
        frames = align.process(frames)

        depth_frame = frames.get_depth_frame()
        if depth_frame and color_frame:
            print('ok')
finally:
    # make sure the camera pipeline is released, even on Ctrl-C
    pipeline.stop()
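
With an aligned depth frame in hand, per-pixel depth can be read either through the SDK helper or by scaling the raw array with the depth_scale queried above; a short sketch (the pixel coordinates are arbitrary):

    import numpy as np

    # distance in meters at one pixel, via the SDK helper
    meters = depth_frame.get_distance(320, 240)

    # or the whole depth image in meters, via the raw data and the depth scale
    depth_image = np.asanyarray(depth_frame.get_data()) * depth_scale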
Example No. 12
import numpy as np
import cv2
# relative imports
from toolbox.globals import ENVIRONMENT, PATHS, PARAMETERS, print

# 416 is almost certainly the YOLO input resolution (frames get resized to
# 416x416 before the forward pass); TODO: confirm this and move it into the info.yaml
MAGIC_NUMBER_1 = 416

#
# init the model
#
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(PATHS["model_config"], PATHS["model_weights"])
layer_names = net.getLayerNames()
output_layer_names = [
    layer_names[index[0] - 1] for index in net.getUnconnectedOutLayers()
]  # NOTE: newer OpenCV returns plain ints here, in which case use layer_names[index - 1]
W, H = None, None


def get_bounding_boxes(frame, iconfidence, ithreshold):
    """
    ex: boxes, confidences, class_ids = get_bounding_boxes(frame, 0.5, 0.3)
    
    @frame: should be a cv2 image (basically a numpy array)
    @iconfidence: should be a value between 0-1
    @ithreshold: should be a value between 0-1
    -
    @@returns:
    - a tuple containing
        - a list of bounding boxes, each formatted as (x, y, width, height)
        - a list of confidences (one float per box)
        - a list of class ids (one int per box)
    """
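
The body of get_bounding_boxes is cut off in this listing. For reference, the usual cv2.dnn YOLO flow such a function follows looks roughly like this (a sketch of the standard pattern, not the project's actual code):

    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0,
                                 (MAGIC_NUMBER_1, MAGIC_NUMBER_1),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    layer_outputs = net.forward(output_layer_names)

    H, W = frame.shape[:2]
    boxes, confidences, class_ids = [], [], []
    for output in layer_outputs:
        for detection in output:
            scores = detection[5:]
            class_id = int(np.argmax(scores))
            score = float(scores[class_id])
            if score > iconfidence:
                # YOLO gives center/size relative to the image; convert to x, y, w, h
                center_x, center_y, width, height = detection[0:4] * np.array([W, H, W, H])
                boxes.append([int(center_x - width / 2), int(center_y - height / 2),
                              int(width), int(height)])
                confidences.append(score)
                class_ids.append(class_id)

    # non-maximum suppression to drop heavily-overlapping boxes
    keep = cv2.dnn.NMSBoxes(boxes, confidences, iconfidence, ithreshold)
    keep = np.array(keep).flatten() if len(keep) else []
    return ([boxes[i] for i in keep],
            [confidences[i] for i in keep],
            [class_ids[i] for i in keep])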
Example No. 13
import time
# relative imports
from toolbox.video_tools import Video
from toolbox.globals import ENVIRONMENT, PATHS, PARAMETERS, print

#
# initialize
#
test_video = Video(PATHS["main_test_video"])
print('Loading all frames into ram for simulated testing')
all_frames = list(test_video.frames())
print(f'Found {len(all_frames)} frames')
start_time = None
framerate = PARAMETERS["videostream"]["testing"]["assumed_framerate"]

def get_latest_video_frame():
    # kick off the start time if it hasn't started yet
    global start_time
    if start_time is None:
        start_time = time.time()
    
    # figure out which frame should be retrieved based on the elapsed time
    seconds_since_start = time.time() - start_time 
    which_frame = int(seconds_since_start * framerate)
    
    # get that frame from the list of all frames
    if which_frame < len(all_frames):
        return all_frames[which_frame]
    else:
        return None
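
As a quick worked example of the timing math, with assumed_framerate set to 30:

    get_latest_video_frame()   # first call starts the clock and returns all_frames[0]
    time.sleep(0.5)
    get_latest_video_frame()   # int(0.5 * 30) == 15, so this returns all_frames[15]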