Example #1
    def __init__(self):
        """
        """

        # Public
        self.logger = logging.getLogger(__name__)

        # Private
        self._send_message = None
        self._config = None
        self._net = None
        self._out = None
        self._H = None
        self._W = None
        self._size = None
        self._cap = None
        self._processed_frame = None
        self._frame = None
        self._filepath = ""
        self._started = False
        self._recording = False
        self._track = True
        self._ct = centroidtracker.CentroidTracker()
        self._frame_lock = threading.Lock()
        self._process_lock = threading.Lock()
        self._record_lock = threading.Lock()
        self._capture_thread = threading.Thread(
            target=self._read_frame, args=())
        self._process_thread = threading.Thread(
            target=self._process_frame, args=())

        self.logger.debug("__init__() returned")
        return None
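
    # NOTE: the _read_frame and _process_frame thread targets are not shown
    # in this excerpt; the bodies below are hypothetical sketches of the
    # usual capture/process pattern (they assume self._cap and self._started
    # are set, e.g. by a start() method, before the threads run).
    def _read_frame(self):
        # continuously grab frames and stash the latest one under the lock
        while self._started:
            grabbed, frame = self._cap.read()
            if grabbed:
                with self._frame_lock:
                    self._frame = frame

    def _process_frame(self):
        # copy the most recent frame and publish the processed result
        while self._started:
            with self._frame_lock:
                frame = None if self._frame is None else self._frame.copy()
            if frame is None:
                continue
            with self._process_lock:
                self._processed_frame = frame  # detection would run here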
Example #2
    def __init__(self, name, port):
        self.msg_bus = MessageBus(name, port, 'controller')
        self.msg_bus.register_callback('join', self.handle_message)
        self.msg_bus.register_callback('img', self.handle_message)
        self.msg_bus.register_callback('img_metadata', self.handle_message)
        self.msg_bus.register_callback('img_tracking', self.handle_message)
        self.model = None
        signal.signal(signal.SIGINT, self.signal_handler)
        self.logfile = None
        self.logfile2 = None
        self.logfile3 = None
        self.dalgorithm = "yolo"
        self.starttime = 0.0
        self.endtime = 0.0
        self.cpu = None
        self.ram = None
        self.label_path = None
        self.classes = None
        self.cuda = None
        self.colors = None
        self.input_size = None
        self.confidence = None
        self.nms_thresh = None
        self.net = None # Caffe model, loaded later
        self.framecnt = 0
        self.gettimegap()
        self.tr = None
        self.encode_param = None

        self.imgq = queue.Queue(2000) # q for image.
        self.timerq = queue.Queue(2000) # q for image wait time
        self.framecntq = queue.Queue(2000) # q for frame cnt
        self.dev_nameq = queue.Queue(2000) # q for devnames
        self.typeq = queue.Queue(2000) # q for type of msg
        #self.image_dequeue_proc()

        self.ct = centroidtracker.CentroidTracker(50, maxDistance=50, queuesize=10)
        self.frame_skips = None # how many frames should be skipped before detection
        self.trackers = []
        self.trobs = {}
        self.boundary = {}
        self.objstatus = {}
        self.where = {}
        self.framethr = 50
        self.sumframebytes = 0
        self.movingdelta = 0
        self.futuresteps = 0
        self.frame_skip = 10
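
    # NOTE: hypothetical sketch of a consumer for the parallel queues above
    # (the original image_dequeue_proc is commented out in __init__ and not
    # shown). The five queues are filled together per message, so they are
    # drained in lockstep: one get() from each per iteration.
    def image_dequeue_proc(self):
        while True:
            img = self.imgq.get()          # blocks until an image arrives
            recv_time = self.timerq.get()  # enqueue timestamp for the image
            framecnt = self.framecntq.get()
            dev_name = self.dev_nameq.get()
            msg_type = self.typeq.get()
            # ... detection/tracking on img would happen here ...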
Example #3
def smooth_objects(all_frames) -> List[List[ObjectFrame]]:
    tracker_list = []
    # Since we assume that objects cannot change types, we separately run the
    # object tracking algorithm for each object type
    obj_classes = [obj['name'] for frame in all_frames for obj in frame]

    # Initialize a centroid tracker for each object type
    trackers = {
        obj_type: centroidtracker.CentroidTracker(maxDisappeared=15)
        for obj_type in set(obj_classes)
    }

    for (frame_idx, frame) in enumerate(all_frames):

        new_frame_list = []
        for obj_type in set(obj_classes):
            trackers[obj_type].update([
                obj['box_points'] for obj in frame if obj['name'] == obj_type
            ])  # update the centroid tracker with this type's boxes

            for (ID, centroid) in trackers[obj_type].objects.items():
                # new_ID = obj_type + '_' + str(ID) # Concatenate object type with object ID
                for obj in frame:
                    if obj['name'] == obj_type:
                        # CentroidTracker records only centroids, not bounding
                        # boxes, so we match each ID back to its box by
                        # comparing centroids. This implicitly assumes every
                        # object of a given type has a distinct centroid
                        # within a frame.
                        obj_centroid = calc_centroid(obj['box_points'])
                        size = calc_size(obj['box_points'])
                        if max(abs(obj_centroid[0] - centroid[0]),
                               abs(obj_centroid[1] - centroid[1])) < sys.float_info.epsilon:
                            new_frame_list.append(
                                ObjectFrame(obj_type, ID, obj_centroid, size))

        tracker_list.append(new_frame_list)

    return tracker_list
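
# NOTE: calc_centroid, calc_size, and ObjectFrame are referenced above but
# not defined in this excerpt; the definitions below are hypothetical
# sketches assuming box_points = (startX, startY, endX, endY). For the
# epsilon comparison above to match, calc_centroid must compute centroids
# exactly the way CentroidTracker does (including any integer rounding).
from typing import NamedTuple, Tuple

class ObjectFrame(NamedTuple):
    obj_type: str
    obj_id: int
    centroid: Tuple[float, float]
    size: float

def calc_centroid(box_points):
    # center of a (startX, startY, endX, endY) bounding box
    (start_x, start_y, end_x, end_y) = box_points
    return ((start_x + end_x) / 2.0, (start_y + end_y) / 2.0)

def calc_size(box_points):
    # area of the bounding box
    (start_x, start_y, end_x, end_y) = box_points
    return abs(end_x - start_x) * abs(end_y - start_y)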
Example #4
from utils.camera import add_camera_args, Camera
from utils.od_utils import read_label_map, build_trt_pb, load_trt_pb, \
                           write_graph_tensorboard, detect
#from utils.visualization import BBoxVisualization
import math

ALPHA = 0.5
FONT = cv2.FONT_HERSHEY_PLAIN
TEXT_SCALE = 1.0
TEXT_THICKNESS = 1
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)

args = None

ct = ot.CentroidTracker()
temp = []

# Constants
DEFAULT_MODEL = 'ssd_mobilenet_v1_coco'
DEFAULT_LABELMAP = 'third_party/models/research/object_detection/' \
                   'data/mscoco_label_map.pbtxt'
WINDOW_NAME = 'Guardian'
BBOX_COLOR = (0, 255, 0)  # green


def parse_args():
    """Parse input arguments."""
    desc = ('This script captures and displays live camera video, '
            'and does real-time object detection with TF-TRT model '
            'on Jetson TX2/TX1/Nano')
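    # NOTE: the remainder of parse_args() is truncated in this excerpt; a
    # hypothetical completion follows (assumes argparse is imported and that
    # add_camera_args() attaches camera options to the parser and returns it).
    parser = argparse.ArgumentParser(description=desc)
    parser = add_camera_args(parser)
    parser.add_argument('--model', default=DEFAULT_MODEL, type=str,
                        help='object detection model [%s]' % DEFAULT_MODEL)
    parser.add_argument('--labelmap', default=DEFAULT_LABELMAP, type=str,
                        help='label map file [%s]' % DEFAULT_LABELMAP)
    args = parser.parse_args()
    return args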
Example #5
# otherwise, grab a reference to the video file
else:
	print("[INFO] opening video file...")
	vs = cv2.VideoCapture(args["input"])

# initialize the video writing process object (we'll instantiate
# later if need be) along with the frame dimensions
writerProcess = None
W = None
H = None

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a trackable object
ct = centroidtracker.CentroidTracker(maxDisappeared=15, maxDistance=100)
trackers = []
trackableObjects = {}

# initialize the direction info variable (used to store information
# such as up/down or left/right people count)
directionInfo = None

# initialize the MOG foreground background subtractor and start the
# frames per second throughput estimator
mog = cv2.createBackgroundSubtractorMOG2()
fps = FPS().start()

# loop over frames from the video stream
while True:
	# grab the next frame and handle if we are reading from either
	# VideoCapture or VideoStream
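	# NOTE: the loop body is truncated in this excerpt; what follows is a
	# hypothetical sketch of the usual pattern for this kind of counting
	# loop: read a frame, advance the dlib correlation trackers, then hand
	# their boxes to the centroid tracker to associate object IDs.
	frame = vs.read()
	frame = frame[1] if args.get("input", False) else frame
	if frame is None:
		break  # end of the video file

	if W is None or H is None:
		(H, W) = frame.shape[:2]

	# dlib trackers expect RGB input
	rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
	rects = []
	for tracker in trackers:
		tracker.update(rgb)
		pos = tracker.get_position()
		rects.append((int(pos.left()), int(pos.top()),
			int(pos.right()), int(pos.bottom())))

	# map the tracked boxes to stable object IDs
	objects = ct.update(rects)
	fps.update()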
Example #6
    def update(self):
        # increment the total number of frames examined during the
        # start and end intervals
        self._numFrames += 1

    def elapsed(self):
        # return the total number of seconds between the start and
        # end interval
        return (self._end - self._start).total_seconds()

    def fps(self):
        # compute the (approximate) frames per second
        return self._numFrames / self.elapsed()
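
# NOTE: hypothetical usage sketch for the FPS helper above. start() and
# stop() are not shown in this fragment; they are assumed from the
# `fps = FPS().start()` calls elsewhere in these examples.
fps = FPS().start()
for _ in range(100):    # stand-in for the real frame loop
    fps.update()        # call once per processed frame
fps.stop()
print("approx. FPS: {:.2f}".format(fps.fps()))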


Example #7
# initialize our centroid tracker and frame dimensions
ct = centroidtracker.CentroidTracker()
(H, W) = (None, None)
print(args["prototxt"], args["model"])

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# initialize the video stream and allow the camera sensor to warmup
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()

# loop over 300 frames from the video stream
for i in range(300):
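	# NOTE: the loop body is truncated here; what follows is a hypothetical
	# sketch of the usual detect-then-track pattern for an SSD Caffe model
	# (assumes numpy is imported as np; thresholds are illustrative).
	frame = vs.read()
	(H, W) = frame.shape[:2]

	# forward the frame through the detector and collect confident boxes
	blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H), (104.0, 177.0, 123.0))
	net.setInput(blob)
	detections = net.forward()
	rects = []
	for j in range(detections.shape[2]):
		if detections[0, 0, j, 2] > 0.5:
			box = detections[0, 0, j, 3:7] * np.array([W, H, W, H])
			rects.append(box.astype("int"))

	# associate detections across frames with the centroid tracker
	objects = ct.update(rects)
	fps.update()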