Esempio n. 1
0
    def tracker(self):
        """Return the shared Tracker, creating it lazily on first access."""
        if self._tracker:
            return self._tracker
        Logger.info("Tracker: initialization")
        # Imported here so the (heavy) tracking module is only loaded on demand.
        from tracking import Tracker

        self._tracker = Tracker()
        return self._tracker
Esempio n. 2
0
def create_track_statistics(cur, con, query, params, log=True):
    """Execute *query* once per parameter set, collecting tracker output and timings.

    Parameters:
        cur: database cursor used to run the query.
        con: database connection handed to the Tracker.
        query: SQL string with positional ``{}`` placeholders.
        params: sequence of parameter tuples substituted into *query*.
        log: when True, print an in-place percentage progress indicator.

    Returns:
        (trackings, time_values): lists parallel to *params* holding the
        tracker snapshot and the elapsed seconds for each execution.
    """
    tracker = Tracker(con)
    trackings = []
    time_values = []
    total = len(params)  # hoisted out of the loop
    for i, param_set in enumerate(params):
        tracker.clear_track()
        # perf_counter is monotonic: immune to wall-clock adjustments that
        # would skew time.time()-based durations.
        start = time.perf_counter()
        # NOTE(review): str.format interpolates values directly into the SQL —
        # acceptable only for trusted params; prefer driver-level binding otherwise.
        cur.execute(query.format(*param_set))
        _ = cur.fetchone()
        time_values.append(time.perf_counter() - start)
        trackings.append(tracker.get_tracking())
        if log:
            print(f'{round((i * 100) / total, 2)}%', end='\r')
    return trackings, time_values
Esempio n. 3
0
    def __init__(self, host='0.0.0.0', port=8000):
        """Initialise connection state and start the background worker threads."""
        print("[+] Initializing Communication")

        self.host = host
        self.port = port
        self.server = None

        # Per-slot link status (-1 = unknown) and received signal strength.
        self.status = [-1] * 5
        self.rssi = [0] * 5

        self.tracker = Tracker()

        # Two daemons of work: one establishes the connection, one watches timeouts.
        self.connection_thread = threading.Thread(target=self.connect)
        self.connection_thread.start()

        self.to_loop = threading.Thread(target=self.timeout_loop)
        self.to_loop.start()
Esempio n. 4
0
def main():
    """Interactive single-object tracking over a video stream.

    Keys: 's' selects a new ROI and (re)initialises the tracker; 'q' quits.
    When --output is given, the annotated stream is recorded to an .avi file.
    """
    args = parse_args()
    tracker = Tracker(path=args.path,
                      showResult=True,
                      saveData=args.saveData,
                      border=10)
    cap = cv2.VideoCapture(args.input)
    bbox = None
    objectID = args.id

    out = None  # sentinel so cleanup can test the writer uniformly
    if args.output is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(os.path.join(args.output, args.id + '.avi'),
                              fourcc, 30.0, (1280, 720))

    try:
        while True:
            ret, frame = cap.read()

            if not ret:
                break

            frame = cv2.resize(frame, dsize=(1280, 720))

            key = cv2.waitKey(1) & 0xFF

            tracked_frame, bbox = tracker.update(frame, objectID, bbox)

            # Display and record the annotated frame (the original showed the
            # raw frame, leaving tracked_frame unused and overlays invisible).
            cv2.imshow('image', tracked_frame)
            if out is not None:
                out.write(tracked_frame)

            if key == ord('s'):
                bbox = cv2.selectROI('image', frame, fromCenter=False)
                tracker.init(frame, bbox)
                print('Selected {}'.format(bbox))
                continue
            if key == ord('q'):
                break
    finally:
        # Release capture/writer and close windows even if tracking raises.
        cap.release()
        cv2.destroyAllWindows()
        if out is not None:
            out.release()
Esempio n. 5
0
    def __init__(self, n_features=1000, t=38):
        """Set up the matcher/extractor pipeline and all tracking state.

        Parameters:
            n_features: maximum number of ORB features to extract per frame.
            t: minimum distance metric used when matching descriptors.
        """
        self.matcher = BFMatcher()
        self.extractor = ORB_Extractor(n_features)
        self.descriptors = []

        self.t = t  # minimum distance metric for matching
        self.set_max_features(n_features)
        self.features = []  # main feature set (duplicate second assignment removed)

        # Image size and center coordinates
        self.h = 0
        self.w = 0
        self.origin_y = 0
        self.origin_x = 0

        # Detection profiles and the currently active one
        self.profiles = []
        self.profile = None
        self.pid = 0

        self.color_ranges = []
        self.colors = []
        self.weights = []

        # Pipeline toggles: ORB detection and Lucas-Kanade tracking
        self.orb_detect = True
        self.lk_track = True
        self.tracker = Tracker()

        self.time = 0
        self.prev_detection = 0  # time (frame) of previous detection

        # PARAMETERS
        self.DETECTION_THRESHOLD = 14  # Confidence threshold for detection
        self.LOCK_THRESHOLD = 22  # Confidence threshold to activate tracking

        self.CYCLE_INTERVAL = 10  # Frames without detection before switching profiles
        self.TRACK_INTERVAL = 10  # Frames to perform tracking of detected points (after hard lock)
Esempio n. 6
0
# Module-level state for the detection/tracking pipeline.
trackers = []
trackables = {}

#file_name = './video/test3.mp4'
output_name = './output/output_test4.mp4'

# Load Yolo
# YOLOv4-tiny weights and config are read from local relative paths.
net = cv2.dnn.readNet("./model/yolov4-tiny.weights", "./cfg/yolov4-tiny.cfg")
layer_names = net.getLayerNames()
# NOTE(review): `i[0] - 1` assumes getUnconnectedOutLayers() returns 1-element
# arrays (older OpenCV); newer releases return plain scalars — confirm the
# installed cv2 version matches.
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# Run inference on the GPU via the CUDA backend/target.
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
# initialize Tracker
tracker = Tracker()

# initialize the video writer
writer = None
writer_frame_count = 0
writer_flag = 0
videonumber = 0
fourcc = cv2.VideoWriter_fourcc(*"mp4v")

#gst_out2 = "appsrc ! video/x-raw ! videoconvert ! x264enc tune=zerolatency bitrate=100 speed-preset=superfast ! rtph264pay config-interval=1 ! udpsink host=192.168.0.46 port=10000 sync=false"

#writer2 =cv2.VideoWriter(gst_out2,cv2.CAP_GSTREAMER,0,float(),(640,480),True)
def writeFrame(img):
    # use global variable, writer
    global writer
Esempio n. 7
0

def piStim():
    """Emit a single TTL pulse: drive ttlPin high, hold, then drive it low."""
    pulse_width = 0.5  # seconds
    GPIO.output(ttlPin, True)
    sleep(pulse_width)
    GPIO.output(ttlPin, False)


def rpiCallBack():
    """Trigger the TTL stimulus in a daemon process, rate-limited by refractoryPeriod."""
    global previousTime
    # Guard clause: ignore callbacks arriving inside the refractory window.
    if time() <= (previousTime + refractoryPeriod):
        return
    previousTime = time()
    stim = Process(target=piStim)
    stim.daemon = True
    stim.start()


# Track to a local video file; rpiCallBack fires a TTL pulse on each
# detection event (rate-limited by the refractory period).
tracker = Tracker(destFilePath='/home/pi/testTrack.mpg',
                  threshold=thrsh,
                  teleportationThreshold=1000,
                  plot=True,
                  fast=False,
                  minArea=50,
                  bgStart=5,
                  trackFrom=10,
                  trackTo=10000,
                  callback=rpiCallBack)
positions = tracker.track(roi=roi, record=True)

# Release only the TTL pin; other GPIO channels (if any) are left configured.
GPIO.cleanup(ttlPin)
Esempio n. 8
0
    # ROI
    # Build a circular region of interest only when both centre and radius
    # were supplied; otherwise the whole frame is tracked.
    if args.center and args.radius:
        roi = Circle(args.center, args.radius)
    else:
        roi = None

    ############################### TRACKING ########################################
    # Tracker is configured entirely from CLI arguments; destFilePath=None
    # means no annotated tracking video is written to disk.
    tracker = Tracker(srcFilePath=args.videoFile,
                      destFilePath=None,
                      threshold=args.threshold,
                      minArea=args.minArea,
                      maxArea=args.maxArea,
                      teleportationThreshold=args.teleportationThreshold,
                      bgStart=args.bgTime,
                      trackFrom=args.trackFrom,
                      trackTo=args.trackTo,
                      nBackgroundFrames=args.nBackgroundFrames,
                      nSds=args.nSds,
                      clearBorders=args.clearBorders,
                      normalise=False,
                      plot=args.plot,
                      fast=config['tracker']['fast'],
                      extractArena=False)
    positions = tracker.track(roi=roi)

    ################################ ANALYSIS ########################################
    os.chdir(destFolder)
    # Optional 1-D kernel smoothing of the trajectory before analysis.
    positions = filterPositions(
        positions, args.oneDKernel) if args.oneDKernel else positions
    # NOTE(review): reads the protected attribute tracker._stream for the
    # source frame rate — consider a public accessor on Tracker.
    samplingFreq = 1.0 / tracker._stream.fps