class TLD_IVMIT:
    def __init__(self, frame, window, init_frames_count=20):
        self.buffer = [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)]
        self.position = Position(self.buffer, *window)
        self.learning_component = LearningComponent(self.position.calculate_patch())
        self.detector = Detector(self.position, self.learning_component)
        self.tracker = Tracker(self.position)
        self.is_visible = True
        self.integrator = Integrator(self.learning_component)
        self.init_frames_count = init_frames_count
        self.detected_windows = None
        self.tracked_window = None

    def start(self, frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if self.init_frames_count == 0:
            start = time()
            self.tracked_window = self.tracker.track(frame, self.position)
            self.buffer[0] = frame
            print "Tracking:", time()- start

            start = time()
            self.detected_windows = self.detector.detect(self.position, self.tracked_window is not None)
            print "Detected windows count:", len(self.detected_windows)
            print "Detection:", time()- start

            start = time()
            # filtered_detected_windows = [(window, patch, proba) for window, patch, proba in self.detected_windows if proba > 0.7]
            single_window, self.is_visible = self.integrator.get_single_window(self.position, self.detected_windows, self.tracked_window)
            print "Integration:", time()- start

            if self.is_visible:
                self.position.update(*single_window)
            # start = time()
            # self.learning_component.n_expert()
            # self.learning_component.p_expert()
            # print "Update training set:", time()- start
        else:
            self.tracked_window = self.tracker.track(frame, self.position)
            self.buffer[0] = frame
            if self.tracked_window is not None:
                i = 0
                while i < 5:
                    self.position.update(x=np.random.randint(0,self.buffer[0].shape[1]-self.position.width))
                    if self.position.is_correct() and windows_intersection(self.position.get_window(), self.tracked_window) == 0:
                        self.learning_component.update_negatives(self.position.calculate_patch())
                        i += 1

                self.position.update(*self.tracked_window)
                self.learning_component.update_positives(self.position.calculate_patch())

                self.init_frames_count -= 1
            else:
                self.init_frames_count = 0
                self.is_visible = False

        return self.position
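A hedged driver sketch for the class above: feed frames from OpenCV and draw the current window while the target is visible. The (x, y, width, height) window layout is an assumption inferred from how Position is constructed and queried; get_window() itself appears in the snippet.

import cv2

cap = cv2.VideoCapture('input.mp4')
ok, first_frame = cap.read()
tld = TLD_IVMIT(first_frame, (100, 100, 50, 50))  # initial window: assumed (x, y, w, h)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    position = tld.start(frame)
    if tld.is_visible:
        x, y, w, h = position.get_window()  # window layout assumed
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('TLD', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()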
Example #2
def create_track_statistics(cur, con, query, params, log=True):
    tracker = Tracker(con)
    trackings = []
    time_values = []
    for i, param_set in enumerate(params):
        tracker.clear_track()
        start = time.time()
        cur.execute(query.format(*param_set))
        _ = cur.fetchone()
        end = time.time()
        time_values.append((end - start))
        trackings.append(tracker.get_tracking())
        if log:
            print(str(round((i * 100) / len(params), 2)) + '%', end='\r')
    return trackings, time_values
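A minimal sketch of calling create_track_statistics with sqlite3. Tracker(con) is assumed to collect per-query statistics from the connection, and the table and query below are placeholders, not from the original project.

import sqlite3

con = sqlite3.connect('example.db')
cur = con.cursor()
query = "SELECT * FROM measurements WHERE sensor_id = {} LIMIT {}"  # placeholder
params = [(1, 10), (2, 10), (3, 10)]
trackings, time_values = create_track_statistics(cur, con, query, params)
print('mean query time:', sum(time_values) / len(time_values))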
Example #3
    def tracker(self):
        if not self._tracker:
            Logger.info("Tracker: initialization")
            from tracking import Tracker

            self._tracker = Tracker()
        return self._tracker
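A self-contained sketch of the same lazy-initialization pattern. The @property decorator and the surrounding class are assumptions, since the snippet shows only the accessor body.

class App:
    def __init__(self):
        self._tracker = None

    @property
    def tracker(self):
        # Build the (expensive) tracker only on first access, then cache it.
        if not self._tracker:
            self._tracker = object()  # stands in for tracking.Tracker()
        return self._tracker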
Example #4
    def __init__(self, host='0.0.0.0', port=8000):

        print("[+] Initializing Communication")

        self.status = [-1, -1, -1, -1, -1]
        self.rssi = [0, 0, 0, 0, 0]
        self.host = host
        self.port = port

        self.server = None

        self.tracker = Tracker()

        self.connection_thread = threading.Thread(target=self.connect)
        self.connection_thread.start()

        self.to_loop = threading.Thread(target=self.timeout_loop)
        self.to_loop.start()
Example #5
def main():
    args = parse_args()
    tracker = Tracker(path=args.path,
                      showResult=True,
                      saveData=args.saveData,
                      border=10)
    cap = cv2.VideoCapture(args.input)
    bbox = None
    objectID = args.id

    if args.output is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(os.path.join(args.output, args.id + '.avi'),
                              fourcc, 30.0, (1280, 720))

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        frame = cv2.resize(frame, dsize=(1280, 720))

        key = cv2.waitKey(1) & 0xFF

        tracked_frame, bbox = tracker.update(frame, objectID, bbox)

        cv2.imshow('image', frame)
        if args.output is not None:
            out.write(frame)

        if key == ord('s'):
            bbox = cv2.selectROI('image', frame, fromCenter=False)
            tracker.init(frame, bbox)
            print('Selected {}'.format(bbox))
            continue
        if key == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    if args.output is not None:
        out.release()
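A plausible parse_args() for the script above, reconstructed from the attributes it reads (input, output, path, id, saveData). The original flag names are not shown, so treat these as assumptions.

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', required=True, help='video file to read')
    parser.add_argument('--output', default=None, help='directory for the .avi output')
    parser.add_argument('--path', default='.', help='data path handed to Tracker')
    parser.add_argument('--id', required=True, help='ID of the object to track')
    parser.add_argument('--saveData', action='store_true')
    return parser.parse_args()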
Example #6
    def __init__(self, frame, window, init_frames_count=20):
        self.buffer = [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)]
        self.position = Position(self.buffer, *window)
        self.learning_component = LearningComponent(self.position.calculate_patch())
        self.detector = Detector(self.position, self.learning_component)
        self.tracker = Tracker(self.position)
        self.is_visible = True
        self.integrator = Integrator(self.learning_component)
        self.init_frames_count = init_frames_count
        self.detected_windows = None
        self.tracked_window = None
Example #7
    def __init__(self, n_features=1000, t=38):
        self.matcher = BFMatcher()
        self.extractor = ORB_Extractor(n_features)
        self.descriptors = []

        self.t = t  # minimum distance metric for matching
        self.set_max_features(n_features)
        self.features = []  # main feature set

        # Image size and center coordinates
        self.h = 0
        self.w = 0
        self.origin_y = 0
        self.origin_x = 0

        self.profiles = []
        self.profile = None
        self.pid = 0

        self.color_ranges = []
        self.colors = []
        self.weights = []

        self.orb_detect = True
        self.lk_track = True
        self.tracker = Tracker()

        self.time = 0
        self.prev_detection = 0  # time (frame) of previous detection

        # PARAMETERS
        self.DETECTION_THRESHOLD = 14  # Confidence threshold for detection
        self.LOCK_THRESHOLD = 22  # Confidence threshold to activate tracking

        self.CYCLE_INTERVAL = 10  # Frames without detection before switching profiles
        self.TRACK_INTERVAL = 10  # Frames to perform tracking of detected points (after hard lock)
Example #8
trackers = []
trackables = {}

#file_name = './video/test3.mp4'
output_name = './output/output_test4.mp4'

# Load Yolo
net = cv2.dnn.readNet("./model/yolov4-tiny.weights", "./cfg/yolov4-tiny.cfg")
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
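# NOTE: on OpenCV >= 4.5.4, getUnconnectedOutLayers() returns a flat array of
# indices, so use layer_names[i - 1] instead of layer_names[i[0] - 1].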

net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
# initialize Tracker
tracker = Tracker()

# initialize the video writer
writer = None
writer_frame_count = 0
writer_flag = 0
videonumber = 0
fourcc = cv2.VideoWriter_fourcc(*"mp4v")

#gst_out2 = "appsrc ! video/x-raw ! videoconvert ! x264enc tune=zerolatency bitrate=100 speed-preset=superfast ! rtph264pay config-interval=1 ! udpsink host=192.168.0.46 port=10000 sync=false"

#writer2 =cv2.VideoWriter(gst_out2,cv2.CAP_GSTREAMER,0,float(),(640,480),True)

def writeFrame(img):
    # use the global writer; the body is completed minimally here, assuming
    # every frame should be written once the writer exists
    global writer
    if writer is not None:
        writer.write(img)
Example #9
    else:
        args.bgTime = viewer.timeStrToFrameIdx(args.bgTime)
        args.trackFrom = viewer.timeStrToFrameIdx(args.trackFrom)
        args.trackTo = viewer.timeStrToFrameIdx(args.trackTo)

    # ROI
    if args.center and args.radius:
        roi = Circle(args.center, args.radius)
    else:
        roi = None
        
    ############################### TRACKING ########################################
    tracker = Tracker(srcFilePath=args.videoFile, destFilePath=None,
                    threshold=args.threshold, minArea=args.minArea,
                    maxArea=args.maxArea, teleportationThreshold=args.teleportationThreshold,
                    bgStart=args.bgTime, trackFrom=args.trackFrom, trackTo=args.trackTo,
                    nBackgroundFrames=args.nBackgroundFrames, nSds=args.nSds,
                    clearBorders=args.clearBorders, normalise=False,
                    plot=args.plot, fast=config['tracker']['fast'], 
                    extractArena=False)
    positions = tracker.track(roi=roi)

    ################################ ANALYSIS ########################################
    os.chdir(destFolder)
    positions = filterPositions(positions, args.oneDKernel) if args.oneDKernel else positions
    samplingFreq = 1.0/tracker._stream.fps

    # Track
    plotTrack(positions, tracker.bg)
    plt.savefig('mousePath'+imgExt) if args.saveGraphs else plt.show()

    # Angles
Example #10

def piStim():
    GPIO.output(ttlPin, True)
    sleep(0.5)
    GPIO.output(ttlPin, False)


def rpiCallBack():
    global previousTime
    if time() > (previousTime + refractoryPeriod):
        previousTime = time()
        p = Process(target=piStim)
        p.daemon = True
        p.start()


tracker = Tracker(destFilePath='/home/pi/testTrack.mpg',
                  threshold=thrsh,
                  teleportationThreshold=1000,
                  plot=True,
                  fast=False,
                  minArea=50,
                  bgStart=5,
                  trackFrom=10,
                  trackTo=10000,
                  callback=rpiCallBack)
positions = tracker.track(roi=roi, record=True)

GPIO.cleanup(ttlPin)
Example #11
def draw(frame: np.ndarray, out: List[np.ndarray]) -> None:
    for person in out:
        for point in person:
            frame = cv2.circle(frame, (int(point[0]), int(point[1])), radius,
                               (0, 0, 255), thickness, cv2.FILLED)
    for person_id, point in tracked_dict.items():
        frame = cv2.putText(frame, f'person_{person_id}',
                            (int(point[0]), int(point[1])),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, color, thickness,
                            cv2.LINE_4)


if __name__ == '__main__':
    is_use_hashed_data = True
    video_streams = cv2.VideoCapture('pedastrians.mp4')
    tracker = Tracker()
    color = (255, 0, 0)
    radius = 3
    thickness = 2
    counter = 0
    if is_use_hashed_data:
        with open('core/skeletons.json', 'r') as file:
            frame2persons = json.load(file)
    else:
        pose_estimator = PoseEstiamtion(prediction_size=(640, 360),
                                        init_size=(1280, 720))

    while True:
        _, frame = video_streams.read()
        if frame is None:
            break
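The loop above is cut off in the source. A heavily hedged sketch of one way it could continue, using only names already present in the snippet; the frame2persons layout, the tracker.track() call, and the pose_estimator method are all assumptions, not the original code.

        # --- hypothetical continuation; every call below is an assumption ---
        if is_use_hashed_data:
            persons = frame2persons.get(str(counter), [])  # assumed JSON layout
        else:
            persons = pose_estimator.predict(frame)  # assumed method name
        tracked_dict = tracker.track(persons)  # assumed Tracker API
        draw(frame, persons)
        cv2.imshow('tracking', frame)
        counter += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break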
Example #12
    # ROI
    if args.center and args.radius:
        roi = Circle(args.center, args.radius)
    else:
        roi = None

    ############################### TRACKING ########################################
    tracker = Tracker(srcFilePath=args.videoFile,
                      destFilePath=None,
                      threshold=args.threshold,
                      minArea=args.minArea,
                      maxArea=args.maxArea,
                      teleportationThreshold=args.teleportationThreshold,
                      bgStart=args.bgTime,
                      trackFrom=args.trackFrom,
                      trackTo=args.trackTo,
                      nBackgroundFrames=args.nBackgroundFrames,
                      nSds=args.nSds,
                      clearBorders=args.clearBorders,
                      normalise=False,
                      plot=args.plot,
                      fast=config['tracker']['fast'],
                      extractArena=False)
    positions = tracker.track(roi=roi)

    ################################ ANALYSIS ########################################
    os.chdir(destFolder)
    positions = filterPositions(
        positions, args.oneDKernel) if args.oneDKernel else positions
    samplingFreq = 1.0 / tracker._stream.fps
Example #13
class TCPServer(object):

    def __init__(self, host='0.0.0.0', port=8000):

        print("[+] Initializing Communication")

        self.status = [-1, -1, -1, -1, -1]
        self.rssi = [0, 0, 0, 0, 0]
        self.host = host
        self.port = port

        self.server = None

        self.tracker = Tracker()

        self.connection_thread = threading.Thread(target=self.connect)
        self.connection_thread.start()

        self.to_loop = threading.Thread(target=self.timeout_loop)
        self.to_loop.start()

        # self.connection_thread.join()

    def connect(self):
        # print('waiting')
        # time.sleep(3)  # TODO prevents overwriting of flask socket?
        print("[+] Mesh server listening on port {}".format(self.port))
        self.server = socket(AF_INET, SOCK_STREAM)
        self.server.bind((self.host, self.port))

        self.server.listen(10)

        while True:
            client, address = self.server.accept()
            print("[+] Connection successful from {0}".format(address)) 
            
            client_thread = threading.Thread(target=self.start, args=(client,))
            client_thread.start()



    def timeout_loop(self):
        while True:
            self.tracker.check_timeouts()
            time.sleep(1)


    def start(self, client):
        while True:
            data = client.recv(4096)
            if not data:
                break

            data_list = data.decode().split(' ')

            # self.status[int(data_list[0])] = int(data_list[1])
            # self.rssi[int(data_list[0])] = int(data_list[2])
            node = int(data_list[0])
            status = int(data_list[1])
            rssi = float(data_list[2])

            print(node, status, rssi)

            alarm = self.tracker.respond_to_pi(node, status, rssi)
            # Sends an alert randomly for now
            client.send(str(alarm).encode())

        client.close()


    def close(self):
        self.server.close()
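A minimal client sketch for exercising the server above, assuming only the space-separated "node status rssi" message format that start() parses and the alarm reply it sends back.

from socket import socket, AF_INET, SOCK_STREAM

client = socket(AF_INET, SOCK_STREAM)
client.connect(('127.0.0.1', 8000))
client.send('3 1 -70.5'.encode())   # node 3, status 1, RSSI -70.5
alarm = client.recv(4096).decode()  # server replies with the alarm flag
print('alarm response:', alarm)
client.close()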
Example #14
from time import time, sleep
from multiprocessing import Process

import RPi.GPIO as GPIO

refractoryPeriod = 1
previousTime = 0  # last stimulation time; an assumed starting value

ttlPin = 5

GPIO.setmode(GPIO.BCM)
GPIO.setup(ttlPin, GPIO.OUT, initial=GPIO.LOW)

def piStim():
    GPIO.output(ttlPin, True)
    sleep(0.5)
    GPIO.output(ttlPin, False)

def rpiCallBack():
    global previousTime
    if time() > (previousTime + refractoryPeriod):
        previousTime = time()
        p = Process(target=piStim)
        p.daemon = True
        p.start()

tracker = Tracker(destFilePath='/home/pi/testTrack.mpg',
                  threshold=thrsh, teleportationThreshold=1000,
                  plot=True, fast=False,
                  minArea=50,
                  bgStart=5, trackFrom=10,
                  trackTo=10000,
                  callback=rpiCallBack)
positions = tracker.track(roi=roi, record=True)

GPIO.cleanup(ttlPin)
Example #15
class Detector:
    """Performs feature extraction and matching."""
    def __init__(self, n_features=1000, t=38):
        self.matcher = BFMatcher()
        self.extractor = ORB_Extractor(n_features)
        self.descriptors = []

        self.t = t  # minimum distance metric for matching
        self.set_max_features(n_features)
        self.features = []  # main feature set

        # Image size and center coordinates
        self.h = 0
        self.w = 0
        self.origin_y = 0
        self.origin_x = 0

        self.profiles = []
        self.profile = None
        self.pid = 0

        self.color_ranges = []
        self.colors = []
        self.weights = []

        self.orb_detect = True
        self.lk_track = True
        self.tracker = Tracker()

        self.time = 0
        self.prev_detection = 0  # time (frame) of previous detection

        # PARAMETERS
        self.DETECTION_THRESHOLD = 14  # Confidence threshold for detection
        self.LOCK_THRESHOLD = 22  # Confidence threshold to activate tracking

        self.CYCLE_INTERVAL = 10  # Frames without detection before switching profiles
        self.TRACK_INTERVAL = 10  # Frames to perform tracking of detected points (after hard lock)

    def load_profile(self, profile):
        """Load object information."""
        profile.features = load_features(profile.feature_file, 1)
        self.profiles.append(profile)
        self.profile = self.profiles[-1]
        print('(Detector) Loaded profile: ', self.profile.label)

    def detect(self, frame, mask=None):
        """Main function. Performs ORB detection and tracking."""
        img = frame.copy()
        kp_map = np.zeros(frame.shape[:2])
        self.time += 1
        if self.time_since_detection() > self.CYCLE_INTERVAL:
            self.cycle_profile()
            print('Detect mode: ', self.profile.label)

        # Output
        confidence = 0
        x = 0
        y = 0

        if self.tracker.tracking > 0:
            tracking, new_pts = self.tracker.track(frame)
            if new_pts.shape[0] > 1:
                # img = draw_points(img, new_pts, (0, 0, 255))
                if tracking == 0:
                    """Tracking epoch complete. Get the new point locations and add them to the keypoint map."""
                    for pt in new_pts:
                        kp_map[int(pt[1]), int(pt[0])] += 2
                    self.tracker.tracking = 0
                    self.orb_detect = True
                else:
                    """Skip ORB detection and continue tracking."""
                    self.orb_detect = False
                    pt1, pt2 = bound(new_pts)
                    confidence = self.tracker.confidence
                    img, x, y = self.detection_offset(img, [pt1, pt2],
                                                      confidence)
            else:
                """Reset tracking and continue with ORB detection."""
                print('Reset tracking')
                self.tracker.tracking = 0
                self.orb_detect = True

        if self.orb_detect:  # Keypoint detection
            frame_kp, frame_des = self.detect_features(frame, mask)
            # for k in frame_kp:      # Draw all keypoints
            #     px, py = k.pt
            #     img[int(py), int(px)] = (0, 255, 0)

            kp, des = self.match(frame_kp, frame_des)
            # img = cv2.drawKeypoints(img, kp, img)
            if len(kp) > 3:
                pts = cv2.KeyPoint_convert(kp)
                pts = np.array([(int(pt[0]), int(pt[1]))
                                for pt in pts]).reshape(-1, 2)
                img = draw_points(img, pts, (255, 0, 0))
                for i, pt in enumerate(pts):
                    kp_map[pt[1], pt[0]] = 1

                kp_colors = np.array([frame[pt[1], pt[0]]
                                      for pt in pts]).reshape(-1, 1, 3)
                kp_colors = cv2.cvtColor(kp_colors, cv2.COLOR_BGR2HSV)

                candidate_centers = []  # keypoints within color range
                for c in range(len(self.profile.colors)):
                    [low_bound, high_bound] = self.profile.color_ranges[c]
                    res = np.array(
                        cv2.inRange(kp_colors, np.array(low_bound),
                                    np.array(high_bound)))
                    for i in range(res.size):
                        if res[i] == 255:
                            img = cv2.circle(img, (pts[i][0], pts[i][1]),
                                             3,
                                             self.profile.colors[c],
                                             thickness=1)
                            candidate_centers.append(pts[i])
                            kp_map[pts[i][1],
                                   pts[i][0]] = self.profile.weights[c]

                # Find max window score
                if len(candidate_centers) > 0:
                    max_score = self.DETECTION_THRESHOLD
                    max_i = -1
                    win_size = 35
                    for i, pt in enumerate(candidate_centers):
                        score = region_sum(kp_map, pt, win_size)
                        if score > max_score:
                            max_score = score
                            max_i = i

                    # Draw box around best keypoint window
                    if max_i >= 0:
                        confidence = max_score
                        pt1 = candidate_centers[max_i]
                        pt2 = (pt1[0] + win_size, pt1[1] + win_size)
                        pt1 = (pt1[0] - win_size, pt1[1] - win_size)
                        img, x, y = self.detection_offset(img, [pt1, pt2],
                                                          confidence=max_score)
                        if self.lk_track and self.tracker.tracking < 1 and max_score > self.LOCK_THRESHOLD:
                            """ Lock achieved: track points in the designated region. """
                            # Shrink region slightly
                            pt1 = np.add(pt1, 2)
                            pt2 = np.add(pt2, -2)
                            region_pts = inner_points(
                                pt1, pt2, np.array(candidate_centers))
                            if len(region_pts) > 2:
                                # print('Tracking %d pts' % len(region_pts))
                                self.tracker.start_track(
                                    frame,
                                    region_pts,
                                    track_len=self.TRACK_INTERVAL,
                                    confidence=max_score)

        if confidence > self.DETECTION_THRESHOLD:
            self.prev_detection = self.time
        return img, confidence, x, y

    def set_max_features(self, n):
        """Maximum number of features to extract in frame."""
        self.extractor.extractor.setMaxFeatures(n)

    def time_since_detection(self):
        return self.time - self.prev_detection

    def cycle_profile(self):
        self.pid += 1
        if self.pid >= len(self.profiles):
            self.pid = 0
        self.profile = self.profiles[self.pid]
        self.prev_detection = self.time

    def print_info(self):
        d_size = self.extractor.extractor.descriptorSize()
        d_type = self.extractor.extractor.descriptorType()
        d_norm = self.extractor.extractor.defaultNorm()
        print('Type:', d_type, 'Size', d_size, 'Norm', d_norm)

    def detect_features(self, img, mask=None):
        kp, des = self.extractor.extract(img, mask)
        return kp, des

    def detect_keypoints(self, img, mask=None):
        """Return array of keypoint coordinates."""
        kp, _ = self.detect_features(img, mask)
        # kp = sorted(kp, key=lambda x: x.response, reverse=True)[:n]
        return np.array(cv2.KeyPoint_convert(kp)).reshape(-1, 1, 2)

    def match(self, kp, des, features=None, mask=None):
        """Extracts features from input image and matches them to known features."""
        matches = self.matcher.match(des, self.profile.features, t=self.t)
        matched_kp = matching_keypoints(matches, kp)
        matched_des = [des[m.queryIdx] for m in matches]
        return matched_kp, matched_des

    def match_descriptors(self, des1, des2):
        matches = self.matcher.match(des1, des2, t=self.t)
        return matches

    def match_descriptors_radius(self, des1, des2):
        matches = self.matcher.radiusMatch(des1, des2, maxDistance=self.t)
        return matches

    def save_features(self, name='features', path='data/'):
        """Output feature set to a file using Pickle."""
        import pickle
        import os
        num_features = len(self.descriptors)
        name += '_' + str(num_features)
        ext = '.dat'
        file_name = path + name + ext
        if not os.path.exists(path):
            os.makedirs(path)
        with open(file_name, 'wb') as out_file:
            pickle.dump(self.descriptors, out_file, protocol=2)
        print('(Detector) Saved %d features to %s' %
              (len(self.descriptors), file_name))

    def load_features(self, file, sample=1):
        """Load features from Pickled file"""
        self.descriptors = load_features(file, sample)
        self.matcher.features = self.descriptors
        print('(Detector) Loaded %d features from %s' %
              (len(self.descriptors), file))

    def set_features(self, feature_set):
        self.descriptors = feature_set
        self.matcher.features = feature_set

    def detection_offset(self, img, bounding_box, confidence=0, show=True):
        """Given a bounding box of the target, output the offset from the center and annotate image with detection."""
        if self.w == 0:  # Calibrate image dimensions
            self.h, self.w = img.shape[:2]
            self.origin_x = int(self.w / 2)
            self.origin_y = int(self.h / 2)
            if self.w == 0 or self.h == 0:
                return img, 0, 0

        pt1 = bounding_box[0]
        pt2 = bounding_box[1]
        center_x, center_y = midpoint(pt1, pt2)  # Center of detection region

        x = int((center_x / self.w) *
                100)  # Convert coordinates to percentage of view window size
        y = int((center_y / self.h) * 100)
        if show:
            img = cv2.rectangle(img, pt1, pt2, (0, 255, 0), 2)  # bounding box
            img = cv2.rectangle(img, (pt1[0], pt1[1] - 2),
                                (pt1[0] + 39, pt1[1] - 17), (0, 0, 0),
                                -1)  # text box
            text = str(confidence)
            if self.profile is not None:
                text = text + '  ' + self.profile.label
            img = cv2.putText(img, text, (pt1[0], pt1[1] - 3),
                              cv2.FONT_HERSHEY_PLAIN, 1.3,
                              (255, 255, 255))  # label
            cv2.arrowedLine(
                img,
                (self.origin_x, self.origin_y),
                (center_x, center_y),
                color=(255, 255, 255),  # arrow
                thickness=1)  # offset
        return img, x, y
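A hypothetical frame loop around the Detector class above. It assumes a populated Profile object has already been prepared and loaded via load_profile(), since detect() dereferences self.profile.

import cv2

detector = Detector(n_features=1000, t=38)
# detector.load_profile(profile)  # a populated Profile is assumed here

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    annotated, confidence, x, y = detector.detect(frame)
    cv2.imshow('detections', annotated)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()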
Example #16
from tracking import Tracker

size = 10**6
tracker = Tracker("Doing something", size)
for i in range(size):
    # DO SOMETHING
    tracker.increment()  # increment after every iteration
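For reference, a minimal stand-in with the same interface, assuming Tracker(label, total) reports progress as increment() is called; the real tracking module may behave differently.

class Tracker:
    def __init__(self, label, total):
        self.label = label
        self.total = total
        self.count = 0

    def increment(self):
        self.count += 1
        step = max(1, self.total // 100)  # report roughly every 1%
        if self.count % step == 0 or self.count == self.total:
            print(f'{self.label}: {100 * self.count // self.total}%', end='\r')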