def videos():
    """Read frames from a video file, detect objects, and track them live.

    Shows an annotated preview window; press 'q' to exit early.

    Fixes over the original: stops when the stream ends instead of
    spinning forever on failed reads, and releases the capture handle.
    """
    cap = cv2.VideoCapture('images/video1.avi')
    objs = tracker.Tracker()
    try:
        while True:
            r, img = cap.read()
            if not r:
                # End of stream or read error: the original looped forever here.
                break
            print("=" * 40)
            start_time = time.time()
            results = process_image(img)
            end_time = time.time()
            # Draw the tracker's predicted centroids.
            for centroid in objs.predict():
                cv2.circle(img, centroid, 10, 0, thickness=2)
            # Keep only confident detections and feed them to the tracker.
            for cat, conf, bounds in results:
                if conf < 0.90:
                    continue
                draw_bounds(img, bounds)
                objs.add(bounds)
            track_time = time.time()
            print("Total Time:", end_time - start_time, track_time - end_time)
            cv2.imshow("preview", img)
            # Press Q on keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    finally:
        # Release the capture device (leaked in the original).
        cap.release()
def __init__(self, my_id, routing_m, msg_f, bootstrap_mode=False):
    """Store the responder's collaborators and its bootstrap-mode flag."""
    # Fresh per-node storage helpers.
    self._tracker = tracker.Tracker()
    self._token_m = token_manager.TokenManager()
    # Injected collaborators.
    self._my_id = my_id
    self._routing_m = routing_m
    self.msg_f = msg_f
    self.bootstrap_mode = bootstrap_mode
def __init__(self, metainfo):
    """Initialize client state from a .torrent metainfo file.

    Parses the metainfo, announces to the tracker, computes the total
    download length and piece bookkeeping, and fetches the peer list.

    Raises:
        ValueError: if the metainfo declares neither b'length' nor b'files'.
    """
    self.peer_id = self._gen_peer_id()
    self.metainfo = Metainfo(metainfo)
    self.tracker = tracker.Tracker(self)
    self.trackerResponse = self.request_tracker()
    logging.debug('meta info details...')
    logging.debug(self.metainfo.get(b'info'))
    # list of {'ip': ip, 'port': port} peer dictionaries
    self.file_length = 0
    # Single-file torrents carry b'length'; multi-file ones a b'files' list.
    if (b'length' in self.metainfo.get(b'info')):
        self.file_length = self.metainfo.get(b'info')[b'length']
    elif (b'files' in self.metainfo.get(b'info')):
        for each_file in self.metainfo.get(b'info')[b'files']:
            self.file_length += each_file[b'length']
    else:
        raise ValueError('file length not defined in the torrent file')
    self.piece_length = self.metainfo.get(b'info')[b'piece length']
    # NOTE(review): this counts one piece too many when file_length is an
    # exact multiple of piece_length — confirm intended.
    self.num_pieces = int(self.file_length / self.piece_length) + 1
    self.peers = self.get_peers()
    self.connected_peers = []
    logging.info('file length : %d', self.file_length)
    logging.debug('received the following response from the tracker : ')
    logging.debug(self.trackerResponse)
    logging.info('number of pieces : %d', self.num_pieces)
    self.pieces_needed = self.gen_pieces(
    )  # list of pieces the client still needs
    self.pieces_completed = []  # list of pieces the client has
def __init__(self, dht_addr):
    """Bootstrap a DHT node bound to `dht_addr`.

    Builds identity, storage and RPC plumbing, wires the routing
    manager's callbacks into the querier/responder, then bootstraps.
    """
    self.my_addr = dht_addr
    self.my_id = identifier.RandomId()
    self.my_node = Node(self.my_addr, self.my_id)
    self.tracker = tracker.Tracker()
    self.token_m = token_manager.TokenManager()
    self.reactor = ThreadedReactor()
    # RPC manager listens on the port component of the address.
    self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
    self.querier = Querier(self.rpc_m, self.my_id)
    self.routing_m = RoutingManager(self.my_node, self.querier,
                                    bootstrap_nodes)
    self.responder = Responder(self.my_id, self.routing_m, self.tracker,
                               self.token_m)
    # Route queries/responses/errors/timeouts into the routing manager
    # before bootstrapping starts.
    self.responder.set_on_query_received_callback(
        self.routing_m.on_query_received)
    self.querier.set_on_response_received_callback(
        self.routing_m.on_response_received)
    self.querier.set_on_error_received_callback(
        self.routing_m.on_error_received)
    self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
    self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)
    self.routing_m.do_bootstrap()
    self.rpc_m.add_msg_callback(QUERY, self.responder.on_query_received)
    self.lookup_m = LookupManager(self.my_id, self.querier, self.routing_m)
def testTracker(self):
    """Validate the configured tracker provider and credentials.

    Returns an error dict ({'rtn': code, 'error': message}) when the
    provider is unknown, a required username is missing, or the Tracker
    cannot be instantiated; returns None on success.
    """
    logging.info('testTracker')
    trackerConf = self.confData['tracker']
    try:
        provider = tracker.check_provider(trackerConf['id'])
        # Providers that take a username must have one configured.
        if 'username' in provider['param'] and trackerConf['user'] == '':
            return {
                'rtn': '422',
                'error': messages.returnCode['422'].format(provider['name'])
            }
        mytracker = tracker.Tracker(
            trackerConf['id'], {
                'username': trackerConf['user'],
                'password': trackerConf['password']
            })
    except myExceptions.InputError as e:
        return {
            'rtn': '404',
            'error': messages.returnCode['404'].format('Tracker', e.expr)
        }
    # Fixed: Python-2-only "except Exception, e" syntax.
    except Exception as e:
        return {
            'rtn': '404',
            'error': messages.returnCode['404'].format('Tracker', e)
        }
def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod):
    """Set up a DHT node, restoring identity/bootstrap nodes from saved state.

    `routing_m_mod` and `lookup_m_mod` are pluggable modules providing
    the RoutingManager and LookupManager implementations.
    """
    self.state_filename = os.path.join(state_path, STATE_FILENAME)
    self.load_state()
    # Fall back to a fresh random id when no saved state supplied one.
    if not self._my_id:
        self._my_id = identifier.RandomId()
    self._my_node = Node(dht_addr, self._my_id)
    self._tracker = tracker.Tracker()
    self._token_m = token_manager.TokenManager()
    self._reactor = ThreadedReactor()
    self._reactor.listen_udp(self._my_node.addr[1],
                             self._on_datagram_received)
    #self._rpc_m = RPCManager(self._reactor)
    self._querier = Querier(self._my_id)
    # Prefer nodes restored from disk; otherwise the hard-coded set.
    bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
    del self.loaded_nodes
    self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                   bootstrap_nodes)
    #        self._responder = Responder(self._my_id, self._routing_m,
    #                                    self._tracker, self._token_m)
    self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
    current_time = time.time()
    # Maintenance runs immediately; state is saved after the configured delay.
    self._next_maintenance_ts = current_time
    self._next_save_state_ts = current_time + SAVE_STATE_DELAY
    self._running = False
def __init__(self, dht_addr):
    """Bootstrap a DHT node bound to `dht_addr` using mostly-local wiring.

    Only the reactor, lookup manager and routing manager are kept on the
    instance; everything else lives in locals during construction.
    """
    my_addr = dht_addr
    my_id = identifier.RandomId()
    my_node = Node(my_addr, my_id)
    # Trailing underscores avoid shadowing the imported module names.
    tracker_ = tracker.Tracker()
    token_m = token_manager.TokenManager()
    self.reactor = ThreadedReactor()
    rpc_m = RPCManager(self.reactor, my_addr[1])
    querier_ = Querier(rpc_m, my_id)
    routing_m = RoutingManager(my_node, querier_, bootstrap_nodes)
    responder_ = Responder(my_id, routing_m, tracker_, token_m)
    # Wire routing-manager callbacks before bootstrapping.
    responder_.set_on_query_received_callback(routing_m.on_query_received)
    querier_.set_on_response_received_callback(
        routing_m.on_response_received)
    querier_.set_on_error_received_callback(routing_m.on_error_received)
    querier_.set_on_timeout_callback(routing_m.on_timeout)
    querier_.set_on_nodes_found_callback(routing_m.on_nodes_found)
    routing_m.do_bootstrap()
    rpc_m.add_msg_callback(QUERY, responder_.on_query_received)
    self.lookup_m = LookupManager(my_id, querier_, routing_m)
    self._routing_m = routing_m
def setup(self):
    """Build a responder over a mock routing manager for the tests."""
    routing_mock = routing_manager.RoutingManagerMock()
    self.tracker = tracker.Tracker()
    self.token_m = token_manager.TokenManager()
    self.responder = responder.Responder(tc.SERVER_ID, routing_mock,
                                         self.tracker, self.token_m)
    # Flag flipped by the notification callback under test.
    self.notification_callback_done = False
    self.responder.set_on_query_received_callback(self._notify_routing_m)
def __init__(self, argv, in3d=False):
    """Set up stereo eye tracking: per-eye trackers, depth estimator, video.

    `argv` is forwarded to __setup_video_input to select capture sources;
    `in3d` toggles 3D mode.
    """
    self.le_video = None
    self.re_video = None
    self.sc_video = None
    # One tracker per eye (left / right).
    self.le_track = tracker.Tracker()
    self.re_track = tracker.Tracker()
    self.d_estimator = depth.DepthEstimator()
    # Calibration slots 1..9, each a [left, right] pair.
    self.calibrations = {i: [None, None] for i in range(1, 10)}
    self.calibrating = False
    self.active = False
    # Must run before Eye construction: it populates le_video/re_video.
    self.__setup_video_input(argv)
    self.left_e = eye.Eye(self.le_track, self.le_video)
    self.right_e = eye.Eye(self.re_track, self.re_video)
    self.marker = cv2.imread('marker2.png', cv2.IMREAD_GRAYSCALE)
    self.screen = None
    self.in3d = in3d
    # Pipe endpoints for parent/child process communication.
    self.pipe_father, self.pipe_child = Pipe()
def __init__(self, torrent_file):
    """Load the torrent, build a random 20-digit peer id, announce 'started'."""
    self.torrent_file = torrent_file
    random.seed(datetime.datetime.now())
    # Peer id: 20 random decimal digits.
    digits = [str(random.randint(0, 9)) for _ in range(20)]
    self.peer_id = "".join(digits)
    self.torrent = torrent.Torrent(self.torrent_file, self.peer_id)
    self.event = "started"
    self.tracker = tracker.Tracker(self.torrent, 6881, self.event)
    self.tracker.results()
def test_MakeStockDict(self):
    """UpdateStockDict should key the stock dict by every watch-list code."""
    trk = tracker.Tracker(WATCH_LIST_FILE)
    trk.UpdateStockDict()
    stock_dict = trk.stocks
    expected = [
        'APT', 'CBA', 'NAB', 'ANZ', 'TRS', 'IFL', 'BHP', 'CSL', 'AMP', 'WOW'
    ]
    # Original compared list.sort() return values (always None), making the
    # assertion vacuous; compare sorted copies instead.
    assert sorted(stock_dict.keys()) == sorted(expected)
def __init__(self):
    """Wire together torrent, tracker, piece and peer managers, then start peers."""
    loaded = torrent.Torrent().load_from_path("torrent.torrent")
    self.torrent = loaded
    self.tracker = tracker.Tracker(loaded)
    self.pieces_manager = pieces_manager.PiecesManager(loaded)
    self.peers_manager = peers_manager.PeersManager(loaded,
                                                    self.pieces_manager)
    # Peer manager runs in the background from here on.
    self.peers_manager.start()
    logging.info("PeersManager Started")
    logging.info("PiecesManager Started")
def setUp(self):
    """Start the tracker on a fixed port with in-memory storage."""
    self.port = 8888
    self.inmemory = True
    self.interval = 10
    # Same keyword arguments, spelled without line continuations.
    self.tracker = tracker.Tracker(
        port=self.port, inmemory=self.inmemory, interval=self.interval)
def run_square():
    """Drive the rover in a square: move 0.5 m then turn 90°, four times.

    Flashes the headlights once at the start, reads the compass and takes
    pictures at each corner. The original unrolled the four sides by hand;
    this version loops, preserving the exact original action order.
    """
    try:
        l = lights.Lights()
        t = tracker.Tracker()
        cam = camera.Camera()
        c = t.c
        # Flash headlights to signal the run is starting.
        l.headlights(True)
        time.sleep(0.2)
        l.headlights(False)
        cam.take_picture()
        cam.take_nav_picture()
        # print("Heading %f" % (c.heading()))
        c.test_read()
        for side in range(4):
            t.move_dist(0.5)
            time.sleep(1)
            t.turn_relative(90)
            time.sleep(0.5)
            # print("Heading %f" % (c.heading()))
            c.test_read()
            # The original took no corner pictures after the final turn.
            if side < 3:
                cam.take_picture()
                cam.take_nav_picture()
        l.headlights(False)
    except:
        # Bare except is acceptable here: it reports and re-raises.
        print("Exception: ", sys.exc_info()[0])
        raise
def __init__(self):
    """Open the torrent and start the peer/piece managers.

    NOTE(review): `movie` is the empty string, so open_from_file('') is
    called — presumably a placeholder path to be filled in; confirm.
    """
    movie = ''
    loaded = torrent.Torrent().open_from_file(movie)
    self.torrent = loaded
    self.tracker = tracker.Tracker(loaded)
    self.pieces_manager = pieces_manager.PiecesManager(loaded)
    self.peers_manager = peers_manager.PeersManager(loaded,
                                                    self.pieces_manager)
    self.peers_manager.start()
    logging.info("PeersManager Started")
    logging.info("PiecesManager Started")
def main():
    """Create a Tracker in the CLI-selected mode and run the mapping phase."""
    cli_args = parse_args()
    point_tracker = tracker.Tracker(mode=cli_args.mode)
    # scanning phase
    #point_tracker.scan_points()
    #point_tracker = tracker.Tracker()
    # mapping phase
    point_tracker.run()
def main():
    """Run yeast-cell tracking on a video file or a directory of .jpg frames.

    argv[1] (optional) is the input path; a second argument enables debug.
    Each frame is converted to grayscale and passed through the background
    model and tracker via loop(); results are saved to CSV at the end.

    (Docstring and comments translated from the original Czech.)
    """
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        filename = "../../kvasinky1.avi"
    debug = False
    if len(sys.argv) > 2:
        debug = True
    # Initialization
    # Part 1: background model
    bm = model.BackgroundModel()
    # Part 2: tracker
    tr = tracker.Tracker()
    if op.isfile(filename):
        # Read video frames with OpenCV.
        import cv2
        cap = cv2.VideoCapture(filename)
        while (cap.isOpened()):
            ret, frame = cap.read()
            if ret:
                gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                loop(gray_img, bm, tr)
            else:
                break
        cap.release()
        cv2.destroyAllWindows()
    else:
        # Read .jpg frames with scipy/skimage.
        import scipy
        import scipy.misc
        import skimage
        import skimage.color
        # Fixed: original had a redundant double assignment (files = files = ...).
        files = sorted(glob.glob(op.join(filename, '*.jpg')))
        for filename in files:
            img = scipy.misc.imread(filename)
            gray_img = skimage.color.rgb2gray(img)
            loop(gray_img, bm, tr)
    tr.saveCsvFile()
def main():
    """Build a Tracker from CLI options (mode, scan/track videos) and run it."""
    opts = parse_args()
    point_tracker = tracker.Tracker(mode=opts.mode,
                                    scan_file=opts.scan_vid,
                                    track_file=opts.track_vid)
    # scanning phase
    #point_tracker.scan_points()
    #point_tracker = tracker.Tracker()
    # mapping phase
    point_tracker.run()
def __init__(self):
    """Load the torrent named on the command line and start the managers.

    Prints usage text and exits unless exactly one argument is given.
    """
    # Original condition `<= 2 and > 1` reduces to "exactly one user argument".
    if len(sys.argv) == 2:
        self.torrent = torrent.Torrent().load_from_path(sys.argv[1])
    else:
        print("Usage : python3 main.py <path_to_torrent_file> \n")
        sys.exit(1)
    self.tracker = tracker.Tracker(self.torrent)
    self.pieces_manager = pieces_manager.PiecesManager(self.torrent)
    self.peers_manager = peers_manager.PeersManager(self.torrent,
                                                    self.pieces_manager)
    self.peers_manager.start()
    logging.info("PeersManager Started")
    logging.info("PiecesManager Started")
def __init__(self, num_of_cams=DEFAULT_CAMS, cam_info=DEFAULT_CAM_INFO):
    """Create one tracking-mode Tracker per camera."""
    self._cam_info = cam_info
    self._num_of_cams = num_of_cams
    # External calibration results, filled in later.
    self._ext_callib = {}
    # One tracker per camera, all in TRACKING_MODE.
    self._trackers = [
        tracker.Tracker(mode=tracker.Tracker.TRACKING_MODE)
        for _ in range(self._num_of_cams)
    ]
def test_GetWatchList(self):
    """GetWatchList should reflect the watch-list file, including deletions.

    The original asserted `wl.sort() == cmp_wl.sort()`: list.sort() returns
    None, so every check passed vacuously. Compare sorted copies instead.
    """
    ## Check that it gets the correct watch list
    trk = tracker.Tracker(WATCH_LIST_FILE)
    expected = [
        'APT', 'CBA', 'NAB', 'ANZ', 'TRS', 'IFL', 'BHP', 'CSL', 'AMP', 'WOW'
    ]
    assert sorted(trk.GetWatchList()) == sorted(expected)
    ## Check that it correctly removes codes deleted from watch_list
    trk.WATCH_LIST_FILE = CHANGED_WATCH_LIST_FILE
    expected_changed = [
        'APT', 'CBA', 'NAB', 'ANZ', 'TRS', 'IFL', 'BHP', 'CSL', 'AMP'
    ]
    assert sorted(trk.GetWatchList()) == sorted(expected_changed)
def __init__(self, pymdht_version, my_node, state_filename, routing_m_mod,
             lookup_m_mod, experimental_m_mod, private_dht_name):
    """Assemble a pymdht controller: identity, messaging, routing, lookups.

    Identity precedence: id supplied via `my_node`, then the id saved on
    disk, then a freshly generated random id.
    """
    if size_estimation:
        self._size_estimation_file = open('size_estimation.dat', 'w')
    self.state_filename = state_filename
    saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
    my_addr = my_node.addr
    self._my_id = my_node.id  # id indicated by user
    if not self._my_id:
        self._my_id = saved_id  # id loaded from file
    if not self._my_id:
        # Fixed: original had a redundant `self._my_id = self._my_id = ...`.
        self._my_id = identifier.RandomId()  # random id
    self._my_node = Node(my_addr, self._my_id)
    self.msg_f = message.MsgFactory(pymdht_version, self._my_id,
                                    private_dht_name)
    self._tracker = tracker.Tracker()
    self._token_m = token_manager.TokenManager()
    self._querier = Querier()
    self._routing_m = routing_m_mod.RoutingManager(
        self._my_node, saved_bootstrap_nodes, self.msg_f)
    self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f)
    self._experimental_m = experimental_m_mod.ExperimentalManager(
        self._my_node.id, self.msg_f)
    # Schedule the periodic tasks relative to now.
    current_ts = time.time()
    self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
    self._next_maintenance_ts = current_ts
    self._next_timeout_ts = current_ts
    self._next_main_loop_call_ts = current_ts
    self._pending_lookups = []
def pipeline(img):
    '''
    Pipeline function for detection and tracking

    Runs the detector on `img`, matches detections to existing Kalman
    trackers by IoU, updates/spawns/retires trackers, and returns the
    frame annotated with the surviving tracks' boxes.
    '''
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    frame_count += 1
    # NOTE(review): `start` and `img_dim` are computed but never used here.
    start = time.time()
    img_dim = (img.shape[1], img.shape[0])
    z_box = det.get_localization(img)  # measurement
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        for i in range(len(z_box)):
            img1 = helpers.draw_box_label(img, z_box[i], box_color=(255, 0, 0))
            plt.imshow(img1)
        plt.show()
    # Current box estimates of all live trackers.
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)
    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd = 0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)
    # Deal with matched detections
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            # Kalman update with the measurement; state layout interleaves
            # position and velocity, hence indices 0/2/4/6 for the box.
            tmp_trk.kalman_filter(z)
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0
    # Deal with unmatched detections
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.id = track_id_list.popleft(
            )  # assign an ID for the tracker
            tracker_list.append(tmp_trk)
            x_box.append(xx)
    # Deal with unmatched tracks
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            # No measurement this frame: coast on prediction and count the miss.
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx
    # The list of tracks to be annotated
    good_tracker_list = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
            img = helpers.draw_box_label(
                img, x_cv2,
                helpers.trk_id_to_color(trk.id))  # Draw the bounding boxes on the
            # images
    # Book keeping: recycle ids of expired tracks, then drop them.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    return img
def createTracker(self):
    """Lazily create and return this object's Tracker.

    The object itself serves as the auth argument.
    """
    # Fixed: `is None` instead of `== None` (PEP 8 identity comparison).
    if self.tracker is None:
        #Use 'self' as the auth object
        self.tracker = tracker.Tracker(self, self.peers, 0)
    return self.tracker
def createTracker(self):
    """Lazily create and return this object's Tracker, backed by a MockAuth."""
    # Fixed: `is None` instead of `== None` (PEP 8 identity comparison).
    if self.tracker is None:
        self.auth = MockAuth()
        self.tracker = tracker.Tracker(self.auth, self.peers, 0)
    return self.tracker
def pipeline(img):
    """
    Pipeline function for detection and tracking

    Runs the detector on `img`, matches detections to existing Kalman
    trackers, spawns/retires trackers, draws boxes plus a trail through
    the accumulated leg points (global `points`), and shows the frame.
    """
    global frame_count
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    global debug
    global points
    frame_count += 1
    # NOTE(review): `img_dim` is computed but never used here.
    img_dim = (img.shape[1], img.shape[0])
    z_box = det.get_localization(img)  # measurement
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        img1 = img.copy()
        for i in range(len(z_box)):
            img1 = helpers.draw_box_label(i, img1, z_box[i],
                                          box_color=(255, 0, 0))
        plt.imshow(img1)
        plt.show()
    # Current box estimates of all live trackers.
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append(trk.box)
    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd = 0.3)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)
    # Deal with matched detections
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            # State interleaves position/velocity: indices 0/2/4/6 are the box.
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            # NOTE(review): unlike the sibling pipeline, no_losses is not
            # reset to 0 on a match here — confirm whether intentional.
            tmp_trk.hits += 1
    # Deal with unmatched detections
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.id = track_id_list.popleft()  # assign an ID for the tracker
            print(tmp_trk.id)
            tracker_list.append(tmp_trk)
            x_box.append(xx)
    # Deal with unmatched tracks
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            # No measurement this frame: coast on prediction, count the miss.
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx
    # The list of tracks to be annotated
    good_tracker_list = []
    if (len(tracker_list) == 0):
        # No trackers left: also drop the accumulated path points.
        print('list should be cleared now')
        points = []
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
    for good_track in good_tracker_list:
        box = good_track.box
        if (len(z_box) != 0):
            y_up, x_left, y_down, x_right = box
            #center = (int((x_left + x_right) / 2), int((y_up + y_down) / 2))
            # Bottom-center of the box approximates the feet position.
            legs = (int((x_left + x_right) / 2), y_down)
            points.append(legs)
            img = cv2.circle(img, legs, 20, (255, 0, 0), thickness=-1)
        img = helpers.draw_box_label(good_track.id, img, box)  # Draw the bounding boxes on the images
    # Connect consecutive leg points to draw the walked path.
    if (len(points) > 1):
        for i in range(len(points) - 1):
            cv2.line(img, points[i], points[i + 1], (255, 0, 0), 2)
    # Book keeping ???
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    cv2.imshow("frame", img)
    return img
import mc
import os, sys

# Strip stray semicolons from the working directory before using it to
# extend sys.path with the bundled 'libs' and 'external' directories.
__cwd__ = os.getcwd().replace(";", "")
sys.path.append(os.path.join(__cwd__, 'libs'))
sys.path.append(os.path.join(__cwd__, 'external'))
import tracker

# Analytics tracker bound to a fixed property id.
myTracker = tracker.Tracker('UA-19866820-2')
if (__name__ == "__main__"):
    mc.ActivateWindow(14000)
    import app
    app.down(id=55, path="home", push=False)
    # Record a view of the home screen.
    myTracker.trackView('home')
def pipeline(img):
    '''
    Pipeline function for detection and tracking

    Runs the person detector on `img`, matches detections to existing
    Kalman trackers, additionally runs a logo detector inside each matched
    person crop, annotates the frame (boxes or dot trails, per args), and
    returns it.
    '''
    global frame_count
    global tracker_list
    global dead_tracker_list
    global track_id_start_value
    global max_age
    global min_hits
    global track_id_list
    global debug
    global args
    frame_count += 1
    # NOTE(review): `img_dim` is computed but never used here.
    img_dim = (img.shape[1], img.shape[0])
    z_box = det.get_localization(img)  # measurement
    # Keep an unannotated copy for cropping person regions below.
    img_raw = np.copy(img)
    if debug:
        print('Frame:', frame_count)
    x_box = []
    if debug:
        for i in range(len(z_box)):
            if not args['dots']:
                img1 = helpers.draw_box_label(img, z_box[i],
                                              box_color=(255, 0, 0))
        #plt.imshow(img1)
        plt.show()
    # Advance every live tracker one step before matching.
    if len(tracker_list) > 0:
        for trk in tracker_list:
            trk.predict_only()
            xx = trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            trk.box = xx
            x_box.append(trk.box)
    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(x_box, z_box, iou_thrd = 0.1)
    if debug:
        print('Detection: ', z_box)
        print('x_box: ', x_box)
        print('matched:', matched)
        print('unmatched_det:', unmatched_dets)
        print('unmatched_trks:', unmatched_trks)
    # On-screen diagnostics: detection count, per-match IoU, frame number.
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_size = 1
    font_color = (255, 255, 255)
    cv2.putText(img, str(len(z_box)), (100, 100), font, font_size,
                font_color, 1, cv2.LINE_AA)
    pos = 30
    for trk_idx, det_idx in matched:
        iou = helpers.box_iou2(tracker_list[trk_idx].box, z_box[det_idx])
        cv2.putText(img, tracker_list[trk_idx].id + " " + str(iou),
                    (100, 100 + pos), font, font_size, font_color, 1,
                    cv2.LINE_AA)
        pos += 30
    cv2.putText(img, str(frame_count), (100, 100 + pos), font, font_size,
                font_color, 1, cv2.LINE_AA)
    # Deal with matched detections
    if matched.size > 0:
        for trk_idx, det_idx in matched:
            z = z_box[det_idx]
            ymin, xmin, ymax, xmax = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker_list[trk_idx]
            # Search for the logo inside the matched person's crop.
            person_im = img_raw[ymin:ymax, xmin:xmax]
            logo_boxes = logo_det.get_localization(person_im)
            if (len(logo_boxes) > 0):
                l_ymin, l_xmin, l_ymax, l_xmax = logo_boxes[0]
                img = helpers.draw_box_label(img, [
                    l_ymin + z[0], l_xmin + z[1], l_ymax + z[0], l_xmax + z[1]
                ], id="logo", box_color=(255, 0, 0))
                logo_x = (l_xmax - l_xmin) / 2 + (l_xmin + z[1])
                logo_y = (l_ymax - l_ymin) / 2 + (l_ymin + z[0])
                # NOTE(review): `trk` here is the leftover loop variable from
                # the predict loop above, not `tmp_trk` — likely should be
                # tmp_trk.color; confirm.
                img = cv2.circle(img, (logo_x, logo_y), 5, trk.color, 2)
                tmp_trk.logo_x_coords.append(logo_x)
                tmp_trk.logo_y_coords.append(logo_y)
            if not args['dots']:
                img = helpers.draw_box_label(img, tmp_trk.box, id=tmp_trk.id,
                                             box_color=(0, 255, 0))
            #tmp_trk.kalman_filter(z)
            tmp_trk.update_only(z)
            xx = tmp_trk.x_state.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            # Record the box center for the dot-trail rendering.
            tmp_trk.y_coords.append(int((xx[2] - xx[0]) / 2 + xx[0]))
            tmp_trk.x_coords.append(int((xx[3] - xx[1]) / 2 + xx[1]))
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0
    # Deal with unmatched detections
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = z_box[idx]
            ymin, xmin, ymax, xmax = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            tmp_trk = tracker.Tracker()  # Create a new tracker
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            person_im = img_raw[ymin:ymax, xmin:xmax]
            logo_boxes = logo_det.get_localization(person_im)
            if (len(logo_boxes) > 0):
                l_ymin, l_xmin, l_ymax, l_xmax = logo_boxes[0]
                img = helpers.draw_box_label(img, [
                    l_ymin + z[0], l_xmin + z[1], l_ymax + z[0], l_xmax + z[1]
                ], id="logo", box_color=(255, 0, 0))
                logo_x = (l_xmax - l_xmin) / 2 + (l_xmin + z[1])
                logo_y = (l_ymax - l_ymin) / 2 + (l_ymin + z[0])
                # NOTE(review): same leftover-`trk` issue as above; confirm.
                img = cv2.circle(img, (logo_x, logo_y), 5, trk.color, 2)
                tmp_trk.logo_x_coords.append(logo_x)
                tmp_trk.logo_y_coords.append(logo_y)
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.y_coords.append(int((xx[2] - xx[0]) / 2 + xx[0]))
            tmp_trk.x_coords.append(int((xx[3] - xx[1]) / 2 + xx[1]))
            tmp_trk.id = str(
                track_id_start_value)  # assign an ID for the tracker
            track_id_start_value += 1
            tracker_list.append(tmp_trk)
            x_box.append(xx)
    # Deal with unmatched tracks
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            if not args['dots']:
                img = helpers.draw_box_label(img, tmp_trk.box, id=tmp_trk.id,
                                             box_color=(255, 255, 0))
            # Count the miss; prediction already happened at the top.
            tmp_trk.no_losses += 1
            #tmp_trk.predict_only()
            #xx = tmp_trk.x_state
            #xx = xx.T[0].tolist()
            #xx =[xx[0], xx[2], xx[4], xx[6]]
            #tmp_trk.box =xx
            #x_box[trk_idx] = xx
    # The list of tracks to be annotated
    good_tracker_list = []
    for trk in tracker_list:
        print(trk.id)
        if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            if debug:
                print('updated box: ', x_cv2)
                print()
            if args['dots']:
                # Dot-trail mode: draw the track's whole center history.
                print("COLOR IS: {}".format(trk.color))
                for i in zip(trk.x_coords, trk.y_coords):
                    img = cv2.circle(img, i, 10, trk.color, 5)
            if not args['dots']:
                img = helpers.draw_box_label(
                    img, x_cv2, id=trk.id)  # Draw the bounding boxes on the
                # images
    # Book keeping: archive expired tracks, then drop them from the live list.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        dead_tracker_list.append(trk)
    tracker_list = [x for x in tracker_list if x.no_losses <= max_age]
    if debug:
        print('Ending tracker_list: ', len(tracker_list))
        print('Ending good tracker_list: ', len(good_tracker_list))
    return img
cap = cv2.VideoCapture(ip_camera_path)
detector = detectorAPI.DetectorAPI(path_to_ckpt=model_path)
# Grab one frame synchronously to seed the threaded pipeline below.
r, frameInit = cap.read()
cap.release()
centroidsInit, scoresInit, classesInit, numInit = detector.processFrame(
    frameInit)
# Threaded stages: frame capture, display, and detection.
frame_grabber = GetFrame(ip_camera_path).start()
frame_shower = ShowFrame(frame=frameInit).start()
frame_processor = ProcessFrame(frame=frameInit,
                               detector=detector,
                               centroids=centroidsInit,
                               classes=classesInit,
                               scores=scoresInit,
                               num=numInit).start()
# NOTE(review): this rebinds the name `tracker` from the module to an
# instance, shadowing the module for the rest of the script.
tracker = tracker.Tracker()
while True:
    frame = frame_grabber.frame  # Grab a frame
    frame_processor.unprocessedFrame = frame  # Process the frame
    frameWithBoxes = detector.addBoxesToFrame(frame_processor.centroids,
                                              frame_processor.scores,
                                              frame_processor.classes,
                                              frame_processor.num, frame)
    #if not tracker.personDict:
    #tracker.fill_persondict(frame_processor.positions)
    #else:
    #distances = tracker.calculateDistanceFromPersonsToPoints(tracker.personDict, frame_processor.positions)
    #print(frame_processor.positions)
    tracker.tracking_algorithm(frame_processor.positions)
def setup(self):
    """Instantiate the Tracker under test.

    Positional arguments 0.01 and 5 as in the original; their semantics
    are defined by Tracker's constructor.
    """
    self.t = tracker.Tracker(0.01, 5)