def runVideo(cap):
    Frame.frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    Frame.frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    first_go = True
    frame = None
    current_frame = None
    previous_frame = None
    while True:
        if not first_go:
            previous_frame = Frame(current_frame.getFrame())
        ret, frame = cap.read()
        if not ret:
            break
        else:
            current_frame = Frame(frame)
            if not first_go:
                # motion detection: frame difference -> threshold -> dilate -> contours
                difference_frame = current_frame.getDifferenceFrame(previous_frame)
                thresholded_frame = difference_frame.getBinary(threshold=100)
                dilated_frame = thresholded_frame.getDilated(iterations=10)
                valid_contours = dilated_frame.findObjects(
                    minContourZone=settings.MIN_COUNTOUR_ZONE)
                Tracker.registerNewObjects(valid_contours, current_frame)
                # set which frame to display for the user
                ready_frame = current_frame
                ready_frame.addBoundingBoxesFromContours(Tracker)
                st.write("test")
                ready_frame.putText("Threads: " + str(threading.active_count()), (7, 20))
                ready_frame.putText(
                    "Object buffer size: " + str(len(Tracker.objectsToClassify)), (7, 50))
                ready_frame.putText("FPS: " + FPS.tick(), (7, 80))
                ready_frame.putText(
                    "Cars passed: " + str(len(Tracker.lostObjects)), (7, 110))
                ready_frame.putText("Test var: " + str(12), (7, 140))
                ready_frame.show()
            else:
                first_go = False
                current_frame.show()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            logger('cars found: ' + str(Tracker.lostObjects), settings.LOG)
            break
    cap.release()
    cv2.destroyAllWindows()
    stopThreads()
def main():
    img = cv2.imread(os.path.abspath(os.path.dirname(__file__)) + '/bolt.png')
    split = 8
    shape = get_size(img)
    tracker = Tracker(split, shape)
    init = False
    top_left = np.array((0, 0))
    bottom_right = shape
    while True:
        draw = img.copy()
        # img = np.zeros(img.shape)
        if init:
            draw = tracker.track_hog(img)
            break
        else:
            pass
            # draw = cv2.rectangle(draw, tuple(top_left),
            #                      tuple(bottom_right), (0, 255, 0),
            #                      thickness=2)
        cv2.imshow('Video', draw)
        retval = cv2.waitKey(10)
        if retval == 27:  # ESC quits
            break
        elif retval == 32:  # SPACE sets the reference region
            if not init:
                tracker.set_reference(img, top_left, bottom_right)
                init = True
        elif retval == 112:  # 'p' pauses until any key is pressed
            cv2.waitKey(0)
def main():
    """ Executes the program. """
    tracker = Tracker()
    plt.ion()
    last_total_data_used = 0
    while True:
        # Retrieve the up and down speeds
        time.sleep(0.5)
        down_speed = 8 * (tracker.get_current_download_speed() / (2**20))
        up_speed = 8 * (tracker.get_current_upload_speed() / (2**20))
        # Store it
        add_data(down_speed, up_speed)
        # Data used
        total_data_used = round(tracker.get_total_data_used() / (2**20), 3)
        write_data_used(last_total_data_used, total_data_used)
        last_total_data_used = total_data_used
        # Update & display the plot
        recv_curve, = plt.plot(times, speeds_recv)
        sent_curve, = plt.plot(times, speeds_sent)
        plt.legend([recv_curve, sent_curve], ['Download', 'Upload'])
        plt.ylabel('Mb/s', fontsize=8)
        ax = plt.gca()
        ax.tick_params(axis='x', labelsize=6)
        ax.tick_params(axis='y', labelsize=6)
        ax.get_figure().canvas.set_window_title('Internet speed - N3RO')
        plt.draw()
        plt.pause(0.0001)
        plt.clf()
def __init__(self, status=ax.State.START_SESSION):
    self.tracker = Tracker()
    self.tracker.appendContainer(ax.State.STARTING_STATE)
    self.startTime = time.localtime(time.time())[:]
    self.endTime = -1
    self.sessionName = 'PlaceHolder'
    # load settings from userSettings.txt
    f = open('userSettings.txt', 'r')
    fileData = f.read().split('\n')
    f.close()
    lastSessionName = fileData[0].split(' = ')[-1]
    self.numberOfSessions = int(fileData[1].split(' = ')[-1])
    if lastSessionName == 'None':
        # no previous session
        self.status = ax.State.START_SESSION
        # name is given by: ###_dd_mm_yyyy
        self.sessionName = (str(self.numberOfSessions + 1) + '_' +
                            str(self.startTime[2]) + '_' +
                            str(self.startTime[1]) + '_' +
                            str(self.startTime[0]))
    else:
        # load the pickle to see if the previous session ended
        lastSession = loadPickle(lastSessionName)  # loadPickle(lastSession)
        if lastSession.status == ax.State.END_SESSION:
            # start a new session
            self.status = ax.State.START_SESSION
            self.sessionName = (str(self.numberOfSessions + 1) + '_' +
                                str(self.startTime[2]) + '_' +
                                str(self.startTime[1]) + '_' +
                                str(self.startTime[0]))
        else:
            # otherwise this session continues the previous session
            for i in lastSession.__dict__.keys():
                self.__dict__[i] = lastSession.__dict__[i]
def __init__(self, width=160, height=60, max_obj=10, history=10):
    Tracker.__init__(self, max_obj, history)
    self.alpha = 0.5
    self.ximg = None
    self.yimg = None
    self.zimg = None
    self.width = width
    self.height = height
    self.cartNormal = 2.0  # Set to distance in meters
def __init__(self, *args):
    self.log = Logs().getLog()
    super().__init__()
    self.point_scale = 20
    self.distance_scale = 40
    # self.label_y_offset = 10
    self.DB = Database()
    self.tracker = Tracker(self.DB)
    self.track_unknown = True
def moveAtoB(self, net, start, finish):
    self.current_vertex = start.name
    self.start_vertex = start.name
    self.finish_vertex = finish.name
    self.tracker = Tracker(self)
    self.report_velocity()
    self.drawer.carList.append(self.tracker)
    self.movement(net)
    self.drawer.carList.remove(self.tracker)
    self.removeCar()
    start.setUnallocated()
    finish.setUnallocated()
class TestTracker(unittest.TestCase):
    def setUp(self) -> None:
        self.cap = None
        self.tracker = Tracker(self.cap)

    def test_velocity_computation(self):
        self.tracker.position = ((75, 30), (46, 200))
        self.assertEqual(self.tracker.get_velocity(),
                         deque([None, (-58, 340)]))

    def test_acceleration_computation(self):
        self.tracker.velocity = [None, (-58, 340), (46, 223)]
        self.assertEqual(self.tracker.get_accel(),
                         deque([None, (208, -234)]))
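# A minimal sketch (not the project's Tracker) of the finite-difference rule the
# assertions above appear to assume: per-axis deltas between consecutive samples
# divided by an assumed time step of 0.5 s, with None for the first sample, so
# (75, 30) -> (46, 200) yields (-58, 340). The dt value and names are assumptions.
from collections import deque


def finite_difference(samples, dt=0.5):
    out = deque([None])
    for prev, cur in zip(samples, samples[1:]):
        out.append(((cur[0] - prev[0]) / dt, (cur[1] - prev[1]) / dt))
    return out


# finite_difference([(75, 30), (46, 200)])    -> deque([None, (-58.0, 340.0)])
# finite_difference([(-58, 340), (46, 223)])  -> deque([None, (208.0, -234.0)])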
def __init__(self, algorithm):
    self.threshold_filter = ThresholdFilter(
        np.array([24, 125, 100], dtype=np.uint8),
        np.array([36, 255, 255], dtype=np.uint8))
    self.algo = algorithm()
    # Camera Setups
    self.left_tracker = Tracker(1)
    self.right_tracker = Tracker(2)
    self.horizontal_fov = 120.0
    self.vertical_fov = 60.0
    self.d = 100
    self.centroid_algo = Centroid()
    self.left_transformed_image = np.copy(self.left_tracker.image)
    self.right_transformed_image = np.copy(self.right_tracker.image)
    self.valid = True
def __init__(self, argv):
    FvwmModule.__init__(self, argv)
    self.set_mask()
    self.send("Set_Mask 4294967295")
    self.send("Move 2963p 178p", window=0x3e00004)  # pager sends this
    ## SetMessageMask(fd,
    ##                M_VISIBLE_NAME |
    ##                M_ADD_WINDOW|
    ##                M_CONFIGURE_WINDOW|
    ##                M_DESTROY_WINDOW|
    ##                M_FOCUS_CHANGE|
    ##                M_NEW_PAGE|
    ##                M_NEW_DESK|
    ##                M_RAISE_WINDOW|
    ##                M_LOWER_WINDOW|
    ##                M_ICONIFY|
    ##                M_ICON_LOCATION|
    ##                M_DEICONIFY|
    ##                M_RES_NAME|
    ##                M_RES_CLASS|
    ##                M_CONFIG_INFO|
    ##                M_END_CONFIG_INFO|
    ##                M_MINI_ICON|
    ##                M_END_WINDOWLIST|
    ##                M_RESTACK);
    ## SetMessageMask(fd,
    ##                MX_VISIBLE_ICON_NAME|
    ##                MX_PROPERTY_CHANGE);
    self.register("M_CONFIGURE_WINDOW", self.ConfigureWindow)
    log("windowlist")
    # self.tracker = self.get_windowlist()
    self.tracker = Tracker()
    log("windowlist done")
    # for win in self.tracker.get_windows():
    #     log((win, win.name, win.x, win.y, win.width, win.height, win.desk))
    self.send("Send_WindowList")  # pager sends this
    self.send("NOP FINISHED STARTUP")
    self.lastSend = None
def main():
    vid = 'OJ'
    cap = cv2.VideoCapture(
        os.path.abspath(os.path.dirname(__file__)) + '/%s.gif' % vid)
    if not cap.isOpened():
        return -1
    split = 50
    _, frame = cap.read()
    size = get_size(frame)
    tracker = Tracker(split, size)
    init = False
    if vid == 'OJ':
        top_left = np.array([120, 90])
        bottom_right = np.array([175, 124])
    elif vid == 'bmw':
        top_left = np.array([66, 140])
        bottom_right = np.array([125, 190])
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        draw = frame.copy()
        if not init:
            draw = cv2.rectangle(draw, tuple(top_left), tuple(bottom_right),
                                 (0, 255, 0), thickness=2)
            tracker.set_reference(frame, top_left, bottom_right)
        else:
            draw = tracker.track(frame)
        cv2.imshow('frame', draw)
        duration = 100 if init else 0
        key = cv2.waitKey(duration)
        if key == 27:  # ESC quits
            break
        elif key == 32:  # SPACE starts tracking
            init = True
    cap.release()
    cv2.destroyAllWindows()
class Experiment():
    def __init__(self):
        logging.info('Init Experiment...')
        self.player = VideoPlayer()
        self.tracker = Tracker()
        self.tracker.start()

    def start_from_i(self, index, name, is_male, age):
        get_gender = lambda b: 'male' if b else 'female'
        userID = str(name) + get_gender(is_male) + str(
            age) + datetime.now().strftime('_%m%d%H%M%S')
        self.player.set_userID(userID)
        self.tracker.set_userID(userID)
        for i in range(index, self.player.video_sum):
            self.tracker.start_track_video_i(i)
            time.sleep(0.5)
            self.player.play_i(i)
            self.player.show_black()
            frame_sum, frame_failed = self.tracker.stop_current_track()
            if frame_sum > 0 and float(frame_failed) / frame_sum > 0.1:
                self.player.show_attention()
        self.player.stop()

    def start(self, name, is_male=True, age=22):
        self.start_from_i(0, name, is_male, age)
class Interactive():
    # What model to download.
    MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
    MODEL_FILE = MODEL_NAME + '.tar.gz'
    DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
    # Path to frozen detection graph. This is the actual model that is used for the object detection.
    PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
    # List of the strings that is used to add correct label for each box.
    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
    NUM_CLASSES = 90

    def __init__(self):
        self.loadModel()

    def loadModel(self):
        self.tracker = Tracker()

    def load_image_into_numpy_array(self, image):
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)

    def getBox(self, boxes, scores, classes, width, height):
        myList = []
        for i in range(len(boxes)):
            if (classes[i] == 0 and scores[i] > 0.5):
                # is this order correct i dont know......
                x0 = boxes[i][1]
                y0 = boxes[i][0]
                x1 = boxes[i][3]
                y1 = boxes[i][2]
                myList.append([x0, y0, x1, y1, classes[i]])
        return (myList)

    def objectDetection(self, TEST_IMAGE_PATH):
        result_list = []
        self.tracker.start_model()
        image = cv2.imread(TEST_IMAGE_PATH[0])
        height, width = image.shape[:2]
        image, out_boxes, out_scores, out_classes = self.tracker.get_single_image(
            image)
        result_list.append(
            self.getBox(out_boxes, out_scores, out_classes, width, height))
        return (result_list)
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--videopath", help="Path to video")
    ap.add_argument("-d", "--detector", help="Detection Algo")
    args = vars(ap.parse_args())
    if (args["videopath"] == "0"):
        source = 0
    elif (args["videopath"] == "1"):
        source = 1
    else:
        source = args["videopath"]
    if (args["detector"] == "HG" or args["detector"] == "HC"):
        detector_algo = args["detector"]
    else:
        print " Detector algo not correct"
        quit()

    ############ Detection Part starts here ##############
    dtector = Detector(src=source, detector=detector_algo).start()
    while True:
        frame = dtector.read()
        frame = imutils.resize(frame, width=400)
        cv2.imshow("Detection", frame)
        key = cv2.waitKey(20) & 0xFF
        if key == 27:
            break
    dtector.stop()
    rect, img = dtector.get_roi()
    cv2.destroyAllWindows()
    # print rect
    ############ Detection Part ends here ##############

    ############ Tracking Part starts here ##############
    global stop_arduino_thread
    q = Queue()
    tracker = Tracker(rect, img, src=source).start()
    print tracker
    data = tracker.get_points()
    q.put(data)
    thread_arduino = Thread(target=send_arduino, args=(q, ))
    thread_arduino.start()
    while True:
        frame = tracker.read()
        frame = imutils.resize(frame, width=400)
        cv2.imshow("Frame", frame)
        data = tracker.get_points()
        q.put(data)
        key = cv2.waitKey(50) & 0xFF
        if key == 27:
            break
    stop_arduino_thread = True
    tracker.stop()
    cv2.destroyAllWindows()
def trackHeadTask():
    global tracker
    if tracker is None:
        tracker = Tracker()
    file = request.files['file']
    # imageBytes = base64.b64encode(file.read())
    try:
        result_exc = celery_app.task.headsDet.s(file.read()).delay()
        heads = result_exc.get(timeout=15)
        if heads is None:
            return (jsonify([]), 200)
        heads1 = np.ones((len(heads), 7))
        for i in range(len(heads)):
            head = heads[i]
            for j in range(len(head)):
                heads1[i, j] = head[j]
        trackMsg = tracker.traceHead(heads1)
    except Exception as e:
        trackMsg = []
    return (jsonify(trackMsg), 200)
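# A minimal sketch of calling the endpoint above from a client. The route path
# "/trackHead" and host are assumptions (the route decorator is not part of this
# snippet); the handler itself only expects a multipart field named 'file'.
import requests

with open("frame.jpg", "rb") as f:
    resp = requests.post("http://localhost:5000/trackHead", files={"file": f})
print(resp.json())  # list of head-track messages, or [] if tracking failed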
def main():
    """ Application start """
    logging.basicConfig(level=logging.INFO)
    get_module_logger().setLevel(logging.INFO)
    arg_parser = get_arg_parser()
    args = arg_parser.parse_args()
    tle, tle_provider = try_to_get_tle(args.tle)
    location = Location.get_location(args.location)
    tle_parser = TLEParser(location, tle)
    motor_control = ArduinoHardware(args.port, args.baudrate, True)
    tracker = Tracker(tle_provider, tle_parser, motor_control)
    tracker.run(int(args.azimuth), int(args.altitude))
def _analysis(self):
    parser = LogParser()
    parser.setRedundantLevel(self.args.level)
    l = parser.parse(self.args.trace)
    if self.args.c_project_dir is not None:
        macro_inspector = MacroInspector(self.args.c_project_dir)
        tracker = Tracker(l, macro_inspector)
    else:
        tracker = Tracker(l)
    for line in l:
        print str(l.index(line)) + "#" + str(line)
    traceIndex = (len(l) + self.args.index) % len(l)
    vs = self.build_tiantvars_list()
    tracker.setStartJobs(traceIndex, vs)
    TG = tracker.track()
    output = file(self.args.output_path + "/" + self.args.name + ".dot", 'w')
    print TG.serialize2dot()
    output.write(TG.serialize2dot())
    output.close()
    subprocess.call("dot -Tsvg " + self.args.output_path + "/" + self.args.name +
                    ".dot -o " + self.args.output_path + "/" + self.args.name + ".svg",
                    shell=True)
def __init__(self, addr, t_reactor):
    self.reactor = t_reactor
    self.file_addr = addr
    self.metainfo = Metainfo(self.file_addr)
    self.tracker = Tracker(self, self.metainfo)
    self.handshake_message = Handshake(self.metainfo, self.tracker)
    self.file_handler = FileReadWrite(self.metainfo)
    self.requester = Requester(self, self.metainfo)
    self.bitfield = bitarray(0 * self.metainfo.no_of_pieces)
    self.peer_list = list()
    self.buildPeerList()
    self.protocol_factory = CoreTCP.PeerConnectionFactory(self)
    print self.peer_list
def do_all_preparations(sequence_name, result_path, debug_path, base_vot_path,
                        params):
    sequence_path, result_path, debug_path = prepare_all_paths(
        sequence_name, result_path, debug_path, base_vot_path)
    polygon_matrix = prepare_poly_matrix(sequence_path)
    poly_array = polygon_matrix[0, :]
    image_paths = get_all_image_paths(sequence_path + 'color' + '/')
    handle_first_image(image_paths, result_path, poly_array)
    test_tracker = Tracker(uf.read_image(image_paths[0]), poly_array, None,
                           **params)
    run_params_data = dict({
        'test_tracker': test_tracker,
        'polygon_matrix': polygon_matrix,
        'image_paths': image_paths,
        'result_path': result_path
    })
    Tracker_params['debug_images_path_temp'] = debug_path
    return run_params_data
class CompletionLister:
    def __init__(self, classifier):
        self.classifier = classifier
        self.tracker = Tracker()

    def scanDirs(self, directories):
        """Recursively walk directory to train classifier"""
        self.directories = directories
        for directory in directories:
            print "Scanning directory " + directory
            scanDir.scanDir(
                directory,
                lambda path, content: self._handleFile(path, content))

    def _handleFile(self, path, content):
        self.classifier.train(path, content)
        self.tracker.scannedFile(path, content)

    def train(self, path, content, event):
        """Train the classifier using the given content"""
        isSubdir = False
        for directory in self.directories:
            canonDir = os.path.realpath(directory)
            canonPath = os.path.realpath(path)
            if canonPath.startswith(canonDir):
                isSubdir = True
                break
        if isSubdir:
            self.classifier.train(path, content)
            self.tracker.event(path, content, event)

    def analyze(self, input):
        """Return a list of completions ordered by probability"""
        self.classifier.train(input.path, input.content)
        self.tracker.analyze(input)
        return self.classifier.predict(input)[:50]

    def accepted(self, input, selection):
        """Train classifier based on what was actually selected"""
        # FixMe: [correctness] Train
        self.tracker.accepted(input, selection)
def addTracker(self):
    name = input('Enter tracker name: ')
    url = input('Enter tracker url: ')
    email = input('Enter email to notify: ')
    desirePrice = float(input('Enter desired price: '))
    for web in self.__webs.keys():
        if re.search(web, url):
            newTracker = Tracker(name, url, self.__webs[web], email,
                                 desirePrice)
            self.__trackers.append(newTracker)
    if self.__thread:
        self.__thread.updateTrackers(self.__trackers)
def do_all_preparations(sequence_name, result_path, debug_path, base_vos_path,
                        params):
    images_path, masks_path, result_path, debug_path = prepare_all_paths(
        sequence_name, result_path, debug_path, base_vos_path)
    image_paths = get_all_image_paths(images_path)
    mask_paths = get_all_image_paths(masks_path)
    initial_mask = uf.read_image(mask_paths[0]) / 255
    initial_mask = np.expand_dims(initial_mask, axis=-1)
    if Tracker_params['save_images']:
        uf.save_mask_image(initial_mask, result_path,
                           os.path.basename(image_paths[0]))
    test_tracker = Tracker(uf.read_image(image_paths[0]), None, initial_mask,
                           **params)
    run_params_data = dict({
        'test_tracker': test_tracker,
        'image_paths': image_paths,
        'result_path': result_path
    })
    Tracker_params['debug_images_path_temp'] = debug_path
    return run_params_data
# from SharedFile import SharedFile
from Tracker import Tracker

# print("1. Create first tracker and config file")
# print("2. Create tracker from config file")
# choice = input("->")
# path = input("Enter path to file to share")
# if choice == '1':
#     my_tracker = Tracker(path)
# my_tracker = Tracker('/home/misterk/Desktop/lorem.txt')
my_tracker = Tracker('/home/misterk/Desktop/lorem.txt', 1024,
                     'tracker_config.json')
from Tracker import Tracker

x = Tracker()
x.mainloop()
class DuckieTrackingScene(QGraphicsScene):
    def __init__(self, *args):
        self.log = Logs().getLog()
        super().__init__()
        self.point_scale = 20
        self.distance_scale = 40
        # self.label_y_offset = 10
        self.DB = Database()
        self.tracker = Tracker(self.DB)
        self.track_unknown = True

    def update(self):
        self.tracker.getStationObjectsPositions()
        self.tracker.getTrackedObjectsPositions(self.track_unknown)
        self.clear()
        self.updateStations()
        self.updateTrackedObjects()

    def updateStations(self):
        self.log.debug("[+] Started updateStationLayout...")
        positions = []
        for mac in self.tracker.STATION_MACS:
            response = self.DB.getNewestLocationByMac(mac)
            if response is None:
                self.log.debug("[x] Failed updateStationLayout...")
                return
            positions.append([response["x"], response["y"]])
        self.drawTriangle(positions)
        self.log.debug("[-] Completed updateStationLayout...")

    def updateTrackedObjects(self):
        self.log.debug("[+] Starting updateTrackedObjects...")
        for mac in self.tracker.TRACKING_MACS:
            response = self.DB.getNewestLocationByMac(mac)
            if response is None:
                continue
            now = time.time()
            # If more than 3 seconds have passed since this measurement was taken,
            # the device has not been detected again within 3 seconds, so skip it.
            if now - response["timestamp"] > 3:
                continue
            self.log.debug("[*] Updating tracked objects...")
            pos = [response["x"], response["y"]]
            self.drawLabel(pos, response["mac"])
            self.drawNode(pos)
        self.log.debug("[-] Completed updatingTrackedObjects...")

    def drawTriangle(self, positions):
        self.log.debug("[+] Starting drawTriangle...")
        self.log.debug("[?] Positions -- A: {0}, B: {1}, C: {2}".format(
            positions[0], positions[1], positions[2]))
        # We assume the A node is 0,0 for simplicity
        self.drawLabel(positions[0], "A")
        self.drawNode(positions[0])
        # We assume the B node is north of the A node so the detection can be self organizing
        self.drawLabel(positions[1], "B")
        self.drawNode(positions[1])
        # Draw third node
        self.drawLabel(positions[2], "C")
        self.drawNode(positions[2])
        # Draw lines for all nodes
        [
            self.drawLine(positions[x - 1], positions[x])
            for x in range(len(positions))
        ]
        self.log.debug("[-] Completed drawTriangle...")

    def drawNode(self, xy):  # xy = [x, y]
        pen = QPen(Qt.black, 2)
        brush = QBrush(Qt.black)
        radius = 0.25 * self.point_scale
        xy = [x * self.distance_scale for x in xy]
        # x1, y1, x2, y2, pen, brush
        self.addEllipse(xy[0] - radius / 2, xy[1] - radius / 2, radius, radius,
                        pen, brush)

    def drawLine(self, node1, node2):  # node1 = [x, y], node2 = [x, y]
        pen = QPen(QPen(Qt.black, 2, Qt.DashLine))
        # x1, y1, x2, y2, pen
        self.addLine(node1[0] * self.distance_scale,
                     node1[1] * self.distance_scale,
                     node2[0] * self.distance_scale,
                     node2[1] * self.distance_scale, pen)

    def drawLabel(self, pos, text):
        pen = QPen(QPen(Qt.black, 1, Qt.SolidLine))
        t = self.addText(text)
        t.setPos(pos[0] * self.distance_scale, pos[1] * self.distance_scale)
def run_machine_vision():
    """
    Manage both the detector (neural net) and the tracker in a separate
    process than the animation.
    """
    # use imutils.FileVideoStream to read video from a file for testing
    # vs = FileVideoStream('no_vis_light.mp4').start()
    # use imutils.VideoStream to read video from a webcam for testing
    vs = VideoStream(src=0).start()
    # Threaded application of PTGrey Camera -- use PTCamera_Threaded
    # vs = PTCamera(resolution=video_dims).start()
    # Non-threaded application of PTGrey Camera
    # vs = PTCamera(resolution=video_dims)

    # Let the camera warm up and set configuration
    time.sleep(2)
    print("[INFO] loading model...")
    # create an instance of the detector
    net = Deep_Detector('deploy.prototxt.txt',
                        'res10_300x300_ssd_iter_140000.caffemodel',
                        refresh_rate=2,
                        confidence=.4)
    # initialize a tracker
    print("[INFO] initializing tracker")
    tracker = Tracker(quality_threshold=6)
    last_detector_update_time = time.time()
    current_time = time.time()
    tracking_face = False
    tracked_center = (0, 0)
    running = True
    start_machine_vision_time = time.time()
    # count = 0
    # detector_count = 0

    # Check to make sure that the identified face is of a reasonable size;
    # for the PTGrey Camera, I found ~50 works well. Other cameras will
    # require other thresholds.
    face_width_threshold = 200
    no_frame_count = 0
    while running:
        current_time = time.time()
        # Reading from the camera is I/O gating.
        frame = vs.read()
        if frame.all() != None:
            no_frame_count = 0
            frame = imutils.resize(frame, width=300)
            if not tracking_face or current_time - last_detector_update_time > net.get_refresh_rate():
                last_detector_update_time = current_time
                tracking_face = run_detector(net, frame, tracker,
                                             face_width_threshold)
                # count += 1
                # detector_count += 1
            if tracking_face:
                # count += 1
                track_quality = tracker.get_track_quality(frame)
                if track_quality >= tracker.get_quality_threshold():
                    run_tracker(tracker, frame)
                else:
                    tracking_face = False
            # Wait sixteen milliseconds before looping again. OpenCV will freeze if this number
            # is too low or the waitKey call is omitted. If waitKey is called with no params,
            # the program will wait indefinitely for the user to hit a key before it
            # runs another loop; nice for debugging.
            # Quit the program if the user hits the "q" key on the keyboard
            if cv2.waitKey(16) == ord('q'):
                break
        else:
            no_frame_count += 1
            if no_frame_count == 50:
                print('Received too many null frames; exiting machine_vision_subprocess')
                break
    end_machine_vision_time = time.time()
    # fps = count / (end_machine_vision_time - start_machine_vision_time)
    # print('Machine Vision fps: ' + str(fps))
    vs.stop()
    cv2.destroyAllWindows()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'yong'

from Tracker import Tracker

xiami = 'http://dlsw.baidu.com/sw-search-sp/soft/3d/20621/XMusicSetup_2_0_2_1618.1394071033.exe'
vbox = 'https://atlas.hashicorp.com/puphpet/boxes/centos65-x64/versions/20151130/providers/virtualbox.box'
landen = 'https://raw.githubusercontent.com/getlantern/lantern-binaries/master/lantern-installer.exe'
mysql = 'http://124.202.164.12/files/21550000086CE8B0/120.52.73.12/cdn.mysql.com//Downloads/MySQL-5.6/mysql-5.6.31-win32.zip'

tracker = Tracker(vbox, num=6)
tracker.start()
def __init__(self):
    UserInterface.kill_tracker()
    super(UserInterface, self).__init__()
    self.__scene_size = 880
    self.setCentralWidget(QtWidgets.QWidget())
    self.__main_layout = QtWidgets.QHBoxLayout()
    self.centralWidget().setLayout(self.__main_layout)
    self.setGeometry(0, 0, self.__scene_size + 420, self.__scene_size + 20)
    self.setWindowTitle('TimeTrackerGUI')
    self.show()

    self.__scroll_area = QtWidgets.QScrollArea()
    self.__scroll_area.setWidgetResizable(True)
    self.__main_layout.addWidget(self.__scroll_area)
    self.__button_layout = QtWidgets.QVBoxLayout()
    self.__main_layout.addLayout(self.__button_layout)

    self.sorting_button = QtWidgets.QPushButton("SORTING BY \nUSE TIME")
    self.sorting_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                      QtWidgets.QSizePolicy.Preferred)
    self.ascending_sorting_button = QtWidgets.QPushButton("DESCENDING\nSORTING")
    self.ascending_sorting_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                                QtWidgets.QSizePolicy.Preferred)
    self.hide_button = QtWidgets.QPushButton("HIDDEN\nINVISIBLE")
    self.hide_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                   QtWidgets.QSizePolicy.Preferred)
    self.favourites_button = QtWidgets.QPushButton("FAVOURITES\nOFF")
    self.favourites_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                         QtWidgets.QSizePolicy.Preferred)
    self.sorting_button.setStyleSheet("font: 10pt Arial")
    self.ascending_sorting_button.setStyleSheet("font: 10pt Arial")
    self.hide_button.setStyleSheet("font: 10pt Arial")
    self.favourites_button.setStyleSheet("font: 10pt Arial")
    self.__button_layout.addWidget(self.sorting_button)
    self.__button_layout.addWidget(self.ascending_sorting_button)
    self.__button_layout.addWidget(self.hide_button)
    self.__button_layout.addWidget(self.favourites_button)

    res_x = 2560
    res_y = 1440
    self.move(int(res_x / 2) - int(self.frameSize().width() / 2),
              int(res_y / 2) - int(self.frameSize().height() / 2))

    self.tracked = Tracker.get_tracked_applications()
    self.scroll_area_widget = QtWidgets.QWidget()
    self.__scroll_area.setWidget(self.scroll_area_widget)
    self.scroll_area_layout = QtWidgets.QVBoxLayout(self.scroll_area_widget)
    self.scroll_area_table = QtWidgets.QTableWidget(len(self.tracked), 7)
    self.scroll_area_layout.addWidget(self.scroll_area_table)
    self.scroll_area_table.setHorizontalHeaderLabels(
        ["Executable", "Name", "First used", "Last used", "Use time (hours)",
         "Favourite", "Hidden"])

    self.sorting_mode = 4  # from 1 to 4
    self.sorting_descriptions = ["NAME", "FIRST USED", "LAST USED", "USE TIME"]
    self.ascending_sorting = False
    self.favourites_only = False
    self.hide_hidden = True

    row = 0
    for application in self.tracked:
        app = self.tracked[application]
        exe_name = QtWidgets.QTableWidgetItem(application)
        name = QtWidgets.QTableWidgetItem(app.name)
        started = QtWidgets.QTableWidgetItem(app.started.strftime("%m/%d/%Y, %H:%M:%S"))
        ended = QtWidgets.QTableWidgetItem(app.last.strftime("%m/%d/%Y, %H:%M:%S"))
        time_displayed = round(app.use_time / 3600, 2)
        use_time = QtWidgets.QTableWidgetItem()
        use_time.setData(QtCore.Qt.DisplayRole, time_displayed)
        favourite = QtWidgets.QTableWidgetItem(str(app.favourite))
        hidden = QtWidgets.QTableWidgetItem(str(app.hidden))
        if self.hide_hidden and app.hidden:
            self.scroll_area_table.hideRow(row)
        exe_name.setFlags(started.flags() ^
                          (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable))
        started.setFlags(started.flags() ^
                         (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable))
        ended.setFlags(ended.flags() ^
                       (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable))
        use_time.setFlags(use_time.flags() ^
                          (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable))
        favourite.setFlags(favourite.flags() ^
                           (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable))
        hidden.setFlags(hidden.flags() ^
                        (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable))
        self.scroll_area_table.setItem(row, 0, exe_name)
        self.scroll_area_table.setItem(row, 1, name)
        self.scroll_area_table.setItem(row, 2, started)
        self.scroll_area_table.setItem(row, 3, ended)
        self.scroll_area_table.setItem(row, 4, use_time)
        self.scroll_area_table.setItem(row, 5, favourite)
        self.scroll_area_table.setItem(row, 6, hidden)
        row += 1

    self.scroll_area_table.horizontalHeader().setStretchLastSection(True)
    self.scroll_area_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
    self.sort()
    self.scroll_area_table.clicked.connect(self.cell_clicked)
    self.sorting_button.clicked.connect(self.change_sorting)
    self.ascending_sorting_button.clicked.connect(self.change_ascending_sorting)
    self.hide_button.clicked.connect(self.toggle_hiding)
    self.favourites_button.clicked.connect(self.toggle_favourites)
    atexit.register(self.save_changes)
    atexit.register(UserInterface.start_tracker)
def save_changes(self):
    for row in range(self.scroll_area_table.width()):
        if self.scroll_area_table.item(row, 0) is not None:
            self.tracked[self.scroll_area_table.item(row, 0).text()].name = \
                self.scroll_area_table.item(row, 1).text()
    Tracker.write_times(self.tracked)
matplotlib.pyplot.scatter(destination_points[:, 0],
                          destination_points[:, 1],
                          marker="x",
                          color="red",
                          s=200)
write_location = 'test_images_output/destination_points_' + str(
    file_name)[len('test_images/'):]
matplotlib.pyplot.savefig(write_location)
matplotlib.pyplot.close('all')

window_width = 30
window_height = 40
# set up the overall class to do all the tracking
curve_centers = Tracker(Mywindow_width=window_width,
                        Mywindow_height=window_height,
                        Mymargin=55,
                        Mysmooth_factor=1)
window_centroids = curve_centers.find_window_centroids(binary_bird_view)

# points used to draw all the left and the right windows
l_points = np.zeros_like(binary_bird_view)
r_points = np.zeros_like(binary_bird_view)

# Points used to find the left and right lanes
rightx = []
leftx = []

# go through each level and draw the windows
for level in range(0, len(window_centroids)):
    # window_mask is a function to draw window areas
    leftx.append(window_centroids[level][0])
def main():
    stitcher = Stitcher()
    if config_scale:
        background = cv2.imread('images/background_scaled.jpg')
    else:
        background = cv2.imread('images/background.jpg')
    transformer = Transformer(config_scale)
    cap_left = cv2.VideoCapture(videos_path + videos[0])
    cap_mid = cv2.VideoCapture(videos_path + videos[1])
    cap_right = cv2.VideoCapture(videos_path + videos[2])
    frame_width = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
    frame_count = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_COUNT))
    init_points = {'C0': (71, 1153),
                   'R0': (80, 761), 'R1': (80, 1033), 'R2': (95, 1127),
                   'R3': (54, 1156), 'R4': (65, 1185), 'R5': (61, 1204),
                   'R6': (56, 1217), 'R7': (69, 1213), 'R8': (67, 1253),
                   'R9': (75, 1281), 'R10': (92, 1347),
                   'B0': (71, 1409), 'B1': (72, 1016), 'B2': (47, 1051),
                   'B3': (58, 1117), 'B4': (74, 1139), 'B5': (123, 1156),
                   'B6': (61, 1177), 'B7': (48, 1198), 'B8': (102, 1353)}
    points = init_points.values()
    tracker = Tracker(background, config_scale, init_points.values())
    # cap_left.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_mid.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_right.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    for fr in range(frame_count):
        print(fr)
        status_left, frame_left = cap_left.read()
        status_mid, frame_mid = cap_mid.read()
        status_right, frame_right = cap_right.read()
        scaled_size = (frame_width / image_down_scale_factor,
                       frame_height / image_down_scale_factor)
        frame_left = cv2.resize(frame_left, scaled_size)
        frame_mid = cv2.resize(frame_mid, scaled_size)
        frame_right = cv2.resize(frame_right, scaled_size)
        # Adjust the brightness difference.
        frame_mid = cv2.convertScaleAbs(frame_mid, alpha=0.92)
        if status_left and status_mid and status_right:
            warped_left_mid = stitcher.stitch(frame_mid, frame_left, H_left_mid)
            warped_left_mid_right = stitcher.stitch(warped_left_mid, frame_right,
                                                    H_mid_right)
            warped_left_mid_right_cropped = crop_img(warped_left_mid_right)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.waitKey(0)
            points = tracker.tracking(warped_left_mid_right_cropped)
            for i in range(len(points)):
                cv2.circle(warped_left_mid_right_cropped,
                           (points[i][1], points[i][0]), 3, (0, 0, 255), -1)
            height, width = warped_left_mid_right_cropped.shape[:2]
            warped_left_mid_right_cropped = cv2.resize(
                warped_left_mid_right_cropped, (width / 2, height / 2))
            cv2.imshow('Objects', warped_left_mid_right_cropped)
            cv2.waitKey(1)
            # background = transformer.transform(points)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.imshow('Objects', background)
            # cv2.waitKey(30)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cap_left.release()
    cap_mid.release()
    cap_right.release()
from __future__ import division, print_function

import numpy as np

from Tracker import Tracker

i = 1


def main(points):
    global i
    A = np.array([[p.x for p in points], [p.y for p in points],
                  [p.t for p in points]]).T
    np.save(str(i) + ".npy", A)
    i += 1


if __name__ == '__main__':
    Tracker(main, 20)
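# The callback above stacks x, y and t and transposes, so each saved file is an
# (N, 3) array with columns x, y, t. A small sketch of reading one back ("1.npy"
# is simply the first index the callback writes):
import numpy as np

points = np.load("1.npy")  # shape (N, 3)
xs, ys, ts = points[:, 0], points[:, 1], points[:, 2]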
class Ball3DTracker():
    def __init__(self, algorithm):
        self.threshold_filter = ThresholdFilter(
            np.array([24, 125, 100], dtype=np.uint8),
            np.array([36, 255, 255], dtype=np.uint8))
        self.algo = algorithm()
        # Camera Setups
        self.left_tracker = Tracker(1)
        self.right_tracker = Tracker(2)
        self.horizontal_fov = 120.0
        self.vertical_fov = 60.0
        self.d = 100
        self.centroid_algo = Centroid()
        self.left_transformed_image = np.copy(self.left_tracker.image)
        self.right_transformed_image = np.copy(self.right_tracker.image)
        self.valid = True

    def captureImageFrame(self):
        self.left_tracker.captureImageFrame()
        self.right_tracker.captureImageFrame()
        self.valid = self.left_tracker.valid and self.right_tracker.valid
        if self.valid:
            de_kernel = np.ones([3, 3], dtype=np.uint8)

            self.left_transformed_image = np.copy(self.left_tracker.image)
            self.left_transformed_image = cv2.resize(self.left_transformed_image, (0, 0),
                                                     self.left_transformed_image, 0.5, 0.5,
                                                     cv2.INTER_LANCZOS4)
            self.left_transformed_image = cv2.GaussianBlur(self.left_transformed_image, (9, 9), 2,
                                                           self.left_transformed_image, 2)
            self.left_transformed_image = cv2.cvtColor(self.left_transformed_image, cv2.COLOR_BGR2HSV)
            self.left_transformed_image = self.threshold_filter(self.left_transformed_image)
            self.left_transformed_image = cv2.erode(self.left_transformed_image, de_kernel, iterations=1)
            self.left_transformed_image = cv2.dilate(self.left_transformed_image, de_kernel, iterations=1)
            self.left_tracker.centroid = self.centroid_algo(self.left_transformed_image)
            self.left_transformed_image = cv2.cvtColor(self.left_transformed_image, cv2.cv.CV_GRAY2BGR)
            lds_centroid = self.left_tracker.centroid[:]
            self.left_tracker.centroid = tuple([2 * i for i in self.left_tracker.centroid])
            cv2.circle(self.left_transformed_image, lds_centroid, 2, (255, 0, 0), -1)
            cv2.circle(self.left_tracker.image, self.left_tracker.centroid, 2, (255, 0, 0), -1)

            self.right_transformed_image = np.copy(self.right_tracker.image)
            self.right_transformed_image = cv2.resize(self.right_transformed_image, (0, 0),
                                                      self.right_transformed_image, 0.5, 0.5,
                                                      cv2.INTER_LANCZOS4)
            self.right_transformed_image = cv2.GaussianBlur(self.right_transformed_image, (9, 9), 2,
                                                            self.right_transformed_image, 2)
            self.right_transformed_image = cv2.cvtColor(self.right_transformed_image, cv2.COLOR_BGR2HSV)
            self.right_transformed_image = self.threshold_filter(self.right_transformed_image)
            self.right_transformed_image = cv2.erode(self.right_transformed_image, de_kernel, iterations=1)
            self.right_transformed_image = cv2.dilate(self.right_transformed_image, de_kernel, iterations=1)
            self.right_tracker.centroid = self.centroid_algo(self.right_transformed_image)
            self.right_transformed_image = cv2.cvtColor(self.right_transformed_image, cv2.cv.CV_GRAY2BGR)
            rds_centroid = self.right_tracker.centroid[:]
            self.right_tracker.centroid = tuple([2 * i for i in self.right_tracker.centroid])
            cv2.circle(self.right_transformed_image, rds_centroid, 2, (255, 0, 0), -1)
            cv2.circle(self.right_tracker.image, self.right_tracker.centroid, 2, (255, 0, 0), -1)

            midpoint = self.compute_transform()
            print midpoint

    def compute_transform(self):
        # NOT CURRENTLY VALID DATA
        l_centroid = self.left_tracker.centroid
        r_centroid = self.right_tracker.centroid
        # Compute the camera sizes
        l_width = self.left_tracker.image.shape[0]
        l_height = self.left_tracker.image.shape[1]
        r_width = self.left_tracker.image.shape[0]
        r_height = self.left_tracker.image.shape[1]
        # Compute euler angles
        theta_l = ((float(l_centroid[0]) / l_width) - 0.5) * self.horizontal_fov
        phi_l = ((float(l_centroid[1]) / l_height) - 0.5) * self.vertical_fov
        theta_r = ((float(r_centroid[0]) / r_width) - 0.5) * self.horizontal_fov
        phi_r = ((float(r_centroid[1]) / r_height) - 0.5) * self.vertical_fov
        left_line = Line([sin(theta_l), cos(theta_l) * cos(phi_l), sin(theta_l) * cos(phi_l)],
                         [0., -float(self.d) / 2., 0.])
        right_line = Line([sin(theta_r), cos(theta_r) * cos(phi_r), sin(theta_r) * cos(phi_r)],
                          [0., float(self.d) / 2., 0.])
        midpoint = Line.point_between_lines(left_line, right_line)
        return midpoint
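# A minimal numpy sketch of the geometry Line.point_between_lines presumably
# performs (the Line class is not shown here, so this is an assumption): the
# midpoint of the shortest segment joining two 3D lines, each given as a
# direction vector plus a point on the line, matching the Line(...) arguments above.
import numpy as np


def point_between_lines(d1, p1, d2, p2):
    """Midpoint of the common perpendicular of lines p1 + t*d1 and p2 + s*d2."""
    d1, p1, d2, p2 = (np.asarray(v, dtype=float) for v in (d1, p1, d2, p2))
    w0 = p1 - p2
    a, b, c = np.dot(d1, d1), np.dot(d1, d2), np.dot(d2, d2)
    d, e = np.dot(d1, w0), np.dot(d2, w0)
    denom = a * c - b * b
    if abs(denom) < 1e-12:   # lines are (nearly) parallel
        t, s = 0.0, e / c    # project p1 onto the second line
    else:
        t = (b * e - c * d) / denom
        s = (a * e - b * d) / denom
    return ((p1 + t * d1) + (p2 + s * d2)) / 2.0


# point_between_lines([1, 0, 0], [0, 0, 0], [0, 1, 0], [2, 1, 0]) -> array([2., 0., 0.])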
import sys
import os

sys.path.append('..' + os.sep + 'python' + os.sep + 'src')

from Tracker import Tracker

m = Tracker()
m.boot()
def feed_pkt(self, pkt):
    log("track: %s" % pkt)
    return Tracker.feed_pkt(self, pkt)
class Win3d(FvwmModule):
    def send(self, command, window=WINDOW_NONE, cont=1):
        log("send: %s" % command)
        return FvwmModule.send(self, command, window, cont)

    def get_windowlist(self):
        # debug version
        t = DebugTracker()

        def callback(self, pkt, tracker=t):
            if Types[pkt.msgtype] == 'M_END_WINDOWLIST':
                self.stop()
            else:
                tracker.feed_pkt(pkt)

        cache = self._FvwmModule__override(callback)
        self.send('Send_WindowList')
        self.start()
        self.__restore(cache)
        return t

    def __init__(self, argv):
        FvwmModule.__init__(self, argv)
        self.set_mask()
        self.send("Set_Mask 4294967295")
        self.send("Move 2963p 178p", window=0x3e00004)  # pager sends this
        ## SetMessageMask(fd,
        ##                M_VISIBLE_NAME |
        ##                M_ADD_WINDOW|
        ##                M_CONFIGURE_WINDOW|
        ##                M_DESTROY_WINDOW|
        ##                M_FOCUS_CHANGE|
        ##                M_NEW_PAGE|
        ##                M_NEW_DESK|
        ##                M_RAISE_WINDOW|
        ##                M_LOWER_WINDOW|
        ##                M_ICONIFY|
        ##                M_ICON_LOCATION|
        ##                M_DEICONIFY|
        ##                M_RES_NAME|
        ##                M_RES_CLASS|
        ##                M_CONFIG_INFO|
        ##                M_END_CONFIG_INFO|
        ##                M_MINI_ICON|
        ##                M_END_WINDOWLIST|
        ##                M_RESTACK);
        ## SetMessageMask(fd,
        ##                MX_VISIBLE_ICON_NAME|
        ##                MX_PROPERTY_CHANGE);
        self.register("M_CONFIGURE_WINDOW", self.ConfigureWindow)
        log("windowlist")
        # self.tracker = self.get_windowlist()
        self.tracker = Tracker()
        log("windowlist done")
        # for win in self.tracker.get_windows():
        #     log((win, win.name, win.x, win.y, win.width, win.height, win.desk))
        self.send("Send_WindowList")  # pager sends this
        self.send("NOP FINISHED STARTUP")
        self.lastSend = None

    def start(self):
        self._Fvwm__done = 0
        while not self._Fvwm__done:
            t = time.time()
            if t - .1 > self.lastSend:
                log("mv")
                self.lastSend = t
                self.send("Move %dp 178p" % (2960 + math.sin(t) * 150),
                          window=0x3e00004)
            time.sleep(.01)
            self.do_dispatch()

    def RaiseWindow(self, pkt):
        pass
        # self.unhandled_packet(pkt)
        # log(("raise", pkt.db_entry, pkt.top_id, pkt.frame_id))

    def NewPage(self, pkt):
        self.unhandled_packet(pkt)
        log((pkt.desk,))

    def AddWindow(self, p):
        if p.top_id == p.frame_id == 1:
            return
        self.unhandled_packet(p)
        log(("add", p.x, p.y, p.width, p.height, p.top_id, p.frame_id))

    def ConfigureWindow(self, p):
        self.unhandled_packet(p)
        log(("cfg", p.x, p.y, p.width, p.height, p.top_id, p.frame_id))

    def DestroyWindow(self, p):
        log(("destroy", p.top_id, p.frame_id, p.db_entry))

    def unhandled_packet(self, pkt):
        log(("up", pkt))
        self.tracker.feed_pkt(pkt)
from Tracker import Tracker
import os

if __name__ == "__main__":
    # change current directory to the file's directory
    if os.path.dirname(__file__) != "":
        try:
            os.chdir(os.path.dirname(__file__))
        except OSError:
            pass
    Tracker().start()