def __init__(self, carla_server='127.0.0.1', port=2000):
    """Set up the full CARLA driving environment: server connection, vehicle,
    sensors, controls, reward system, NPC traffic and data collection.

    Args:
        carla_server: host/IP of the running CARLA server.
        port: CARLA server TCP port.
    """
    pygame.init()
    # NOTE(review): "intitalize" is misspelled; it must match the method
    # definition elsewhere in this class — rename both together, not here.
    self.intitalize_carla(carla_server, port)
    # Initialization order matters: navigation before vehicle, sensors and
    # controls before the reward system that reads them.
    self.initialize_navigation()
    self.initialize_vehicle()
    self.initialize_game_manager()
    self.initialize_sensor_manager()
    self.initialize_control_manager()
    self.initialize_reward_system()
    self.initialize_variables()
    # Runtime state flags.
    self.type = Type.Automatic
    self.running = True
    self.rendering = True
    self.respawn_pos_times = 0
    self.key_control = False
    self.collision_vehicle = False
    # Spawn NPC traffic (80 vehicles) managed alongside the ego vehicle.
    self.traffic_controller = traffic_controller.TrafficController(
        self, 80)
    self.traffic_controller.add_vehicles()
    self.lane_ai = lane_ai.LaneAI(self)
    # need to change from here
    self.navigation_system.make_local_route()
    self.agent = basic_agent.BasicAgent(self.vehicle_controller.vehicle)
    # drawing_library.draw_arrows(self.world.debug,[i.location for i in self.navigation_system.ideal_route])
    # drawing_library.print_locations(self.world.debug,[i.location for i in self.navigation_system.ideal_route])
    # self.add_npc()
    # Advance the simulator one step so spawned actors are registered
    # before data collection starts.
    self.world.tick()
    self.world.wait_for_tick()
    self.data_collector = data_collector.DataCollector(self)
    # self.collision_collector = data_collector.CollisionCollector(self)
    # self.free_road = ai_model.FreeRoad(self.vehicle_controller.control)
    # Timing / counters used by the main loop.
    self.last_stop = pygame.time.get_ticks()
    self.cnt = 0
    self.collide_cnt = 0
def __init__(self):
    """Create the crawler's collaborators and the state shared across threads."""
    self.urlManager = url_manager.UrlManager()
    self.parser = html_parser.HtmlParser()
    self.downloader = html_downloader.HtmlDownloader()
    self.collector = data_collector.DataCollector()
    self.lock = threading.Lock()  # thread lock guarding shared mutable state
    # Per-thread storage so every worker thread keeps its own data.
    self.local_crawed = threading.local()
    self.count = 0  # global count of pages crawled so far
def client(q1, q2, tasks, count, processor_nums):
    """Worker-process loop: pull URLs from ``q1``, download and parse each
    page, push newly discovered URLs to ``q2`` and collect parsed data.

    Args:
        q1: input queue of URLs to crawl.
        q2: output queue for newly discovered URLs.
        tasks: total number of pages expected across all workers.
        count: multiprocessing.Value shared success counter.
        processor_nums: number of worker processes running this loop.
    """
    print("启动工作进程%s" % os.getpid())
    # Per-process component instances (these are not shared across processes).
    parser = html_parser.HtmlParser()
    downloader = html_downloader.HtmlDownloader()
    collector = data_collector.DataCollector()
    crawed_nums = 0
    while True:
        url = q1.get()
        response = downloader.download(url)
        new_urls, json_data = parser.parse(response)
        for new_url in new_urls:
            q2.put(new_url)
        if json_data:
            collector.collect_data(json_data)
            # Fix: `count.value = count.value + 1` is a non-atomic
            # read-modify-write on a shared Value; concurrent workers could
            # lose increments. Guard it with the Value's own lock and read
            # the updated value inside the same critical section.
            with count.get_lock():
                count.value += 1
                counted = count.value
            print('进程%s成功抓取第%s个页面' % (os.getpid(), counted))
            crawed_nums = crawed_nums + 1
        else:
            print('fail url: %s' % url)  # report the failed URL
        # Stop once the remaining work cannot need this worker anymore.
        if count.value >= (tasks - processor_nums + 1):
            print("%s爬取的网页数量:%s" % (os.getpid(), crawed_nums))
            break
def __init__(self):
    """Wire up the crawler pipeline: URL bookkeeping, parsing, downloading
    and data collection components."""
    self.urlManager = url_manager.UrlManager()
    self.parser = html_parser.HtmlParser()
    self.downloader = html_downloader.HtmlDownloader()
    self.collector = data_collector.DataCollector()
def main(dist_constant: int = None, file_path: str = '../data/matches.pkl', hero_weight: float = 0.05):
    """Cluster Dota matches three ways and compare them: divisive clustering,
    K-means seeded from the divisive result, and K-means with random init.

    Args:
        dist_constant: user-supplied distance threshold for divisive
            clustering; when None it is derived as the average pairwise
            distance between all matches.
        file_path: pickle file of matches to load.
        hero_weight: weight of the hero-composition component in the
            distance metric.
    """
    if dist_constant is not None:
        # Fix: the original printed the label but dropped the value.
        print('main(): dist_constant is: ' + str(dist_constant))
    print('main(): Data file path: ' + file_path)
    print('main(): hero weight: ' + str(hero_weight))
    collector = data_collector.DataCollector(data_file_path=file_path)
    list_of_dota_matches = collector.read_dota_matches_from_file()
    print('main(): DataCollector found: ' + str(len(list_of_dota_matches)) + ' matches from file')
    hero_names_dict = data_collector.get_hero_names()
    # Create the initial cluster that contains all matches that were read from file
    initial_cluster = clustering.Cluster(matches=list_of_dota_matches, hero_names=hero_names_dict)
    initial_cluster.print_center()
    max_dist = 0
    num_of_matches = len(initial_cluster.matches)
    farthest_cluster_pair = 0, 0
    avg_dist = 0
    num_of_disjoint_pairs = 0
    if dist_constant is None:
        # Get all disjoint pair distances, and find the average distance
        # between points as well as the maximum distance.
        for i in range(num_of_matches):
            for j in range(i + 1, num_of_matches):
                dist = clustering.get_distance(initial_cluster.matches[i],
                                               initial_cluster.matches[j],
                                               hero_weight=hero_weight)
                avg_dist += dist
                num_of_disjoint_pairs += 1
                if dist > max_dist:
                    max_dist = dist
                    farthest_cluster_pair = i, j
        # Fix: guard against fewer than two matches, which previously
        # raised ZeroDivisionError here.
        if num_of_disjoint_pairs:
            avg_dist = (avg_dist / num_of_disjoint_pairs)
        print('Average distance between Dota matches: ' + str(avg_dist) + ' max dist is: ' + str(max_dist))
    else:
        avg_dist = dist_constant
        print('Using dist constant of: ' + str(avg_dist) + ' max dist is: ' + str(max_dist))
    divisive_clustering_clusters = []
    # Run divisive clustering with the initial cluster and use avg_dist as
    # the "user-defined" constant; time the computation.
    divisive_start_time = time.time()
    clustering.run_divisive_clustering(initial_cluster, hero_names_dict,
                                       int(avg_dist),
                                       divisive_clustering_clusters,
                                       hero_weight=hero_weight)
    divisive_end_time = time.time()
    suggested_value_of_k = len(divisive_clustering_clusters)
    print('Num of clusters found in divisive clustering: ' + str(len(divisive_clustering_clusters)) +
          ' Time of computation: %s seconds. About to print centers...' % (divisive_end_time - divisive_start_time))
    for cluster in divisive_clustering_clusters:
        print('Number of matches in cluster: ' + str(len(cluster.matches)))
        cluster.print_center()
    # Run K-means using the value of k found by divisive clustering, seeded
    # with the clusters already found in that step; time the computation.
    reclustering_start_time = time.time()
    k_means_clusters = clustering.k_means(
        hero_names_dict=hero_names_dict,
        num_of_clusters=suggested_value_of_k,
        final_clusters=divisive_clustering_clusters,
        hero_weight=hero_weight)
    reclustering_end_time = time.time()
    print('K-Means with re-clustering found clusters for k=' + str(len(k_means_clusters)) +
          ' In %s seconds. About to print centers...' % (reclustering_end_time - reclustering_start_time))
    for cluster in k_means_clusters:
        print('Number of matches in cluster: ' + str(len(cluster.matches)))
        cluster.print_center()
    # K-means with random initialization, to compare computation time.
    random_start_time = time.time()
    k_means_random_clusters = clustering.k_means(
        hero_names_dict=hero_names_dict,
        num_of_clusters=suggested_value_of_k,
        matches=list_of_dota_matches,
        hero_weight=hero_weight)
    random_end_time = time.time()
    print('K-Means with random initialization found clusters for k=' + str(len(k_means_random_clusters)) +
          ' In %s seconds. About to print centers...'
          % (random_end_time - random_start_time))
    for cluster in k_means_random_clusters:
        print('Number of matches in cluster: ' + str(len(cluster.matches)))
        cluster.print_center()
def run(self):
    """Dispatch on the parsed CLI arguments.

    Handles, in order: -u (save URL to config), collect (run the data
    collector), compile (compile collected data to an output file). Exits
    with status 1 on any failure or when no valid argument was given.
    """
    run_success = False
    if self.args.url:
        # URL save: persist the given URL into the config file.
        if not os.path.exists('config'):
            os.makedirs('config')
        self.config_data['url'] = self.args.url
        self.save_config(self.config_data)
        print("\nConfig file successfully updated!\n")
        run_success = True
    if self.args.collect:
        # Collector: needs a config file written by a previous -u run.
        try:
            self.read_config()
        except FileNotFoundError:
            logging.critical("Config file not found. Please run with -u")
            print("\nERROR: Config file not found. Please run with -u")
            sys.exit(1)
        collector = data_collector.DataCollector(
            self.config_data.get('url'))
        try:
            collector.run_collector()
        except ConnectionError:
            logging.critical(
                "URL is invalid or the connection was refused")
            print(
                "\nERROR: URL is invalid or the connection was refused\n")
            sys.exit(1)
        except ValueError:
            # ValueError signals an unrecognized machine type in the data.
            print(
                "\nERROR: Invalid machine type detected. Please send log to developer"
            )
            logging.critical(
                "Invalid machine type detected. Please send log to developer"
            )
            logging.debug(self.config_data.get('url'))
            sys.exit(1)
        print("\nData collection complete!\n")
        run_success = True
    if self.args.compile:
        # Compiler: -o overrides the default output file.
        if self.args.output:
            self.compile_OF = self.args.output
        compiler = compile_data.CompileData(self.compile_OF)
        try:
            compiler.run_compiler()
        except FileNotFoundError:
            print(
                "\nERROR: Data files not found (have you run the collector yet?)"
            )
            logging.critical(
                "Data files not found (have you run the collector yet?)")
            sys.exit(1)
        print("\nData compiled and saved to " + self.compile_OF)
        run_success = True
    if not run_success:
        # Fail: nothing matched above — no valid argument combination.
        print("\nERROR: Invalid arguments\n")
        logging.critical("Invalid arguments")
        sys.exit(1)
def init_collector_set():
    """Build the timing collectors (ms, 3 decimal places) for each stage."""
    stage_labels = (
        ("mfcc", "mfcc computation time"),
        ("inference", "inference time"),
        ("process", "overall process time"),
    )
    return {key: dc.DataCollector(label, "ms", 3) for key, label in stage_labels}
def setUp(self):
    """Create a fresh DataCollector under test, then install the mocks."""
    self.test_listener = data_collector.DataCollector()
    self.setup_mocks()
import data_collector
import dbutils
import time

# Poll the DB for configured IP ranges and scan each range forever,
# sleeping `timer` minutes between full passes.
db = dbutils.DB()
while True:
    ip_ranges = db.get_ip_ranges()
    system_variables = db.get_system_variable()
    print(ip_ranges)
    # Fix: loop variable renamed from `range`, which shadowed the builtin.
    for ip_range in ip_ranges:
        # Network prefix = first three octets of the base address,
        # e.g. '172.16.1.0' -> '172.16.1'.
        network_part = str(ip_range[0]).rsplit('.', 1)[0]
        start_ip = ip_range[1]
        end_ip = ip_range[2]
        # SECURITY(review): credentials are hard-coded here; move them to
        # configuration or environment variables.
        datacollector = data_collector.DataCollector(
            network_part, start_ip, end_ip, 'admin', 'gn123125',
            int(system_variables['timeout']))
        time.sleep(3)
    # assumes the timer sleep belongs after the whole pass, not per-range
    # — TODO confirm against the original (indentation was lost).
    time.sleep(int(system_variables['timer']) * 60)