def work(self):
    """Poll the config file and publish changes to the shared config.

    Runs until ``self.running`` is cleared.  Every 10 seconds the file at
    ``self.configFile`` is re-read; when its raw text differs from the last
    seen value, the new text is copied into ``self.sharedConfig.value``.
    """
    Worker.work(self)
    oldRawConfig = ""
    while self.running:
        # Use a context manager so the handle is closed every cycle;
        # the original `open(...).read()` leaked one open file per poll.
        with open(self.configFile) as config_handle:
            rawConfig = config_handle.read()
        if rawConfig != oldRawConfig:
            self.say("Updating config to: %s" % rawConfig)
            self.sharedConfig.value = rawConfig
            oldRawConfig = rawConfig
        # TODO: replace this polling sleep with an event
        time.sleep(10)
def main():
    """Generate a simplex map, place four workers near the centre, run the game."""
    size = 100
    generate_simplex('simplex.map', size, seed=random.randint(0, 1000000))
    WORLD.load('simplex.map', WORLD)

    # Two workers on each side of the map centre, stacked vertically:
    # offsets (+4, -3) horizontally, rows 0 and 1 vertically.
    centre = size // 2
    for dx in (4, -3):
        for dy in (0, 1):
            WORLD.workers.append(Worker(centre + dx, centre + dy, WORLD))

    # Run the game
    pyglet.app.run()
def add_server(monitor, server_config, server_state, stat_window):
    """Register a stats worker and a health worker for one varnish server."""
    hostname = utils.ssh_exec_command('hostname', **server_config)
    host = server_config['host']
    user = server_config['username']
    password = server_config['password']

    # Periodic statistics collector for this host.
    stats = VarnishStats(host, user, password, hostname, server_state,
                         stat_window)
    monitor.add_worker(Worker('Stats [{0}]'.format(host), stats))

    # Health-check probe for this host.
    health = VarnishHealth(host, user, password, hostname, server_state)
    monitor.add_worker(Worker('Health [{0}]'.format(host), health))
def empty_task(self, task):
    """Empty a task queue lazily by draining it with a no-op worker.

    A callback that does nothing useful is registered for *task*, so any
    queued jobs are consumed and discarded.  Alternatively the task could
    be removed with unregister_task; neither approach works perfectly, so
    this needs testing in practice.  (Docstring translated from Chinese.)
    """
    def callback(worker, job):
        # Fixed: was a Python 2 `print` statement (a SyntaxError on
        # Python 3 and inconsistent with the rest of the code base).
        print("Efforts to clean up the list...")
        return json.dumps({'a': 'a'})

    worker = Worker(self.host_list)
    worker.register_task(task, callback)
    worker.safely_work()
def Opt_KG_experiment(): """ Run experiment on the RTE data set :return: accuracy """ # initialize the prior parameters a0 b0 of the dataset data_file = 'rte.standardized.tsv' init_a0 = 1 init_b0 = 1 # sourcedata = DataSource(data_file, init_a0, init_b0) # initialize the prior parameters c0 d0 of the workers init_c0 = 4 init_d0 = 1 # workers = Worker(data_file, init_c0, init_d0) # Given Budget T Budget_T = np.arange(0,8000,100) # accuracy result of experiment each time accuracy_ = [] # run experiment limited to the given budget T_ for T_ in Budget_T: accuracy_sum = 0 for i in range(0,1): sourcedata = DataSource(data_file, init_a0, init_b0) workers = Worker(data_file, init_c0, init_d0) Opt_KG = Algorithm(sourcedata, workers, T_) H_T, H_complement = Opt_KG.run_Opt_KG() # the number that the positive and negative set result derived from the experiment is accordance with the real data result = 0 # get H* and H*c H_star, H_star_c = sourcedata.get_H_star() for idx in H_T: if idx in H_star: result = result + 1 for idx in H_complement: if idx in H_star_c: result = result + 1 # calculate the accuracy_sum accuracy_sum = accuracy_sum + result / 800 # calculate the accuracy accuracy_mean = accuracy_sum / 1 accuracy_.append(accuracy_mean) # print the accuracy result on the console print('the length of H_t is:' + str(len(H_T)) + ', the length of H_t_c is:' + str(len(H_complement))) print('the length of H* is:' + str(len(H_star)) + ', the length of H*_c is:' + str(len(H_star_c))) print('the length of result is:' + str(result)) print('Budget ' + str(T_) + ' and the accuracy is ' + str(accuracy_[-1])) print('*' * 40) # save the beta distribution dictionary save_beta_dic() # plot plt.figure() plt.plot(Budget_T, accuracy_, color = 'red', linewidth = 2.0, marker = 'D', fillstyle = 'full') plt.xlabel('Budget') plt.ylabel('accuracy') # set y-axis locations and labels plt.yticks(np.arange(0,1,0.05)) plt.title('Opt-KG on RTE') plt.show()
def _open_dialog(self, target: str):
    """Pick an Excel file for *target* and parse it on a background thread.

    Shows a file-open dialog; rejects non-Excel extensions with an error
    box.  On success, spins up a QThread + Worker pair (stored as
    ``self.<target>_thread`` / ``self.<target>_worker``), wires the
    worker's signals, and starts the thread.  The parsed workbook is
    delivered to ``self.set_<target>_wb`` via the ``workbook`` signal.
    """
    # Callback that will receive the parsed workbook for this target.
    wb_callback = getattr(self, f"set_{target}_wb")
    name = QFileDialog.getOpenFileName(self, 'Open file', './')[0]
    if not name:
        # User cancelled the dialog.
        return
    elif name.split(".")[-1] not in ("xlsx", "xlsm", "xlsb", "xls"):
        # Not an Excel file (error text is user-facing Korean).
        QMessageBox.critical(self, 'error', '엑셀 파일이 아닙니다.')
        return
    # Lock the UI while the file is being processed.
    self.create_button.setDisabled(True)
    self.pg_thread.toggle_status()
    # Keep thread and worker as attributes so they are not garbage
    # collected while running.
    setattr(self, f'{target}_thread', QtCore.QThread())
    setattr(self, f'{target}_worker', Worker(name, self.text_browser))
    worker: Worker = getattr(self, f'{target}_worker')
    thread: QtCore.QThread = getattr(self, f'{target}_thread')
    worker.moveToThread(thread)
    # Signal wiring: run on start, quit/cleanup on finish, report failure.
    thread.started.connect(worker.run)
    worker.finished.connect(thread.quit)
    worker.finished.connect(self.pg_thread.toggle_status)
    worker.failed.connect(lambda: QMessageBox.critical(
        self, 'error', '해당 파일이 존재하지 않거나 잘못된 파일입니다.'))
    worker.workbook.connect(wb_callback)
    worker.finished.connect(worker.deleteLater)
    thread.finished.connect(thread.deleteLater)
    thread.start()
    # Reflect the chosen path in the UI and remember it.
    self._set_edit_text(target, name)
    setattr(self, f"{target}_name", name)
def get_worker(self):
    """Build a Worker from the values currently entered in the menu form."""
    menu = self.menu
    first_name = menu.nameText.text()
    last_name = menu.lnameText.text()
    phone_number = menu.phoneText.text()
    # New workers start with id 0 and score 0.
    return Worker().to_worker(0, first_name, last_name, phone_number,
                              self.get_skills(), 0)
def run(self):
    """ Method called by run_app to start processing the queue """
    num_workers = multiprocessing.cpu_count()
    logger.info('Starting %d workers...' % num_workers)
    # Spawn one daemonised worker process per CPU and keep a reference
    # to each so they can be managed later.
    for worker_id in range(num_workers):
        worker = Worker(id=worker_id)
        worker.daemon = True
        self.workers.append(worker)
        worker.start()
def _test_algorithm(): filename = 'rte.standardized.tsv' a0 = 1 b0 = 1 datasource = DataSource(filename, a0, b0) workers = Worker(filename, 4, 1) Budget = 6400 Opt_KG = Algorithm(datasource, workers, Budget) H_T = Opt_KG.run_Opt_KG() print(H_T)
def main(config, static_path):
    """Wire up monitoring workers and serve the dashboard until shutdown.

    config -- dict with keys 'test', 'servers', 'stat_window', 'port',
              'wsgi_server'
    static_path -- path to static assets passed through to the HTTP server
    """
    # Removed unused local `hostname = 'testing'` (dead code).
    monitor = WorkerMonitor()
    server_state = ServerState()

    if config['test']:
        # Test mode: feed the state from a dummy generator instead of SSH.
        from tests.data_generator import DummyDataGenerator
        dummydata = DummyDataGenerator(server_state)
        monitor.add_worker(Worker('Testing', dummydata))
    else:
        for server_config in config['servers']:
            add_server(monitor, server_config, server_state,
                       config['stat_window'])
        print('Started gathering varnish data')

    # Run the monitor itself on a worker, then block in the HTTP server.
    Worker('WorkerMonitor', monitor).start()
    http_server.start(server_state, static_path, config['port'],
                      config['wsgi_server'])
    print('Simverest stopping...')
    monitor.stop()
def default_worker_factory(job_queue):
    """Create the default Worker bound to *job_queue*."""
    worker = Worker(job_queue)
    return worker
def __init__(self, config, sharedConfig, statusQueue):
    """Set up the config-updater worker.

    config -- path of the config file to watch
    sharedConfig -- shared value that receives the raw config text
    statusQueue -- status queue passed through to the Worker base class
    """
    Worker.__init__(self, 'Config updater', statusQueue)
    self.sharedConfig = sharedConfig
    self.configFile = config
def __init__(self, jobs, toolbox_factory):
    # Build the per-instance toolbox BEFORE the base-class init runs.
    # NOTE(review): Worker.__init__ may start processing that expects
    # self.toolbox to exist — confirm before reordering these two lines.
    self.toolbox = toolbox_factory()
    Worker.__init__(self, jobs)
def poller_thread(self):
    """Run one PLC poll (self.poll_plc) on a pooled background thread."""
    self.threadpool.start(Worker(self.poll_plc))
def poller_thread(self):
    """Run one PLC poll (self.poll_plc) on a pooled background thread.

    The worker's result/finished/progress signals are currently left
    unconnected — presumably fire-and-forget; confirm before relying on
    completion callbacks.
    """
    # Removed the commented-out signal hookups (dead code); restore from
    # version control if the callbacks are ever needed.
    worker = Worker(self.poll_plc)
    self.threadpool.start(worker)
# Inter-stage queues backed by a multiprocessing Manager.
prep_queue = Queue('PrepQueue', Manager().Queue())
db_queue = Queue('DBQueue', Manager().Queue())

# Create processes
pool = Pool()

# Stage 1: two file workers drain file_queue into prep_queue.
default_file_handler = DefaultFileHandler()
for i in (1, 2):
    file_worker = FileWorker(f'File Worker{i}', file_queue)
    file_worker.register_handler(default_file_handler)
    file_worker.register_result_queue(prep_queue)
    pool.apply_async(file_worker.process)

# Stage 2: two prep workers drain prep_queue into db_queue.
default_prep_handler = DefaultPrepHandler()
db_prep_handler = DBPrepHandler()
for i in (1, 2):
    prep_worker = Worker(f'Prep Worker{i}', prep_queue)
    prep_worker.register_handler(default_prep_handler)
    prep_worker.register_handler(db_prep_handler)
    prep_worker.register_result_queue(db_queue)
    pool.apply_async(prep_worker.process)

# Stage 3: four DB workers drain db_queue.
orm_handler = ORMHandler()
db_handler = DBHandler()
for i in (1, 2, 3, 4):
    db_worker = Worker(f'DB Worker{i}', db_queue)
    db_worker.register_handler(orm_handler)
    db_worker.register_handler(db_handler)
    pool.apply_async(db_worker.process)

pool.close()