class Controller(Parent):
    def models(self):
        self.workers = Worker(self.config)

    # BELOW THIS LINE ARE ALL CONTROLLER ACTIONS

    def new(self, body, resp):
        worker = self.workers.new()
        machine = ProcHandler(self.config, Writer(self.config))
        machine.start()
        worker.CONTROLQUEUE = machine.stopqueue
        worker.OUTPUTQUEUE = machine.subproc.queue
        self.workers.save(worker)
        resp.respond(worker.OUTPUTQUEUE)

    def delete(self, body, resp):
        worker = self.workers.find(body.data)
        pub = SimplePublisher(
            '',
            self.config['Rabbit']['host'],
            self.config['Rabbit']['username'],
            self.config['Rabbit']['password'])
        pub.publish_msg('STOP', worker.CONTROLQUEUE)
        resp.respond('DELETED')
def test_no_wake_up_call_for_delayed(self):
    from machine_midwife import MachineMidwife
    Apprentice = MachineMidwife.Apprentice
    from job import Job
    from worker import Worker

    apprentice = Apprentice()
    apprentice.settings = mock.MagicMock()
    apprentice.settings.max_instances = 1
    apprentice.client = mock.MagicMock()
    apprentice.client.exists.return_value = True
    job = Job('delayed', 'batch-')
    apprentice.client.keys.side_effect = [['jm-1', 'jm-2'], ['job-']]
    w1 = Worker(None, None)
    w1.instance = 'a'
    w2 = Worker(None, None)
    w2.instance = 'b'
    apprentice.client.get.side_effect = [pickle.dumps(w1), pickle.dumps(w2), pickle.dumps(job)]
    apprentice.client.publish = mock.MagicMock()

    apprentice.rise_and_shine()

    assert apprentice.client.keys.call_count == 2
    assert apprentice.client.get.call_count == 3
    assert apprentice.client.publish.call_count == 0
def test_normal_machine_recycle(self):
    from consuela import Consuela
    from job import Job
    from worker import Worker

    cleaner = Consuela()
    cleaner.job_pub_sub = mock.MagicMock()
    cleaner.job_pub_sub.listen.return_value = [{'data': 'test'}]
    worker = Worker(None, None)
    worker.instance = 'some'
    cleaner.get_worker = mock.MagicMock()
    cleaner.get_worker.return_value = 'id', worker
    cleaner.client = mock.MagicMock()
    cleaner.client.exists.return_value = True
    cleaner.client.get.return_value = pickle.dumps(Job('finished', 'something'))
    cleaner.settings = mock.MagicMock()
    cleaner.settings.recycle_workers = True
    cleaner.recycle_worker = mock.MagicMock()
    cleaner.recycle_worker.return_value = True

    cleaner.run()

    assert cleaner.client.exists.call_count == 1
    assert cleaner.client.get.call_count == 1
    assert pickle.loads(cleaner.client.set.call_args_list[0][0][1]).job_id is None
def test_request_to_booted(self):
    self.aws_mock.my_booted_machine = mock.MagicMock()
    self.aws_mock.my_booted_machine.return_value = 'instance', 'ip'
    from machine_midwife import MachineMidwife
    Apprentice = MachineMidwife.Apprentice
    from job import Job
    from worker import Worker

    apprentice = Apprentice()
    apprentice.settings = mock.MagicMock()
    apprentice.settings.aws_req_max_wait = 10
    apprentice.client = mock.MagicMock()
    apprentice.client.exists.return_value = True
    job = Job('requested', 'batch-')
    worker = Worker(None, None)
    worker.reservation = 'some'
    worker.request_time = datetime.now() - timedelta(minutes=5)
    apprentice.client.keys.return_value = ['jm-']
    apprentice.client.get.side_effect = [pickle.dumps(worker), pickle.dumps(job)]
    apprentice.client.set = mock.MagicMock()
    apprentice.client.publish = mock.MagicMock()

    apprentice.check_newborn()

    assert apprentice.client.keys.call_count == 1
    assert apprentice.client.get.call_count == 2
    assert apprentice.client.set.call_count == 2
    assert apprentice.client.publish.call_count == 1
    assert pickle.loads(apprentice.client.set.call_args_list[0][0][1]).instance == 'instance'
    assert pickle.loads(apprentice.client.set.call_args_list[1][0][1]).state == 'booted'
def test_failed_job_machine_removal(self):
    from consuela import Consuela
    from job import Job
    from worker import Worker

    with mock.patch('consuela.terminate_worker') as worker_mock:
        cleaner = Consuela()
        cleaner.job_pub_sub = mock.MagicMock()
        cleaner.job_pub_sub.listen.return_value = [{'data': 'test'}]
        worker = Worker(None, None)
        worker.instance = 'some'
        cleaner.get_worker = mock.MagicMock()
        cleaner.get_worker.return_value = 'id', worker
        cleaner.client = mock.MagicMock()
        cleaner.client.exists.return_value = True
        cleaner.client.get.return_value = pickle.dumps(Job('failed', 'something'))
        cleaner.settings = mock.MagicMock()
        cleaner.settings.recycle_workers = True
        cleaner.recycle_worker = mock.MagicMock()
        cleaner.recycle_worker.return_value = False

        cleaner.run()

        assert cleaner.client.exists.call_count == 1
        assert worker_mock.call_count == 0
def stop(self, force=True):
    """Stops all modules and shuts down the manager."""
    self.log().debug("Stopping")
    self.stopModules()
    Worker.stop(self, force)
class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.worker = None
        self.statusBar().showMessage('ready')
        self.resize(250, 150)
        self.move(300, 300)
        self.setWindowTitle('Grind Away')  # original title: '刷起来'
        self.setWindowIcon(QIcon('icon.ico'))
        self.imagesPath = "./images/tp14/"
        self.toolBar = self.addToolBar('')
        GameStatus().window = self
        # 'yaoguaifaxian' (妖怪发现) = "demon encounter", the mode this bot automates
        yaoguaifaxian_action = QAction(QIcon('./images/ui/yaoguaifaxian.jpg'), 'Demon Encounter', self)
        yaoguaifaxian_action.triggered.connect(self.yaoguaifaxian)
        exit_action = QAction(QIcon('./images/ui/exit.png'), 'Stop', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.triggered.connect(self.stop_loop)
        self.toolBar.addAction(yaoguaifaxian_action)
        self.toolBar.addAction(exit_action)
        txt = QTextBrowser()
        txt.setContentsMargins(5, 5, 5, 5)
        self.setCentralWidget(txt)
        self.show()

    def add_text(self, text):
        self.centralWidget().append(text)
        sb = self.centralWidget().verticalScrollBar()
        sb.setValue(sb.maximum())
        print(text)

    def closeEvent(self, *args, **kwargs):
        self.stop_loop()
        print("Closing program")

    def status_changed(self):
        if self.worker is None:
            self.statusBar().showMessage("Ready.")
        elif self.worker.stopped():
            self.statusBar().showMessage("Stopped.")
        else:
            self.statusBar().showMessage("Current run count: " + str(self.worker.cnt))

    def yaoguaifaxian(self):
        if self.worker is not None and not self.worker.stopped():
            return
        self.worker = Worker(self.imagesPath)
        GameStatus().game_stage = GameStage.Yaoguaifaxian
        self.worker.start()

    def stop_loop(self):
        if self.worker is None:
            return
        self.worker.stop()
def fork_all(self):
    """Create a fork for each worker.

    The number of workers per tube is specified in the tubes list
    passed to the constructor.
    """
    error_actions = ErrorActions(self.config['error_codes'])
    pids = []
    self.info('Parent process started with pid {}'.format(os.getpid()))
    for tube_config in self.config['tubes']:
        try:
            worker_count = tube_config['workers']
        except KeyError:
            worker_count = 1
        for i in range(worker_count):
            # Fork the current process. The parent and the child both continue
            # from this point, so we need to make sure that only the child
            # process adds workers to the pool.
            pid = os.fork()
            if pid == 0:
                # child process
                self.info('Child process started with pid {} on tube "{}"'.format(
                    os.getpid(), tube_config['name']))
                worker = Worker(os.getpid(), tube_config,
                                self.config['connection'], error_actions)
                worker.watch()
                sys.exit()
            else:
                pids.append(pid)
    for pid in pids:
        os.waitpid(pid, 0)
        self.info("Worker {} has exited.".format(pid))
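# The fork/exit/waitpid dance above is easy to get wrong, so here is a
# minimal, self-contained sketch of the same fork-per-worker pattern
# (illustrative only: do_work() is a hypothetical stand-in for Worker.watch(),
# the tube list is made up, and it is Unix-only since it relies on os.fork):
import os
import sys

def do_work(tube_name):
    print('child {} working on tube {}'.format(os.getpid(), tube_name))

def fork_all(tubes):
    pids = []
    for tube in tubes:
        for _ in range(tube.get('workers', 1)):
            pid = os.fork()
            if pid == 0:
                # child: do the work, then exit so it never falls
                # through to the parent's loop
                do_work(tube['name'])
                sys.exit(0)
            pids.append(pid)  # parent: remember the child pid
    for pid in pids:
        os.waitpid(pid, 0)    # block until each child exits

if __name__ == '__main__':
    fork_all([{'name': 'default', 'workers': 2}])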
def test_thread_safe(self):
    """These tests relate to http://stackoverflow.com/q/3752618.

    I'm not even sure if these tests are correct.
    """
    from worker import Worker

    with self.subTest("one-time listener"):
        a = Worker().start()

        @a.listen("test")
        def handler(event):
            a.unlisten(handler)

        a.fire("test")
        a.stop().join()
        self.assertNotIn(handler, a.listener_pool)
        self.assertEqual(a.listeners.get("test", []), [])

    with self.subTest("add listener in listener callback"):
        a = Worker().start()

        @a.listen("test")
        def _(event):
            @a.listen("test")
            def _(event):
                pass

        a.fire("test")
        a.stop().join()
        self.assertEqual(len(a.listeners.get("test", [])), 2)
def do_request(conn):
    # Send a prompt message
    logger.info("Starting a request")
    conn.send("Ok, send your job")
    run = False
    logger.info("Waiting for the job")
    try:
        j = conn.recv()
        run = True
    except Exception:
        logger.info("Problem receiving the job")
    # No error occurred
    if run:
        logger.info("Received job %s" % j)
        if isinstance(j, Job):
            conn.send('Job received')
            logger.info("Creating worker")
            w = Worker(j)
            logger.info("Starting worker")
            w.work()
            logger.info("Worker finished")
            conn.send('Job finished, sending result')
            conn.send(j)
            conn.send('Result transmitted')
            conn.send('OK, see you soon')
            conn.close()
        else:
            logger.info("Error: not a job: %s" % j)
            conn.send('Error: not a job')
            conn.close()
def startWorker(self, featureCollection, attributes, pointProvider=None, hexagonProvider=None):
    # create a new worker instance
    worker = Worker(featureCollection, attributes, pointProvider, hexagonProvider)

    # configure the QgsMessageBar
    messageBar = self.iface.messageBar().createMessage('Reading IMAER data...')
    progressBar = QtGui.QProgressBar()
    progressBar.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
    progressBar.setMinimum(0)
    progressBar.setMaximum(100)
    progressBar.setTextVisible(True)
    cancelButton = QtGui.QPushButton()
    cancelButton.setText('Cancel')
    cancelButton.clicked.connect(worker.kill)
    messageBar.layout().addWidget(progressBar)
    messageBar.layout().addWidget(cancelButton)
    self.iface.messageBar().pushWidget(messageBar, self.iface.messageBar().INFO)
    self.messageBar = messageBar
    self.progressBar = progressBar

    # start the worker in a new thread
    thread = QtCore.QThread(self)
    worker.moveToThread(thread)
    worker.finished.connect(self.workerFinished)
    worker.error.connect(self.workerError)
    worker.progress.connect(self.updateProgress)
    thread.started.connect(worker.run)
    thread.start()
    self.thread = thread
    self.worker = worker
def test_delayed_machine_state_flow_for_requested_with_recycle(self):
    from machine_midwife import MachineMidwife
    from job import Job
    from worker import Worker

    midwife = MachineMidwife()
    midwife.apprentice = mock.MagicMock()
    midwife.settings = mock.MagicMock()
    midwife.client = mock.MagicMock()
    midwife.job_pub_sub = mock.MagicMock()
    midwife.job_pub_sub.listen.return_value = [{'data': 'test'}]
    midwife.client.exists.return_value = True
    job = Job('delayed', 'batch-')
    worker = Worker(None, 'batch-')
    worker.reservation = 'reservation'
    worker.request_time = datetime.now()
    midwife.client.keys.return_value = ['jm-']
    midwife.client.get.side_effect = [pickle.dumps(job), pickle.dumps(worker)]
    midwife.client.set = mock.MagicMock()
    midwife.client.publish = mock.MagicMock()

    midwife.run()

    assert midwife.client.exists.call_count == 2
    assert len(midwife.client.set.call_args_list) == 2
    assert pickle.loads(midwife.client.set.call_args_list[0][0][1]).job_id == 'test'
    assert pickle.loads(midwife.client.set.call_args_list[1][0][1]).state == 'booted'
def __init__(self, node_id, data):
    """
    Constructor.

    @type node_id: Integer
    @param node_id: the unique id of this node; between 0 and N-1
    @type data: List of Integer
    @param data: a list containing this node's data
    """
    self.node_id = node_id
    self.data = data
    # temporary buffer needed for scatter
    self.copy = data[:]
    self.lock_copy = Lock()
    self.nodes = None
    self.lock_data = Lock()
    # list of threads (in this case 16 for each node)
    self.thread_list = []
    # list with tasks that need to be computed
    self.thread_pool = []
    self.mutex = Lock()
    # condition used for put and get
    self.condition = Condition(self.mutex)
    # condition needed for checking if there are
    # still tasks that need to be solved
    self.all_tasks_done = Condition(self.mutex)
    # number of unfinished tasks
    self.unfinished_tasks = 0
    # start the 16 threads
    for i in range(16):
        th = Worker(self, i)
        self.thread_list.append(th)
        th.start()
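# The mutex/condition/unfinished_tasks bookkeeping above reimplements what
# queue.Queue already provides. A minimal equivalent sketch using only the
# standard library (the 16-thread count mirrors the code above; the None
# sentinel shutdown is an illustrative choice, not taken from the original):
import queue
import threading

tasks = queue.Queue()

def worker():
    while True:
        task = tasks.get()       # blocks until a task is available
        if task is None:         # sentinel: time to exit
            tasks.task_done()
            break
        task()
        tasks.task_done()        # decrements the unfinished-task count

threads = [threading.Thread(target=worker) for _ in range(16)]
for t in threads:
    t.start()
tasks.put(lambda: print('task done'))
tasks.join()                     # waits until the unfinished-task count is zero
for _ in threads:
    tasks.put(None)
for t in threads:
    t.join()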
def main():
    w = Worker('1133079545973309867')
    songs = w.get_songs()
    print("\n".join([x.simple_info() for x in songs]))
    print('=================')
    print("\n".join([x.detail_info() for x in songs]))
def test_getzipfile(self):
    conf = self.get_conf()
    w = Worker(conf)
    p = w.get_zipfile("http://localhost:9000/download/test.zip",
                      conf['app']['internaltoken'], "/tmp/")
    assert p == "/tmp/test.zip"
    assert os.path.exists("/tmp/test.zip")
def run(self):
    # set thread ID (name-mangled attribute of threading.Thread)
    self.tid = self._Thread__ident
    try:
        Worker.run(self)
    finally:
        # invalidate thread ID
        self.tid = None
def work(nbetapes):
    # while there are still workers and we have not reached the step limit
    while len(Worker.workers) != 0 and Worker.time < nbetapes:
        # print(Worker.time)
        Worker.pop()
        G = net1.to_graph()
        # G.layout()
        G.draw("figures/RCetape{0}.png".format(Worker.time))
def console_download(url, savepath):
    """Download url to savepath."""
    from worker import Worker
    from .core import Mission, download, analyze

    mission = Mission(url=url)
    Worker.sync(analyze, mission, pass_instance=True)
    Worker.sync(download, mission, savepath, pass_instance=True)
def worker(loop):
    worker = Worker(loop)
    task = loop.create_task(worker.work_loop())
    yield worker
    # teardown: close the connection and cancel the work loop
    worker.redis.close()
    task.cancel()
def stop(self):
    if self._running:
        Worker.stop(self)
        self._save()
        if self._lastfm:
            self._lastfm.stop()
        if self._echonest:
            self._echonest.stop()
        self._running = False
def init_workers():
    global mail
    global workers
    with app.app_context():
        mail.init_app(app)
        worker = Worker(app, mail)
        # start the thread
        worker.start()
        workers.append(worker)
def run(self):
    self.apprentice.start()
    for item in self.job_pub_sub.listen():
        job_id = item['data']
        if job_id == 'KILL':
            self.apprentice.halt()
            self.job_pub_sub.unsubscribe()
            logging.info('MachineMidwife: Stopping.')
            return
        # queue_full was referenced below but its assignment had been
        # commented out, which raised a NameError; restore it.
        queue_full = self.choke_full()
        # logging.debug('MachineMidwife: Redis signals for job: %s' % job_id)
        if not self.client.exists(job_id):
            logging.warning('MachineMidwife: Redis signaled for non-existing job: %s' % job_id)
            continue
        job = pickle.loads(self.client.get(job_id))
        logging.debug('MachineMidwife: Job %s has state %s' % (job_id, job.state))
        if job.state != 'received' and job.state != 'delayed':
            continue
        # Recycle an idle worker from the same batch if one exists.
        recycled = False
        for worker_id in [worker_key for worker_key in self.client.keys() if worker_key.startswith('jm-')]:
            # Redis keys(pattern='*') does not filter at all.
            if not self.client.exists(worker_id):
                continue
            existing_worker = pickle.loads(self.client.get(worker_id))
            if existing_worker.batch_id == job.batch_id and existing_worker.job_id is None:
                logging.info('MachineMidwife: Recycling worker %s for job %s of batch %s.' % (worker_id, job_id, job.batch_id))
                job.state = 'booted'
                existing_worker.job_id = job_id
                self.client.set(worker_id, pickle.dumps(existing_worker))
                self.client.set(job_id, pickle.dumps(job))
                self.client.publish('jobs', job_id)
                recycled = True
                break
        if recycled:
            continue
        # Otherwise start a new machine, unless the queue is full.
        if not queue_full:
            worker_id, reservation = aws.start_machine(job.ami, job.instance_type)
            if worker_id is not None:
                logging.info('MachineMidwife: Started new worker for job %s of batch %s.' % (job_id, job.batch_id))
                job.state = 'requested'
                worker = Worker(job_id, job.batch_id)
                worker.request_time = datetime.now()
                worker.reservation = reservation
                self.client.set(worker_id, pickle.dumps(worker))
                self.client.set(job_id, pickle.dumps(job))
                self.client.publish('jobs', job_id)
            else:
                logging.warning('MachineMidwife: AWS failed to start a new machine.')
                job.state = 'delayed'
                self.client.set(job_id, pickle.dumps(job))
        else:
            job.state = 'delayed'
            self.client.set(job_id, pickle.dumps(job))
def main():
    parser = argparse.ArgumentParser(description='A quick setup tool for Flask and React projects')
    parser.add_argument('name', help='The name of the project')
    parser.add_argument('-p', '--path', dest='path', help='The path of the project', default='.')
    args = parser.parse_args()
    config = Config(name=args.name, path=args.path)
    w = Worker(config=config)
    w.work()
def __init__(self, *args, **kwargs):
    Worker.__init__(self, *args, **kwargs)
    threading.Thread.__init__(self)
    self.tid = None
    self.killed = False
    # NOTE: Make it a daemon thread because it is not possible to kill
    # a thread externally in Python. This way, terminating the root
    # process automatically terminates child threads as well.
    self.daemon = True
def test_do_sphinx(self):
    conf = self.get_conf()
    w = Worker(conf)
    p = w.get_zipfile("http://localhost:9000/download/test.zip",
                      conf['app']['internaltoken'], "/tmp/")
    d = w.extract(p, "/tmp/")
    req = {}
    print(w.do_sphinx(d, req, conf))
def execute(self, call):
    # print(call)
    worker = Worker(call)
    os.chmod(worker.sandbox + 'task.sh', 0o755)
    worker.process = subprocess.Popen(
        "./task.sh",
        cwd=worker.sandbox,
        env=self.my_env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True)
    print('\nExecuting cmd (%s) in a %s sandbox: %s' % (
        worker.call.body['cmd'], '+'.join(worker.env_names), worker.sandbox))
    self.workers.append(worker)
def __init__(self):
    Widget.__init__(self)
    Worker.__init__(self)
    self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
    self._keyboard.bind(on_key_down=self.__on_key_down)
    # OutputExecutor
    Clock.schedule_interval(self.__update_frame, 1.0 / 60.0)
    self._mission_out = []
def __init__(self, objcolor, objradius, scene, script, interval, title):
    self.title = title
    self.wltype = script
    if script == "norm":
        self.loadGen = WSWorkloadGenerator(scene, interval)
    else:
        self.loadGen = ScriptWorkloadGenerator(scene, script, interval)
    Worker.__init__(self, objcolor, objradius, scene, self.loadGen, title)
def test_extract(self):
    conf = self.get_conf()
    w = Worker(conf)
    p = w.get_zipfile("http://localhost:9000/download/test.zip",
                      conf['app']['internaltoken'], "/tmp/")
    d = w.extract(p, "/tmp/")
    assert d.startswith("/tmp/")
    assert os.path.isdir(d)
    shutil.rmtree(d)
def handleCommand(cmd, workers):
    cmd = cmd.strip().lower()
    if cmd == '-a':
        w = Worker()
        workers.append(w)
        w.start()
    elif cmd == '-q':
        exit(0)
    else:
        workerID = int(cmd.split()[1])
        w = workers.pop(workerID)
        w.stop()
def __init__(self, parent, files: list, dest_album: AlbumData):
    super().__init__(parent)
    self.setWindowFlag(Qt.WindowCloseButtonHint, False)
    self.setWindowFlag(Qt.WindowContextHelpButtonHint, False)
    self.setWindowTitle('Album Content Importer')
    self.setWindowIcon(QIcon(QPixmap(QImageReader('assets/importAlbumContent.png').read())))

    # Layout setup
    layout = QVBoxLayout()
    layout.addWidget(QLabel('Importing new files...'))
    self.progress = QProgressBar()
    self.progress.setValue(0)
    self.progress.setFormat('Waiting (%p%)')
    layout.addWidget(self.progress)
    self.current_file = QLabel('Waiting...')
    layout.addWidget(self.current_file)
    layout.addStretch()

    # Init thread
    self.progress_signal.connect(self.update_progress)
    self.thread_pool = QThreadPool()
    self.thread_worker = Worker(self.run)
    self.thread_worker.signals.progress.connect(self.progress_signal)
    self.thread_worker.signals.finished.connect(self.update_after_completion)
    self.files = files
    self.dest_album = dest_album
    self.setLayout(layout)
    self.setFixedSize(300, 80)
    self.thread_pool.start(self.thread_worker)
    self.exec()
class MyLog:
    """Log object."""

    def __init__(self, worker, task):
        self.worker = worker
        self.task = task
        self.worker_confirm = Worker()
        self.task_confirm = Task(self.task)
        self._confirm = False

    def confirm(self):
        """Check whether we can provide such a worker and task.

        :return: True if both the worker and the task are available
        """
        if self.worker in self.worker_confirm.worker_available():
            if self.task in self.task_confirm.task_acceptable():
                self._confirm = True
        return self._confirm

    def report(self):
        """Ask the user to pay.

        :return: a payment message, or an error message
        """
        if self.confirm():
            if self.task in self.task_confirm.hour_tasks:
                self.task_confirm.price = Hourly(self.task).price
            else:
                self.task_confirm.price = Fixed(self.task).price
            return f"Please pay " \
                   f"{self.task_confirm.get_rate(self.task_confirm.price)}" \
                   f" to {self.worker} for {self.task}!"
        return "Worker or task does not exist"
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    global_model = Model(num_inputs, num_actions)
    global_model.share_memory()
    global_optimizer = SharedAdam(global_model.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()
    writer = SummaryWriter('logs')

    workers = [
        Worker(global_model, global_optimizer, global_ep, global_ep_r, res_queue, i)
        for i in range(mp.cpu_count())
    ]
    [w.start() for w in workers]

    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
            writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in workers]
def main():
    client = Client(PORT_START, PORT_END)
    if len(sys.argv) > 1:
        client.addr = sys.argv[1]
    if not client.connect():
        # report the address actually used; the original referenced a local
        # variable that was unbound when no argument was given
        sys.stderr.write('failed to connect to {}\n'.format(client.addr))
    tasks = TaskQueue()
    wrk = Worker(tasks, client)
    gui = GUI(tasks)
    wrk.start()
    gui.window.mainloop()
def test_priority(self):
    from worker import Worker
    access = []
    thread = Worker().start()

    def register(i, priority):
        @thread.listen("MY_EVENT", priority=priority)
        def _(event):
            access.append(i)

    for i, p in enumerate([1, 3, 3, 1, 2]):
        register(i, p)
    thread.fire("MY_EVENT").stop().join()
    # listeners fire in descending priority; ties keep registration order
    self.assertEqual(access, [1, 2, 4, 0, 3])
def test_get_credit_balance(self):
    worker_mock = Worker("", "")
    worker_mock.execute = MagicMock()
    service_mock = TwizoService()
    service_mock.parse = MagicMock()
    self.sut = BalanceController(worker_mock, service_mock)

    self.sut.get_credit_balance()

    worker_mock.execute.assert_called_once_with(
        url="wallet/getbalance",
        request_type=RequestType.GET,
        expected_status=200)
    service_mock.parse.assert_called_once()
def start(cls):
    cls.init()
    # start the task-distribution thread
    Distributor(FETCH_MID_FROM, FETCH_MID_TO + 1).start()
    # start the crawler threads
    for i in range(0, THREADS_NUM):
        Worker(f'Worker-{i}').start()
def action(self):
    factory = self.__outer.unit()
    garrison = factory.structure_garrison()
    if garrison:
        direction = random.choice(list(bc.Direction))
        if self.__outer._gc.can_unload(factory.id, direction):
            self.__outer._gc.unload(factory.id, direction)
            location = factory.location.map_location().add(direction)
            unit = self.__outer._gc.sense_unit_at_location(location)
            if unit:
                # TODO: Add other unit types' tree containers
                strategy.Strategy.getInstance().removeInProduction(unit.unit_type)
                strategy.Strategy.getInstance().addUnit(unit.unit_type)
                if unit.unit_type == bc.UnitType.Worker:
                    self.__outer._my_units.append(Worker(
                        unit.id, self.__outer._gc, self.__outer._maps,
                        self.__outer._my_units))
                elif unit.unit_type == bc.UnitType.Knight:
                    self.__outer._my_units.append(Knight(unit.id, self.__outer._gc))
                elif unit.unit_type == bc.UnitType.Healer:
                    self.__outer._my_units.append(Healer(
                        unit.id, self.__outer._gc, self.__outer._maps))
                elif unit.unit_type == bc.UnitType.Ranger:
                    self.__outer._my_units.append(Ranger(unit.id, self.__outer._gc))
                elif unit.unit_type == bc.UnitType.Mage:
                    self.__outer._my_units.append(Mage(
                        unit.id, self.__outer._gc, self.__outer._maps))
    self._status = bt.Status.SUCCESS
async def __sync__execute(self):
    print('Task spawner called (sync).')
    workers = []
    for i in range(self.count_workers):
        workers.append(Worker())
    # gather over the workers we just created (the original iterated over
    # self.workers, which this method never populates)
    return asyncio.gather(*[w.run() for w in workers])
def main(args):
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.basicConfig(filename=None if args.verbose else "reylgan.log", level=log_level)

    workers = []
    if args.worker:
        workers.extend([[Worker(), Worker] for _ in range(0, int(args.worker))])
        logging.info("add %s crawlers." % args.worker)
    if args.analyzer:
        workers.extend([[Analyzer(), Analyzer] for _ in range(0, int(args.analyzer))])
        logging.info("add %s analyzers." % args.analyzer)
    if args.frontend:
        # TODO: start frontend worker
        raise NotImplementedError

    [w[0].start() for w in workers]
    while True:
        # restart any worker that has died, using its class as a factory
        for i, worker in enumerate(workers):
            if not worker[0].is_alive():
                worker[0] = worker[1]()
                logging.info("Starting a new worker %s" % worker[0].name)
                worker[0].start()
        time.sleep(5)
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    env.close()

    global_model = Model(num_inputs, num_actions)
    global_average_model = Model(num_inputs, num_actions)
    global_model.share_memory()
    global_average_model.share_memory()
    global_optimizer = SharedAdam(global_model.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    n = mp.cpu_count()
    workers = [
        Worker(global_model, global_average_model, global_optimizer,
               global_ep, global_ep_r, res_queue, i)
        for i in range(n)
    ]
    [w.start() for w in workers]

    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
        else:
            break
    [w.join() for w in workers]
def __init__(self, mat_path, tol, max_iteration):
    """Iterative linear-system solver worker."""
    Worker.__init__(self)
    self._hist_list = []
    if mat_path == "":
        # Need to generate the matrix ourselves.
        print("calling self._matrix_generation")
        # self._mat = self._matrix_generation()
    else:
        self._mat = io.mmread(mat_path)
        print("Done reading matrix {}, Row:{}, Col:{}".format(
            mat_path, self._mat.shape[0], self._mat.shape[1]))
    self._tol = tol
    self._max_iteration = max_iteration
def new_worker():
    w = Worker().start()
    workers.add(w)

    @w.listen("MY_EVENT")
    def _(event):
        access.add(w)

    ch.sub(w)
def test_run_timeout(self):
    from job_dictator import JobDictator
    from job import Job
    from worker import Worker

    dictator = JobDictator()
    dictator.client = mock.MagicMock()
    dictator.client.keys.return_value = ['job-', 'jm-']
    job = Job('running', 'something')
    job.run_started_on = datetime.now() - timedelta(minutes=10)
    worker = Worker('job-', None)
    dictator.client.get.side_effect = [pickle.dumps(job), pickle.dumps(worker)]
    self.request_mock.get = mock.MagicMock()
    dictator.settings = mock.MagicMock()
    dictator.settings.job_timeout = 1
    dictator.headers = mock.MagicMock()
    returner = mock.MagicMock()
    returner.content = 'status:ok'
    self.request_mock.get.return_value = returner
    dictator.pull = mock.MagicMock()

    dictator.aladeen()

    assert dictator.client.keys.call_count == 2
    assert dictator.client.get.call_count == 2
    assert dictator.client.set.call_count == 1
    assert dictator.client.publish.call_count == 1
    assert dictator.pull.call_count == 0
    assert pickle.loads(dictator.client.set.call_args_list[0][0][1]).state == 'broken'
async def do_connect(peers, torrent):
    peer_queue = asyncio.Queue()
    pieces_queue = asyncio.Queue()
    downloaded_queue = asyncio.Queue()
    [peer_queue.put_nowait(peer) for peer in peers]
    [
        pieces_queue.put_nowait((index, piece, torrent.get_piece_length(index)))
        for index, piece in enumerate(torrent.pieces)
    ]
    handlers = [
        Worker(f"thread {x}", torrent, ID, peer_queue, pieces_queue, downloaded_queue)
        for x in range(30)
    ]
    [asyncio.create_task(worker.run()) for worker in handlers]
    # await asyncio.gather(*[worker.run() for worker in handlers])
    print("handlers finished")
    await pieces_queue.join()

    downloaded_pieces = []
    for x in range(downloaded_queue.qsize()):
        downloaded_pieces.append(await downloaded_queue.get())
    downloaded_pieces.sort(key=sort_index)
    with open(torrent.filename, "wb+") as f:
        for (piece_index, piece) in downloaded_pieces:
            f.write(piece)
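# `await pieces_queue.join()` above only returns once every queued piece has
# been marked done by a worker via task_done(). A minimal, self-contained
# sketch of that queue/join/cancel pattern (the drain() body is a placeholder,
# not the torrent logic):
import asyncio

async def drain(name, work_queue):
    while True:
        item = await work_queue.get()
        # ... process the item here ...
        work_queue.task_done()   # join() unblocks once every item is marked done

async def main():
    work_queue = asyncio.Queue()
    for i in range(10):
        work_queue.put_nowait(i)
    tasks = [asyncio.create_task(drain(f"w{x}", work_queue)) for x in range(3)]
    await work_queue.join()      # wait until all items are processed
    for t in tasks:
        t.cancel()               # workers loop forever, so cancel them explicitly

asyncio.run(main())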
def setUp(self):
    self.worker_mock = Worker("", "")
    self.worker_mock.execute = MagicMock()
    self.service_mock = TwizoService()
    self.service_mock.parse = MagicMock()
    self.sut = NumberLookupController(self.worker_mock, self.service_mock)
def run_topics_analyser(self) -> None:
    if len(self.ui.output_file_name_txt.text().strip()) == 0:
        self._show_message(['Please enter the output file name.'],
                           icon=QMessageBox.Warning)
        return

    get_wordlist = lambda text: [word.strip() for word in text.split(',')] if len(text) > 0 else []
    self.addl_stopwords = get_wordlist(self.ui.addl_stopwords_txt.text())
    self.groupby_cols = self._get_groupby_cols()
    self.data = self.data_reader.get_dataframe(self.ui.text_col_name_txt.text(), self.groupby_cols)
    self.output_filename = self.ui.output_file_name_txt.text()
    self.num_ngrams = self.ui.num_ngrams_spb.value()
    self.num_topics = self.ui.num_topics_spb.value()
    # use the input file name as the Optuna study name
    self.studyname = re.sub(r'[.]\w+', '', ntpath.basename(self.ui.data_file_txt.text()))

    # log the analysis
    self.logger.info(f'Start Topics Analysis:\n{self._get_analysis_inputs_summary()}')

    # create a worker thread for the TopicsAnalyser
    worker = Worker(self.execute_analysis)
    # connect the signals to the slots (callback functions)
    worker.signals.progress.connect(self.on_analysis_progress)
    worker.signals.result.connect(self.on_analysis_success)
    worker.signals.error.connect(self.on_thread_error)
    # Execute the worker thread
    self.threadpool.start(worker)

    # show a progress dialog while the TopicsAnalyser is running
    self.analysis_progress = ProgressDialog('Analysis is running, please wait...', self).progress
    self.analysis_progress.setValue(1)
    self.analysis_progress.show()
def test_run_result(self):
    from job_dictator import JobDictator
    from job import Job
    from worker import Worker

    dictator = JobDictator()
    dictator.client = mock.MagicMock()
    dictator.client.keys.return_value = ['job-', 'jm-']
    job = Job('run_succeeded', 'something')
    worker = Worker('job-', None)
    dictator.client.get.side_effect = [pickle.dumps(job), pickle.dumps(worker)]
    self.request_mock.get = mock.MagicMock()
    dictator.settings = mock.MagicMock()
    dictator.headers = mock.MagicMock()
    returner = mock.MagicMock()
    returner.content = 'status:ok'
    self.request_mock.get.return_value = returner
    dictator.pull = mock.MagicMock()

    dictator.aladeen()

    assert dictator.client.keys.call_count == 2
    assert dictator.client.get.call_count == 2
    assert dictator.client.set.call_count == 1
    assert dictator.client.publish.call_count == 1
    assert dictator.pull.call_count == 1
def start_opencv(self):
    """
    Slot function for the start button signal. Instantiates the Qt opencv
    window if not created, then starts and shows the window. Once the
    window is opened, starts a worker thread to send commands to Google Earth.
    """
    # If the opencv window is not initialized,
    if not self.capture:
        # Instantiate QtCapture class, set parent and window flags
        self.capture = QtCapture(self.google_earth)
        self.capture.setParent(self)
        self.capture.setWindowFlags(QtCore.Qt.Tool)
        self.capture.setWindowTitle("OpenCV Recording Window")
        self.capture.setGeometry(
            int(self.window_resize[0] + self.new_position[0]),
            int(self.window_resize[1] + self.title_bar_offset),
            -1, -1)

    # Start video capture and show it
    self.capture.start()
    self.capture.show()

    # Set stop command flag, create worker attached to send_output
    # function, start worker as new thread
    self.stop_commands = False
    worker_one = Worker(self.send_output)
    self.threadpool.start(worker_one)
def get_test_job(job_type) -> Box:
    os.environ['FORCE_FIRESTORE_DB'] = '1'
    instance_id = '9999999999999999999'
    inst_db = get_worker_instances_db()
    Worker.make_instance_available(instances_db=inst_db, instance_id=instance_id)
    os.environ['INSTANCE_ID'] = instance_id
    job_id = 'TEST_JOB_' + utils.generate_rand_alphanumeric(32)
    test_job = Box({
        'botleague_liaison_host': 'https://liaison.botleague.io',
        'status': JOB_STATUS_ASSIGNED,
        'id': job_id,
        'instance_id': instance_id,
        'job_type': job_type,
    })
    return test_job
def test_build_model(self):
    input_data = np.array(get_data_arrays(), dtype=float)
    labels = np.array(get_labels(), dtype=float)
    expected_test_X, expected_test_y = input_data[3:, :], labels[3:]
    worker = Worker()

    model, actual_test, scaler = worker.build_save_model_LSTM(input_data, labels, False)

    npt.assert_allclose(actual_test[0], expected_test_X)
    npt.assert_allclose(actual_test[1], expected_test_y)
    self.assertIsNotNone(model)

    test_result, rmse = worker.predict(actual_test, scaler)
    self.assertTupleEqual(test_result.shape, (2, 1))
    self.assertIs(type(test_result), np.ndarray)
    print('rmse = ', rmse)
def start(port=8000):
    os.environ['PORT'] = str(port)
    state_ = State(port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((state_.ip, port))
    s.listen()
    print('Listening on {}:{}'.format(state_.ip, port))
    # Start maintenance thread.
    Maintenance(Worker([None, None], state_, False)).start()
    while True:
        peer = s.accept()
        Worker(peer, state_).start()
    s.close()  # unreachable after the infinite accept loop; kept from the original
def test_normal_machine_state_flow_for_received(self):
    self.aws_mock.start_machine = mock.MagicMock()
    self.aws_mock.start_machine.return_value = 'jm-', 'res0'
    from machine_midwife import MachineMidwife
    from job import Job
    from worker import Worker

    midwife = MachineMidwife()
    midwife.apprentice = mock.MagicMock()
    midwife.settings = mock.MagicMock()
    midwife.client = mock.MagicMock()
    midwife.job_pub_sub = mock.MagicMock()
    midwife.job_pub_sub.listen.return_value = [{'data': 'test'}]
    midwife.client.exists.return_value = True
    midwife.client.keys.return_value = ['job-', 'jm-']
    job = Job('received', 'something')
    worker = Worker('job-', None)
    midwife.client.get.side_effect = [pickle.dumps(job), pickle.dumps(worker)]
    midwife.client.set = mock.MagicMock()
    midwife.client.publish = mock.MagicMock()

    midwife.run()

    assert midwife.client.exists.call_count == 2
    assert len(midwife.client.set.call_args_list) == 2
    assert midwife.client.set.call_args_list[0][0][0] == 'jm-'
    assert midwife.client.set.call_args_list[1][0][0] == 'test'
    assert self.aws_mock.start_machine.call_count == 1
    assert pickle.loads(midwife.client.set.call_args_list[1][0][1]).state == 'requested'
def runExperiment(self):
    worker = Worker(self.experiment.run)
    worker.signals.progress.connect(self.determineExperimentAction)
    worker.signals.error.connect(self.handleException)
    worker.signals.finished.connect(self.stopExperiment)
    self.threadpool.start(worker)
    self.experimentStartTime = time.time()
def __manage(self):
    while True:
        if Config.REUSE_VMS:
            id, vm = self.jobQueue.getNextPendingJobReuse()
        else:
            id = self.jobQueue.getNextPendingJob()
        if id:
            job = self.jobQueue.get(id)
            try:
                # Mark the job assigned
                self.jobQueue.assignJob(job.id)
                # Try to find a vm on the free list and allocate it to
                # the worker if successful.
                if Config.REUSE_VMS:
                    preVM = vm
                else:
                    preVM = self.preallocator.allocVM(job.vm.name)
                # Now dispatch the job to a worker
                self.log.info("Dispatched job %s:%d to %s [try %d]" %
                              (job.name, job.id, preVM.name, job.retries))
                job.appendTrace("%s|Dispatched job %s:%d [try %d]" %
                                (datetime.utcnow().ctime(), job.name, job.id, job.retries))
                vmms = self.vmms[job.vm.vmms]  # Create new vmms object
                Worker(job, vmms, self.jobQueue, self.preallocator, preVM).start()
            except Exception as err:
                self.jobQueue.makeDead(job.id, str(err))
        # Sleep for a bit and then check again
        time.sleep(Config.DISPATCH_PERIOD)
def start(self):
    if self.mock:
        self.mock_start()
    else:
        worker = Worker(self.run, self.tcp_ip, self.tcp_port,
                        self.device_type, self.RUN_PARAMS)
        self.threadpool.start(worker)
def test_detached(self):
    """Child is detached from its parent when finished."""
    from worker import Worker
    a = Worker().start()
    b = Worker(parent=a).start()
    b.stop().join()
    time.sleep(1)
    self.assertNotIn(b, a.children)
    a.stop().join()
def main(args):
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
    global_steps_counter = itertools.count()  # thread-safe
    global_net = Net(Atari.s_dim, Atari.a_dim, 'global', args)
    num_workers = args.threads
    workers = []

    # create workers
    for i in range(num_workers):
        worker_summary_writer = summary_writer if i == 0 else None
        w = Worker(i, Atari(args), global_steps_counter,
                   worker_summary_writer, args)
        workers.append(w)

    saver = tf.train.Saver(max_to_keep=5)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...\n')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...\n')
            sess.run(tf.global_variables_initializer())
        print_params_nums()

        # Start the work process for each worker in a separate thread.
        # Bind w as a default argument so each thread runs its own worker
        # instead of the loop's final value.
        worker_threads = []
        for w in workers:
            run = lambda w=w: w.run(sess, coord, saver)
            t = threading.Thread(target=run)
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)

        if args.eval_every > 0:
            evaluator = Evaluate(global_net, summary_writer,
                                 global_steps_counter, args)
            evaluate_thread = threading.Thread(
                target=lambda: evaluator.run(sess, coord))
            evaluate_thread.start()

        coord.join(worker_threads)
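# The `lambda w=w:` default argument above matters: Python closures capture
# variables, not values, so without it every thread would run the loop's last
# worker. A minimal demonstration of the pitfall:
fns = [lambda: i for i in range(3)]
print([f() for f in fns])        # [2, 2, 2] -- all closures see the final i
fns = [lambda i=i: i for i in range(3)]
print([f() for f in fns])        # [0, 1, 2] -- default argument freezes each value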
class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.inputFileButton.clicked.connect(self.handle_input)
        self.ui.openButton.clicked.connect(self.handle_open)
        self.ui.startButton.clicked.connect(self.handle_start)

    def handle_input(self):
        current_path = QDir.currentPath()
        filter = QDir(current_path, "text files (*.txt)").filter()
        dlg = QFileDialog()
        dlg.setFileMode(QFileDialog.AnyFile)
        dlg.setFilter(filter)
        filename = dlg.getOpenFileName()
        f = open(filename[0], 'r', encoding='utf-8-sig')
        self.ui.inputFileLabel.setText(filename[0].split('/')[-1])
        with f:
            self.query_list = f.read().splitlines()

    def handle_open(self):
        path = os.getcwd() + "/result"
        webbrowser.open("file:///" + path)

    def handle_start(self):
        self.thread = QThread()
        self.worker = Worker(self.query_list, self.ui.delaySpinBox.text())
        self.worker.moveToThread(self.thread)
        self.thread.started.connect(self.worker.run)
        self.worker.finished.connect(self.thread.quit)
        self.worker.finished.connect(self.worker.deleteLater)
        self.thread.finished.connect(self.thread.deleteLater)
        self.worker.progress.connect(self.ui.progressBar.setValue)
        self.thread.start()
        self.ui.buttonFrame.setEnabled(False)
        self.ui.inputFrame.setEnabled(False)
        self.thread.finished.connect(self.handle_finished)

    def handle_finished(self):
        self.ui.buttonFrame.setEnabled(True)
        self.ui.inputFrame.setEnabled(True)
        msgBox = QMessageBox()
        msgBox.setIcon(QMessageBox.Information)
        msgBox.setText("Collection is complete.")
        msgBox.exec_()
def handle():
    encoded = request.data
    prefix = re.match(b"data:image/(.*);base64,", encoded).group(0)
    # base64.decodestring was removed in Python 3.9; decodebytes is the
    # equivalent call
    arr = np.frombuffer(base64.decodebytes(encoded[len(prefix):]), np.uint8)
    img = cv2.imdecode(arr, cv2.IMREAD_ANYCOLOR)
    # ok, here we have img with the correct shape; let's send it into the worker
    # todo: move Worker to global app context
    # todo: get path from config
    worker = Worker("./data/graph.pb")
    out = worker.process(img)
    # todo: encode result with base64 and send it back
    fname = "./static/tmp/{}.jpg".format(str(uuid4()))
    cv2.imwrite(fname, out)
    return jsonify(result=fname)