def main():
    sensorDict = {'imu': 'LSM6DS3TR-C'}
    readObj = ReadData(sensorDict)
    # outputDataSigma = multiprocessing.Array('f', [0] * len(sensorDict) * 24)
    outputDataSigma = None
    magBg = multiprocessing.Array('f', [0] * 6)
    outputData = multiprocessing.Array('f', [0] * len(sensorDict) * 24)
    state = multiprocessing.Array('f', [0, 0, 0, 1, 0, 0, 0])

    # Wait a second to let the port initialize
    # readObj.send()

    # receive data in a new process
    pRec = Process(target=readObj.receive, args=(outputData, magBg, outputDataSigma))
    pRec.daemon = True
    pRec.start()

    pTrack3D = multiprocessing.Process(target=track3D, args=(state,))
    pTrack3D.daemon = True
    pTrack3D.start()

    mp = MahonyPredictor(q=state[3:], Kp=100, Ki=0.01, dt=0.002)
    while True:
        # print("a={}, w={}".format(np.round(outputData[:3], 2), np.round(outputData[3:6], 2)))
        mp.getGyroOffset(outputData[3:6])
        mp.IMUupdate(outputData[:3], outputData[3:6])
        state[3:] = mp.q
        time.sleep(0.08)

def start(self):
    playingfile = self.get_playing_file()
    if not (playingfile["file"].startswith(uni_join(const.libpath, const.provider)) or
            playingfile["file"].startswith(uni_join(const.addonpath, "resources"))):
        return
    kodi.log("start onPlayBackStarted")
    self.koala_playing = True

    self.player = Player()

    self.remote = None
    if kodi.settings["remote"]:
        self.remote = remote.Remote()
        self.remote.run(player=self.player)

    self.player.connect()
    if "NRK nett-TV.htm" not in playingfile["file"]:
        self.player.get_player_coord()
    self.player.wait_player_start()
    self.player.toggle_fullscreen()

    if playingfile["type"] == "episode":
        thread = Thread(target=self.monitor_watched, args=[playingfile])
        thread.start()
    kodi.log("finished onPlayBackStarted")

def test_pause_unpause(self): """ Verify that the pause method actually works. In this case, working means that the process doesn't send any more HTTP requests, fact that is verified with the "fake" count plugin. """ core_start = Process(target=self.w3afcore.start, name='TestRunner') core_start.daemon = True core_start.start() # Let the core start, and the count plugin send some requests. time.sleep(5) count_before_pause = self.count_plugin.count self.assertGreater(self.count_plugin.count, 0) # Pause and measure self.w3afcore.pause(True) count_after_pause = self.count_plugin.count time.sleep(2) count_after_sleep = self.count_plugin.count all_equal = count_before_pause == count_after_pause == count_after_sleep self.assertTrue(all_equal) # Unpause and verify that all requests were sent self.w3afcore.pause(False) core_start.join() self.assertEqual(self.count_plugin.count, self.count_plugin.loops)
def test_pause_unpause(self):
    output = Queue.Queue()
    self.uri_opener.pause(True)

    def send(uri_opener, output):
        url = URL(get_moth_http())
        try:
            http_response = uri_opener.GET(url)
            output.put(http_response)
        except:
            output.put(None)

    th = Process(target=send, args=(self.uri_opener, output))
    th.daemon = True
    th.start()

    self.assertRaises(Queue.Empty, output.get, True, 2)

    self.uri_opener.pause(False)
    http_response = output.get()
    self.assertNotIsInstance(http_response, types.NoneType,
                             'Error in send thread.')

    th.join()

    self.assertEqual(http_response.get_code(), 200)
    self.assertIn(self.MOTH_MESSAGE, http_response.body)

def test_pause_stop(self):
    '''
    Verify that the pause method actually works. In this case, working
    means that the process doesn't send any more HTTP requests after we
    pause, and that stop works when paused.
    '''
    core_start = Process(target=self.w3afcore.start, name='TestRunner')
    core_start.daemon = True
    core_start.start()

    # Let the core start, and the count plugin send some requests.
    time.sleep(5)

    count_before_pause = self.count_plugin.count
    self.assertGreater(self.count_plugin.count, 0)

    # Pause and measure
    self.w3afcore.pause(True)
    count_after_pause = self.count_plugin.count

    time.sleep(2)
    count_after_sleep = self.count_plugin.count

    all_equal = count_before_pause == count_after_pause == count_after_sleep
    self.assertTrue(all_equal)

    # Stop while paused and verify that no more requests were sent
    self.w3afcore.stop()
    core_start.join()

    # No more requests sent after pause
    self.assertEqual(self.count_plugin.count, count_after_sleep)

def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.active = True
    self.withdraw()
    self.main_window = Window.MainWindow(self)
    self.main_window.abtn.config(command=self.do_something)
    self.main_window.protocol("WM_DELETE_WINDOW", self.on_closing)
    self.main_window.btn_exit.config(command=self.on_closing)
    # note: this rebinds abtn, replacing the do_something command above
    self.main_window.abtn.config(command=self.connect_all)
    self.devices = []
    for name, adr in SRXAddresses:
        dev = SRXdev.SRXDevice(name, adr, username, password)
        sub_fr = Window.SubFrame(self.main_window, name, adr, ' N/A ')
        action_with_arg = partial(self.connect_device, dev, sub_fr)
        sub_fr.abtn.config(command=action_with_arg)
        action1_with_arg = partial(self.get_curr_gw, dev, sub_fr)
        sub_fr.cbtn.config(command=action1_with_arg)
        action2_with_arg = partial(self.reset_ospf, dev, sub_fr)
        sub_fr.dbtn.config(command=action2_with_arg)
        sub_fr.setBad()
        self.devices.append([dev, sub_fr])
    process = Process(target=self.check_status, args=())
    process.start()

def main(): """ Creates instances of the above methods and occassionally checks for crashed worker processes & relaunches. """ worker_process = list() get_update_process = Process(target=get_updates) get_update_process.start() for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])): worker_process.append(Process(target=process_updates)) worker_process[i].start() time_worker = ThreadProcess(target=check_time_args) time_worker.start() while RUNNING.value: time.sleep(30) for index, worker in enumerate(worker_process): if not worker.is_alive(): del worker_process[index] worker_process.append(Process(target=process_updates)) worker_process[-1].start() if not time_worker.is_alive(): time_worker = ThreadProcess(target=check_time_args) time_worker.start() if not get_update_process.is_alive(): get_update_process = Process(target=get_updates) get_update_process.start() get_update_process.join() time_worker.join() for worker in worker_process: worker.join()
def process_updates(): """ Decides which type the update is and routes it to the appropriate route_updates method and launches a thread for the run_extensions method. """ signal.signal(signal.SIGINT, signal.SIG_IGN) plugin_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) plugin_http.timeout = urllib3.Timeout(connect=1.0) plugin_http.retries = 3 update_router = RouteMessage(PLUGINS, plugin_http, GET_ME, CONFIG) while RUNNING.value: try: update = MESSAGE_QUEUE.get_nowait() except queue.Empty: time.sleep(SLEEP_TIME) continue extension_thread = ThreadProcess(target=run_extensions, args=(update, )) extension_thread.start() if 'message' in update: update_router.route_update(update['message']) elif 'edited_message' in update: update_router.route_update(update['edited_message']) elif 'callback_query' in update: route_callback_query(PLUGINS, GET_ME, CONFIG, plugin_http, update['callback_query']) elif 'inline_query' in update: route_inline_query(PLUGINS, GET_ME, CONFIG, plugin_http, update['inline_query']) extension_thread.join()
def new_process(cls, *args, **kwargs):
    process = Process(target=BaseCoroutine.start_coroutine,
                      args=(cls,) + args, kwargs=kwargs)
    process.daemon = True
    process.start()
    return process

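# For context, a minimal self-contained sketch of the factory pattern the
# snippet above relies on. The BaseCoroutine body here is hypothetical: it
# stands in for whatever the real class does and only illustrates how
# new_process hands the subclass to a daemonized worker.
from multiprocessing.dummy import Process  # thread-backed Process


class BaseCoroutine(object):

    @staticmethod
    def start_coroutine(cls, *args, **kwargs):
        # Hypothetical entry point: build the subclass and run it.
        cls(*args, **kwargs).run()

    @classmethod
    def new_process(cls, *args, **kwargs):
        process = Process(target=BaseCoroutine.start_coroutine,
                          args=(cls,) + args, kwargs=kwargs)
        process.daemon = True
        process.start()
        return process


class Printer(BaseCoroutine):

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)


worker = Printer.new_process('hello')  # already daemonized and started
worker.join()
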
def runReadData(printBool, maxIter=50):
    '''
    Run on real data to perform localization.
    :param printBool: [bool] whether to print the output
    :param maxIter: [int] maximum number of iterations
    :return:
    '''
    sensorDict = {'imu': 'LSM6DS3TR-C', 'magSensor': 'AK09970d'}
    readObj = ReadData(sensorDict)  # create the data-reading object
    outputData = multiprocessing.Array('f', [0] * len(sensorDict) * 24)
    magBg = multiprocessing.Array('f', [0] * 6)
    state0 = multiprocessing.Array('f', [0, 0, 0.01, 1, 0, 0, 0])

    readObj.send()
    pRec = Process(target=readObj.receive, args=(outputData, magBg, None))
    # pRec.daemon = True
    pRec.start()
    time.sleep(2)

    pTrack3D = multiprocessing.Process(target=track3D, args=(state0,))
    pTrack3D.daemon = True
    pTrack3D.start()

    while True:
        measureData = np.concatenate((outputData[:3], outputData[6:9]))
        LM(state0, measureData, 7, maxIter, printBool)
        time.sleep(0.1)

def start_websockify():
    print('start vnc proxy..')
    t = Process(target=worker, args=())
    t.start()
    print('vnc proxy started..')

def test_stop(self): """ Verify that the stop method actually works. In this case, working means that the process doesn't send any more HTTP requests after we stop(). This test seems to be failing @ CircleCI because of a test dependency issue. If run alone in your workstation it will PASS, but if run at CircleCI the count plugin doesn't seem to start. """ core_start = Process(target=self.w3afcore.start, name='TestRunner') core_start.daemon = True core_start.start() # Let the core start, and the count plugin send some requests. time.sleep(5) count_before_stop = self.count_plugin.count self.assertGreater(count_before_stop, 0) # Stop now, self.w3afcore.stop() core_start.join() count_after_stop = self.count_plugin.count self.assertEqual(count_after_stop, count_before_stop)
def process_updates(): """ Decides which type the update is and routes it to the appropriate route_updates method and launches a thread for the run_extensions method. """ signal.signal(signal.SIGINT, signal.SIG_IGN) plugin_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) plugin_http.timeout = urllib3.Timeout(connect=1.0) plugin_http.retries = 3 update_router = RouteMessage(PLUGINS, plugin_http, GET_ME, CONFIG) while RUNNING.value: try: update = MESSAGE_QUEUE.get_nowait() except queue.Empty: time.sleep(SLEEP_TIME) continue extension_thread = ThreadProcess(target=run_extensions, args=(update, )) extension_thread.start() if 'message' in update: update_router.route_update(update['message']) elif 'edited_message' in update: update_router.route_update(update['edited_message']) elif 'callback_query' in update: route_callback_query(PLUGINS, GET_ME, CONFIG, plugin_http, update['callback_query']) elif 'inline_query' in update: route_inline_query(PLUGINS, GET_ME, CONFIG, plugin_http, update['inline_query']) extension_thread.join()
def main(): """ Creates instances of the above methods and occassionally checks for crashed worker processes & relaunches. """ worker_process = list() get_update_process = Process(target=get_updates) get_update_process.start() for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])): worker_process.append(Process(target=process_updates)) worker_process[i].start() time_worker = ThreadProcess(target=check_time_args) time_worker.start() while RUNNING.value: time.sleep(30) for index, worker in enumerate(worker_process): if not worker.is_alive(): del worker_process[index] worker_process.append(Process(target=process_updates)) worker_process[-1].start() if not time_worker.is_alive(): time_worker = ThreadProcess(target=check_time_args) time_worker.start() if not get_update_process.is_alive(): get_update_process = Process(target=get_updates) get_update_process.start() get_update_process.join() time_worker.join() for worker in worker_process: worker.join()
class Ticker(object):

    def __init__(self, api, interval=1):
        self.api = api
        self.db = MongoClient().poloniex['ticker']
        self.interval = interval

    def updateTicker(self):
        tick = self.api.returnTicker()
        for market in tick:
            self.db.update_one({'_id': market},
                               {'$set': tick[market]},
                               upsert=True)
        logger.info('Ticker updated')

    def __call__(self):
        return list(self.db.find())

    def run(self):
        self._running = True
        while self._running:
            self.updateTicker()
            sleep(self.interval)

    def start(self):
        self._thread = Thread(target=self.run)
        self._thread.daemon = True
        self._thread.start()
        logger.info('Ticker started')

    def stop(self):
        self._running = False
        self._thread.join()
        logger.info('Ticker stopped')

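# A minimal usage sketch for the class above; it assumes the `poloniex`
# client library and a MongoDB server reachable on localhost, as the
# snippet itself does.
import poloniex

api = poloniex.Poloniex()
ticker = Ticker(api, interval=2)
ticker.start()      # background thread begins polling the API
markets = ticker()  # read the cached ticker documents back from MongoDB
ticker.stop()
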
def test_pause_unpause(self): """ Verify that the pause method actually works. In this case, working means that the process doesn't send any more HTTP requests, fact that is verified with the "fake" count plugin. """ core_start = Process(target=self.w3afcore.start, name='TestRunner') core_start.daemon = True core_start.start() # Let the core start, and the count plugin send some requests. time.sleep(5) count_before_pause = self.count_plugin.count self.assertGreater(self.count_plugin.count, 0) # Pause and measure self.w3afcore.pause(True) count_after_pause = self.count_plugin.count time.sleep(2) count_after_sleep = self.count_plugin.count all_equal = count_before_pause == count_after_pause == count_after_sleep self.assertTrue(all_equal) # Unpause and verify that all requests were sent self.w3afcore.pause(False) core_start.join() self.assertEqual(self.count_plugin.count, self.count_plugin.loops)
def run(self):
    jobs = []
    for i in range(int(ceil(self.count_vacancy / 25))):
        p = Process(target=self.get_graph_maker())
        jobs.append(p)
        p.start()
        # joining inside the loop makes the jobs run one at a time
        p.join()

def send_emails(modeladmin, request, queryset):
    messages = Queue()
    for user in queryset:
        process = Process(target=send_email, args=(user, messages))
        process.start()
        messages.get().send()
        process.join()

def state_estimation_input():
    user_id = str(uuid.uuid4())
    if 'username' in request.form:
        if len(request.form['username']) > 0:
            # make username a safe string
            keep_chars = set(['-', '_', ' '])
            username = request.form['username'].strip()[:25]
            username = ''.join([c for c in username
                                if c.isalnum() or (c in keep_chars)])
            user_id = user_id + '-' + username
    base_path = os.path.join(current_app.config['USER_DATA_DIR'], user_id)
    os.makedirs(base_path)
    # save request.form
    with open(os.path.join(base_path, 'inputs.json'), 'w') as f:
        f.write(json.dumps(request.form))
    # TODO: if file is large, start a new thread. otherwise just
    # run the thing
    request_file = request.files
    request_form = request.form
    data_paths, gene_paths, output_filenames, init, shapes = load_upload_data(
        request_file, request_form, base_path)
    # TODO: deal with init
    P = Process(target=state_estimation_preproc,
                args=(user_id, base_path, data_paths, gene_paths,
                      output_filenames, init, shapes))
    P.start()
    #state_estimation_preproc(user_id, path)
    return redirect(url_for('views.state_estimation_result', user_id=user_id))

def state_estimation_start(user_id): """ Actually start the process of state estimation. This saves a file called 'params.json' in /tmp/uncurl/<user_id> containing all parameters used in state estimation. """ path = os.path.join(current_app.config['USER_DATA_DIR'], user_id) gene_names_file = os.path.join(path, 'gene_names.txt') if not os.path.exists(gene_names_file): gene_names_file = None # TODO: deal with init here - make note if it's qualitative or # quantitative # run qualNorm??? init_path = os.path.join(path, 'init.txt') if not os.path.exists(init_path): init_path = None # load json params with open(os.path.join(path, 'preprocess.json')) as f: preprocess = json.load(f) for key in request.form.keys(): preprocess[key] = request.form[key] # params.json contains all input parameters to the state estimation, as well as all stats from preprocess.json. with open(os.path.join(path, 'params.json'), 'w') as f: json.dump(preprocess, f) P = Process(target=state_estimation_thread, args=(user_id, gene_names_file, init_path, path, preprocess, current_app.config.copy())) P.start() return redirect(url_for('views.state_estimation_result', user_id=user_id))
def haunt(tMin, tMax):
    configure()
    global hauntMode
    global duration
    running = 1
    timer_start = time.time()
    while running:
        schedule = random.randint(tMin, tMax)
        if duration > 0:
            if time.time() - timer_start + schedule >= duration:
                schedule = duration - (time.time() - timer_start)
                running = 0
        time.sleep(schedule)
        if hauntMode == 0:
            randomNoise()
        if hauntMode == 1:
            coinflip = random.randint(1, 2)
            if coinflip == 1:
                randomNoise()
            if coinflip == 2:
                relayTrigger()
        if hauntMode == 2:
            doSound = Process(target=randomNoise)
            doRelay = Process(target=relayTrigger)
            doSound.start()
            doRelay.start()
            doSound.join()
            doRelay.join()
        if duration > 0:
            if time.time() - timer_start >= duration:
                running = 0

def test_pause_stop(self): """ Verify that the pause method actually works. In this case, working means that the process doesn't send any more HTTP requests after we, pause and that stop works when paused. """ core_start = Process(target=self.w3afcore.start, name="TestRunner") core_start.daemon = True core_start.start() # Let the core start, and the count plugin send some requests. time.sleep(5) count_before_pause = self.count_plugin.count self.assertGreater(self.count_plugin.count, 0) # Pause and measure self.w3afcore.pause(True) count_after_pause = self.count_plugin.count time.sleep(2) count_after_sleep = self.count_plugin.count all_equal = count_before_pause == count_after_pause == count_after_sleep self.assertTrue(all_equal) # Unpause and verify that all requests were sent self.w3afcore.stop() core_start.join() # No more requests sent after pause self.assertEqual(self.count_plugin.count, count_after_sleep)
def test_stop(self): """ Verify that the stop method actually works. In this case, working means that the process doesn't send any more HTTP requests after we stop(). This test seems to be failing @ CircleCI because of a test dependency issue. If run alone in your workstation it will PASS, but if run at CircleCI the count plugin doesn't seem to start. """ core_start = Process(target=self.w3afcore.start, name='TestRunner') core_start.daemon = True core_start.start() # Let the core start, and the count plugin send some requests. time.sleep(5) count_before_stop = self.count_plugin.count self.assertGreater(count_before_stop, 0) # Stop now, self.w3afcore.stop() core_start.join() count_after_stop = self.count_plugin.count self.assertEqual(count_after_stop, count_before_stop)
def report(pk, num):
    # hacky way to enforce db commit from outside the atomic transaction
    # spawning a separate celery task won't work as celery will queue
    # spawns after the commit of the underlying transaction.
    from multiprocessing.dummy import Process
    p = Process(target=update_status, args=(pk, num))
    p.start()
    p.join()

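# A hedged sketch of what the helper above could look like; the Job model,
# its `progress` field, and the app module are hypothetical. The point is
# that multiprocessing.dummy.Process is a thread, and Django gives each
# thread its own database connection, so this update commits independently
# of the caller's transaction.atomic() block.
from django.db import connection
from myapp.models import Job  # hypothetical model


def update_status(pk, num):
    Job.objects.filter(pk=pk).update(progress=num)  # autocommits here
    connection.close()  # release this thread's connection when done
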
class Worker(object):

    def __init__(self, address, name):
        self.__address = address
        self.__socket = socket.socket()
        self.__name = name
        self.__working = False
        self.__process = None
        self.__result = ''

    @property
    def working(self):
        return self.__working

    @property
    def result(self):
        return self.__result

    def __connect__(self):
        self.__socket.connect(self.__address)

    def __send__(self, message):
        message = message.encode()
        self.__socket.send(message)

    def join(self):
        self.__process.join()

    def __recv__(self):
        text = b''
        while True:
            data = self.__socket.recv(8192)
            if not data:
                break
            text += data
            if len(data) < 8192:
                break
        text = text.decode()
        return text

    def __work__(self, message):
        t1 = time.time()
        self.__socket = socket.socket()
        self.__connect__()
        self.__send__(message)
        self.__result = self.__recv__()
        t2 = time.time()
        self.__socket.close()
        #logger.debug(self.__result)
        logger.debug(self.__name + ' end work at ' + str(t2 - t1))
        self.__working = False

    def start(self, message):
        self.__result = ''
        self.__working = True
        self.__process = Process(target=self.__work__,
                                 args=(message,),
                                 name=self.__name + '_thread')
        self.__process.start()

def manager(self):
    try:
        putter_process = Process(target=self.put_queue)
        getter_process = Process(target=self.get_queue)
        putter_process.start()
        getter_process.start()
        putter_process.join()
    except Exception as e:
        raise Exception(e.args[0])

def observe(self):
    army_entri_p = Process(target=self.enemy_entrance, args=(0, [200, 200]))
    army_entri_p.start()
    i = 1
    while True:
        self.draw_background()
        #pygame.time.delay(40)
        time.sleep(0.1)
        #playerstatus = self.player
        key_pressed = pygame.key.get_pressed()
        pygame.key.get_repeat()
        if key_pressed[pygame.K_UP]:
            self.player.moveUp()
        if key_pressed[pygame.K_DOWN]:
            self.player.moveDown()
        if key_pressed[pygame.K_LEFT]:
            self.player.moveLeft()
        if key_pressed[pygame.K_RIGHT]:
            self.player.moveRight()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    self.player.changeBody(Player.Body.Left)
                elif event.key == pygame.K_RIGHT:
                    self.player.changeBody(Player.Body.Right)
                elif event.key == pygame.K_z:
                    self.player.is_attack = True
                elif event.key == pygame.K_LSHIFT:
                    self.player.speed = param.slow_speed
                    Process(target=self.open_field, args=()).start()
                    print('A.T. Field fully open!')
                    #self.open_field()
            elif event.type == pygame.KEYUP:
                if (event.key == pygame.K_LEFT and not key_pressed[pygame.K_RIGHT])\
                        or (event.key == pygame.K_RIGHT and not key_pressed[pygame.K_LEFT]):
                    self.player.changeBody(Player.Body.Center)
                elif event.key == pygame.K_z:
                    self.player.is_attack = False
                elif event.key == pygame.K_LSHIFT:
                    #self.close_field()
                    self.player.speed = param.speed
                    Process(target=self.close_field, args=()).start()
                    print('A.T. Field closed!')
        self.attack_check()
        # self.safe_check()
        self.show_player()
        self.show_enemys()
        # print('onDraw()')
        pygame.display.update()

def process_request(self, request, client_address):
    """
    Start a new thread to process the request. Override here
    """
    t = Process(target=self.process_request_thread,
                args=(request, client_address))
    t.daemon = self.daemon_threads
    t.start()

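# For illustration, a minimal server wiring for the override above: a
# standard ThreadingMixIn TCP server whose process_request spawns the
# thread-backed multiprocessing.dummy.Process instead of calling
# threading.Thread directly. The echo handler and port are made up for
# the example.
import socketserver
from multiprocessing.dummy import Process


class DummyProcessTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    daemon_threads = True

    def process_request(self, request, client_address):
        t = Process(target=self.process_request_thread,
                    args=(request, client_address))
        t.daemon = self.daemon_threads
        t.start()


class EchoHandler(socketserver.StreamRequestHandler):
    def handle(self):
        # echo a single line back to the client
        self.wfile.write(self.rfile.readline())


if __name__ == '__main__':
    with DummyProcessTCPServer(('127.0.0.1', 9999), EchoHandler) as server:
        server.serve_forever()
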
class Ticker(object):

    def __init__(self):
        self.ticker = poloniex.Poloniex().returnTicker()
        self._appRunner = ApplicationRunner(u"wss://api.poloniex.com:443",
                                            u"realm1")
        self._appProcess, self._tickThread = None, None
        self._running = False

    def __call__(self):
        return self.ticker

    def tickCatcher(self):
        print("Catching...")
        while self._running:
            try:
                tick = queue.get(timeout=1)
            except:
                continue
            else:
                self.ticker[tick[0]] = {
                    'last': tick[1],
                    'lowestAsk': tick[2],
                    'highestBid': tick[3],
                    'percentChange': tick[4],
                    'baseVolume': tick[5],
                    'quoteVolume': tick[6],
                    'isFrozen': tick[7],
                    'high24hr': tick[8],
                    'low24hr': tick[9],
                    'id': self.ticker[tick[0]]['id']
                }
        print("Done catching...")

    def start(self):
        """ Start the ticker """
        print("Starting ticker")
        self._appProcess = Process(target=self._appRunner.run,
                                   args=(TickPitcher,))
        self._appProcess.daemon = True
        self._appProcess.start()
        self._running = True
        print('TICKER: tickPitcher process started')
        self._tickThread = Thread(target=self.tickCatcher)
        self._tickThread.daemon = True
        self._tickThread.start()
        print('TICKER: tickCatcher thread started')

    def stop(self):
        """ Stop the ticker """
        print("Stopping ticker")
        self._appProcess.terminate()
        print("Joining Process")
        self._appProcess.join()
        print("Joining thread")
        self._running = False
        self._tickThread.join()
        print("Ticker stopped.")

def new_send_email(email):
    if not IS_ACTIVE:
        new_send_email.queue = Manager().Queue()
        process = Process(target=process_sent_queue,
                          args=(new_send_email.queue,))
        process.daemon = True
        process.start()
        models.IS_ACTIVE = True
    new_send_email.queue.put(email)

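# The consumer side is not shown in the snippet; a minimal hypothetical
# process_sent_queue that matches its usage would simply drain the shared
# queue forever and dispatch each email object.
def process_sent_queue(queue):
    while True:
        email = queue.get()  # blocks until new_send_email puts an item
        email.send()
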
def train(epoch):
    print('Epoch: %d' % epoch)

    def backward():
        time.sleep(2)
        batch_idx = 0
        train_loss = 0
        correct = 0
        total = 0
        global epoch_loss
        while True:
            optimizer.zero_grad()
            try:
                outputs, targets = output_queue.get(block=True,
                                                    timeout=args.wait)
            except Empty as e:
                print("done.....")
                epoch_loss = (train_loss / (batch_idx + 1))
                break
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(trainloader),
                         'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (train_loss / (batch_idx + 1),
                            100. * correct / total, correct, total))
            batch_idx += 1

    net.train()
    start_flag = True
    first_count = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(0), targets.to(1)
        outputs = net(inputs)
        if first_count < args.count:
            first_count += 1
            continue
        output_queue.put([outputs, targets])
        if start_flag and output_queue.qsize() > args.wait:  # 2
            start_flag = False
            back_process = Process(target=backward)
            back_process.start()
    back_process.join()

def multiprocessing():
    # Start a child process and wait for it to finish
    print('Parent process %s.' % os.getpid())
    # When creating a child process, just pass in the target function
    # and its arguments
    p = Process(target=run_child_process, args=('child process',))
    print('Process start')
    p.start()
    # join() waits for the child process to finish before continuing;
    # it is usually used for synchronization between processes
    p.join()
    print('Process end.')

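# The snippet assumes a run_child_process function defined elsewhere; a
# minimal stand-in that makes the demo runnable could look like this.
import os


def run_child_process(name):
    # report which process this code is running in
    print('Run child process %s (%s)...' % (name, os.getpid()))
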
def scoreDuplicates(record_pairs: RecordPairs,
                    data_model,
                    classifier,
                    num_cores: int = 1):
    if num_cores < 2:
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else:
        from .backport import Process, SimpleQueue, Queue  # type: ignore

    first, record_pairs = peek(record_pairs)
    if first is None:
        raise BlockingError("No records have been blocked together. "
                            "Is the data you are trying to match like "
                            "the data you trained on?")

    record_pairs_queue: _Queue = Queue(2)
    score_queue: _SimpleQueue = SimpleQueue()
    result_queue: _SimpleQueue = SimpleQueue()
    n_map_processes = max(num_cores, 1)
    score_records = ScoreDupes(data_model,
                               classifier,
                               record_pairs_queue,
                               score_queue)
    map_processes = [Process(target=score_records)
                     for _ in range(n_map_processes)]

    for process in map_processes:
        process.start()

    reduce_process = Process(target=mergeScores,
                             args=(score_queue, result_queue, n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, record_pairs, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception):
        raise ChildProcessError

    if result:
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    dtype=dtype,
                                    shape=(size,))
    else:
        dtype = numpy.dtype([('pairs', object, 2),
                             ('score', 'f4', 1)])
        scored_pairs = numpy.array([], dtype=dtype)

    reduce_process.join()

    for process in map_processes:
        process.join()

    return scored_pairs

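# The function above uses a fill/map/reduce queue topology. A stripped-down
# sketch of the same pattern (generic squaring workers and a summing
# reducer, not dedupe's actual internals):
from multiprocessing.dummy import Process, Queue


def map_worker(in_q, out_q):
    # drain sentinel-terminated input, emit squared values
    while True:
        item = in_q.get()
        if item is None:
            out_q.put(None)   # forward the sentinel to the reducer
            break
        out_q.put(item * item)


def reducer(out_q, result_q, n_workers):
    total, done = 0, 0
    while done < n_workers:
        item = out_q.get()
        if item is None:
            done += 1
        else:
            total += item
    result_q.put(total)


in_q, out_q, result_q = Queue(2), Queue(), Queue()
workers = [Process(target=map_worker, args=(in_q, out_q)) for _ in range(3)]
for w in workers:
    w.start()
merge = Process(target=reducer, args=(out_q, result_q, len(workers)))
merge.start()
for x in range(10):
    in_q.put(x)
for _ in workers:
    in_q.put(None)        # one sentinel per worker
print(result_q.get())     # 285 = sum of squares of 0..9
merge.join()
for w in workers:
    w.join()
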
def _draw_start(self):
    if not self._somethingnew:
        return True

    # let's draw!
    q = Queue.Queue()
    evt = Event()

    th = Process(target=self._draw_real, args=(q, evt), name='GTKDraw')
    th.start()

    gobject.timeout_add(500, self._draw_end, q, evt)
    return False

def qual2quant_input():
    if 'fileinput' not in request.files or 'qualinput' not in request.files:
        return error('Missing data input', 400)
    cell_file = request.files['fileinput']
    qual_file = request.files['qualinput']
    cell_data = np.loadtxt(cell_file)
    qual_data = np.loadtxt(qual_file)
    user_id = str(uuid.uuid4())
    P = Process(target=qual2quant_thread,
                args=(cell_data, qual_data, user_id))
    P.start()
    return redirect(url_for('qual2quant_result', user_id=user_id))

def run_both():
    run = 20
    for i in range(run):
        receiver = Process(target=run_receiver)
        sender = Process(target=run_sender)
        receiver.start()
        sender.start()
        receiver.join()
        sender.join()
        time.sleep(1)

def scoreDuplicates(records, data_model, classifier, num_cores=1, threshold=0):
    if num_cores < 2:
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else:
        from .backport import Process, SimpleQueue, Queue

    first, records = peek(records)
    if first is None:
        raise BlockingError("No records have been blocked together. "
                            "Is the data you are trying to match like "
                            "the data you trained on?")

    record_pairs_queue = Queue(2)
    score_queue = SimpleQueue()
    result_queue = SimpleQueue()

    n_map_processes = max(num_cores, 1)
    score_records = ScoreDupes(data_model, classifier, threshold)
    map_processes = [Process(target=score_records,
                             args=(record_pairs_queue, score_queue))
                     for _ in range(n_map_processes)]
    for process in map_processes:
        process.start()

    reduce_process = Process(target=mergeScores,
                             args=(score_queue, result_queue, n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, records, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception):
        raise ChildProcessError

    if result:
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    dtype=dtype,
                                    shape=(size,))
    else:
        dtype = numpy.dtype([('pairs', object, 2),
                             ('score', 'f4', 1)])
        scored_pairs = numpy.array([], dtype=dtype)

    reduce_process.join()
    for process in map_processes:
        process.join()

    return scored_pairs

def test_pause(self):
    output = Queue.Queue()
    self.uri_opener.pause(True)

    def send(uri_opener, output):
        url = URL('http://moth/')
        http_response = uri_opener.GET(url)
        output.put(http_response)

    th = Process(target=send, args=(self.uri_opener, output))
    th.daemon = True
    th.start()

    self.assertRaises(Queue.Empty, output.get, True, 2)

def get_stats():
    print 'Fetching NBA player stats...'

    stats_outfile = RUNDAY + '_nba_stats.csv'
    csvout = open(stats_outfile, 'wb')

    NUM_THREADS = 8
    in_queue = Queue()
    out_queue = Queue()
    queue_players(in_queue)

    while not in_queue.empty():
        jobs = []

        for i in range(NUM_THREADS):
            if not in_queue.empty():
                thread = Process(target=get_stats_helper,
                                 args=(in_queue, out_queue))
                jobs.append(thread)
                thread.start()

        for thread in jobs:
            thread.join()

        while not out_queue.empty():
            player = out_queue.get()
            del player['SUCCESS']
            try:
                name = player['NAME']
            except KeyError as e:
                continue
            player['TIME'] = RUNDAY
            fieldnames = [
                'TIME',
                'NAME',
                'JERSEY',
                'SPORT',
                'TEAM',
                'POSITION',
                'PTS',
                'REB',
                'AST',
                'URL'
            ]
            csvwriter = csv.DictWriter(csvout, delimiter='|',
                                       fieldnames=fieldnames)
            csvwriter.writerow(player)

    csvout.close()

    print 'Finished fetching NBA player stats.'
    print 'Output saved in %s' % stats_outfile

def __init__(self):
    pool = Pool(processes=2)
    self.graph = getGraph()
    files = findFiles(opts)

    self.progressQueue = Queue()
    reporter = Process(target=ProgressReport,
                       args=(self.progressQueue, len(files)))
    reporter.start()

    result = pool.map(self.cacheFile, enumerate(files), chunksize=5)
    self.progressQueue.put('END')
    log.info("finished, %s results", len(result))
    reporter.join()

class Ticker(object): """ Ticker object for controlling the ticker thread and subprocess Holds poloniex ticker dict under self.markets""" def __init__(self): self._tickerP, self._tickerT = [None, None] self.markets = poloniex.Poloniex().marketTicker() def startTicker(self): """ Starts the 'tickcatcher' subprocess and 'tickCatcher' thread""" self._tickerP = Popen(["python", "tickcatcher.py"], stdout=PIPE, bufsize=1) print('TICKER: tickcatcher subprocess started') self._tickerT = Thread(target=self.tickCatcher);self._tickerT.daemon = True self._tickerT.start() print('TICKER: tickCatcher thread started') def stopTicker(self): """ Stops the ticker subprocess""" self._tickerP.terminate();self._tickerP.kill() print('TICKER: Ticker subprocess stopped') self._tickerT.join() print('TICKER: Ticker thread joined') def tickCatcher(self): with self._tickerP.stdout: for line in iter(self._tickerP.stdout.readline, b''): try: tick = json.loads(line[25:]) # shave off twisted timestamp (probably a better way to remove the timestamp...) self.markets[tick[0]] = { 'last':tick[1], 'lowestAsk':tick[2], 'highestBid':tick[3], 'percentChange':tick[4], 'baseVolume':tick[5], 'quoteVolume':tick[6], 'isFrozen':tick[7], 'high24hr':tick[8], 'low24hr':tick[9], 'id':self.markets[tick[0]]['id'] } except Exception as e: print(e) self._tickerP.wait()
def scoreDuplicates(records, data_model, classifier, num_cores=1, threshold=0):
    if num_cores < 2:
        from multiprocessing.dummy import Process, Pool, Queue
        SimpleQueue = Queue
    else:
        from .backport import Process, Pool, SimpleQueue

    record_pairs_queue = SimpleQueue()
    score_queue = SimpleQueue()
    result_queue = SimpleQueue()

    n_map_processes = max(num_cores - 1, 1)
    score_records = ScoreRecords(data_model, classifier, threshold)
    map_processes = [Process(target=score_records,
                             args=(record_pairs_queue, score_queue))
                     for _ in range(n_map_processes)]
    for process in map_processes:
        process.start()

    reduce_process = Process(target=mergeScores,
                             args=(score_queue, result_queue, n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, records, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception):
        raise ChildProcessError

    if result:
        scored_pairs_file, dtype = result
        scored_pairs = numpy.memmap(scored_pairs_file, dtype=dtype)
    else:
        scored_pairs = result

    reduce_process.join()
    for process in map_processes:
        process.join()

    return scored_pairs

def scan_stop(scan_id): """ Stop a scan :param scan_id: The scan ID to stop :return: Empty result if success, 403 if the current state indicates that the scan can't be stopped. """ scan_info = get_scan_info_from_id(scan_id) if scan_info is None: abort(404, 'Scan not found') if not scan_info.w3af_core.can_stop(): abort(403, 'Scan can not be stop') t = Process(target=scan_info.w3af_core.stop, name='ScanStopThread', args=()) t.daemon = True t.start() return jsonify({'message': 'Stopping scan'})
def stat_files():
    all_files = []
    for root, dirs, files in os.walk('/home/gzguoyubo/mf/tw2/res/entities/custom_type'):
        ignore = False
        for ig_path in ignore_paths:
            if ig_path in root:
                ignore = True
        if ignore:
            continue
        for fname in files:
            if not fname.endswith('.py'):
                continue
            abs_file_path = join(root, fname)
            all_files.append(abs_file_path)

    file_sections = []
    file_total_nums = len(all_files)
    for i in xrange(P_NUM):
        start = i * file_total_nums / P_NUM
        stop = start + file_total_nums / P_NUM
        if i == P_NUM - 1:
            # the last section takes any remainder; slicing to -1 here
            # would silently drop the final file
            stop = file_total_nums
        file_sections.append(all_files[start:stop])

    res_queue = Queue()
    processes = []
    for section in file_sections:
        p = Process(target=stat_file, args=(section, res_queue))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

    total_stats = defaultdict(int)
    while not res_queue.empty():
        stat = res_queue.get()
        for author, cnt in stat.iteritems():
            total_stats[author] += cnt

    print total_stats

def test_pause_unpause(self):
    output = Queue.Queue()
    self.uri_opener.pause(True)

    def send(uri_opener, output):
        url = URL('http://moth/')
        http_response = uri_opener.GET(url)
        output.put(http_response)

    th = Process(target=send, args=(self.uri_opener, output))
    th.daemon = True
    th.start()

    self.assertRaises(Queue.Empty, output.get, True, 2)

    self.uri_opener.pause(False)
    http_response = output.get()
    th.join()

    self.assertEqual(http_response.get_code(), 200)
    self.assertIn(self.MOTH_MESSAGE, http_response.body)

def test_stop(self): """ Verify that the stop method actually works. In this case, working means that the process doesn't send any more HTTP requests after we stop(). """ core_start = Process(target=self.w3afcore.start, name="TestRunner") core_start.daemon = True core_start.start() # Let the core start, and the count plugin send some requests. time.sleep(5) count_before_stop = self.count_plugin.count self.assertGreater(count_before_stop, 0) # Stop now, self.w3afcore.stop() core_start.join() count_after_stop = self.count_plugin.count self.assertEqual(count_after_stop, count_before_stop)
def test_pause_stop(self): """ Verify that the pause method actually works. In this case, working means that the process doesn't send any more HTTP requests after we, pause and that stop works when paused. This test seems to be failing @ CircleCI because of a test dependency issue. If run alone in your workstation it will PASS, but if run at CircleCI the count plugin doesn't seem to start. """ core_start = Process(target=self.w3afcore.start, name='TestRunner') core_start.daemon = True core_start.start() # Let the core start, and the count plugin send some requests. time.sleep(5) count_before_pause = self.count_plugin.count self.assertGreater(self.count_plugin.count, 0) # Pause and measure self.w3afcore.pause(True) count_after_pause = self.count_plugin.count time.sleep(2) count_after_sleep = self.count_plugin.count all_equal = count_before_pause == count_after_pause == count_after_sleep self.assertTrue(all_equal) # Unpause and verify that all requests were sent self.w3afcore.stop() core_start.join() # No more requests sent after pause self.assertEqual(self.count_plugin.count, count_after_sleep)
    scan_id = get_new_scan_id()
    scan_info = ScanInfo()
    scan_info.w3af_core = w3af_core
    scan_info.target_urls = target_urls
    scan_info.profile_path = scan_profile_file_name
    scan_info.output = RESTAPIOutput()
    SCANS[scan_id] = scan_info

    #
    # Finally, start the scan in a different thread
    #
    args = (scan_info,)
    t = Process(target=start_scan_helper, name='ScanThread', args=args)
    t.daemon = True
    t.start()

    return jsonify({'message': 'Success',
                    'id': scan_id,
                    'href': '/scans/%s' % scan_id}), 201


@app.route('/scans/', methods=['GET'])
@requires_auth
def list_scans():
    """
    :return: A JSON containing a list of:
        - Scan resource URL (eg. /scans/1)
        - Scan target
        - Scan status
    """

def test_glance_user_storage_quota_bypass_1_2(self, glance_remote, suffix,
                                              env, os_conn):
    """Deleting images while they are in 'saving' status lets a user
    overcome the storage quota, because images in the deleted state are
    not counted against it. The image files should still be removed once
    the upload completes.

    Scenario:
        1. Set 'file' storage on glance-api.conf
        2. Set 'user_storage_quota' to 604979776 in glance-api.conf
           (a little more than the size of the image) and restart
           glance-api service
        3. Run 5-min cycle which creates image, wait 2 sec and then
           deletes it in "saving" status (and in any other status if
           any) on every iteration
        4. After the end of cycle wait until the upload and deleting
           images is completed
        5. Check that images statuses are "deleted" in mysql database

    Duration 5m
    """
    user_storage_quota = 604979776
    images_size_before = 0
    for img in os_conn.nova.images.list():
        images_size_before += img.to_dict()['OS-EXT-IMG-SIZE:size']
    err_msg_quota = "Glance user storage quota is exceeded"
    assert images_size_before < user_storage_quota, err_msg_quota

    img_from_dir = self.get_images_number_from_dir()
    images_before = len(os_conn.nova.images.list())

    name = "Test_{0}".format(suffix[:6])
    image_url = ("http://releases.ubuntu.com/14.04/"
                 "ubuntu-14.04.4-server-i386.iso")
    file_path = file_cache.get_file_path(image_url)

    start_time = datetime.datetime.now()
    duration = datetime.timedelta(seconds=300)
    stop_time = start_time + duration

    images_id = []
    while 1:
        image = self.os_conn.glance.images.create(name=name,
                                                  disk_format='qcow2',
                                                  container_format='bare')
        p = Process(target=self.os_conn.glance.images.upload,
                    args=(image.id, open(file_path),))
        p.start()
        time.sleep(2)
        image = self.os_conn.glance.images.get(image.id)
        if image.status == 'saving':
            logger.info("Image status = {0}".format(image.status))
            self.os_conn.glance.images.delete(image.id)
            logger.info("Image {0} is deleted in saving state"
                        .format(image.id))
        else:
            self.os_conn.glance.images.delete(image.id)
        images_id.append(image.id)
        p.join()
        if datetime.datetime.now() >= stop_time:
            break

    controllers = self.env.get_nodes_by_role('controller')
    for controller in controllers:
        with controller.ssh() as remote:
            wait(lambda: len(remote.check_call(
                'ls /var/lib/glance/images')['stdout']) == img_from_dir[
                    controller.data['fqdn']],
                timeout_seconds=60,
                waiting_for='used space to be cleared')

    images_values = self.get_images_values_from_mysql_db(images_id)
    for image_id in images_values:
        image_values = images_values[image_id]
        err_msg = 'Status of image {0} is not deleted'.format(image_id)
        assert "deleted" in image_values, err_msg

    images_size_after = 0
    for img in os_conn.nova.images.list():
        images_size_after += img.to_dict()['OS-EXT-IMG-SIZE:size']
    err_msg = "Glance user storage quota is exceeded"
    assert images_size_after < user_storage_quota, err_msg
    assert images_before == len(os_conn.nova.images.list())

worker_faceres_mailman = DummyProcess(target=resMailMan,
                                      args=(face_res_q, 'face_res'))
worker_pref_writeman = DummyProcess(target=savePref,
                                    args=(pref_wrt_q, 'pref_wrt'))
worker_svm_trainer = DummyProcess(target=updateFaceClassifier, args=(10,))

# worker_preprocess_p.daemon = True
worker_hand_p1.daemon = True
worker_face_p1.daemon = True
worker_handres_mailman.daemon = True
worker_faceres_mailman.daemon = True
worker_pref_writeman.daemon = True
worker_svm_trainer.daemon = True

# worker_preprocess_p.start()
worker_hand_p1.start()
worker_face_p1.start()
worker_handres_mailman.start()
worker_faceres_mailman.start()
worker_pref_writeman.start()
worker_svm_trainer.start()

with open('./profiles/profiles.pkl', 'rb') as pref_fd:
    try:
        pref_db = pkl.load(pref_fd)
    except EOFError:
        # if profiles_add.pkl is empty
        pref_db = pd.DataFrame(index=[],
                               columns=['username', 'gesture', 'location',
                                        'scene', 'policy', 'myfeature',
                                        'otfeature'])
        pref_db.set_index(['username'], inplace=True)

with open('./profiles/uid2name.pkl', 'rb') as uid2name_fd:
    try:
        uid2name = pkl.load(uid2name_fd)
    except EOFError:

class Loaner(object):
    """ Object for control of threaded Loaner loop """

    def __init__(self, config):
        if os.path.isfile(config):
            with open(config) as f:
                config = json.load(f)
        self.polo = poloniex.Poloniex(config['key'], config['secret'],
                                      extend=True)
        self.coins = config['coins']
        self.interval = config['interval']
        self._running, self._thread = False, None
        self.openLoanOffers = None
        self.availBalance = None

    def _run(self):
        """ Main loop that is threaded (set Loaner._running to 'False' to
        stop loop) """
        while self._running:
            try:
                self.openLoanOffers = self.polo.myOpenLoanOrders()
                # Check for old offers
                for coin in self.coins:
                    self.cancelOldOffers(coin)
                self.availBalance = self.polo.myAvailBalances()
                # ALL the coins??
                for coin in self.coins:
                    if self.coins[coin]['allBal']:
                        self.moveAll2Lending(coin)
                self.availBalance = self.polo.myAvailBalances()
                # Create new offer
                for coin in self.coins:
                    self.createLoanOffer(coin)
                # wait the interval (or shutdown)
                for i in range(self.interval * 2):
                    if not self._running:
                        break
                    time.sleep(0.5)
            except Exception as e:
                logging.exception(e)
                time.sleep(10)

    def start(self):
        """ Start Loaner.thread """
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self._running = True
        self._thread.start()
        logging.info(P('LOANER:') + C(' started'))

    def stop(self):
        """ Stop Loaner.thread """
        self._running = False
        self._thread.join()
        logging.info(P('LOANER:') + R(' stopped'))

    def moveAll2Lending(self, coin):
        if 'exchange' in self.availBalance:
            if coin in self.availBalance['exchange']:
                result = self.polo.transferBalance(
                    coin, self.availBalance['exchange'][coin],
                    'exchange', 'lending')
                if 'error' in result:
                    raise RuntimeError(P('LOANER:') + ' %s' % R(result['error']))
                else:
                    logging.info(P('LOANER:') + ' %s' % result['message'])
        if 'margin' in self.availBalance:
            if coin in self.availBalance['margin']:
                result = self.polo.transferBalance(
                    coin, self.availBalance['margin'][coin],
                    'margin', 'lending')
                if 'error' in result:
                    raise RuntimeError(P('LOANER:') + ' %s' % R(result['error']))
                else:
                    logging.info(P('LOANER:') + ' %s' % result['message'])

    def getLoanOfferAge(self, coin, order):
        # epoch of loan order
        opnTime = poloniex.UTCstr2epoch(order['date'])
        # current epoch
        curTime = time.time()
        # age of open order = now - time opened
        orderAge = (curTime - opnTime)
        logging.info(P('LOANER:') + ' %s order %s has been open %s mins' % (
            C(coin), G(str(order['id'])), C(str(orderAge / 60))))
        return orderAge

    def cancelOldOffers(self, coin):
        if coin in self.openLoanOffers:
            for offer in self.openLoanOffers[coin]:
                age = self.getLoanOfferAge(coin, offer)
                # check if it is beyond max age
                if age > self.coins[coin]['maxAge']:
                    result = self.polo.cancelLoanOrder(offer['id'])
                    if 'error' in result:
                        raise RuntimeError(P('LOANER:') + ' %s' % R(result['error']))
                    else:
                        logging.info(P('LOANER:') + ' %s [ID: %s]' % (
                            C(result['message']), G(str(offer['id']))))

    def createLoanOffer(self, coin):
        if 'lending' in self.availBalance:
            if coin in self.availBalance['lending']:
                # and amount is more than min
                if float(self.availBalance['lending'][coin]) > self.coins[coin]['minAmount']:
                    # get lowest rate
                    topRate = float(
                        self.polo.marketLoans(coin)['offers'][0]['rate'])
                    # create loan
                    result = self.polo.createLoanOrder(
                        coin, self.availBalance['lending'][coin],
                        topRate + (self.coins[coin]['offset'] * 0.000001),
                        autoRenew=1)
                    if 'error' in result:
                        raise RuntimeError(P('LOANER:') + ' %s' % R(result['error']))
                    else:
                        logging.info(
                            P('LOANER:') + ' %s %s [Amount: %s Rate: %s]' % (
                                C(coin), result['message'].lower(),
                                O(str(self.availBalance['lending'][coin])),
                                O(str(100 * (topRate + (self.coins[coin]['offset'] * 0.000001))) + '%')))

class rootMenu(menu): """ Main menu :author: Alexander Berezhnoy (alexander.berezhnoy |at| gmail.com) """ # Wait at most 20 seconds for the core to start the scan MAX_WAIT_FOR_START = 20 def __init__(self, name, console, core, parent=None): menu.__init__(self, name, console, core, parent) self._load_help('root') # At first, there is no scan thread self._scan_thread = None mapDict(self.addChild, { 'plugins': pluginsMenu, 'target': (ConfigMenu, self._w3af.target), 'misc-settings': (ConfigMenu, MiscSettings()), 'http-settings': (ConfigMenu, self._w3af.uri_opener.settings), 'profiles': ProfilesMenu, 'bug-report': bug_report_menu, 'exploit': exploit, 'kb': kbMenu }) def _cmd_start(self, params): """ Start the core in a different thread, monitor keystrokes in the main thread. :return: None """ # Check if the console output plugin is enabled or not, and warn. output_plugins = self._w3af.plugins.get_enabled_plugins('output') if 'console' not in output_plugins: msg = ("\nWarning: You disabled the console output plugin. If you" " start a new scan, the discovered vulnerabilities won\'t be" " printed to the console, we advise you to enable at least" " one output plugin in order to be able to actually see the" " the scan output.") print msg # Note that I'm NOT starting this in a new multiprocess Process # please note the multiprocessing.dummy , this is required because # I want to start new threads inside this thread and there is a bug # with that http://bugs.python.org/issue10015 self._scan_thread = Process(target=self._real_start) self._scan_thread.name = 'ConsoleScanThread' self._scan_thread.daemon = True self._scan_thread.start() # let the core thread start scan_started = self.wait_for_start() if not scan_started: om.out.console('The scan failed to start.') self._w3af.stop() return try: self.show_progress_on_request() except KeyboardInterrupt: self.handle_scan_stop() def wait_for_start(self): delay = 0.1 for _ in xrange(int(self.MAX_WAIT_FOR_START / delay)): if self._w3af.status.is_running(): return True time.sleep(delay) return False def handle_scan_stop(self, *args): om.out.console('User pressed Ctrl+C, stopping scan.') self._w3af.stop() def _cmd_cleanup(self, params): """ The user runs this command, when he has finished a scan, and wants to cleanup everything to start a new scan to another target. :return: None """ self._w3af.cleanup() def _real_start(self): """ Actually run core.start() :return: None """ try: self._w3af.plugins.init_plugins() self._w3af.verify_environment() self._w3af.start() except BaseFrameworkException, w3: om.out.error(str(w3)) except ScanMustStopException, w3: om.out.error(str(w3))
class rootMenu(menu): """ Main menu :author: Alexander Berezhnoy (alexander.berezhnoy |at| gmail.com) """ def __init__(self, name, console, core, parent=None): menu.__init__(self, name, console, core, parent) self._load_help("root") # At first, there is no scan thread self._scan_thread = None mapDict( self.addChild, { "plugins": pluginsMenu, "target": (ConfigMenu, self._w3af.target), "misc-settings": (ConfigMenu, MiscSettings()), "http-settings": (ConfigMenu, self._w3af.uri_opener.settings), "profiles": profilesMenu, "bug-report": bug_report_menu, "exploit": exploit, "kb": kbMenu, }, ) def _cmd_start(self, params): """ Start the core in a different thread, monitor keystrokes in the main thread. :return: None """ # Check if the console output plugin is enabled or not, and warn. output_plugins = self._w3af.plugins.get_enabled_plugins("output") if "console" not in output_plugins: msg = ( "Warning: You disabled the console output plugin. If you" " start a new scan, the discovered vulnerabilities won't be" " printed to the console, we advise you to enable at least" " one output plugin in order to be able to actually see the" " the scan output." ) print msg # Note that I'm NOT starting this in a new multiprocess Process # please note the multiprocessing.dummy , this is required because # I want to start new threads inside this thread and there is a bug # with that http://bugs.python.org/issue10015 self._scan_thread = Process(target=self._real_start) self._scan_thread.name = "ConsoleScanThread" self._scan_thread.daemon = True self._scan_thread.start() # let the core thread start time.sleep(1) try: if self._w3af.status.get_status() != "Not running.": self.show_progress_on_request() except KeyboardInterrupt: om.out.console("User pressed Ctrl+C, stopping scan.") self._w3af.stop() def _cmd_cleanup(self, params): """ The user runs this command, when he has finished a scan, and wants to cleanup everything to start a new scan to another target. :return: None """ self._w3af.cleanup() def _real_start(self): """ Actually run core.start() :return: None """ try: self._w3af.plugins.init_plugins() self._w3af.verify_environment() self._w3af.start() except BaseFrameworkException, w3: om.out.error(str(w3)) except ScanMustStopException, w3: om.out.error(str(w3))
except Exception as e: print("Error in worker {}:\n\t{}\n\tRestarting in 3 seconds...".format(process_number, repr(e))) sleep(3) return my_func @error_catching def f(process_number): print("starting worker:", process_number) while True: sleep(2) print("Worker {} checks in.".format(process_number)) if __name__ == '__main__': processes = [] manager = Manager() for i in range(3): p = Process(target=f) p.daemon = True p.start() processes.append(p) try: while True: sleep(1) except KeyboardInterrupt: print("Keyboard interrupt in main") sys.exit()
def run(self): """stays open until stop()""" thread = Thread(target=PyKeyboardEvent.run, args=[self]) thread.start()
def post_process(self): """ :return: """ nb_features = len(self.timeline) nb_lambda = len(self.lambdas) density = [0]*nb_features decision = [0]*nb_features decision_smooth = 5 processes = [] result = Queue(10000) maxproc = 10 boundaries = [] self.features = log(self.features) determinants = Array(c_double, nb_features*self.winsizemax) reste = [l for l in self.lambdas] while len(reste) > 0: l = reste[0] processes = [p for p in processes if p.is_alive()] if len(processes) < maxproc: p = Process(target=scalable_bic_segmentation, name='lambda %.2f' % l, args=(self.features, l, 120, self.winsizemax, self.enlargment_step, result, determinants )) reste.remove(l) processes += [p] p.start() if not result.empty(): while not result.empty(): boundaries += [result.get()] map(Process.join, processes) while not result.empty(): boundaries += [result.get()] for _, _, t in sorted(boundaries): density[t] += 1.0/float(nb_lambda) tmp = [d for d in density] while max(tmp) > 0: i = argmax(tmp) start, stop = max([0, i-decision_smooth]), min([nb_features, i+decision_smooth]) decision[i] = sum(tmp[start:stop]) for p in range(start, stop): tmp[p] = 0 precompute = frombuffer(determinants.get_obj()).reshape((nb_features, self.winsizemax)) precompute[precompute > 0] = 1 segments = [] current_start = 0 for i, v in enumerate(decision): if v > self.thvote: segments += [(current_start, i, len(segments) % 2)] current_start = i if current_start < (nb_features-1): segments += [(current_start, nb_features-1, len(segments) % 2)] if self.regroup: segments = bic_clustering(self.features, segments, self.lambdas) segments = sorted(map(lambda x: (x[0]*self.wStep, x[1]*self.wStep, x[2]), segments), key=lambda x:x[0]) segs = self.new_result(data_mode='label', time_mode='segment') label = set([v[2] for v in segments]) segs.data_object.label_metadata.label = {lab: str(lab) for lab in label} segs.data_object.time = array([s[0] for s in segments]) segs.data_object.duration = array([s[1] - s[0] for s in segments]) segs.data_object.label = array([s[2] for s in segments]) self.add_result(segs)