def main() -> None:
    """Entry point: start the data writer and spider closer, then launch a new spider once a week."""
    mpd.Process(target=w_data).start()
    mpd.Process(target=close_spi).start()
    while True:
        print("Start a new spider at:\t", time.ctime())
        p = mpd.Process(target=run)
        p.start()
        SPIPQ.put(p)
        time.sleep(24 * 3600 * 7)  # one week between spider runs
def _launch_workers(self):
    workers = []
    input_queue = multiprocessing.Queue(self._buffer_size)
    self._output_queue = multiprocessing.Queue(self._buffer_size)
    # One pusher feeds image paths into the input queue.
    p = dummy.Process(target=pusher,
                      args=(self.image_paths, input_queue,
                            self._output_queue, self.shuffle))
    p.start()
    workers.append(p)
    # One worker per CPU core consumes the input queue.
    for _ in range(multiprocessing.cpu_count()):
        p = dummy.Process(target=worker, args=(input_queue, self._output_queue))
        p.start()
        workers.append(p)
    return workers  # keep the handles so callers can join or terminate them
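# A minimal sketch of the `pusher` and `worker` callables used above; their
# bodies are assumptions inferred from the call signatures, not the originals.
def pusher(image_paths, input_queue, output_queue, shuffle):
    """Feed every image path into the input queue, optionally shuffled."""
    import random
    paths = list(image_paths)
    if shuffle:
        random.shuffle(paths)
    for path in paths:
        input_queue.put(path)

def worker(input_queue, output_queue):
    """Consume paths and push loaded results to the output queue."""
    while True:
        path = input_queue.get()
        output_queue.put(load_image(path))  # `load_image` is hypothetical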
def checkLiveness(pID, pStub, obj_type, timeout=5):
    """Detect whether a peer is still alive."""
    logging.debug("Confirming connection to peer {}.".format(pID))
    try:
        parent_conn, child_conn = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=_get_line,
                                    args=(child_conn, pID, pStub, obj_type))
        p.daemon = True  # don't block interpreter shutdown on a hung probe
        p.start()
        it_did_timeout = not parent_conn.poll(timeout)
        if it_did_timeout:
            logging.info("Connection to peer {} timed out.".format(pID))
            return False
        parent_said_yes = parent_conn.recv()
        if not parent_said_yes:
            logging.info("No connection to peer {} established.".format(pID))
        return parent_said_yes
    except Exception:  # a bare except would also swallow SystemExit/KeyboardInterrupt
        err = sys.exc_info()
        logging.debug(
            "Encountered an error while spawning a process to check if peer {} is still alive:\n{}: {}"
            .format(pID, err[0], err[1]))
        return False
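# A minimal sketch of the `_get_line` probe spawned by checkLiveness; the RPC
# on `pStub` is an assumption, only the pipe protocol is implied by the caller.
def _get_line(conn, pID, pStub, obj_type):
    """Probe the peer via its stub and report success as a bool on the pipe."""
    try:
        pStub.ping(pID, obj_type)  # hypothetical RPC; any blocking call works
        conn.send(True)
    except Exception:
        conn.send(False)
    finally:
        conn.close()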
def main(argv):
    parse_args(argv)
    init_config()
    get_password()
    authenticate()
    remove_tmp_checkpoint()
    if not os.path.exists(config['outputpath']):
        os.makedirs(config['outputpath'])
    # get databases slated for backup
    q = get_dbs()
    # break up the processing across multiple worker processes
    threads = []
    for _ in range(config['num_threads']):
        t = multiprocessing.Process(target=stream_all_docs, args=(q,))
        threads.append(t)
        t.start()
    # single poison pill: workers are expected to re-queue it before exiting
    q.put(None)
    for t in threads:
        t.join()
    rename_checkpoint_file()
def __init__(self):
    self.cellDict = {}
    self.queue = multiprocessing.Queue()
    logger.info('Starting message-parsing process')
    # Note: `self` must be picklable for a multiprocessing target like this.
    self.process = multiprocessing.Process(target=parseMsg,
                                           args=(self, self.queue, logger))
    self.process.start()
def __init__(self, constructor, strategy='thread'): """Step environment in a separate process for lock free parallelism. The environment will be created in the external process by calling the specified callable. This can be an environment class, or a function creating the environment and potentially wrapping it. The returned environment should not access global variables. Args: constructor: Callable that creates and returns an OpenAI gym environment. Attributes: observation_space: The cached observation space of the environment. action_space: The cached action space of the environment. """ if strategy == 'thread': import multiprocessing.dummy as mp elif strategy == 'process': import multiprocessing as mp else: raise NotImplementedError(strategy) self._conn, conn = mp.Pipe() self._process = mp.Process(target=self._worker, args=(constructor, conn)) atexit.register(self.close) self._process.start() self._observ_space = None self._action_space = None
def main():
    timeout_watch = utils.Stopwatch()
    timeout_watch.start()
    args = _parse_cli_args(sys.argv[1:])
    if args.log_verbose:
        logging.getLogger().setLevel(level=logging.DEBUG)
    else:
        logging.getLogger().setLevel(level=logging.INFO)
    stop_event = StopEvent()
    main_run = mp.Process(target=run, args=(args, stop_event))
    try:
        main_run.start()
        while main_run.is_alive() and (
                not args.timelimit or timeout_watch.curr_s() < args.timelimit):
            sleep(0.1)
    finally:
        timeout_watch.stop()
        if args.timelimit and timeout_watch.sum() >= args.timelimit:
            logging.info("Timelimit reached.\n")
        logging.info("Time taken: " + str(timeout_watch.sum()))
        stop_event.set()
        if main_run.is_alive():
            # Process.join() never raises TimeoutError; it simply returns,
            # so check is_alive() afterwards instead of catching an exception.
            main_run.join(5)
            if main_run.is_alive():
                logging.info(
                    "Main run didn't terminate within acceptable limit. Killing it.")
                main_run.terminate()
def main(argv):
    parse_args(argv)
    init_config()
    get_password()
    authenticate()
    q = multiprocessing.Queue()
    # A single input file gets one worker; otherwise the JSON files in the
    # current working directory are enqueued one by one.
    if os.path.isfile(config['inputpath']):
        q.put(config['inputpath'])
        config['num_threads'] = 1
    else:
        for f in os.listdir('.'):
            q.put(f)
    threads = []
    for _ in range(config['num_threads']):
        t = multiprocessing.Process(target=upload_dispatcher, args=(q,))
        t.daemon = True  # setDaemon() is a threading-only API; use the attribute
        threads.append(t)
        t.start()
    q.put(None)  # poison pill; workers are expected to re-queue it
    for t in threads:
        t.join()
def run_async(self, thread_num=3):
    """Run the test cases asynchronously in a new thread."""
    new_thread = multithreading.Process(target=self.run, args=(thread_num,))
    new_thread.start()
def interpret_explain(self, **kwargs):
    jobs = []
    for pipeline_obj in self.pipeline_obj_list:
        p = multiprocessing.Process(target=pipeline_obj.interpret_explain,
                                    kwargs=kwargs)
        jobs.append(p)
        p.start()
    return jobs  # return the handles so the caller can join the processes
def test_api(client_key):
    # 1. Create test image data and both processing and result queues
    urls = ['https://demo.restb.ai/images/demo/demo-1.jpg',
            'https://demo.restb.ai/images/demo/demo-2.jpg',
            'https://demo.restb.ai/images/demo/demo-3.jpg',
            'https://demo.restb.ai/images/demo/demo-4.jpg',
            'https://demo.restb.ai/images/demo/demo-5.jpg',
            'https://demo.restb.ai/images/demo/demo-6.jpg']
    queue = mp.Queue()
    image_id = 1
    for url in urls:
        for model in __MODELS.keys():
            queue.put(dict(id=image_id, url=url, model=model))
            image_id += 1
    results = mp.Queue()
    # 2. Pick which API endpoint to use (US vs. EU)
    url = __URL_US
    # 3. Define concurrency-specific objects (shared stats guarded by a lock)
    lock_stats = mp.Lock()
    counter = mp.Value('i', 0)
    avg_req_time = mp.Value('f', 0)
    time_start = mp.Value('f', 999999999999999)
    time_end = mp.Value('f', 0)
    # 4. Spawn processes/threads to process the images in the queue
    pool = []
    for _ in range(__requests_per_second):
        # pass in the necessary parameters to each worker, including the client key
        p = mp.Process(target=image_process_thread,
                       args=(url, client_key, queue, results, lock_stats,
                             counter, avg_req_time, time_start, time_end))
        pool.append(p)
        p.start()
    # 5. Clean up after the queue has been processed, using "poison pills"
    while not queue.empty():  # wait for the queue to drain
        time.sleep(1)
    for _ in pool:  # seed shutdown messages / poison pills
        queue.put(dict(id=-1, url='shutdown', model='shutdown'))
    for p in pool:  # enforce clean shutdown of workers
        p.join()
    # 6. Finally, report stats and return the accumulated results
    total = time_end.value - time_start.value
    print('[{requests}] requests processed in [{seconds}] seconds with average '
          'time [{time}] ms, total throughput: [{throughput}] rps'.format(
              requests=counter.value,
              seconds=str(round(total / 1000.0, 1)),
              time=str(round(avg_req_time.value / counter.value, 0)),
              throughput=str(round(counter.value / (total / 1000.0), 2))))
    return results
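# A minimal sketch of the `image_process_thread` worker used above; the HTTP
# request itself is an assumption (commented out), only the queue/stats
# protocol (poison pill with id == -1, lock-guarded counters) is implied
# by the caller.
def image_process_thread(url, client_key, queue, results, lock_stats,
                         counter, avg_req_time, time_start, time_end):
    while True:
        item = queue.get()
        if item['id'] == -1:  # poison pill: shut down cleanly
            break
        t0 = time.time() * 1000.0
        # resp = requests.get(url, params={...})  # hypothetical API request
        t1 = time.time() * 1000.0
        results.put(item)
        with lock_stats:
            counter.value += 1
            avg_req_time.value += t1 - t0  # summed here, divided by the caller
            time_start.value = min(time_start.value, t0)
            time_end.value = max(time_end.value, t1)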
def start(self):
    for _ in range(self.size):
        parent_end, child_end = mp.Pipe()
        p = mp.Process(target=Actor,
                       args=(child_end, self.actor_device,
                             (global_config, actor_config), self.ground_truth))
        p.start()
        self.actors.append((p, parent_end))
def vkSleep(netconf):
    global net, db
    net = ConnectorNetwork(netconf, appid='0', name='vk')
    db = DB()
    # Poll events in the background while the app runs in the foreground.
    p = thread.Process(target=poll_events, args=(db,))
    p.start()
    start_app(db)
    p.join()
def main():
    p = multithread.Process(target=keyHandler)
    p.start()
    while not stop:
        print("do computation")
        time.sleep(1.0)
    p.join()
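# A minimal sketch of the `keyHandler` target and the shared `stop` flag; the
# trigger is hypothetical. It relies on `multithread` being a thread-backed
# module (multiprocessing.dummy), since a real child process could not mutate
# the parent's global.
stop = False

def keyHandler():
    global stop
    input("Press Enter to stop...")  # hypothetical stop trigger
    stop = True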
def start_listen_threaded(self):
    inp, out = thr.Pipe()
    process = thr.Process(target=self.run_function, args=(inp,),
                          name=self.name)
    process.start()
    while True:
        time.sleep(0.01)
        text = input("Console input>>>")
        self.send(text)  # note: the `out` end of the pipe is never used here
def thr_camera(cam, q_size=1, block_get=False):
    q = Queue(maxsize=q_size)

    def loop():
        c = Camera_source(cam)
        queue_put(c, q)

    p = thr.Process(target=loop)
    p.start()
    source = queue_source(q, block=block_get)
    return source, p
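# A minimal sketch of the `queue_put` / `queue_source` helpers assumed above:
# a producer that drains an iterable source into a queue, and a generator that
# yields items back out. Names and semantics are inferred, not the originals.
import queue as _queue

def queue_put(source, q):
    """Push every frame from an iterable source into the queue."""
    for item in source:
        q.put(item)

def queue_source(q, block=False):
    """Yield items from the queue; optionally block until one is available."""
    while True:
        try:
            yield q.get(block=block)
        except _queue.Empty:
            continue  # no frame ready yet; poll again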
def start(name="", port=80):
    myServer = HTTPServer((name, port), MyServer)
    print(time.asctime(), "Server Starts - %s:%s" % (name, port))
    # serve_forever runs in the background thread; p.start() returns
    # immediately, so the server socket must not be closed here or serving
    # would break as soon as it started.
    p = thr.Process(target=myServer.serve_forever)
    p.start()
    return p
def __init__(self, parentFrameId, childFrameId, tfListener):
    self.listener = tfListener
    self.parentFrameId = parentFrameId
    self.childFrameId = childFrameId
    # Time delay before timeout
    self.timeTolerance = 0.1
    self.translation = np.zeros(3)
    self.rotation = np.zeros(4)
    self.time = -1
    self.stop = False
    self.updated = False  # set before starting the worker to avoid a race
    self.process = mp.Process(target=self.start)
    self.process.start()
def __init__(self, constructor, strategy='thread'):
    if strategy == 'thread':
        import multiprocessing.dummy as mp
    elif strategy == 'process':
        import multiprocessing as mp
    else:
        raise NotImplementedError(strategy)
    self._strategy = strategy
    self._conn, conn = mp.Pipe()
    self._process = mp.Process(target=self._worker, args=(constructor, conn))
    atexit.register(self.close)
    self._process.start()
    self._observ_space = None
    self._action_space = None
def create_consumers(self, consumer_count=0):
    """Create the consumer processes.

    :return: list of started consumers
    """
    consumer_count = consumer_count or multithreading.cpu_count()
    consumer_list = list()
    for _ in range(consumer_count):
        curr_consumer = multithreading.Process(target=self.worker, args=())
        # Daemonize so the children exit when the main process exits.
        curr_consumer.daemon = True
        consumer_list.append(curr_consumer)
        curr_consumer.start()
    return consumer_list
def __init__(self, ctor, strategy="process"):
    self._strategy = strategy
    if strategy == "none":
        self._env = ctor()
    elif strategy == "thread":
        import multiprocessing.dummy as mp
    elif strategy == "process":
        import multiprocessing as mp
    else:
        raise NotImplementedError(strategy)
    if strategy != "none":
        self._conn, conn = mp.Pipe()
        self._process = mp.Process(target=self._worker, args=(ctor, conn))
        atexit.register(self.close)
        self._process.start()
    self._obs_space = None
    self._action_space = None
def __init__(self, constructor, strategy='thread'):
    # cloudpickle lets us ship arbitrary callables (e.g. lambdas) to the worker.
    self._pickled_ctor = cloudpickle.dumps(constructor)
    if strategy == 'process':
        import multiprocessing as mp
        context = mp.get_context('spawn')
    elif strategy == 'thread':
        import multiprocessing.dummy as context
    else:
        raise NotImplementedError(strategy)
    self._strategy = strategy
    self._conn, conn = context.Pipe()
    self._process = context.Process(target=self._worker, args=(conn,))
    atexit.register(self.close)
    self._process.start()
    self._receive()  # Block until the worker signals it is ready.
    self._obs_space = None
    self._act_space = None
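# A minimal sketch of the `_worker` loop these wrappers pair with; the message
# protocol (call vs. attribute access, the 'ready' handshake) is an assumption
# based on common external-process environment wrappers, not the original code.
def _worker(self, conn):
    try:
        env = cloudpickle.loads(self._pickled_ctor)()
        conn.send(('ready', None))  # matches the _receive() handshake above
        while True:
            kind, payload = conn.recv()
            if kind == 'close':
                break
            if kind == 'call':
                name, args, kwargs = payload
                conn.send(('result', getattr(env, name)(*args, **kwargs)))
            elif kind == 'getattr':
                conn.send(('result', getattr(env, payload)))
    except Exception as e:
        conn.send(('exception', e))
    finally:
        conn.close()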
def main():
    dbs = s.get('https://{0}.cloudant.com/_all_dbs'.format(account),
                auth=(account, pwd)).json()
    if '_replicator' in dbs:
        dbs.remove('_replicator')
    # Guard against a chunk size of 0 when there are fewer DBs than threads.
    range_size = max(1, len(dbs) // num_threads)
    db_chunks = slice_array(dbs, range_size)
    threads = []
    for chunk in db_chunks:
        t = multiprocessing.Process(target=delete_dbs, args=(chunk,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
def run_function(self, pipe):
    def listen_for_notif():
        print("console connector listening for notification...")
        while True:
            notif = self.source.recv_json()
            print('console got notif:', notif)
            self.source.send_string("CONSOLE OK")

    notificator = thr.Process(target=listen_for_notif)
    notificator.start()
    while True:
        text = pipe.recv()
        self.send(text)
        time.sleep(0.01)
def _execute_handler_async(self, p, job_id, data_dict):
    """
    This private execution handler executes a process in a background
    thread using `multiprocessing.dummy`
    https://docs.python.org/3/library/multiprocessing.html#module-multiprocessing.dummy  # noqa

    :param p: `pygeoapi.process` object
    :param job_id: job identifier
    :param data_dict: `dict` of data parameters

    :returns: tuple of None (i.e. initial response payload)
              and JobStatus.accepted (i.e. initial job status)
    """
    _process = dummy.Process(target=self._execute_handler_sync,
                             args=(p, job_id, data_dict))
    _process.start()
    return None, JobStatus.accepted
def __init__(self, targetDir, fps=10.0):
    # Create target directory and pose log
    if not os.path.exists(targetDir):
        os.mkdir(targetDir)
    self.poseFd = open(targetDir + "/pose.csv", 'w')
    self.path = targetDir
    self.currentImage = None
    self.currentPose = None
    self.fps = fps
    self.bridge = CvBridge()
    # Prepare multiprocessing
    self.dataEx = Lock()
    self.end = False
    self.process = mp.Process(target=self.start)
    self.process.start()
def listen_for_notif(self, callback):
    def listener(callback):
        _print("Listening DMX on %s ..." % self.myaddr)
        while True:
            notif = self.notif_sock.recv_json()
            try:
                response = callback(notif)
            except Exception:
                _print("ERROR in notif callback. sending fail")
                self.notif_sock.send_string("FAIL")
                raise  # bare raise preserves the original traceback
            self.notif_sock.send_string(response)

    p = thread.Process(target=listener, args=(callback,),
                       name=self.name + '_notif_listen')
    p.start()
    return p
def downToDB(param):
    start = time.time()
    filename = '%s.zip' % param['contract'].replace('/', '-')
    request_url = (url + '/hist-ticks?date=%s&contract=%s'
                   % (param['date'], param['contract']) + '&format=json')
    log.info('downloading %s: ' % filename)
    before = time.time()
    total = downloadFile(request_url, filename)
    if total == 0:
        log.info('%s has no data!' % request_url)
        return
    # %.2f, not %.2s: the latter truncates the string to two characters
    log.info('downloaded %s in %.2f seconds' % (filename, time.time() - before))
    p = thread.Process(target=storeDB, args=(param['contract'],))
    p.daemon = False
    p.start()
    spent = time.time() - start
    return spent
def multithreading_task(cls, targets, kwargs_list=None):
    """Run several different targets concurrently, one thread each.

    :param targets: list of callables
    :param kwargs_list: per-target kwargs, e.g. [None, None, {'i': 555555}]
    :return: None
    """
    works = []
    for i, t in enumerate(targets):
        # Entries may be None (see the example above), so fall back to {}.
        kwargs = (kwargs_list[i] or {}) if kwargs_list is not None else {}
        work = dummy.Process(target=t, kwargs=kwargs)
        works.append(work)
    for work in works:
        work.start()
    for work in works:
        work.join()
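# Hypothetical usage of multithreading_task, mirroring the docstring example:
# two no-argument targets plus one that takes a keyword argument. `SomeClass`
# stands in for whatever class hosts the classmethod.
def _task_a():
    print('task a done')

def _task_b():
    print('task b done')

def _task_c(i=0):
    print('task c got', i)

SomeClass.multithreading_task([_task_a, _task_b, _task_c],
                              kwargs_list=[None, None, {'i': 555555}])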
def main():
    doc = {'docs': []}
    for _ in range(num_docs):
        doc['docs'].append({
            'Years': 18,
            'Name': 'Angel',
            'Job': 'Clerk',
            'Dept': 93,
            'Salary': 53055
        })
    print(json.dumps(doc, indent=4))  # print() call, not the Python 2 statement
    threads = []
    for _ in range(num_threads):
        t = multiprocessing.Process(target=create_dbs, args=(doc,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()