def main():
    auth_server = Process(target=run_auth_server)
    auth_server.start()
    app_server = Process(target=run_app_server)
    app_server.start()
    print("Access http://localhost:8081/app in your browser")

    def sigint_handler(signal, frame):
        print("Terminating servers...")
        auth_server.terminate()
        auth_server.join()
        app_server.terminate()
        app_server.join()

    signal.signal(signal.SIGINT, sigint_handler)
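# A minimal usage sketch for the main() pattern above (run_auth_server and
# run_app_server are assumed to be defined in the original module). main()
# returns right after registering the handler, so the parent must block or
# it will exit before Ctrl-C can ever reach the handler; on POSIX systems
# signal.pause() is one way to wait.
if __name__ == "__main__":
    main()
    signal.pause()  # sleep until a signal (e.g. SIGINT) arrives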
def test_litmus_with_authentication(self):
    """Run the litmus test suite over HTTP with authentication.

    This test passes.
    """
    try:
        proc = Process(target=run_wsgidav_server, args=(True, False))
        proc.daemon = True
        proc.start()
        time.sleep(1)

        try:
            self.assertEqual(
                subprocess.call(["litmus", "http://127.0.0.1:8080/",
                                 "tester", "secret"]),
                0,
                "litmus suite failed: check the log")
        except OSError:
            print("*" * 70)
            print("This test requires the litmus test suite.")
            print("See http://www.webdav.org/neon/litmus/")
            print("*" * 70)
            raise
    finally:
        proc.terminate()
        proc.join()
def test_wsgi_404(self):
    def run_provider(queue):
        try:
            provider = create_provider()
            app = Application(provider=provider)
            httpd = make_server('', 15486, app,
                                handler_class=NoLoggingHandler)
            queue.put({"result": 0})
            httpd.serve_forever()
        except Exception as e:
            queue.put({"result": 1, "error_message": str(e)})

    ready_queue = Queue()

    self.server = Process(target=run_provider, args=(ready_queue,))
    self.server.start()

    provider_started = ready_queue.get()
    if provider_started["result"] != 0:
        raise Exception("Error starting Provider process with message "
                        "'{0}'".format(provider_started["error_message"]))

    try:
        urlopen("http://127.0.0.1:15486/invalid-path").read()
    except HTTPError as e:
        self.assertEqual(404, e.code)
def service_background_jobs(self):
    logger.info('service_background_jobs')
    # NOTE: paths must begin with a "/", indicating that the first part of
    # the URI is a script name (which each app, i.e. "reports", serves as).
    # See django.core.handlers.wsgi.__init__
    uri = '/' + '/'.join([SCHEMA.REPORTS_API_URI, JOB.resource_name])
    data = {JOB.STATE: SCHEMA.VOCAB.job.state.PENDING}
    kwargs = {}
    logger.info('get jobs: %r', uri)
    resp = self.api_client.get(uri, data=data, **kwargs)
    job_listing = self.api_client.deserialize(resp)
    if API_RESULT_DATA in job_listing:
        job_listing = job_listing[API_RESULT_DATA]
    for job in job_listing:
        logger.info('found job: %r', job)
        job_id = job[JOB.ID]
        logger.info('Process the job: %r', job_id)
        p = Process(target=self.client_processor.service, args=(job_id,))
        # Leave p.daemon unset so the parent process waits for the child;
        # p.daemon = True  # if set, the parent won't wait for the job.
        logger.info('start')
        p.start()
        logger.info('started...')
    logger.debug('servicing completed')
def __init__(self, knownHosts, port=4000, cacheStorage=None):
    # TODO id?
    nodeId = random.randrange(NETWORK_SIZE)
    self.process = Process(target=self.createNode,
                           args=(nodeId, port, cacheStorage, knownHosts))
    self.process.start()
    self.node = ServerProxy(('', port), nodeId, ('localhost', port))
def __init__(self, worker_list):
    from .worker import Worker
    self.worker_list = worker_list
    assert isinstance(self.worker_list, list)
    for worker in self.worker_list:
        assert isinstance(worker, Worker)
    self.process = Process(target=self.run_forever, args=())
    self.running = False
def async_file_reading(fd, callback):
    """Helper which instantiates and runs an AsynchronousFileReader."""
    queue = SimpleQueue()
    reader = AsynchronousFileReader(fd, queue)
    reader.start()
    consumer = Process(target=consume_queue, args=(queue, callback))
    consumer.start()
    return (reader, consumer)
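# consume_queue is referenced above but not defined here; a plausible
# minimal sketch (hypothetical, not the original implementation): drain
# items from the queue and hand each one to the callback until a None
# sentinel signals that the reader hit EOF.
def consume_queue(queue, callback):
    while True:
        line = queue.get()   # blocks until the reader enqueues something
        if line is None:     # sentinel: AsynchronousFileReader is done
            break
        callback(line)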
def multiprocess_map(target, params, *args):
    mgr = multiprocessing.Manager()
    dict_threadsafe = mgr.dict()

    jobs = [Process(target=target_wrapper,
                    args=(target, param, index, dict_threadsafe, args))
            for index, param in enumerate(params)]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()

    return dict_threadsafe.values()
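# target_wrapper is not defined above; a minimal sketch of what it
# plausibly does (hypothetical implementation): run the target on a single
# parameter and store the result under the parameter's index, so the parent
# can collect results in submission order after joining all jobs.
def target_wrapper(target, param, index, results, args):
    results[index] = target(param, *args)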
def __enter__(self):
    self.smtp_process_queue = Queue()
    self.smtp_process = Process(
        target=get_otp_mail,
        args=(self.smtp_process_queue, self.timeout))
    self.smtp_process.start()
    self.port = self.smtp_process_queue.get(True, 5)
    self._do_lintop_config()
    return self
def __init__(self, config, logger=None, logname='imagemngr'):
    """
    Create an instance of the image manager.
    """
    if logger is None and logname is None:
        self.logger = logging.getLogger(logname)
        log_handler = logging.StreamHandler()
        logfmt = '%(asctime)s [%(name)s] %(levelname)s : %(message)s'
        log_handler.setFormatter(logging.Formatter(logfmt))
        log_handler.setLevel(logging.INFO)
        self.logger.addHandler(log_handler)
    elif logname is not None:
        self.logger = logging.getLogger(logname)
        self.logger.info('ImageMngr using logname %s' % (logname))
    else:
        print("Using upstream logger")
        self.logger = logger
        print(logger)
        self.logger.info('ImageMngr using upstream logger')

    self.logger.debug('Initializing image manager')
    self.config = config
    if 'Platforms' not in self.config:
        raise NameError('Platforms not defined')
    self.systems = []
    # Time before another pull can be attempted
    self.pullupdatetimeout = 300
    # Key fixed: the check previously tested 'PullUpdateTime', which never
    # matched the 'PullUpdateTimeout' key read below.
    if 'PullUpdateTimeout' in self.config:
        self.pullupdatetimeout = self.config['PullUpdateTimeout']
    # Max amount of time to allow for a pull
    self.pulltimeout = self.pullupdatetimeout
    # This is not intended to provide security, but just
    # provide a basic check that a session object is correct
    self.magic = 'imagemngrmagic'
    if 'Authentication' not in self.config:
        self.config['Authentication'] = "munge"
    self.auth = Authentication(self.config)
    self.platforms = self.config['Platforms']
    for system in self.config['Platforms']:
        self.systems.append(system)
    # Connect to database
    if 'MongoDBURI' not in self.config:
        raise NameError('MongoDBURI not defined')
    threads = 1
    if 'WorkerThreads' in self.config:
        threads = int(self.config['WorkerThreads'])
    self.workers = WorkerThreads(threads=threads)
    self.status_queue = self.workers.get_updater_queue()
    self.status_proc = Process(target=self.status_thread,
                               name='StatusThread')
    self.status_proc.start()
    atexit.register(self.shutdown)
    self.mongo_init()
    # Clean up any pending requests
    self._images_remove({'status': 'PENDING'})
def load_models(self, pmodels):
    """Assumes the length of pmodels is equivalent to the number of
    desired processes."""
    if self.use_lock:
        lock = Lock()
    else:
        lock = None
    processes = [Process(target=self.loader, args=(pm, lock))
                 for pm in pmodels]
    self.run(processes)
def main():
    auth_server = Process(target=run_auth_server)
    auth_server.start()

    def sigint_handler(signal, frame):
        print("Terminating servers...")
        auth_server.terminate()
        auth_server.join()

    signal.signal(signal.SIGINT, sigint_handler)
def test_mcdpweb_server(dirname):
    port = random.randint(11000, 15000)
    base = 'http://127.0.0.1:%s' % port

    p = Process(target=start_server, args=(dirname, port,))
    p.start()

    print('sleeping')
    time.sleep(5)

    try:
        url_wrong = base + '/not-existing'
        urllib2.urlopen(url_wrong).read()
    except HTTPError:
        pass
    else:
        raise Exception('Expected 404')

    # now run the spider
    tmpdir = tempfile.mkdtemp(prefix='wget-output')
    cwd = '.'
    cmd = ['wget', '-nv', '-P', tmpdir, '-m', base]
    # res = system_cmd_result(
    #     cwd, cmd,
    #     display_stdout=True,
    #     display_stderr=True,
    #     raise_on_error=True)
    sub = subprocess.Popen(cmd, bufsize=0, cwd=cwd)
    sub.wait()

    exc = get_exceptions(port)
    if len(exc) == 0:
        msg = 'Expected at least a not-found error'
        raise Exception(msg)
    if 'not-existing' not in exc[0]:
        raise Exception('Could not find 404 error')
    exc = exc[1:]
    if exc:
        msg = 'Execution raised errors:\n\n'
        msg += str("\n---\n".join(exc))
        raise_desc(Exception, msg)

    url_exit = base + '/exit'
    urllib2.urlopen(url_exit).read()

    print('waiting for start_server() process to exit...')
    p.join()
    print('...clean exit')
def forecast_demands(self):
    """See :meth:`StatisticalForecast.forecast_demands`.
    This method uses processes to speed up the calculation.
    This drives the cpu to full load for a short time."""
    cached = StatisticalForecast.read_from_cache(self)
    if cached is not None:
        return cached

    split_results = [[] for i in range(7)]
    # Multiprocessed forecasting. WARNING, hangup history:
    # v1: pool.map hung up.
    # v2: used multiple processes instead of pool.map to circumvent a
    #     hangup caused by a multiprocessing/django incompatibility, with
    #     results shared in a multiprocessing.Queue. Note: such a queue
    #     cannot hold unlimited elements and hangs up with no warning if
    #     there are too many (12 weeks hourly works, 20 does not).
    # v3: now uses a shared multiprocessing dict, to circumvent the hangup
    #     problems with the queue on Windows.
    mgr = multiprocessing.Manager()
    dict_threadsafe = mgr.dict()

    logger.debug("forecasting demands with daytype strategy..")
    # Call the class as a functor because class methods are not picklable.
    jobs = [Process(target=self, args=(demand, index, dict_threadsafe))
            for index, demand in enumerate(self.demands)]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()

    for index in dict_threadsafe.keys():
        split_results[index] = dict_threadsafe[index]

    forecasted_demands = []
    self.calculated_parameters = []
    for fc_tuple in split_results:
        forecasted_demands.append(list(fc_tuple[0]))
        self.calculated_parameters.append(fc_tuple[1])

    # Cache the forecasts.
    pickle.dump({"forecasts": forecasted_demands,
                 "parameters": self.calculated_parameters,
                 "date": self.env.now},
                open(os.path.join(BASE_DIR, "cache",
                                  "cached_forecasts.cache"), "wb"))

    logger.debug("forecasting completed")
    return forecasted_demands
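# The "call the class as a functor" trick above, reduced to a
# self-contained sketch: bound methods are not picklable on Python 2, but
# an instance of a class defining __call__ is, so the instance itself can
# be handed to Process as the target. (Names here are illustrative, not
# from the original code.)
import multiprocessing

class Forecaster(object):
    def __call__(self, demand, index, results):
        results[index] = sum(demand)  # stand-in for the real forecast

if __name__ == "__main__":
    mgr = multiprocessing.Manager()
    results = mgr.dict()
    f = Forecaster()
    jobs = [multiprocessing.Process(target=f, args=(d, i, results))
            for i, d in enumerate([[1, 2], [3, 4]])]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    print(dict(results))  # {0: 3, 1: 7}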
def main():
    auth_server = Process(target=run_auth_server)
    auth_server.start()
    print("Access http://10.10.112.59:8081/app in your browser")

    def sigint_handler(signal, frame):
        print("Terminating servers...")
        auth_server.terminate()
        auth_server.join()

    signal.signal(signal.SIGINT, sigint_handler)
def run_rule(self, rule, params=None):
    try:
        log.info('trying job %s' % rule.name)
        job_settings = self.settings.copy()
        if params:
            job_settings.update(params)
        name = rule.qualified_name
        args = (name, job_settings)
        Process(target=run_rule_async, args=args).start()
    except Exception:
        log.error('Error running rule: %s' % rule.name)
def startNode(self, requests, nodesIds, idx, factory, resultsLogger,
              knownHosts):
    nodeName = nodesIds[idx]
    nodeRequests = requests[nodeName]
    client = SimulatorClientProcess(factory.port, nodeName, idx,
                                    resultsLogger, knownHosts)
    process = Process(target=client.startNode,
                      args=(factory, nodeRequests))
    self.clients[nodeName] = process
def _launch(type_name, config, is_important):
    plugin_clazz = _get_type(type_name)
    if not plugin_clazz:
        logging.warning('could not find %s plugin' % type_name)
        # Bail out: instantiating None below would raise a TypeError.
        return
    d = plugin_clazz(config)
    p = Process(target=d.start)
    p.daemon = not is_important
    p.name = 'plugin: %s' % d.name
    p.start()
def start(self):
    """
    Starts the background process that prunes expired items
    """
    def task():
        while True:
            self.prune_expired()
            pytime.sleep(.5)

    self.processs = Process(target=task)
    self.processs.start()
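# Caveat for the pattern above (general multiprocessing behaviour, not from
# the original code): task() is a closure and closures cannot be pickled,
# so Process(target=task) only works with the "fork" start method (the
# Linux default). Under "spawn" (Windows; the macOS default since Python
# 3.8) the target must be a module-level callable, e.g. something like:
def _prune_loop(cache, interval=.5):
    # Module-level stand-in for the task() closure (illustrative names).
    import time
    while True:
        cache.prune_expired()
        time.sleep(interval)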
def setUp(self):
    # Set up the game server to run on a separate process.
    self.game_server = Process(target=start_server)
    self.game_server.start()

    # Create the game client.
    self.client = GameClient(host="127.0.0.1", port="5000")

    self.player_one = "Arthur"
    self.player_one_suspect = game_state.PEACOCK
    self.player_two = "Steven"
    self.player_two_suspect = game_state.PLUM
def load(self):
    indices_groups = array_split(self.indices, self.procs)
    if self.use_lock:
        lock = Lock()
    else:
        lock = None
    processes = [Process(target=self.loader,
                         args=(indices_group, self.f, self.fkwds,
                               self.Model, lock))
                 for indices_group in indices_groups]
    self.run(processes)
def test_wsgi(self):
    def run_provider(queue):
        try:
            provider = create_provider()
            app = Application(provider=provider)
            httpd = make_server('', 15486, app,
                                handler_class=NoLoggingHandler)
            queue.put({"result": 0})
            httpd.serve_forever()
        except Exception as e:
            queue.put({"result": 1, "error_message": str(e)})

    ready_queue = Queue()

    self.server = Process(target=run_provider, args=(ready_queue,))
    self.server.start()

    provider_started = ready_queue.get()
    if provider_started["result"] != 0:
        raise Exception("Error starting Provider process with message "
                        "'{0}'".format(provider_started["error_message"]))

    self.client = Process(target=run_client, args=(ready_queue,))
    self.client.start()

    client_started = ready_queue.get()
    if client_started["result"] != 0:
        raise Exception("Error starting Client Application process with "
                        "message '{0}'".format(
                            client_started["error_message"]))

    self.access_token()
def test_tornado(self):
    def run_provider(queue):
        try:
            provider = create_provider()
            app = TornadoApplication([
                url(r"/authorize", OAuth2Handler, dict(provider=provider)),
                url(r"/token", OAuth2Handler, dict(provider=provider))
            ], debug=True)
            app.listen(15486)

            queue.put({"result": 0})

            IOLoop.current().start()
        except Exception as e:
            queue.put({"result": 1, "error_message": str(e)})

    ready_queue = Queue()

    self.server = Process(target=run_provider, args=(ready_queue,))
    self.server.start()

    provider_started = ready_queue.get()
    if provider_started["result"] != 0:
        raise Exception("Error starting Provider process with message "
                        "'{0}'".format(provider_started["error_message"]))

    self.client = Process(target=run_client, args=(ready_queue,))
    self.client.start()

    client_started = ready_queue.get()
    if client_started["result"] != 0:
        raise Exception("Error starting Client Application process with "
                        "message '{0}'".format(
                            client_started["error_message"]))

    self.access_token()
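# The ready-queue handshake used in the two tests above, reduced to a
# self-contained sketch (illustrative names): the child reports success or
# failure through a Queue before the parent proceeds, so a failed server
# start surfaces as an immediate test error instead of a hang or a
# connection-refused further down.
from multiprocessing import Process, Queue

def run_server(queue):
    try:
        # ... bind sockets, build the app ...
        queue.put({"result": 0})        # signal readiness
        # ... serve_forever() ...
    except Exception as e:
        queue.put({"result": 1, "error_message": str(e)})

if __name__ == "__main__":
    ready_queue = Queue()
    server = Process(target=run_server, args=(ready_queue,))
    server.start()
    started = ready_queue.get()          # blocks until the child reports
    if started["result"] != 0:
        raise Exception(started["error_message"])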
def setUp(self):
    # Use a Thread for debug:
    if USE_THREADS:
        self.server = threading.Thread(target=ServerLoop,
                                       args=(self.container_name,))
    else:
        self.server = Process(target=ServerLoop,
                              args=(self.container_name,))
    self.server.start()

    self.count = 0
    self.data_arrays_sent = 0
    time.sleep(0.1)  # give it some time to start
    self.rdaemon = Pyro4.Proxy("PYRO:Pyro.Daemon@./u:" +
                               self.container_name)
    self.comp = self.rdaemon.getObject("mycomp")
def main():
    auth_server = Process(target=run_auth_server)
    auth_server.start()
    print("To test getting an auth token, execute the following curl command:")
    print(
        "curl --ipv4 -v -X POST"
        " -d 'grant_type=client_credentials&client_id=abc&client_secret=xyz' "
        "http://localhost:8080/token"
    )

    def sigint_handler(signal, frame):
        print("Terminating server...")
        auth_server.terminate()
        auth_server.join()

    signal.signal(signal.SIGINT, sigint_handler)
def start_sched():
    j_logger.info(u'starting job scheduler ...')
    jobs = get_all_jobs()
    for job in jobs:
        j_logger.info(u'starting job %s ' % job.name)
        job.run_status = Value('i', 0)  # the job's status value
        try:
            p = Process(target=_inner_job, name=job.name, args=(job,))
            p.start()
            job.process_id = p.pid
            j_logger.info(u'job %s started !' % job.name)
        except Exception as e:
            j_logger.error(u'job %s fail to start, due to [%s]!' %
                           (job.name, e))
    register_signal_notify()
    j_logger.info(u'job scheduler started !')
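# _inner_job is not shown above; a minimal hypothetical sketch of how the
# shared Value could be used: the child flips run_status so the scheduler
# can observe the job's state across the process boundary. job.run is an
# assumed entry point, not from the original code.
def _inner_job(job):
    job.run_status.value = 1      # mark as running
    try:
        job.run()
    finally:
        job.run_status.value = 0  # back to idle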
def calculate(self, funcs):
    # Always count the data.
    base_funcs = [{'function': len, 'name': 'count_agg', 'raw': False}]
    # Check if there are raw calculation functions.
    if any([f['raw'] for f in funcs]):
        has_raw = True
        if self.sub.value_set == {}:
            raise ValueError('Raw aggregate statistics requested with no '
                             '"value_set_coll"!!')
    else:
        has_raw = False
    # Need to count the raw data values if raw values are present.
    if has_raw:
        base_funcs.append({'function': len, 'name': 'count_raw',
                           'raw': True})
    # Append the rest of the functions.
    funcs = base_funcs + funcs
    # Check the function definition dictionary for common problems.
    check_function_dictionary(funcs)
    # Convert the time vector for faster referencing.
    time_conv = [[getattr(time, grp) for grp in self.time_grouping]
                 for time in self.sub.timevec]
    # The shared list.
    all_attrs = Manager().list()
    if self.procs > 1:
        processes = [Process(target=self.f_calculate,
                             args=(all_attrs, self.sub, groups, funcs,
                                   time_conv, self.time_grouping,
                                   self.grouping, has_raw))
                     for groups in self.groups]
        pmanager = ProcessManager(processes, self.procs)
        pmanager.run()
    else:
        self.f_calculate(all_attrs, self.sub, self.groups[0], funcs,
                         time_conv, self.time_grouping, self.grouping,
                         has_raw)
    self.stats = merge_dict_list(list(all_attrs))
def __init__(self, name=None):
    if name is None:
        name = 'default'
    if name in _workers:
        return
    _workers[name] = self
    super(Worker, self).__init__()
    self.daemon = True
    self.name = name
    self._queue = _ver.queue.Queue(1)
    self.last_exception = None
    self._pon = _mp.Value('b', True)
    tsk, self.task = _mp.Pipe(False)
    self.out, res = _mp.Pipe(False)
    self.process = Process(target=process, args=(self._pon, tsk, res),
                           name=name)
    self.process.start()
    self._on = True
    self.start()
    _time.sleep(1)
def test_cleanup(self):
    """
    Test the cleanup thread
    """
    # We want the cleanup thread to run in this context,
    # but it is an infinite loop, so let's delay a shutdown.
    sd2 = "/tmp/shut2"
    rec = {
        'address': '1.2.3.4',
        'ip': '10.128.0.1',
        'router': 'router',
        'last_associated': time(),
        'end_time': 0,
        'uid': 501,
        'user': '******',
        'jobid': '1233',
        'status': 'used'
    }
    self.db.routes2.insert(rec)
    # We want to shut down the thread that is started on init.
    with open(sd2, 'w') as f:
        f.write('1')
    settings = self.settings.copy()
    settings['COLLECTION'] = 'routes2'
    settings['SHUTFILE'] = sd2
    rt = router.Router(settings)
    # Wait for the init thread to shut down.
    sleep(0.2)
    # Now let's start our own.
    if os.path.exists(sd2):
        os.remove(sd2)
    shut = Process(target=self._shutdown)
    shut.start()
    rt.cleanup()
    shut.terminate()
    r = self.db.routes2.find_one({'address': '1.2.3.4'})
    self.assertEquals(r['status'], 'available')
    self.db.routes2.remove({})
    rv = rt.cleanup()
    self.assertEquals(-1, rv)
def __init__(self, settings):
    logging.debug("Initializing Router")
    self.dbhost = settings['DBHOST']
    self.poll = float(settings['POLLINTERVAL'])
    self.agent = settings['JOBSURL']
    user = settings['RTRUSER']
    mapfile = settings['MAPFILE']
    key = None
    if 'RTRKEY' in settings:
        key = settings['RTRKEY']
    self.map = self._load_mapfile(mapfile)
    self.vyos = vyos_interface.vyosInterface(user, key=key)
    self.ddns = self._init_dns(settings)
    self.shutdown_file = settings.get('SHUTFILE', "/tmp/shutdown_sdn")
    self.collection = settings.get('COLLECTION', 'routes')
    self.cleanup_proc = Process(target=self.cleanup,
                                name='CleanupThread')
    self.cleanup_proc.start()
    client = MongoClient(self.dbhost)
    self.routes = client.sdn[self.collection]
    if self.routes.find_one() is None:
        self.cleanup_proc.terminate()
        raise OSError('DB not initialized')