def process_input_dir(args, input_path, output_path): patt = input_path + os.sep + "*" + args.extension files = glob.glob(patt) docs_num = len(files) if docs_num > args.threads: slice_size = docs_num / args.threads else: slice_size = 1 print "Threads:", args.threads print "Documents number:", docs_num print "Documents per thread:", slice_size start = 0 jobs = [] for job_num in range(args.threads): print "Initializing process", job_num end = start + slice_size p = Process(target=lemmatize_files, args=(files[start:end], output_path, args)) print files[start:end] jobs.append(p) p.start() start += slice_size for p in jobs: p.join() if (docs_num % 2) == 1: lemmatize_files(files, output_path, args)
def test_litmus_with_authentication(self):
    """Run the litmus test suite against the WebDAV server over HTTP
    with authentication.

    This test passes.
    """
    try:
        # Start the WebDAV server in a daemon child process
        # (args appear to select SSL=True/False-style flags — TODO confirm
        # against run_wsgidav_server's signature).
        proc = Process(target=run_wsgidav_server, args=(True, False))
        proc.daemon = True
        proc.start()
        # Give the server a moment to bind its port before litmus connects.
        time.sleep(1)
        try:
            # litmus exits 0 on success; credentials are passed as argv.
            self.assertEqual(subprocess.call(["litmus", "http://127.0.0.1:8080/", "tester", "secret"]),
                             0,
                             "litmus suite failed: check the log")
        except OSError:
            # The litmus binary is not installed — tell the user where to get it.
            print "*" * 70
            print "This test requires the litmus test suite."
            print "See http://www.webdav.org/neon/litmus/"
            print "*" * 70
            raise
    finally:
        # Always tear the server process down, even on failure.
        proc.terminate()
        proc.join()
def main(gcm="", rcm="", out_folder=""):
    """Interpolate NARCCAP data for the given GCM/RCM pair, running the
    'current' (1970-1999) and 'future' (2041-2070) periods in parallel
    child processes. Periods whose input folder is missing are skipped.
    """
    pc = None  # process for the current-climate period
    pf = None  # process for the future-climate period
    kwargs = {
        "start_year": 1970, "end_year": 1999,
        "rcm": rcm, "gcm": gcm, "out_folder": out_folder
    }
    in_folder = "data/narccap/{0}-{1}/current".format(gcm, rcm)
    if os.path.isdir(in_folder):
        pc = Process(target=interpolate_to_amno, args=(in_folder, ), kwargs=kwargs)
        pc.start()
    else:
        print "{0} does not exist, ignoring the period ...".format(in_folder)
    # Same interpolation for the future period, different year range.
    kwargs = {
        "start_year": 2041, "end_year": 2070,
        "rcm": rcm, "gcm": gcm, "out_folder": out_folder
    }
    in_folder = "data/narccap/{0}-{1}/future".format(gcm, rcm)
    if os.path.isdir(in_folder):
        pf = Process(target=interpolate_to_amno, args=(in_folder, ), kwargs=kwargs)
        pf.start()
    else:
        print "{0} does not exist, ignoring the period ...".format(in_folder)
    #do current and future climates in parallel
    if pc is not None:
        pc.join()
    if pf is not None:
        pf.join()
def test_litmus_with_authentication(self):
    """Run the litmus test suite against the WebDAV server over HTTP
    with authentication.

    This test passes.
    """
    try:
        # Launch the WebDAV server in a daemon child process.
        proc = Process(target=run_wsgidav_server, args=(True, False))
        proc.daemon = True
        proc.start()
        # Allow the server to bind its port before litmus connects.
        time.sleep(1)
        try:
            # litmus returns exit status 0 when the whole suite passes.
            self.assertEqual(
                subprocess.call([
                    "litmus", "http://127.0.0.1:8080/", "tester", "secret"
                ]), 0, "litmus suite failed: check the log")
        except OSError:
            # litmus binary missing from PATH — point the user at it.
            print "*" * 70
            print "This test requires the litmus test suite."
            print "See http://www.webdav.org/neon/litmus/"
            print "*" * 70
            raise
    finally:
        # Tear down the server regardless of the test outcome.
        proc.terminate()
        proc.join()
def test_mcdpweb_server(dirname):
    """Smoke-test the mcdp-web server on a random local port:
    check that an unknown URL raises 404, mirror the whole site with
    wget, then ask the server to exit cleanly via its /exit endpoint.
    """
    # Random port to avoid collisions with concurrent test runs.
    port = random.randint(11000, 15000)
    base = 'http://127.0.0.1:%s' % port
    p = Process(target=start_server, args=(
        dirname, port,
    ))
    p.start()
    print('sleeping')
    # Give the server time to come up before hitting it.
    time.sleep(5)
    try:
        url_wrong = base + '/not-existing'
        urllib2.urlopen(url_wrong).read()
    except HTTPError:
        # Expected: the bogus URL must 404.
        pass
    else:
        raise Exception('Expected 404')
    # now run the spider
    tmpdir = tempfile.mkdtemp(prefix='wget-output')
    cwd = '.'
    # Mirror (-m) the whole site so every internal link gets exercised.
    cmd = ['wget', '-nv', '-P', tmpdir, '-m', base]
    # res = system_cmd_result(
    #     cwd, cmd,
    #     display_stdout=True,
    #     display_stderr=True,
    #     raise_on_error=True)
    sub = subprocess.Popen(cmd, bufsize=0, cwd=cwd)
    sub.wait()
    # The first recorded exception must be the deliberate 404 above;
    # anything after it is a real server-side error.
    exc = get_exceptions(port)
    if len(exc) == 0:
        msg = 'Expected at least a not-found error'
        raise Exception(msg)
    if not 'not-existing' in exc[0]:
        raise Exception('Could not find 404 error')
    exc = exc[1:]
    if exc:
        msg = 'Execution raised errors:\n\n'
        msg += str("\n---\n".join(exc))
        raise_desc(Exception, msg)
    # Ask the server to shut itself down, then reap the process.
    url_exit = base + '/exit'
    urllib2.urlopen(url_exit).read()
    print('waiting for start_server() process to exit...')
    p.join()
    print('...clean exit')
def test_mcdpweb_server(dirname):
    """Smoke-test the mcdp-web server: expect 404 on a bogus URL,
    spider the site with wget, then shut the server down via /exit.
    """
    # Random port to avoid collisions between concurrent test runs.
    port = random.randint(11000, 15000)
    base = 'http://127.0.0.1:%s' % port
    p = Process(target=start_server, args=(dirname, port,))
    p.start()
    print('sleeping')
    # Give the server time to come up before issuing requests.
    time.sleep(5)
    try:
        url_wrong = base + '/not-existing'
        urllib2.urlopen(url_wrong).read()
    except HTTPError:
        # Expected: the bogus URL must 404.
        pass
    else:
        raise Exception('Expected 404')
    # now run the spider
    tmpdir = tempfile.mkdtemp(prefix='wget-output')
    cwd = '.'
    # Mirror (-m) the site so every reachable page is requested.
    cmd = ['wget', '-nv', '-P', tmpdir, '-m', base]
    # res = system_cmd_result(
    #     cwd, cmd,
    #     display_stdout=True,
    #     display_stderr=True,
    #     raise_on_error=True)
    sub = subprocess.Popen(
        cmd,
        bufsize=0,
        cwd=cwd)
    sub.wait()
    # First recorded exception must be our deliberate 404; the rest
    # are genuine server errors and fail the test.
    exc = get_exceptions(port)
    if len(exc) == 0:
        msg = 'Expected at least a not-found error'
        raise Exception(msg)
    if not 'not-existing' in exc[0]:
        raise Exception('Could not find 404 error')
    exc = exc[1:]
    if exc:
        msg = 'Execution raised errors:\n\n'
        msg += str("\n---\n".join(exc))
        raise_desc(Exception, msg)
    # Ask the server to exit, then reap the child process.
    url_exit = base + '/exit'
    urllib2.urlopen(url_exit).read()
    print('waiting for start_server() process to exit...')
    p.join()
    print('...clean exit')
class Downloader(object):
    """Fetch pages in a killable child process.

    Each fetch runs ``get_page_data`` in its own multiprocessing
    ``Process`` so a hung download can be terminated after ``timeout``
    seconds; results come back through a ``SyncManager`` shared dict.
    """

    def __init__(self, timeout=30, retries=100, wait=1):
        # timeout: seconds to wait for one fetch before killing it.
        self.timeout = timeout
        # retries: max attempts in retry_fetch_data().
        self.retries = retries
        # wait: polite delay (seconds) after every fetch.
        self.wait = wait
        self.manager = SyncManager()
        self.manager.start()

    def retry_fetch_data(self, url):
        """Fetch *url*, retrying up to self.retries times on empty results."""
        market_data = self.fetch_data(url)
        retries = 1
        while not market_data and retries < self.retries:
            print "Retry #%s..." % str(retries)
            market_data = self.fetch_data(url)
            if market_data:
                print "Fetched: " + str(len(market_data))
            else:
                print "Fetched nothing!"
            retries += 1
        return market_data

    def fetch_data(self, url):
        """Run one fetch of *url* in a child process; return the page data
        or None if the process timed out or produced nothing."""
        limit = 60
        # Truncate very long URLs in the progress message.
        msg = "Downloading " + url[0: min(limit, len(url))]
        if len(url) > limit:
            msg += "(+" + str(len(url) - limit) + ")"
        print msg
        # Shared dict lets the child hand the page back to this process.
        return_dict = self.manager.dict()
        self.job = Process(target=get_page_data, args=(url, return_dict))
        self.job.start()
        self.job.join(self.timeout)
        if self.job.is_alive():
            # Fetch hung — kill it; return_dict stays empty.
            self.job.terminate()
            self.job = None
        market_data = None
        if 'page' in return_dict:
            market_data = return_dict['page']
        if self.wait > 0:
            # Throttle between requests.
            time.sleep(self.wait)
        return market_data
def set_from_file(self, varfile_path, args):
    """Parse a variable file in a child process and apply each result.

    The module-level ``set_from_file`` worker pushes parsed variable
    tuples onto a queue; a single-element result carries an error
    message. Raises DataError on a worker error or when the file
    yielded no variables at all.
    """
    channel = Queue()
    worker = Process(target=set_from_file, args=(channel, varfile_path, args))
    worker.start()
    worker.join()
    got_any = False
    while True:
        try:
            item = channel.get_nowait()
        except Empty:
            # Queue drained: an empty run means the file had no variables.
            if not got_any:
                raise DataError('No variables')
            return
        got_any = True
        if len(item) == 1:
            # One-element results are error messages from the worker.
            raise DataError(item[0])
        self.set(*item)
def prepare_proxies(configdata):
    """Fetch and validate free proxies when the config selects the
    'free proxy' source type (value u'2'); otherwise do nothing.

    fetch_proxy writes candidates to proxy.txt; valid_proxy is given
    PROXY_VALID_TIME seconds to filter them into enable_proxies.txt
    before being terminated.
    """
    # Only run when the configured proxy source type is u'2'.
    if configdata[const.PROXY_CONFIG].get(const.PROXY_CONFIG_SOURCE_TYPE, u'1') != u'2':
        return
    # Fetch the raw proxy list synchronously (join waits for completion).
    p = Process(group=None, target=fetch_proxy,)
    p.start()
    p.join()
    print u'%s get %d free proxy' % (datetime.datetime.now(), len(open(u'proxy.txt', u'r').readlines()))
    # Validate candidates in the background for a bounded amount of time.
    c = Process(group=None, target=valid_proxy,)
    c.start()
    valid_time = int(configdata[const.PROXY_CONFIG].get(const.PROXY_VALID_TIME))
    print u'%s following %d seconds will valid the proxy' % (datetime.datetime.now(), valid_time)
    time.sleep(valid_time)
    # Hard-stop validation after the time budget expires.
    c.terminate()
    print u'%s get %d effective proxy' % (datetime.datetime.now(), len(open(u'enable_proxies.txt', u'r').readlines()))
def process_mongo(args, output_path): # connects to the MongoDB server if args.port: connection = Connection(args.address, args.port) else: connection = Connection(args.address) # gets the DB db = connection[args.db_name] # gets the collection collec = db[args.collection] # sets the number of documents to be processed by each thread docs_num = collec.count() slice_size = docs_num / args.threads print "Threads:", args.threads print "Documents number:", docs_num print "Documents per thread:", slice_size # initiates a thread for each slice of documents # the slices are controlled using the base and offset variables base = 0 offset = slice_size jobs = [] for thread_num in range(args.threads): print "Initializing process", thread_num p = Process(target=lemmatize_slice, args=(collec, base, offset, args, output_path)) jobs.append(p) p.start() base += offset for p in jobs: p.join() if (docs_num % 2) == 1: lemmatize_slice(collec, base, offset, args, output_path)
class SmtpMessageServer(object):
    """
    This class can start an SMTP debugging server,
    configure LinOTP to talk to it
    and read the results back to the parent tester.

    On open, an SMTP server is set up to listen locally.
    Derived classes can define a hook to set the LinOTP
    configuration to point to this server.

    Example usage:

    with SmtpMessageServer(testcase) as smtp:
        get_otp()
    """

    def __init__(self, testcase, message_timeout):
        # testcase: the driving integration-test case, used for HTTP
        # connection details and version gating.
        self.testcase = testcase

        # We need a minimum version of 2.9.2 to set the SMTP port number, so
        # skip if testing an earlier version
        self.testcase.need_linotp_version('2.9.2')

        # message_timeout: seconds the SMTP child waits for a message.
        self.timeout = message_timeout

        self.set_config = SetConfig(testcase.http_protocol,
                                    testcase.http_host,
                                    testcase.http_port,
                                    testcase.http_username,
                                    testcase.http_password)

        # We advertise the local SMTP server hostname
        # using the IP address that connects to LinOTP
        self.addr = self._get_local_ip()
        self.msg_payload = None

    def __enter__(self):
        """Start the SMTP child process and point LinOTP at it."""
        self.smtp_process_queue = Queue()
        # Child picks a free port and reports it back on the queue.
        self.smtp_process = Process(target=get_otp_mail,
                                    args=(self.smtp_process_queue, self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_lintop_config()
        return self

    def _do_lintop_config(self):
        # Push the subclass-provided parameters into LinOTP's config.
        parameters = self.get_config_parameters()
        logger.debug("Configuration parameters: %s", parameters)
        result = self.set_config.setConfig(parameters)
        assert result, "It was not possible to set the config. \
Result:%s" % result

    def get_config_parameters(self):
        # This function can be overridden to provide configuration parameters to configure
        # specific parts of LinOTP
        assert False, "This function should be overridden"

    def get_otp(self):
        """Block (up to 10 s) for the next mail and return its payload (the OTP)."""
        messagestr = self.smtp_process_queue.get(True, 10)
        msg = email.message_from_string(messagestr)
        otp = msg.get_payload()
        logger.debug("Received email message payload:%s", otp)
        return otp

    def __exit__(self, *args):
        # Tear down queue and child; bounded join avoids hanging the tests.
        self.smtp_process_queue.close()
        self.smtp_process.terminate()
        self.smtp_process.join(5)

    def _get_local_ip(self):
        """
        Get the IP address of the interface that connects to
        LinOTP
        """
        with closing(
                socket.create_connection(
                    (self.testcase.http_host, int(self.testcase.http_port)),
                    10)) as s:
            addr = s.getsockname()[0]
        return addr
class AuthorizationCodeTestCase(unittest.TestCase):
    """End-to-end OAuth2 authorization-code flow: a provider and a client
    application run in separate processes; the test drives the token
    request and a refresh-token exchange over HTTP."""

    def setUp(self):
        self.client = None
        self.provider = None

    def test_request_access_token(self):
        def run_provider(queue):
            # Child process: WSGI OAuth2 provider on port 15486.
            # Reports readiness (or the failure message) via `queue`.
            try:
                redirect_uri = "http://127.0.0.1:15487/callback"

                stores = store_factory(client_identifier="abc",
                                       client_secret="xyz",
                                       redirect_uris=[redirect_uri])

                provider = Provider(
                    access_token_store=stores["access_token_store"],
                    auth_code_store=stores["auth_code_store"],
                    client_store=stores["client_store"],
                    site_adapter=TestSiteAdapter(),
                    token_generator=Uuid4())
                provider.add_grant(AuthorizationCodeGrant(expires_in=120))
                provider.add_grant(RefreshToken(expires_in=60))

                app = Wsgi(server=provider)

                httpd = make_server('', 15486, app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        def run_client(queue):
            # Child process: client application on port 15487.
            try:
                app = ClientApplication(
                    callback_url="http://127.0.0.1:15487/callback",
                    client_id="abc",
                    client_secret="xyz",
                    provider_url="http://127.0.0.1:15486")

                httpd = make_server('', 15487, app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        uuid_regex = "^[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}-[a-z0-9]{12}$"

        ready_queue = Queue()

        self.provider = Process(target=run_provider, args=(ready_queue, ))
        self.provider.start()

        # Block until the provider reports it is serving.
        provider_started = ready_queue.get()

        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        self.client = Process(target=run_client, args=(ready_queue, ))
        self.client.start()

        client_started = ready_queue.get()

        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'".format(
                                client_started["error_message"]))

        # Drive the authorization-code flow through the client app.
        access_token_result = urlopen("http://127.0.0.1:15487/app").read()
        access_token_data = \
            json.loads(access_token_result.decode('utf-8'))

        self.assertEqual(access_token_data["token_type"], "Bearer")
        self.assertEqual(access_token_data["expires_in"], 120)
        self.assertRegexpMatches(access_token_data["access_token"],
                                 uuid_regex)
        self.assertRegexpMatches(access_token_data["refresh_token"],
                                 uuid_regex)

        # Exchange the refresh token directly against the provider.
        request_data = {
            "grant_type": "refresh_token",
            "refresh_token": access_token_data["refresh_token"],
            "client_id": "abc",
            "client_secret": "xyz"
        }

        refresh_token_result = urlopen("http://127.0.0.1:15486/token",
                                       urlencode(request_data).encode('utf-8'))
        refresh_token_data = json.loads(
            refresh_token_result.read().decode('utf-8'))

        self.assertEqual(refresh_token_data["token_type"], "Bearer")
        self.assertEqual(refresh_token_data["expires_in"], 120)
        self.assertRegexpMatches(refresh_token_data["access_token"],
                                 uuid_regex)

    def tearDown(self):
        # Kill whichever child processes were actually started.
        if self.client is not None:
            self.client.terminate()
            self.client.join()

        if self.provider is not None:
            self.provider.terminate()
            self.provider.join()
class AuthorizationCodeTestCase(unittest.TestCase):
    """End-to-end OAuth2 authorization-code flow with provider and client
    application in separate processes, including a refresh-token exchange."""

    def setUp(self):
        self.client = None
        self.provider = None

    def test_request_access_token(self):
        def run_provider(queue):
            # Child process: WSGI OAuth2 provider on port 15486; signals
            # readiness (or failure) through `queue`.
            try:
                redirect_uri = "http://127.0.0.1:15487/callback"

                stores = store_factory(client_identifier="abc",
                                       client_secret="xyz",
                                       redirect_uris=[redirect_uri])

                provider = Provider(access_token_store=stores["access_token_store"],
                                    auth_code_store=stores["auth_code_store"],
                                    client_store=stores["client_store"],
                                    site_adapter=TestSiteAdapter(),
                                    token_generator=Uuid4())
                provider.add_grant(AuthorizationCodeGrant(expires_in=120))
                provider.add_grant(RefreshToken(expires_in=60))

                app = Wsgi(server=provider)

                httpd = make_server('', 15486, app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        def run_client(queue):
            # Child process: client application on port 15487.
            try:
                app = ClientApplication(
                    callback_url="http://127.0.0.1:15487/callback",
                    client_id="abc",
                    client_secret="xyz",
                    provider_url="http://127.0.0.1:15486")

                httpd = make_server('', 15487, app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        uuid_regex = "^[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}-[a-z0-9]{12}$"

        ready_queue = Queue()

        self.provider = Process(target=run_provider, args=(ready_queue,))
        self.provider.start()

        # Wait until the provider confirms it is serving.
        provider_started = ready_queue.get()

        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        self.client = Process(target=run_client, args=(ready_queue,))
        self.client.start()

        client_started = ready_queue.get()

        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'"
                            .format(client_started["error_message"]))

        # Drive the authorization-code flow through the client app.
        access_token_result = urlopen("http://127.0.0.1:15487/app").read()
        access_token_data = \
            json.loads(access_token_result.decode('utf-8'))

        self.assertEqual(access_token_data["token_type"], "Bearer")
        self.assertEqual(access_token_data["expires_in"], 120)
        self.assertRegexpMatches(access_token_data["access_token"],
                                 uuid_regex)
        self.assertRegexpMatches(access_token_data["refresh_token"],
                                 uuid_regex)

        # Exchange the refresh token directly against the provider.
        request_data = {"grant_type": "refresh_token",
                        "refresh_token": access_token_data["refresh_token"],
                        "client_id": "abc",
                        "client_secret": "xyz"}

        refresh_token_result = urlopen(
            "http://127.0.0.1:15486/token",
            urlencode(request_data).encode('utf-8')
        )
        refresh_token_data = json.loads(refresh_token_result.read().decode('utf-8'))

        self.assertEqual(refresh_token_data["token_type"], "Bearer")
        self.assertEqual(refresh_token_data["expires_in"], 120)
        self.assertRegexpMatches(refresh_token_data["access_token"],
                                 uuid_regex)

    def tearDown(self):
        # Terminate whichever child processes were actually started.
        if self.client is not None:
            self.client.terminate()
            self.client.join()

        if self.provider is not None:
            self.provider.terminate()
            self.provider.join()
class SmtpMessageServer(object):
    """
    This class can start an SMTP debugging server,
    configure LinOTP to talk to it
    and read the results back to the parent tester.

    On open, an SMTP server is set up to listen locally.
    Derived classes can define a hook to set the LinOTP
    configuration to point to this server.

    Example usage:

    with SmtpMessageServer(testcase) as smtp:
        get_otp()
    """

    def __init__(self, testcase, message_timeout):
        # testcase: the driving integration-test case (HTTP details,
        # version gating).
        self.testcase = testcase

        # We need a minimum version of 2.9.2 to set the SMTP port number, so
        # skip if testing an earlier version
        self.testcase.need_linotp_version('2.9.2')

        # message_timeout: seconds the SMTP child waits for a message.
        self.timeout = message_timeout

        self.set_config = SetConfig(testcase.http_protocol,
                                    testcase.http_host,
                                    testcase.http_port,
                                    testcase.http_username,
                                    testcase.http_password)

        # We advertise the local SMTP server hostname
        # using the IP address that connects to LinOTP
        self.addr = self._get_local_ip()
        self.msg_payload = None

    def __enter__(self):
        """Start the SMTP child process and point LinOTP at it."""
        self.smtp_process_queue = Queue()
        # Child picks a free port and reports it back via the queue.
        self.smtp_process = Process(
            target=get_otp_mail,
            args=(self.smtp_process_queue, self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_lintop_config()
        return self

    def _do_lintop_config(self):
        # Push the subclass-provided parameters into LinOTP's config.
        parameters = self.get_config_parameters()
        logger.debug("Configuration parameters: %s", parameters)
        result = self.set_config.setConfig(parameters)
        assert result, "It was not possible to set the config. \
Result:%s" % result

    def get_config_parameters(self):
        # This function can be overridden to provide configuration parameters to configure
        # specific parts of LinOTP
        assert False, "This function should be overridden"

    def get_otp(self):
        """Block (up to 10 s) for the next mail and return its payload (the OTP)."""
        messagestr = self.smtp_process_queue.get(True, 10)
        msg = email.message_from_string(messagestr)
        otp = msg.get_payload()
        logger.debug("Received email message payload:%s", otp)
        return otp

    def __exit__(self, *args):
        # Tear down queue and child; bounded join avoids hanging the tests.
        self.smtp_process_queue.close()
        self.smtp_process.terminate()
        self.smtp_process.join(5)

    def _get_local_ip(self):
        """
        Get the IP address of the interface that connects to
        LinOTP
        """
        with closing(socket.create_connection((self.testcase.http_host,
                                               int(self.testcase.http_port)),
                                              10)) as s:
            addr = s.getsockname()[0]
        return addr
class Fetcher(object):
    """Abstract message fetcher that runs in its own process and feeds
    messages to a list of Worker instances chosen by ``choose()``."""
    __metaclass__ = ABCMeta

    def __init__(self, worker_list):
        # Local import avoids a circular import with the worker module.
        from .worker import Worker
        self.worker_list = worker_list
        assert isinstance(self.worker_list, list)
        for worker in self.worker_list:
            assert isinstance(worker, Worker)
        # run_forever() is executed in a dedicated child process.
        self.process = Process(target=self.run_forever, args=())
        self.running = False

    def start(self):
        self.process.start()

    @abstractmethod
    def shutdown(self):
        # Handle process shutdown.
        self.running = False

    def setup_shutdown(self):
        """
        Install SIGTERM/SIGINT handlers for a graceful exit.
        :return:
        """
        import signal

        def on_sigterm(*ignore):
            self.shutdown()

        signal.signal(signal.SIGTERM, on_sigterm)
        signal.signal(signal.SIGINT, on_sigterm)

    @abstractmethod
    def choose(self, msg):
        """
        Pick the index of the worker that should handle this message,
        e.g. hash(msg) % len(self.worker_list)
        :param msg:
        :return:
        """
        pass

    def do_feed(self, msg):
        """Deliver *msg* to the chosen worker, retrying while its queue is full."""
        index = self.choose(msg)
        assert isinstance(index, int) and index < len(self.worker_list)
        while True:
            try:
                self.worker_list[index].feed(msg)
            except Full:
                p = self.worker_list[index].process
                assert isinstance(p, Process)
                # Queue is full — log and retry the send.
                logging.error(
                    "pid:%s worker is full. Please check the Thread blocking situation." % str(p.pid))
                continue
            break

    @abstractmethod
    def run_forever(self):
        """
        Start the fetch loop (runs inside the child process).
        :return:
        """
        self.running = True

    def join(self):
        self.process.join()
class WorkerThread(threading.Thread):
    """Load-generator thread: pushes generated documents into memcached
    (directly vbucket-aware or via moxi), with optional expiry/delete
    mixes, error backoff, and an optional companion reader process."""

    # two flags : stop after x errors
    # slow down after every seeing y errors
    # value_list is a list of document generators
    def __init__(
        self,
        serverInfo,
        name,
        values_list,
        ignore_how_many_errors=5000,
        override_vBucketId=-1,
        terminate_in_minutes=120,
        write_only=False,
        moxi=True,
        async_write=False,
        delete_ratio=0,
        expiry_ratio=0,
    ):
        threading.Thread.__init__(self)
        self.log = logger.Logger.get_logger()
        self.serverInfo = serverInfo
        self.name = name
        # Deep copies so this thread can consume values_list destructively
        # while keeping a pristine copy for key reconstruction.
        self.values_list = []
        self.values_list.extend(copy.deepcopy(values_list))
        self._value_list_copy = []
        self._value_list_copy.extend(copy.deepcopy(values_list))
        self._inserted_keys_count = 0
        self._rejected_keys = []
        self._rejected_keys_count = 0
        self._delete_ratio = delete_ratio
        self._expiry_ratio = expiry_ratio
        self._delete_count = 0
        self._expiry_count = 0
        self._delete = []
        self.ignore_how_many_errors = ignore_how_many_errors
        self.override_vBucketId = override_vBucketId
        self.terminate_in_minutes = terminate_in_minutes
        self._base_uuid = uuid.uuid4()
        self.queue = Queue()
        self.moxi = moxi
        # let's create a read_thread
        self.info = {"server": serverInfo,
                     "name": self.name,
                     "baseuuid": self._base_uuid}
        self.write_only = write_only
        self.aborted = False
        self.async_write = async_write

    def inserted_keys_count(self):
        return self._inserted_keys_count

    def rejected_keys_count(self):
        return self._rejected_keys_count

    # smart functin that gives you sth you can use to
    # get inserted keys
    # we should just expose an iterator instead which
    # generates the key,values on fly
    def keys_set(self):
        # let's construct the inserted keys set
        # TODO: hard limit , let's only populated up to 1 million keys
        inserted_keys = []
        for item in self._value_list_copy:
            for i in range(0, (int(item["how_many"]))):
                key = "{0}-{1}-{2}".format(self._base_uuid, item["size"], i)
                if key not in self._rejected_keys:
                    inserted_keys.append(key)
                if len(inserted_keys) > 2 * 1024 * 1024:
                    break
        return inserted_keys, self._rejected_keys

    def run(self):
        """Main load loop: set keys until values_list is drained, the time
        budget expires, the thread is aborted, or too many keys fail."""
        msg = "starting a thread to set keys mixed set-get ? {0} and using async_set ? {1}"
        msg += " with moxi ? {2}"
        msg = msg.format(self.write_only, self.async_write, self.moxi)
        self.log.info(msg)
        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
        client = None
        if self.moxi:
            try:
                client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
            except Exception as ex:
                self.log.info("unable to create memcached client due to {0}. stop thread...".format(ex))
                import traceback
                traceback.print_exc()
                return
        # keeping keys in the memory is not such a good idea because
        # we run out of memory so best is to just keep a counter ?
        # if someone asks for the keys we can give them the formula which is
        # baseuuid-{0}-{1} , size and counter , which is between n-0 except those
        # keys which were rejected
        # let's print out some status every 5 minutes..
        if not self.write_only:
            # Companion reader process exercises gets alongside the sets.
            self.reader = Process(target=start_reader_process,
                                  args=(self.info, self._value_list_copy, self.queue))
            self.reader.start()
        start_time = time.time()
        last_reported = start_time
        backoff_count = 0
        while len(self.values_list) > 0 and not self.aborted:
            selected = MemcachedClientHelper.random_pick(self.values_list)
            selected["how_many"] -= 1
            if selected["how_many"] < 1:
                self.values_list.remove(selected)
            if (time.time() - start_time) > self.terminate_in_minutes * 60:
                self.log.info(
                    "its been more than {0} minutes loading data. stopping the process..".format(
                        self.terminate_in_minutes
                    )
                )
                break
            else:
                # every two minutes print the status
                if time.time() - last_reported > 2 * 60:
                    if not self.moxi:
                        awareness.done()
                        try:
                            awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                        except Exception:
                            # vbucket map is changing; sleep 5 seconds and retry
                            time.sleep(5)
                            awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                        self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
                    last_reported = time.time()
                    for item in self.values_list:
                        self.log.info(
                            "{0} keys (each {1} bytes) more to send...".format(item["how_many"], item["size"])
                        )
            key = "{0}-{1}-{2}".format(self._base_uuid, selected["size"], int(selected["how_many"]))
            if not self.moxi:
                client = awareness.memcached(key)
                if not client:
                    self.log.error("client should not be null")
            value = "*"
            try:
                value = selected["value"].next()
            except StopIteration:
                pass
            try:
                if self.override_vBucketId >= 0:
                    client.vbucketId = self.override_vBucketId
                if self.async_write:
                    client.send_set(key, 0, 0, value)
                else:
                    client.set(key, 0, 0, value)
                self._inserted_keys_count += 1
                backoff_count = 0
                # do expiry sets, 30 second expiry time
                if Random().random() < self._expiry_ratio:
                    client.set(key + "-exp", 30, 0, value)
                    self._expiry_count += 1
                # do deletes if we have 100 pending
                # at the end delete the remaining
                if len(self._delete) >= 100:
                    # self.log.info("deleting {0} keys".format(len(self._delete)))
                    for key_del in self._delete:
                        client.delete(key_del)
                    self._delete = []
                # do delete sets
                if Random().random() < self._delete_ratio:
                    client.set(key + "-del", 0, 0, value)
                    self._delete.append(key + "-del")
                    self._delete_count += 1
            except MemcachedError as error:
                if not self.moxi:
                    # Rebuild topology awareness; the error may mean the
                    # vbucket map moved under us.
                    awareness.done()
                    try:
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    except Exception:
                        # vbucket map is changing; sleep 5 seconds and retry
                        time.sleep(5)
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
                if isinstance(self.serverInfo, dict):
                    self.log.error(
                        "memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo["ip"])
                    )
                else:
                    self.log.error(
                        "memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo.ip)
                    )
                if error.status == 134:
                    # Status 134: back off progressively before retrying.
                    backoff_count += 1
                    if backoff_count < 5:
                        backoff_seconds = 15 * backoff_count
                    else:
                        backoff_seconds = 2 * backoff_count
                    self.log.info("received error # 134. backing off for {0} sec".format(backoff_seconds))
                    time.sleep(backoff_seconds)
                self._rejected_keys_count += 1
                self._rejected_keys.append({"key": key, "value": value})
                if len(self._rejected_keys) > self.ignore_how_many_errors:
                    break
            except Exception as ex:
                if not self.moxi:
                    awareness.done()
                    try:
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    except Exception:
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
                if isinstance(self.serverInfo, dict):
                    self.log.error("error {0} from {1}".format(ex, self.serverInfo["ip"]))
                    import traceback
                    traceback.print_exc()
                else:
                    self.log.error("error {0} from {1}".format(ex, self.serverInfo.ip))
                self._rejected_keys_count += 1
                self._rejected_keys.append({"key": key, "value": value})
                if len(self._rejected_keys) > self.ignore_how_many_errors:
                    break
        # before closing the session let's try sending those items again
        retry = 3
        while retry > 0 and self._rejected_keys_count > 0:
            rejected_after_retry = []
            self._rejected_keys_count = 0
            for item in self._rejected_keys:
                try:
                    if self.override_vBucketId >= 0:
                        client.vbucketId = self.override_vBucketId
                    if self.async_write:
                        client.send_set(item["key"], 0, 0, item["value"])
                    else:
                        client.set(item["key"], 0, 0, item["value"])
                    self._inserted_keys_count += 1
                except MemcachedError:
                    self._rejected_keys_count += 1
                    rejected_after_retry.append({"key": item["key"], "value": item["value"]})
                    if len(rejected_after_retry) > self.ignore_how_many_errors:
                        break
            self._rejected_keys = rejected_after_retry
            # NOTE(review): retry is set to -1 after one pass, so the
            # while condition permits only a single retry round — confirm
            # this is intended.
            retry = -1
        # clean up the rest of the deleted keys
        if len(self._delete) > 0:
            # self.log.info("deleting {0} keys".format(len(self._delete)))
            for key_del in self._delete:
                client.delete(key_del)
            self._delete = []
        self.log.info("deleted {0} keys".format(self._delete_count))
        self.log.info("expiry {0} keys".format(self._expiry_count))
        # client.close()
        awareness.done()
        if not self.write_only:
            # Tell the reader process to stop and wait for it.
            self.queue.put_nowait("stop")
            self.reader.join()

    def _initialize_memcached(self):
        pass

    def _set(self):
        pass

    def _handle_error(self):
        pass
        # if error is memcached error oom related let's do a sleep

    def _time_to_stop(self):
        return self.aborted or len(self._rejected_keys) > self.ignore_how_many_errors
class Worker(_th.Thread):
    """Named singleton worker: a daemon thread that forwards tasks from a
    local queue over pipes to a companion child process (running the
    module-level ``process`` target) and stores results back into the
    dict handed out by ``put()``. Instances are cached in ``_workers``
    by name."""

    def __new__(cls, name=None):
        if name is None:
            name = 'default'
        # Reuse the existing worker registered under this name, if any.
        if name in _workers.keys():
            return _workers[name]
        return super(Worker, cls).__new__(cls)

    def __init__(self, name=None):
        if name is None:
            name = 'default'
        # Already-initialized singleton: skip re-initialization.
        if name in _workers.keys():
            return
        _workers[name] = self
        super(Worker, self).__init__()
        self.daemon = True
        self.name = name
        # Size-1 queue: producers block until the previous task is taken.
        self._queue = _ver.queue.Queue(1)
        self.last_exception = None
        # Shared flag keeping the child process loop alive.
        self._pon = _mp.Value('b', True)
        # One pipe for sending tasks, one for receiving results.
        tsk, self.task = _mp.Pipe(False)
        self.out, res = _mp.Pipe(False)
        self.process = Process(target=process, args=(self._pon, tsk, res), name=name)
        self.process.start()
        self._on = True
        self.start()
        _time.sleep(1)

    def run(self):
        """Thread loop: pull tasks off the queue, ship them to the child
        process, and record the result (or exception) in the result dict."""
        _sup.debug('%s started' % (str(self.name),))
        while self._on or not self._queue.empty():
            try:
                result, target, args, kwargs = self._queue.get(True, .1)
                _sup.debug('%s: %s-task received' % (str(self.name), target.__name__))
                self.task.send((target, args, kwargs))
                res = self.out.recv()
                # Replace the placeholder entry with the real result.
                del(result[self.name])
                _sup.debug(res)
                result[target.__name__] = res
                _sup.debug('%s: %s-task done' % (str(self.name), target.__name__))
                self._queue.task_done()
            except _ver.queue.Empty:
                # Timed-out get: loop again so shutdown can be observed.
                continue
            except KeyboardInterrupt as ki:
                raise ki
            except Exception as exc:
                _sup.debug('%s: %s' % (str(self.name), str(exc)), 0)
                # Surface the exception through the caller's result dict.
                if result is not None:
                    result[self.name] = exc
                self.last_exception = exc
        _sup.debug('%s: done!' % (str(self.name),))
        # Stop the child process loop and unregister this worker.
        self._pon.value = False
        del(_workers[self.name])

    def join(self):
        """Drain the queue, stop the thread, then stop and reap the child process."""
        self._on = False
        self._queue.join()
        super(Worker, self).join()
        self._pon.value = False
        self.process.join()

    def put(self, target, *args, **kwargs):
        """Queue *target* for execution; returns a dict that will later be
        keyed by target.__name__ with the result (placeholder until then)."""
        result = {self.name: target.__name__}
        self._queue.put((result, target, args, kwargs))
        _time.sleep(.1)
        return result
def join(self, timeout=None, close=True):
    """Close the file and join the thread."""
    if close:
        # Sentinel telling the consumer loop to stop reading.
        self._queue.put(StopIteration)
    # NOTE(review): the fd is closed even when close=False — confirm
    # that is intended by callers.
    self._fd.close()
    Process.join(self, timeout)
def execute(): # Eventually consistent trick p = Process(target=_update_library_keywords, args=(path, args)) p.start() p.join() print 'should refresh database'
self.assertTrue(bool(signal.result)) self.assertEqual(signal.result.a, 1) signal.result.a = 2 self.assertEqual(signal.result.a, 2) self.assertTrue('a' in signal.result) self.assertFalse('b' in signal.result) def test_error(self): signal = TestErrorSignal({'a': 2}) self.assertFalse(bool(signal.result)) self.assertEqual(signal.result.error, {'a': 2}) self.assertRaises(AttributeError, lambda: signal.result.a) def test_no_handler(self): client = Client('127.0.0.1', 1234) res = client.send('signal_not_exists', {'a': 1}) self.assertFalse(bool(res)) self.assertIn('signal_not_exists', res.error) self.assertRaises(AttributeError, lambda: res.a) if __name__ == '__main__': server = Process(target=start_server, args=('127.0.0.1', 1234, 100)) tests = Process(target=unittest.main) server.start() tests.start() server.join() time.sleep(2) tests.join()
def join(self, timeout=None, close=True):
    """Close the file and join the thread."""
    if close:
        # Sentinel telling the consumer loop to stop reading.
        self._queue.put(StopIteration)
    # NOTE(review): the fd is closed even when close=False — confirm
    # that is intended by callers.
    self._fd.close()
    Process.join(self, timeout)
class AuthorizationCodeTestCase(unittest.TestCase):
    """End-to-end OAuth2 authorization-code flow tests.

    Each test spawns the provider (and usually a client app) in separate
    processes on fixed local ports, synchronizing startup through a
    multiprocessing Queue; tearDown terminates whatever was started.
    """

    def setUp(self):
        self.client = None
        self.server = None

    def test_tornado(self):
        def run_provider(queue):
            # Runs in a child process; reports startup status via *queue*.
            try:
                provider = create_provider()

                app = TornadoApplication([
                    url(r"/authorize", OAuth2Handler, dict(provider=provider)),
                    url(r"/token", OAuth2Handler, dict(provider=provider))
                ], debug=True)
                app.listen(15486)
                queue.put({"result": 0})
                IOLoop.current().start()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        ready_queue = Queue()

        self.server = Process(target=run_provider, args=(ready_queue,))
        self.server.start()

        # Block until the provider reports that it is listening.
        provider_started = ready_queue.get()
        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        self.client = Process(target=run_client, args=(ready_queue,))
        self.client.start()

        client_started = ready_queue.get()
        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'"
                            .format(client_started["error_message"]))

        self.access_token()

    def test_wsgi(self):
        def run_provider(queue):
            # WSGI variant of the provider child process.
            try:
                provider = create_provider()

                app = Application(provider=provider)

                httpd = make_server('', 15486, app,
                                    handler_class=NoLoggingHandler)
                queue.put({"result": 0})
                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        ready_queue = Queue()

        self.server = Process(target=run_provider, args=(ready_queue,))
        self.server.start()

        provider_started = ready_queue.get()
        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        self.client = Process(target=run_client, args=(ready_queue,))
        self.client.start()

        client_started = ready_queue.get()
        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'"
                            .format(client_started["error_message"]))

        self.access_token()

    def test_wsgi_404(self):
        def run_provider(queue):
            try:
                provider = create_provider()

                app = Application(provider=provider)

                httpd = make_server('', 15486, app,
                                    handler_class=NoLoggingHandler)
                queue.put({"result": 0})
                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        ready_queue = Queue()

        self.server = Process(target=run_provider, args=(ready_queue,))
        self.server.start()

        provider_started = ready_queue.get()
        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        # An unknown path must yield a 404 from the provider app.
        try:
            urlopen("http://127.0.0.1:15486/invalid-path").read()
        except HTTPError as e:
            self.assertEqual(404, e.code)

    def access_token(self):
        """Fetch an access token via the client app, then refresh it."""
        # NOTE(review): should be a raw string; the ``\-`` escapes work
        # only by accident (and the fourth separator is a plain ``-``).
        uuid_regex = "^[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}-[a-z0-9]{12}$"

        # The client application proxies the full authorization-code dance.
        try:
            access_token_result = urlopen("http://127.0.0.1:15487/app").read()
        except HTTPError as e:
            print(e.read())
            exit(1)

        access_token_data = json.loads(access_token_result.decode('utf-8'))

        self.assertEqual(access_token_data["token_type"], "Bearer")
        self.assertEqual(access_token_data["expires_in"], 120)
        self.assertRegexpMatches(access_token_data["access_token"], uuid_regex)
        self.assertRegexpMatches(access_token_data["refresh_token"], uuid_regex)

        # Exchange the refresh token directly against the provider.
        request_data = {"grant_type": "refresh_token",
                        "refresh_token": access_token_data["refresh_token"],
                        "client_id": "abc",
                        "client_secret": "xyz"}

        refresh_token_result = urlopen(
            "http://127.0.0.1:15486/token",
            urlencode(request_data).encode('utf-8')
        )

        refresh_token_data = json.loads(refresh_token_result.read().decode('utf-8'))

        self.assertEqual(refresh_token_data["token_type"], "Bearer")
        self.assertEqual(refresh_token_data["expires_in"], 120)
        self.assertRegexpMatches(refresh_token_data["access_token"], uuid_regex)

    def tearDown(self):
        # Kill whichever child processes a test actually started.
        if self.client is not None:
            self.client.terminate()
            self.client.join()

        if self.server is not None:
            self.server.terminate()
            self.server.join()
            # NOTE(review): the enclosing method's ``def`` and loop header
            # are outside this view; this continues a loop that collects up
            # to *how_many* candidate servers.
            toBeAdded.append(server)
            if len(toBeAdded) == how_many:
                break
        for server in toBeAdded:
            rest.add_node('Administrator', 'password', server.ip)
        # check whether the nodes were actually added?
        nodes = rest.node_statuses()
        otpNodes = [node.id for node in nodes]
        # Rebalance across all known nodes; no nodes are ejected.
        started = rest.rebalance(otpNodes, [])
        msg = "rebalance operation started ? {0}"
        self.log.info(msg.format(started))
        if started:
            # Wait for the rebalance to complete and report its outcome.
            result = rest.monitorRebalance()
            msg = "successfully rebalanced out selected nodes from the cluster ? {0}"
            self.log.info(msg.format(result))
            return result
        return False


if __name__ == "__main__":
    # Drive the load, combo and backup workloads concurrently, then wait
    # for all three to finish.
    process1 = Process(target=start_load, args=(sys.argv,))
    process1.start()
    process2 = Process(target=start_combo, args=(sys.argv,))
    process2.start()
    process3 = Process(target=start_backup, args=(sys.argv,))
    process3.start()
    process1.join()
    process2.join()
    process3.join()
class Worker(_th.Thread):
    """Named singleton thread that relays tasks to a dedicated child process.

    Tasks submitted via ``put`` travel through ``self.task`` (a pipe) to a
    companion Process running ``process``; results return on ``self.out``.
    Instances are cached in the module-level ``_workers`` dict by name, so
    re-constructing with a known name yields the running instance.
    """

    def __new__(cls, name=None):
        # One worker per name: hand back the cached instance if it exists.
        if name is None:
            name = 'default'
        if name in _workers.keys():
            return _workers[name]
        return super(Worker, cls).__new__(cls)

    def __init__(self, name=None):
        if name is None:
            name = 'default'
        # Cached instance returned by __new__ -- skip re-initialization.
        if name in _workers.keys():
            return
        _workers[name] = self
        super(Worker, self).__init__()
        self.daemon = True
        self.name = name
        # Queue of size 1 serializes submissions: put() blocks until the
        # previous task has been taken by run().
        self._queue = _ver.queue.Queue(1)
        self.last_exception = None
        # Shared flag polled by the child process; cleared on shutdown.
        self._pon = _mp.Value('b', True)
        tsk, self.task = _mp.Pipe(False)
        self.out, res = _mp.Pipe(False)
        self.process = Process(target=process,
                               args=(self._pon, tsk, res),
                               name=name)
        self.process.start()
        self._on = True
        self.start()
        # NOTE(review): fixed one-second startup grace period; presumably
        # waiting for the child to be ready -- confirm.
        _time.sleep(1)

    def run(self):
        """Forward queued tasks to the child until stopped and drained."""
        _sup.debug('%s started' % (str(self.name), ))
        while self._on or not self._queue.empty():
            try:
                result, target, args, kwargs = self._queue.get(True, .1)
                _sup.debug('%s: %s-task received' %
                           (str(self.name), target.__name__))
                self.task.send((target, args, kwargs))
                res = self.out.recv()
                # Swap the placeholder entry for the real result.
                del (result[self.name])
                _sup.debug(res)
                result[target.__name__] = res
                _sup.debug('%s: %s-task done' %
                           (str(self.name), target.__name__))
                self._queue.task_done()
            except _ver.queue.Empty:
                continue
            except KeyboardInterrupt as ki:
                raise ki
            except Exception as exc:
                _sup.debug('%s: %s' % (str(self.name), str(exc)), 0)
                # NOTE(review): ``result`` may be unbound if the failure
                # occurred before queue.get assigned it -- verify.
                if result is not None:
                    result[self.name] = exc
                self.last_exception = exc
        _sup.debug('%s: done!' % (str(self.name), ))
        # Signal the child to exit and forget this worker.
        self._pon.value = False
        del (_workers[self.name])

    def join(self):
        """Stop intake, then wait for the queue, the thread and the process."""
        self._on = False
        self._queue.join()
        super(Worker, self).join()
        self._pon.value = False
        self.process.join()

    def put(self, target, *args, **kwargs):
        # Returns a dict that run() later fills with the task's result
        # (or exception) under the target's __name__.
        result = {self.name: target.__name__}
        self._queue.put((result, target, args, kwargs))
        _time.sleep(.1)
        return result