def wait_for_slaves(self, min_slaves, timeout):
    """
    Wait up to `timeout` seconds for the minimum number of slaves to come online

    :param min_slaves: Minimum number of slaves to expect
    :param timeout: Max number of seconds to wait before raising an exception
    :raises: polling.TimeoutException
    :return: The count of slaves currently available
    """
    return polling.poll(
        lambda: len(self.clients.ready),
        check_success=lambda ready: ready >= min_slaves,
        timeout=timeout,
        step=1)
def upload_transformed_data_zip(self):
    log.info('[upload] upload transformed zip to molgenis')
    files = {'file': open(self.zip_src, 'rb')}
    multipart_headers = {
        'x-molgenis-token': self.config['molgenis']['token']
    }
    import_endpoint = '%s/plugin/importwizard/importFile?packageId=lifelines' % \
        self.config['molgenis']['hostname']
    res = requests.post(import_endpoint, headers=multipart_headers, files=files)
    res.raise_for_status()

    batch_status_endpoint = '%s%s' % (self.config['molgenis']['hostname'], res.text)
    log.debug('[upload] importing data...')
    # Poll until the import batch leaves the RUNNING state
    polling.poll(
        lambda: requests.get(batch_status_endpoint,
                             headers=self.json_headers).json()['status'] != 'RUNNING',
        step=20, timeout=3600)

    res = requests.get(batch_status_endpoint, headers=self.json_headers)
    res.raise_for_status()
    res = res.json()
    batch_status = res['status']
    if batch_status != 'FINISHED':
        log.error('failed to import: %s' % json.dumps(res))
        raise Exception('failed to import:\n%s' % json.dumps(res))
    log.info('[upload] import finished with status: %s' % batch_status)
    log.debug('batch response:\n%s' % json.dumps(res))
def run(self):
    logger.debug('Starting Hue polling')
    while run_event.is_set():
        onBeforePoll = myHue.getLight(whichLights[0])['json']['state']['on']
        logger.debug('Current Hue on/off state is: %s', onBeforePoll)
        logger.debug('Watching for it to change')
        # The poll times out and loops after 10 seconds so that the main thread
        # can quit this for KeyboardInterrupt etc without waiting forever
        try:
            polling.poll(
                lambda: myHue.getLight(whichLights[0])['json']['state']['on'] != onBeforePoll,
                step=howOften, timeout=pollerTimeout)
        except polling.TimeoutException, te:
            logger.debug('Hue poller timed out')
        onAfterPoll = myHue.getLight(whichLights[0])['json']['state']['on']
        if onAfterPoll != onBeforePoll:
            logger.debug('Lamps are now %s', ('Off', 'On')[onAfterPoll])
            logger.info('Hue event detected - Updating WeMo')
            # Now send the on/off to the WeMo switch
            switch.on() if onAfterPoll else switch.off()
            logger.debug('Turning on switch') if onAfterPoll \
                else logger.debug('Turning off switch')
def push_mfa_polling(factor_id, state_token):
    _valid = False
    url = '{org_url}/api/v1/authn/factors/{factor_id}/verify'.format(
        org_url=app.config['OKTA_ORG_URL'], factor_id=factor_id)
    payload = {
        'stateToken': state_token,
    }
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
    }
    try:
        polling.poll(
            lambda: requests.request("POST", url, headers=headers, data=json.dumps(payload)),
            check_success=push_mfa_verify,
            step=2,      # Poll every 2 seconds.
            timeout=10   # Give up after 10 seconds.
        )
        _valid = True
    except polling.TimeoutException as te:
        while not te.values.empty():
            print(te.values.get())
    return _valid
def test_abort(find_executable, artifacts):
    """Test user's ability to abort their program using the q input

    :param fixture find_executable: Finds and returns the simulator executable
    :param fixture artifacts: Sets up the artifacts folder, organizes artifacts at teardown
    """
    TEST_PROGRAM = r"SourcePrograms/SimpleProgram.s"

    # Execute simulator
    program = xexpect.spawn("./" + find_executable, ["-f" + TEST_PROGRAM, "-d"])

    for debug_option in ('', '', '', 'q'):
        try:
            program.expect("Debug Option: ", timeout=0.2)
        except (xexpect.TIMEOUT, xexpect.EOF) as e:
            print(e)
            assert not program.isalive(), "Program is hung"
            break
        program.sendline(debug_option)

    try:
        program.expect("Aborting program...", timeout=0.2)
    except (xexpect.TIMEOUT, xexpect.EOF):
        assert False, "Program did not abort"

    try:
        polling.poll(lambda: program.isalive() == False, step=0.1, timeout=1)
    except polling.TimeoutException:
        assert False, "Program is hung"

    returncode = program.wait()
    assert returncode == 0, "Program did not execute successfully"
def delete_all(self, timeout_secs=300):
    """Delete all locks.

    Parameters
    ----------
    timeout_secs: int
        How long to wait for internal locks (note these need to be
        cleared before external locks can be deleted)

    Raises
    ------
    APIException
    """
    try:
        polling.poll(
            lambda: len(self.get()["_embedded"]["internal_locks"]) == 0,
            step=10,
            poll_forever=False,
            timeout=timeout_secs,
        )
    except polling.TimeoutException:
        return False
    except Exception as e:
        self.client.log.error(e)
        return False

    if len(self.get()["_embedded"]["external_locks"]) > 0:
        for lock in self.get()["_embedded"]["external_locks"]:
            lock_id = lock["_links"]["self"]["href"]
            self.delete(lock_id)
    return True
def delete_all(self, timeout_secs=300):
    """Delete all locks

    Arguments:
        timeout_secs: int
            How long to wait for internal locks (note these need to be
            cleared before external locks can be deleted)

    Raises:
        APIException
    """
    try:
        polling.poll(
            lambda: len(self.get()["_embedded"]["internal_locks"]) == 0,
            step=60,
            poll_forever=False,
            timeout=timeout_secs,
        )
    except polling.TimeoutException:
        raise Exception("Timed out waiting for internal locks to free.")

    if len(self.get()["_embedded"]["external_locks"]) > 0:
        for lock in self.get()["_embedded"]["external_locks"]:
            lock_id = lock["_links"]["self"]["href"]
            self.delete(lock_id)
    return True
def request(self, data):
    request_id = str(uuid.uuid4())

    # Get consumers
    consumers = self.redis.smembers(KeyPattern.consumers(self.topic))

    # Set request data
    self.redis.hmset(KeyPattern.request(self.topic, request_id), data)

    # Notify each consumer
    for consumer in consumers:
        self.redis.rpush(KeyPattern.consumer(self.topic, consumer), request_id)

    # Poll for responses
    def has_responses():
        for consumer in consumers:
            if not self.redis.exists(
                    KeyPattern.response(self.topic, consumer, request_id)):
                return False
        return True

    polling.poll(has_responses, step=0.5, timeout=3)

    # Get all responses
    resps = []
    for consumer in consumers:
        ret = self.redis.hgetall(
            KeyPattern.response(self.topic, consumer, request_id))
        resps.append(ret)
    return resps
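# A hypothetical consumer-side counterpart to request() above, purely a
# sketch: it assumes the same KeyPattern helpers and a handler(data) -> dict
# callback. It pops a request id from this consumer's list, reads the request
# hash, and writes the response hash that request() polls for.
def example_consumer_loop(redis_conn, topic, consumer_name, handler):
    while True:
        # blpop blocks until an id is pushed by request(); returns (key, value)
        _key, request_id = redis_conn.blpop(KeyPattern.consumer(topic, consumer_name))
        data = redis_conn.hgetall(KeyPattern.request(topic, request_id))
        redis_conn.hmset(
            KeyPattern.response(topic, consumer_name, request_id), handler(data))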
def start(self):
    Logger().info("Starting scheduler ...")
    try:
        Logger().info("Polling ...")
        poll(lambda: self.scan_commands(Configurator().db_host,
                                        Configurator().db_port,
                                        Configurator().db_name,
                                        Configurator().db_table,
                                        Configurator().db_user,
                                        Configurator().db_password),
             step=POLLING_STEP_DELAY,
             step_function=Scheduler.step_function,
             poll_forever=True)
        Logger().info("Polling is done.")
        self.shutdown()
    except TimeoutError as ex:
        Logger().exception("Polling timeout occurred. {}".format(str(ex)))
    except TimeoutException as ex:
        Logger().exception("Polling timeout occurred. {}".format(str(ex)))
    except MaxCallException as ex:
        Logger().exception("Max attempts polling error. {}".format(str(ex)))
    except PollingException as ex:
        Logger().exception("Polling error. {}".format(str(ex)))
    except KeyboardInterrupt:
        Logger().error("Keyboard interrupt.")
def wait_for_status(self, worker_id, status=[], timeout_secs=60):
    """
    Uses: https://github.com/justiniso/polling/blob/master/polling.py

    status: WorkerK8sStatus value, e.g. WorkerK8sStatus.configured
    raises: Exception
    """
    assert len(status) > 0, "At least one 'status' must be provided"
    assert timeout_secs >= 0, "'timeout_secs' must be >= 0"

    # current_status = self.get_k8shost(worker_id).status
    # # if we aren't configuring, we aren't going to change state
    # if current_status.find('configuring') < 0:
    #     raise Exception('Host status is: {} - not polling for an update'.format(current_status))

    try:
        polling.poll(
            lambda: self.get_k8shost(worker_id).status in WorkerK8sStatus.status_names(status),
            step=10,
            poll_forever=False,
            timeout=timeout_secs
        )
    except polling.TimeoutException:
        status_names = [WorkerK8sStatus.status_name(s) for s in status]
        raise Exception(
            "Timed out waiting for status(es): {} on K8S Worker: {}".format(
                status_names, worker_id))
def main():
    db = FilesystemStore("/home/kiselev/db")
    poll_cooldown = 30
    locations = [Location(56.880372, 60.729744, 56.928178, 60.843899)]
    advertiser = PeriscopeAdvertiser(locations, db, logger)
    polling.poll(advertiser.poll, step=poll_cooldown, poll_forever=True)
def run_topic_model(job_name, num_topics):
    comprehend = boto3.client(service_name='comprehend', region_name='us-east-1')
    input_s3_url = "s3://redditdocuments/documents/" + job_name
    input_doc_format = "ONE_DOC_PER_FILE"
    output_s3_url = "s3://redditdocuments/analysis/" + job_name
    data_access_role_arn = "arn:aws:iam::449600645648:role/S3role"
    number_of_topics = num_topics

    input_data_config = {"S3Uri": input_s3_url, "InputFormat": input_doc_format}
    output_data_config = {"S3Uri": output_s3_url}

    start_topics_detection_job_result = \
        comprehend.start_topics_detection_job(NumberOfTopics=number_of_topics,
                                              InputDataConfig=input_data_config,
                                              OutputDataConfig=output_data_config,
                                              DataAccessRoleArn=data_access_role_arn)
    print("----Topic modelling job started----")
    try:
        poll(lambda: checkJobDone(comprehend, start_topics_detection_job_result),
             timeout=3000, step=1)
    except TimeoutException as tee:
        print("Value was not registered")
    print("----Topic modelling job finished----")
    status = getJobStatus(comprehend, start_topics_detection_job_result)
    output_path = status['TopicsDetectionJobProperties']['OutputDataConfig']['S3Uri'][21:]
    return output_path
def wait_until(predicate, timeout_at=30, poll_every=1):
    try:
        polling.poll(predicate, poll_every, timeout=timeout_at)
    except polling.TimeoutException:
        raise PyDriverTimeoutException(
            'Timed out after {} seconds waiting for {} to be true. '
            'The predicate was polled every {} seconds.'.format(
                timeout_at, predicate, poll_every))
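# A hypothetical usage sketch for wait_until above (the driver and the
# 'Dashboard' title are illustrative only, not from the original source):
# block until a page title changes, polling every half second for up to
# ten seconds.
def example_wait_for_dashboard(driver):
    wait_until(lambda: driver.title == 'Dashboard', timeout_at=10, poll_every=0.5)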
def query_athena(query, database, output_bucket='gdot-spm-athena'):
    response = ath.start_query_execution(
        QueryString=query,
        QueryExecutionContext={'Database': database},
        ResultConfiguration={'OutputLocation': 's3://{}'.format(output_bucket)})
    print('Started query.')

    # Wait for s3 object to be created
    polling.poll(
        lambda: 'Contents' in s3.list_objects(
            Bucket=output_bucket, Prefix=response['QueryExecutionId']),
        step=0.2, timeout=60)
    print('Query complete.')

    key = '{}.csv'.format(response['QueryExecutionId'])
    time.sleep(1)
    s3.download_file(Bucket=output_bucket, Key=key, Filename=key)
    df = pd.read_csv(key)
    os.remove(key)
    print('Results downloaded.')
    return df
def main():
    global do_processing
    try:
        print("Starting cleaner ...")
        print("Polling ...")

        def step_function(step):
            return step

        poll(lambda: do_cleaning(),
             step=POLLING_STEP_DELAY,
             step_function=step_function,
             poll_forever=True)
        print("Polling is done.")
        print("Stopped cleaner.")
    except TimeoutError as ex:
        print("Polling timeout occurred {0}".format(str(ex)))
    except TimeoutException as ex:
        print("Polling timeout occurred {0}".format(str(ex)))
    except MaxCallException as ex:
        do_processing = False
        print("Max attempts polling error {0}".format(str(ex)))
    except PollingException as ex:
        do_processing = False
        print("Polling error {0}".format(str(ex)))
    except KeyboardInterrupt:
        do_processing = False
        print("Keyboard interrupt.")
    except Exception as ex:
        do_processing = False
        print("Error occurred. {0}".format(str(ex)))
def test_serve_transcoded_image(hostname, large_file):
    """
    Test that the correct image content type is served after transcoding

    :param hostname: The hostname under test (this fixture is automatically injected by pytest)
    :param large_file: A large-ish filename (this fixture is automatically injected by pytest)
    """
    with open(large_file, 'r') as f:
        resp = requests.post(hostname + '/images',
                             data={'user_id': 'test-user-{}'.format(uuid.uuid4())},
                             files={'file': ('bridge.jpeg', f)})
    img_id = resp.json()['id']

    # Transcode the image to png
    resp = requests.put(hostname + '/image/{}'.format(img_id),
                        data={'action': 'transcode', 'extension': 'png'})
    job_id = resp.json()['job_id']

    resp = requests.get(hostname + '/image/{}'.format(img_id))
    assert resp.json()['last_job'] == job_id

    # Wait for the job to be done
    polling.poll(
        lambda: requests.get(hostname + '/image/{}'.format(img_id)),
        check_success=lambda response: response.json()['last_job_state'] == 'done',
        timeout=5,
        step=1)

    # Download the image, ensure the transcoded content type is served
    download = requests.get(hostname + '/serve/{}'.format(img_id))
    assert download.headers['content-type'] == 'image/png'

    # Clean up the data
    requests.delete(hostname + '/image/{}'.format(img_id))
def modify_instance_configure_until_finished(self, mongoCap, instance, disk, flag=True):
    instance_status = None
    if flag is False:
        status, resp = mongoCap.modifyMongoDbInstance_cap(instance=instance, disk=disk, flag=flag)
        time.sleep(95)
        if status != 200 or 'error' in resp.keys():
            info_logger.debug("modify_instance_configure_until_finished interface return {}".format(resp))
            return None
        try:
            ret_code, ret_data = polling.poll(
                lambda: mongoCap.queryFilterMongoDbs_cap(name=instance),
                check_success=utils.polling_check_status.modify_instanceSpec_until_finished,
                step=20,
                timeout=980)
        except polling.TimeoutException as e:
            info_logger.debug("modify_instance_configure_until_finished timeout {}".format(e))
            return None
        if ret_data["mongoDbInfos"] is not None:
            instance_status = int(ret_data["mongoDbInfos"][0]["status"])
        return instance_status

    status, resp = mongoCap.modifyMongoDbInstance_cap(instance=instance, disk=disk)
    time.sleep(95)
    if status != 200 or 'error' in resp.keys():
        info_logger.debug("modify_instance_configure_until_finished interface return {}".format(resp))
        return None
    try:
        ret_code, ret_data = polling.poll(
            lambda: mongoCap.queryFilterMongoDbs_cap(name=instance),
            check_success=utils.polling_check_status.create_mongodb_until_finished,
            step=20,
            timeout=980)
    except polling.TimeoutException as e:
        info_logger.debug("modify_instance_configure_until_finished timeout {}".format(e))
        return None
    if ret_data["mongoDbInfos"] is not None:
        instance_status = int(ret_data["mongoDbInfos"][0]["status"])
    return instance_status
def wait_for_step_complete(emr_client, jobflow_id, step_id, sleep_interval_s):
    """
    Will poll EMR until provided step has a terminal status
    """
    poll(is_step_complete,
         args=(emr_client, jobflow_id, step_id),
         step=sleep_interval_s,
         poll_forever=True)
def _wait_for_file(dir, path):
    g = os.path.join(dir, path)
    poll(
        lambda: glob.glob(g),
        timeout=_WAIT_FOR_FILE_DOWNLOAD_SECONDS,
        step=_WAIT_FOR_FILE_DOWNLOAD_POLL_SECONDS,
    )
    return glob.glob(g)
def recv_files(arg):
    polling.poll(
        lambda: requests.get(f'http://{server}:{port}/checkPayload',
                             data={'username': username}).status_code == 200,
        check_success=respF,
        step=5,
        poll_forever=True)
def order_status(merch_id):
    print(merch_id)
    # Fetch once per step and test for either terminal state
    polling.poll(
        lambda: fetch_payment_details(merch_id) in ('COMPLETED', 'FAILED'),
        check_success=is_correct_response,
        step=2,
        timeout=240)
    return client.code.get_payment_details(merch_id)
def poll(step, timeout):
    polling.poll(
        lambda: pretty_print(
            requests.get("https://api.mybitx.com/api/1/ticker?pair=XBTZAR").json()),
        step=step,
        timeout=timeout,
    )
def submit(request_data, resubmit=False):
    url = conform_url_format(request_data['url'])
    # Strip the scheme so http.client gets a bare host ('https://' is 8 chars, 'http://' is 7)
    if url[0:5] == 'https':
        url = url[8:]
    elif url[0:4] == 'http':
        url = url[7:]
    body = request_data['body']
    required_mem = int(request_data['memory'])
    required_cores = int(request_data['cores'])

    conn = http.client.HTTPConnection(url)
    conn.request('POST', '/ws/v1/cluster/apps/new-application')
    response = conn.getresponse().read().decode('utf-8')
    response = json.loads(response)
    submitter_id = response['application-id']
    body['application-id'] = str(submitter_id)

    maximum_resource_capacity = response['maximum-resource-capability']
    max_mem = maximum_resource_capacity['memory']
    max_core = maximum_resource_capacity['vCores']
    err_msg = {'err_msg': []}
    if required_mem > max_mem:
        err_msg['err_msg'].append(
            "YARN RM does not allow more than {} MB memory\n".format(max_mem))
    if required_cores > max_core:
        err_msg['err_msg'].append(
            "YARN RM does not allow more than {} cores\n".format(max_core))
    if len(err_msg['err_msg']) > 0:
        conn.close()
        return Response(err_msg)

    conn.request('POST', '/ws/v1/cluster/apps/', body=json.dumps(body), headers=headers)
    response = conn.getresponse().read().decode('utf-8')

    request_data_dump = json.dumps(request_data)
    SubmitRequest(app_id=submitter_id, request_data=request_data_dump).save()
    polling.poll(lambda: wait_for_submit(url, submitter_id, request_data_dump),
                 step=3, poll_forever=True)
    """
    conn.request('PUT', '/ws/v1/cluster/apps/{}/state'.format(submitter_id),
                 body=json.dumps({"state": "KILLED"}), headers=headers)
    if conn.getresponse():
        pass
    """
    conn.close()
    return Response(response)
def polled_expected_status(hc, stack_id, status):
    try:
        poll(lambda: hc.is_status_expected(hc, stack_id, status),
             timeout=1800, step=10)
        return True
    except:  # noqa: E722
        print("Timed out waiting for the expected stack status")
        return False
def poll_report(self, status_url: str):
    """Polls the status url once a second until it gets a 200 response, and times out after one minute"""
    polling.poll(
        lambda: self._request.get('{0}/{1}'.format(self._iq_url, status_url)).text,
        check_success=self.__handle_response,
        step=1,
        timeout=60)
def no_results(self) -> bool:
    def get_items():
        try:
            return self.browser.find_element_by_class_name(
                "searchbar_results-empty").is_displayed()
        except NoSuchElementException:
            return False

    polling.poll(get_items, step=0.1, timeout=5)
    return True
def check_auth(self) -> bool:
    try:
        polling.poll(
            lambda: self._success_partial_url in self._driver.current_url,
            step=3,
            timeout=30,
        )
    except polling.TimeoutException:
        return False
    return True
def wait_for_runtime_status(bot_uuid: str, status: AssistantRuntimeStatus):
    settings = ToolSettings()
    current_environment = settings.get_current_environment()
    robo = get_robo_client(current_environment)
    polling.poll(
        lambda: robo.assistants.runtimes.get(bot_uuid).content.status == status,
        step=5,
        poll_forever=True,
    )
def wait_for_result(response):
    if response.ok:
        job_id = response.json()['id']
        print(f'Waiting for job (ID: {job_id}) to finish...')
        # check every 10 seconds until the job is finished (wait up to 48 hours)
        poll(lambda: is_done(job_id), step=10, timeout=172800)
        result = get_job_result(job_id)
        return result
    else:
        raise HTTPError(f'Failed with response: {response.json()}')
def wait_for_pipeline_status_to_be_completed(build_id):
    pipeline_status_url = organization_url + "/_apis/build/Builds/" + str(build_id)
    polling.poll(
        lambda: json.loads(
            requests.request("GET", pipeline_status_url, headers=headers).text).get('status'),
        check_success=is_correct_response,
        step=1,
        timeout=timeout)
def _wait_for_deletion_complete(self):
    """Poll the GET request until a 404 occurs."""
    try:
        polling.poll(lambda: self.get().status_code == 404, timeout=120, step=2)
    except polling.TimeoutException as exception:
        self._exceptions.append(exception)
    else:
        self._deleted_status = True
def recv_msgs(arg):
    if arg():
        exit(0)
    polling.poll(
        lambda: requests.get(f'http://{server}:{port}/isThereAMessage',
                             data={'username': username}).status_code == 200,
        check_success=resp,
        step=2,
        poll_forever=True)
def test_timeout_exception(self, patch_sleep, patch_time):
    # Since the timeout is < 0, the first iteration of polling should raise the error
    try:
        polling.poll(lambda: False, step=10, timeout=-1)
    except polling.TimeoutException as e:
        assert e.values.qsize() == 1, 'There should have been 1 value pushed to the queue of values'
        assert e.last is False, 'The last value was incorrect'
    else:
        assert False, 'No timeout exception raised'

    # Test happy path timeout
    val = polling.poll(lambda: True, step=0, timeout=0)
    assert val is True, 'Val was: {} != {}'.format(val, True)
def wait_for_inshape_loading(driver):
    def is_element_present_and_displayed(driver, classname):
        hover_elems = driver.find_elements_by_class_name(classname)
        if not hover_elems:
            return False
        for elem in hover_elems:
            if not elem.is_displayed():
                return False
        return True

    polling.poll(
        lambda: not is_element_present_and_displayed(driver, 'hoverLoading'),
        step=1,
        timeout=90,
    )
def test_serve_cropped_image(hostname, large_file):
    """
    Test that the correct image is served after cropping

    :param hostname: The hostname under test (this fixture is automatically injected by pytest)
    :param large_file: A large-ish filename (this fixture is automatically injected by pytest)
    """
    with open(large_file, 'r') as f:
        resp = requests.post(hostname + '/images',
                             data={'user_id': 'test-user-{}'.format(uuid.uuid4())},
                             files={'file': ('bridge.jpeg', f)})
    img_id = resp.json()['id']

    # Get the original size (content-length headers are strings, so compare as integers)
    download = requests.get(hostname + '/serve/{}'.format(img_id))
    original_size = int(download.headers['content-length'])

    # Crop the image
    resp = requests.put(hostname + '/image/{}'.format(img_id),
                        data={'action': 'crop', 'box': '25,25,900,200'})
    job_id1 = resp.json()['job_id']

    resp = requests.get(hostname + '/image/{}'.format(img_id))
    assert resp.json()['last_job'] == job_id1

    # Wait for the job to be done
    polling.poll(
        lambda: requests.get(hostname + '/image/{}'.format(img_id)),
        check_success=lambda response: response.json()['last_job_state'] == 'done',
        timeout=5,
        step=1)

    # Download the image, ensure it was actually cropped
    download = requests.get(hostname + '/serve/{}'.format(img_id))
    assert int(download.headers['content-length']) < original_size
    assert download.headers['content-type'] == 'image/jpeg'

    # Clean up the data
    requests.delete(hostname + '/image/{}'.format(img_id))
def _wait_result(exp_prefix, exp_name, timeout):
    """
    Poll for the sync of params.pkl (currently hardcoded) from S3, indicating that the task is done.

    :param exp_prefix: str, experiment name prefix (dir where results are expected to be stored)
    :param exp_name: str, experiment name. Name of dir below exp_prefix where result files of
        individual run are expected to be stored
    :param timeout: int, polling timeout in seconds
    :return bool. False if the polling times out. True if successful.
    """
    result_path = os.path.join(config.LOG_DIR, "s3", exp_prefix, exp_name, 'params.pkl')
    print("Polling for results in", result_path)
    try:
        file_handle = polling.poll(
            lambda: open(result_path),
            ignore_exceptions=(IOError,),
            timeout=timeout,
            step=60)
        file_handle.close()
    except polling.TimeoutException:
        return False
    return True
def poll(action_fn, error_message, *error_message_args):
    """
    Poll until action_fn returns something truthy. After GLOBAL_TIMEOUT, throw an exception.

    action_fn may return:
    - a tuple: the first element is the result (truthy or falsy), the second element any detail
    - any other type: truthy or falsy decides whether the polling has been successful or not

    error_message may be:
    - a string to be formatted with error_message_args
    - a callable returning such a string
    """
    details = {'last_seen': None}

    def wrapped_action_fn():
        possible_result = action_fn()
        if isinstance(possible_result, tuple) and len(possible_result) == 2:
            details['last_seen'] = possible_result[1]
            return possible_result[0]
        else:
            return possible_result

    try:
        return polling.poll(
            wrapped_action_fn,
            timeout=GLOBAL_TIMEOUT,
            step=5
        )
    except polling.TimeoutException:
        if callable(error_message):
            error_message_template = error_message()
        else:
            error_message_template = error_message
        built_error_message = error_message_template % tuple(error_message_args)
        # 'last_seen' always exists in details, so test the value, not the key
        if details['last_seen'] is not None:
            built_error_message = built_error_message + "\n" + pformat(details['last_seen'])
            if isinstance(details['last_seen'], ConnectionError):
                host = urlparse(details['last_seen'].request.url).netloc
                built_error_message = built_error_message + ("\nHost: %s" % host)
                built_error_message = built_error_message + ("\nIp: %s" % debug.get_host_ip(host))
        raise TimeoutError.giving_up_on(built_error_message)
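# A hypothetical usage sketch for the tuple contract documented in poll()
# above (the /health endpoint and all names are illustrative assumptions):
# the action function returns (result, detail), so the response body is
# surfaced in the timeout message.
def example_wait_for_health(base_url):
    def check():
        resp = requests.get(base_url + '/health')
        return resp.status_code == 200, resp.text  # (truthy/falsy result, detail)

    return poll(check, 'service at %s never became healthy', base_url)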
print "[-] Error: New instance did not assign itself EIP: {0}. Cannot proceed with judgment. " \ "Initiating rollback.".format(args.eip) rollback(deployment_id) assign_eip(args.eip, rbinstance_id) sys.exit(2) if not args.bypass: if args.nofollow: allow_redirects = False else: allow_redirects = True try: polling.poll( lambda: requests.get(test_url, allow_redirects=allow_redirects, timeout=requests_timeout), check_success=qa, ignore_exceptions=(requests.exceptions.Timeout, requests.exceptions.ConnectionError), step=3, timeout=judge_timeout ) except polling.TimeoutException, te: print "[-] Error running test: Test url failed to meet test criteria." while not te.values.empty(): print "{0}".format(te.values.get()) print "[+] Initiating rollback of deployment." rollback(deployment_id) if args.eip: assign_eip(args.eip, rbinstance_id) sys.exit(2) print "[+] Tests passed. Proceeding deployment." proceed(deployment_id)
def main():
    version = '1.3'
    parser = argparse.ArgumentParser(description="AMI Asgard Deployment Script.")
    parser.add_argument("-n", "--nofollow", action='store_true',
                        help="Do not follow redirects for the test URL.")
    parser.add_argument("--eip", required=False,
                        help="Elastic IP address for rollback. (EIP based deployments only).")
    parser.add_argument("-b", "--bypass", action="store_true", help="Bypass the URL test")
    parser.add_argument("-j", "--judgment", help="Judgment timeout. Default: 150 seconds",
                        required=False, const=150, nargs='?', type=int, default=150)
    parser.add_argument("-s", "--startup",
                        help="Asgard in-service startup timeout. Default: 10 minutes",
                        required=False, const=10, nargs='?', type=int, default=10)
    parser.add_argument('-t', '--timeout',
                        help='Asgard polling timeout. (How long to wait for deployment to enter '
                             'judgment phase before rolling back). Default: 600 seconds',
                        required=False, const=600, nargs='?', type=int, default=600)
    parser.add_argument('asg_id')
    parser.add_argument('ami_id')
    parser.add_argument('test_url', nargs='?')
    parser.add_argument('regex', nargs='?')
    args = parser.parse_args()

    print "AMI Asgard Deployment Script {0}".format(version)
    asg_id = args.asg_id
    ami_id = args.ami_id
    poll_timeout = args.timeout
    judge_timeout = args.judgment
    startup_timeout = args.startup
    if not args.bypass:
        test_url = args.test_url
    if args.regex:
        global regex
        regex = args.regex

    print "[+] Asgard Host: {0}".format(asgard_host)
    print "[+] EC2 Region: {0}".format(ec2_region)
    print "[+] ASG: {0}".format(asg_id)
    print "[+] AMI to Launch: {0}".format(ami_id)

    if args.eip:
        print "[+] Determining rollback instance_id for: {0}".format(args.eip)
        rbinstance_id = search_ip(args.eip)
        print "{0}".format(rbinstance_id)

    query = base_url + '/deployment/prepare?id=' + asg_id
    f = urllib2.urlopen(query)
    deflcjson = f.read()
    f.close()
    deflc = json.loads(deflcjson)
    deflc['lcOptions']['imageId'] = ami_id
    deflc['deploymentOptions'] = {
        "clusterName": asg_id,
        "notificationDestination": notify,
        "steps": [
            {"type": "CreateAsg"},
            {"type": "Resize", "targetAsg": "Next",
             "capacity": deflc['asgOptions']['minSize'],
             "startUpTimeoutMinutes": startup_timeout},
            {"type": "DisableAsg", "targetAsg": "Previous"},
            {"type": "Judgment", "durationMinutes": 30},
            {"type": "DeleteAsg", "targetAsg": "Previous"},
        ]
    }

    post_url = base_url + '/deployment/start'
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    response = requests.post(post_url, data=json.dumps(deflc), headers=headers)
    print response
    print response.text

    print "[+] Waiting for deployment to enter judgment state. Timeout is: {0}".format(poll_timeout)
    deployment_id = response.json()["deploymentId"]
    poll_url = base_url + '/task/show/' + deployment_id + '.json'
    time.sleep(15)
    try:
        polling.poll(
            lambda: requests.get(poll_url, allow_redirects=True, timeout=requests_timeout),
            check_success=judgement_ready,
            step=30,
            timeout=poll_timeout
        )
    except polling.TimeoutException, te:
        print "[-] Error waiting for judgment."
        while not te.values.empty():
            print "{0}".format(te.values.get())
        print "{0}".format(get_error(poll_url))
        sys.exit(2)
def test_arg_validation(self):
    """Tests various permutations of calling with invalid args"""

    # No function
    try:
        polling.poll()
    except TypeError:
        pass
    else:
        assert False, 'No error raised with no args'

    try:
        polling.poll(lambda: True)
    except TypeError:
        pass
    else:
        assert False, 'No error raised with no step'

    try:
        polling.poll(lambda: True, step=1)
    except AssertionError:
        pass
    else:
        assert False, 'No error raised without specifying poll_forever or a timeout/max_tries'

    try:
        polling.poll(lambda: True, step=1, timeout=1, max_tries=1, poll_forever=True)
    except AssertionError:
        pass
    else:
        assert False, 'No error raised when specifying poll_forever with timeout/max_tries'

    # Valid options
    polling.poll(lambda: True, step=1, poll_forever=True)
    polling.poll(lambda: True, step=1, timeout=1)
    polling.poll(lambda: True, step=1, max_tries=1)
    polling.poll(lambda: True, step=1, timeout=1, max_tries=1)
elem_pass.send_keys(password)
driver.find_element_by_class_name('login-lonely-button').click()

# Navigates to the manufacturing tool
driver.find_element_by_link_text("Manufacturer tool").click()
wait_for_inshape_loading(driver)

# Inputs the tray name
elem_tray = driver.find_element_by_id("searchBox")
elem_tray.send_keys(tray_final)
driver.find_element_by_xpath("//select[@name='searchType']/option[9]").click()

cooling_tab = polling.poll(
    lambda: driver.find_element_by_id("substatus-344"),
    step=1,
    timeout=90,
    ignore_exceptions=(Exception,)
)
cooling_tab.click()
wait_for_inshape_loading(driver)

# Navigates to the cooling tab
driver.find_element_by_id("substatus-344").click()
wait_for_inshape_loading(driver)

# Selects the tray by tray name
driver.find_element_by_id("assign-bulk").click()  # clicks update
def main():
    parser, options, arguments = parse_options()

    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)

    if options.show_version:
        print "Locust %s" % (version,)
        sys.exit(0)

    if os.path.isdir(options.locustfile):
        all_locustfiles = collect_locustfiles(options.locustfile)
    else:
        locustfile = find_locustfile(options.locustfile)
        if not locustfile:
            logger.error("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.")
            sys.exit(1)
        all_locustfiles = load_locustfile(locustfile)

    logger.info("All available locustfiles: {}".format(all_locustfiles))

    # Use the first locustfile for the default locusts
    locusts = all_locustfiles.values()[0]

    if options.list_commands:
        console_logger.info("Available Locusts:")
        for name in locusts:
            console_logger.info("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()

    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)

    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes),
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    if options.master and options.no_web and not options.min_slaves:
        logger.error("When running --master and --no-web, you must specify --min-slaves to be available before starting to swarm")
        sys.exit(1)

    if options.master and options.no_web and not (options.timeout or options.num_requests):
        logger.error("When running --master and --no-web, you must specify either --num-request or --timeout to tell the slaves when to stop running each locustfile")
        sys.exit(1)

    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor at %s:%s" % (options.web_host or "*", options.port))
        main_greenlet = gevent.spawn(web.start, locust_classes, options)

    if options.slave:
        logger.info("Waiting for master to become available")
        try:
            runners.locust_runner = polling.poll(
                lambda: SlaveLocustRunner(locust_classes, options,
                                          available_locustfiles=all_locustfiles),
                timeout=60,
                step=1,
                ignore_exceptions=(socket.error,))
        except polling.TimeoutException, e:
            logger.error("Failed to connect to the Locust master: %s", e.last)
            sys.exit(-1)
        main_greenlet = runners.locust_runner.greenlet
#! coding: utf-8
import datetime

import polling


def print_now():
    """print_now"""
    print datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')


if __name__ == '__main__':
    # run print_now every step sec
    polling.poll(print_now, step=1, poll_forever=True)
@patch('time.sleep', return_value=None)
@patch('time.time', return_value=0)
def test_timeout_exception(self, patch_sleep, patch_time):
    # Since the timeout is < 0, the first iteration of polling should raise the error
    try:
        polling.poll(lambda: False, step=10, timeout=-1)
    except polling.TimeoutException, e:
        assert e.values.qsize() == 1, 'There should have been 1 value pushed to the queue of values'
        assert e.last is False, 'The last value was incorrect'
    else:
        assert False, 'No timeout exception raised'

    # Test happy path timeout
    val = polling.poll(lambda: True, step=0, timeout=0)
    assert val is True, 'Val was: {} != {}'.format(val, True)


def test_max_call_exception(self):
    """
    Test that a MaxCallException will be raised
    """
    tries = 100
    try:
        polling.poll(lambda: False, step=0, max_tries=tries)
    except polling.MaxCallException, e:
        assert e.values.qsize() == tries, 'Poll function called the incorrect number of times'
        assert e.last is False, 'The last value was incorrect'
    else:
        assert False, 'No MaxCallException raised'