def test_show_platform_fanstatus_mocked(duthosts, enum_rand_one_per_hwsku_hostname,
                                        mocker_factory, disable_thermal_policy):
    """
    @summary: Check output of 'show platform fan'.
    """
    duthost = duthosts[enum_rand_one_per_hwsku_hostname]

    # Mock data and check
    mocker = mocker_factory(duthost, 'FanStatusMocker')
    pytest_require(
        mocker,
        "No FanStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type'])

    logging.info('Mock FAN status data...')
    mocker.mock_data()
    logging.info('Wait and check actual data with mocked FAN status data...')
    retry_call(check_cli_output_with_mocker,
               fargs=[duthost, mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2],
               tries=3,
               delay=30)
def test_config_start_no_api(test_microvm_with_ssh, vm_config_file):
    """Test microvm start when API server thread is disabled."""
    test_microvm = test_microvm_with_ssh
    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.jailer.extra_args.update({'no-api': None})

    test_microvm.spawn()

    # Get Firecracker PID so we can check the names of threads.
    firecracker_pid = test_microvm.jailer_clone_pid

    # Get names of threads in Firecracker.
    cmd = 'ps -T --no-headers -p {} | awk \'{{print $5}}\''.format(
        firecracker_pid
    )

    # Retry running 'ps' in case it failed to list the firecracker process.
    # The regex matches any expression that contains 'firecracker' and does
    # not contain 'fc_api'.
    retry_call(
        utils.search_output_from_cmd,
        fkwargs={
            "cmd": cmd,
            "find_regex": re.compile("^(?!.*fc_api)(?:.*)?firecracker", re.DOTALL)
        },
        exceptions=RuntimeError,
        tries=10,
        delay=1)
def run_training(rank, world_size, model_args, data, load_from, new,
                 num_train_steps, name):
    is_main = rank == 0
    is_ddp = world_size > 1
    seed = 1

    if is_ddp:
        set_seed(seed)
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '12355'
        dist.init_process_group('nccl', rank=rank, world_size=world_size)
        print(f"{rank + 1}/{world_size} process initialized.")

    model_args.update(is_ddp=is_ddp, rank=rank, world_size=world_size)
    model = Trainer(**model_args)

    if not new:
        model.load(load_from)
    else:
        model.clear()

    model.set_data_src(data)

    for _ in tqdm(range(num_train_steps - model.steps),
                  initial=model.steps,
                  total=num_train_steps,
                  mininterval=10.,
                  desc=f'{name}<{data}>'):
        retry_call(model.train, tries=3, exceptions=NanException)
        if is_main and _ % 50 == 0:
            model.print_log()

    if is_ddp:
        dist.destroy_process_group()
def test_everything(driver, profile, base_url, base_api_url):
    do_user_registration(driver, profile, base_url)
    test_ids = get_service_templates_and_api_key_for_tests(driver, profile)
    client = NotificationsAPIClient(
        base_url=base_api_url,
        service_id=test_ids['service_id'],
        api_key=test_ids['api_key']
    )

    upload_csv_page = UploadCsvPage(driver)

    email_notification_id = send_notification_via_csv(profile, upload_csv_page, 'email')
    email_notification = retry_call(
        get_notification_by_id_via_api,
        fargs=[client, email_notification_id, ['sending', 'delivered']],
        tries=Config.NOTIFICATION_RETRY_TIMES,
        delay=Config.NOTIFICATION_RETRY_INTERVAL
    )
    assert_notification_body(email_notification_id, email_notification)

    sms_notification_id = send_notification_via_csv(profile, upload_csv_page, 'sms')
    sms_notification = retry_call(
        get_notification_by_id_via_api,
        fargs=[client, sms_notification_id, ['sending', 'delivered']],
        tries=Config.NOTIFICATION_RETRY_TIMES,
        delay=Config.NOTIFICATION_RETRY_INTERVAL
    )
    assert_notification_body(sms_notification_id, sms_notification)

    do_edit_and_delete_email_template(driver)
    do_user_can_invite_someone_to_notify(driver, profile, base_url)
def process(self, payload):
    entity_key_name = str(uuid.uuid4())
    if 'trace' in payload:
        entity_key_name = payload['trace'].split('/')[-1]
    elif 'insert_id' in payload:
        entity_key_name = payload['insert_id']

    entity_key = self.client.key(config.DB_ERROR_REPORTING_KIND, entity_key_name)
    entity = self.client.get(entity_key)
    if entity:
        print('Trace already exists')
    else:
        entity = datastore.Entity(key=entity_key)
        payload['project_id'] = ''
        if 'logName' in payload:
            payload['project_id'] = payload['logName'].split('/')[1]
        elif 'resource' in payload \
                and 'labels' in payload['resource'] \
                and 'project_id' in payload['resource']['labels']:
            payload['project_id'] = payload['resource']['labels']['project_id']

        retry_call(
            self.populate_data,
            fargs=[entity, payload, entity_key_name],
            exceptions=gcp_exceptions.Aborted,
            tries=10,
            delay=random.randint(2, 5),
            jitter=(1, 5),
            logger=None)  # nosec
def download_file_with_retry(self, file_info):
    retry_call(self.download_file,
               fargs=[file_info],
               tries=self.tries,
               delay=5,
               backoff=2,
               max_delay=30,
               logger=self.logger)
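# Note on the schedule above, assuming the `retry` package semantics (the delay
# is multiplied by `backoff` after each failed attempt and capped at
# `max_delay`): waits between attempts are roughly 5s, 10s, 20s, 30s, 30s, ...
# for up to `self.tries` attempts in total.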
def test_call_on_exception():
    exception = RuntimeError()
    f_mock = MagicMock(side_effect=exception)
    callback_mock = MagicMock()
    try:
        retry_call(f_mock, tries=1, on_exception=callback_mock)
    except RuntimeError:
        pass
    callback_mock.assert_called_once_with(exception)
def create_session(self):
    s = self.Session()
    try:
        # __enter__
        retry_call(s.execute, fargs=['SELECT 1'], tries=6, delay=10)
        yield s
    finally:
        # __exit__
        s.close()
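# Hypothetical usage sketch: the __enter__/__exit__ comments suggest
# create_session is decorated with @contextlib.contextmanager elsewhere, so it
# would be used roughly like this (names are illustrative):
#
#   with repo.create_session() as session:
#       session.execute('SELECT * FROM users')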
def test_retry_call():
    f_mock = MagicMock(side_effect=RuntimeError)
    tries = 2
    try:
        retry_call(f_mock, exceptions=RuntimeError, tries=tries)
    except RuntimeError:
        pass
    assert f_mock.call_count == tries
def create_or_update_snap_recipe(self, name, owner, version, repo, branch, track, arch):
    """Create or update a snap recipe.

    Note: You can delete snaps with:
      lp._browser.delete('https://api.launchpad.net/devel/~k8s-jenkaas-admins/+snap/kube-apiserver-1.13')
    """
    lp_snap_name = f"{name}-{version}"
    lp_snap_project_name = f"snap-{name}"
    lp_owner = self.owner(owner)

    if not isinstance(track, list):
        track = [track]

    try:
        snap = self.snaps.getByName(name=lp_snap_name, owner=lp_owner)
        snap.git_path = branch
        snap.auto_build = True
        snap.auto_build_pocket = "Updates"
        snap.auto_build_archive = self.archive()
        snap.store_upload = True
        snap.store_name = name
        snap.store_series = self.snappy_series()
        snap.store_channels = track
    except NotFound:
        snap = self.snaps.new(
            name=lp_snap_name,
            owner=lp_owner,
            distro_series=self.distro_series(),
            git_repository=self.snap_git_repo(lp_owner, lp_snap_project_name),
            git_path=branch,
            store_upload=True,
            store_name=name,
            store_series=self.snappy_series(),
            store_channels=track,
            processors=[
                "/+processors/amd64",
                "/+processors/s390x",
                "/+processors/ppc64el",
                "/+processors/arm64",
            ],
            auto_build=True,
            auto_build_pocket="Updates",
            auto_build_archive=self.archive(),
        )

    retry_call(
        snap.lp_save,
        delay=2,
        backoff=2,
        tries=15,
        exceptions=(
            Exception,
            PreconditionFailed,
        ),
    )
    return snap
def test_gcs_handle_exception(self, mock_gcs_filesystem):
    with self.assertRaises(RateLimitException):
        retry_call(
            self.gcs_handler.write,
            exceptions=RateLimitException,
            fargs=['raise_limit_exception', 'gs://bucket/test.ipynb'],
            tries=_RETRIES,
            delay=1,
            backoff=1,
            max_delay=2)
def inner(self, *args, **kw):
    if hasattr(self, 'retries'):
        delay = getattr(self, 'retry_delay', 10)
        retry_call(method,
                   fargs=[self, *args],
                   fkwargs=kw,
                   tries=getattr(self, 'retries'),
                   delay=delay)
    else:
        method(self, *args, **kw)
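# Hypothetical usage sketch: `inner` reads like the wrapper returned by a
# method decorator closing over `method`. All names below are illustrative,
# not from the source:
#
#   import functools
#
#   def retryable(method):
#       @functools.wraps(method)
#       def inner(self, *args, **kw):
#           ...  # body as above
#       return inner
#
#   class Downloader:
#       retries = 3       # picked up via hasattr(self, 'retries')
#       retry_delay = 5   # optional; falls back to 10 above
#
#       @retryable
#       def fetch(self, url):
#           ...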
def test_provider_inbound_sms_delivery_via_api(client):
    unique_content = 'inbound test {}'.format(uuid.uuid4())

    client.send_sms_notification(
        phone_number=config['service']['inbound_number'],
        template_id=config['service']['templates']['sms'],
        personalisation={'build_id': unique_content})

    retry_call(get_inbound_sms,
               fargs=[client, unique_content],
               tries=config['provider_retry_times'],
               delay=config['provider_retry_interval'])
def attach_resources(self):
    out_path = Path(self.src_path) / "tmp"
    os.makedirs(str(out_path), exist_ok=True)
    resource_spec = yaml.safe_load(
        Path(self.build.resource_spec).read_text())
    resources = resource_spec.get(self.entity, {})

    # Build any custom resources.
    resource_builder = self.opts.get("build-resources", None)
    if resource_builder and not resources:
        raise SystemExit(
            f"Custom build-resources specified for {self.entity} but no spec found"
        )
    if resource_builder:
        resource_builder = resource_builder.format(
            out_path=out_path,
            src_path=self.src_path,
        )
        self.echo("Running custom build-resources")
        ret = script(resource_builder, echo=self.echo)
        if not ret.ok:
            raise SystemExit("Failed to build custom resources")

    # Pull any `upstream-image` annotated resources.
    for name, details in self._read_metadata_resources().items():
        upstream_image = details.get("upstream-source")
        if details["type"] == "oci-image" and upstream_image:
            self.echo(f"Pulling {upstream_image}...")
            sh.docker.pull(upstream_image)
            resources[name] = upstream_image

    self.echo(f"Attaching resources:\n{pformat(resources)}")

    # Attach all resources.
    for name, resource in resources.items():
        # If the resource is a file, populate the path where it was built.
        # If it's a custom image, it will be in Docker and this will be a no-op.
        resource = resource.format(out_path=out_path)
        retry_call(
            cmd_ok,
            fargs=[[
                "charm",
                "attach",
                self.new_entity,
                f"{name}={resource}",
            ]],
            fkwargs={
                "check": True,
                "echo": self.echo
            },
            delay=2,
            backoff=2,
            tries=15,
            exceptions=CalledProcessError,
        )
def _send_request_with_retries(self, batch):
    retry_call(self._send_request,
               fargs=[self.http_headers, batch],
               exceptions=RecoverableException,
               tries=self.conf[ConfigOptions.retry_max_attempts],
               delay=self.conf[ConfigOptions.retry_initial_delay],
               max_delay=self.conf[ConfigOptions.retry_max_delay],
               backoff=self.conf[ConfigOptions.retry_backoff],
               jitter=(self.conf[ConfigOptions.retry_jitter_min],
                       self.conf[ConfigOptions.retry_jitter_max]))
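# Note, assuming the `retry` package semantics: when `jitter` is a two-element
# tuple, a random value drawn from that range is added to the delay after each
# attempt, on top of the exponential `backoff` multiplier; this helps spread
# out retries from many concurrent senders.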
def remove_docker_image(repo: str, tag: str, dc: DockerClient):
    """Remove docker image"""
    image_name = f"{repo}:{tag}"
    for container in dc.containers.list(filters=dict(ancestor=image_name)):
        with suppress_docker_wait_error():
            container.wait(condition="removed", timeout=30)
    retry_call(
        dc.images.remove,
        fargs=[image_name],
        fkwargs={"force": True},
        tries=5,
    )
def destroy(driver: WebDriver, tries: int = 2) -> None:
    """Destroy a driver"""
    # This is some very flaky code in selenium. Hence the retries
    # and catch-all exceptions.
    try:
        retry_call(driver.close, tries=tries)
    except Exception:  # pylint: disable=broad-except
        pass
    try:
        driver.quit()
    except Exception:  # pylint: disable=broad-except
        pass
def initial_connect(self, ssh_config):
    """Create an initial SSH client connection (retry until it works)."""
    retry_call(self.ssh_client.connect,
               fargs=[ssh_config['hostname']],
               fkwargs={
                   'look_for_keys': False,
                   'username': ssh_config['username'],
                   'key_filename': ssh_config['ssh_key_path']
               },
               exceptions=ssh_exception.NoValidConnectionsError,
               delay=1,
               backoff=2,
               max_delay=32)
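# Note, assuming the `retry` package defaults: with `tries` left unset it
# defaults to -1, so this retries indefinitely while the SSH daemon comes up,
# waiting 1s, 2s, 4s, ... between attempts, capped at 32s.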
def upload_file_with_retry(self, file_paths, folder_id="", folder_hash=""):
    for file_path in file_paths:
        retry_call(self.upload_file,
                   fargs=[file_path],
                   fkwargs={
                       "folder_id": folder_id,
                       "folder_hash": folder_hash
                   },
                   tries=self.tries,
                   delay=5,
                   backoff=2,
                   max_delay=30,
                   logger=self.logger)
def _scp_file_helper(cmd):
    retry_call(
        run,
        fargs=[cmd],
        fkwargs={
            'shell': True,
            'check': True
        },
        exceptions=ssh_exception.NoValidConnectionsError,
        delay=1,
        backoff=2,
        max_delay=32
    )
def _preupload_bundles(self):
    """Pre-upload bundles to ADCM image"""
    if self.preupload_bundle_urls:
        with allure.step(
            "Pre-upload bundles into ADCM before image initialization"
        ):
            self._init_adcm_cli()
            for url in self.preupload_bundle_urls:
                retry_call(
                    self._upload_bundle,
                    fargs=[url],
                    tries=5,
                )
def test_show_platform_temperature_mocked(duthosts, enum_rand_one_per_hwsku_hostname,
                                          mocker_factory, ignore_particular_error_log):
    """
    @summary: Check output of 'show platform temperature'.
    """
    duthost = duthosts[enum_rand_one_per_hwsku_hostname]

    # Mock data and check
    mocker = mocker_factory(duthost, 'ThermalStatusMocker')
    pytest_require(
        mocker,
        "No ThermalStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type'])

    logging.info('Mock Thermal status data...')
    mocker.mock_data()
    logging.info('Wait and check actual data with mocked Thermal status data...')
    retry_call(check_cli_output_with_mocker,
               fargs=[duthost, mocker, CMD_PLATFORM_TEMPER, THERMAL_CONTROL_TEST_WAIT_TIME],
               tries=3,
               delay=30)
def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac,
                       vlan_ptf_ports, symmetric_hashing, duthost):
    logging.info("Executing dynamic inner hash test for outer {} and inner {} "
                 "with symmetric_hashing set to {}".format(outer_ipver, inner_ipver,
                                                           str(symmetric_hashing)))

    with allure.step('Run ptf test InnerHashTest'):
        timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        log_file = "/tmp/inner_hash_test.DynamicInnerHashTest.{}.{}.{}.log".format(
            outer_ipver, inner_ipver, timestamp)
        logging.info("PTF log file: %s" % log_file)

        outer_src_ip_range, outer_dst_ip_range = get_src_dst_ip_range(outer_ipver)
        inner_src_ip_range, inner_dst_ip_range = get_src_dst_ip_range(inner_ipver)

        balancing_test_times = 150
        balancing_range = 0.3

        ptf_runner(ptfhost,
                   "ptftests",
                   "inner_hash_test.InnerHashTest",
                   platform_dir="ptftests",
                   params={
                       "fib_info": FIB_INFO_FILE_DST,
                       "router_mac": router_mac,
                       "src_ports": vlan_ptf_ports,
                       "hash_keys": hash_keys,
                       "vxlan_port": VXLAN_PORT,
                       "inner_src_ip_range": ",".join(inner_src_ip_range),
                       "inner_dst_ip_range": ",".join(inner_dst_ip_range),
                       "outer_src_ip_range": ",".join(outer_src_ip_range),
                       "outer_dst_ip_range": ",".join(outer_dst_ip_range),
                       "balancing_test_times": balancing_test_times,
                       "balancing_range": balancing_range,
                       "outer_encap_formats": OUTER_ENCAP_FORMATS,
                       "nvgre_tni": NVGRE_TNI,
                       "symmetric_hashing": symmetric_hashing
                   },
                   log_file=log_file,
                   qlen=PTF_QLEN,
                   socket_recv_size=16384)

        retry_call(check_pbh_counters,
                   fargs=[duthost, outer_ipver, inner_ipver, balancing_test_times,
                          symmetric_hashing, hash_keys],
                   tries=5,
                   delay=5)
def test3_replication_after_failover(self):
    master_pod_ip = self.assert_lb_backend_state("master", 1)[0][1]
    standby_pod_name, standby_pod_ip = self.assert_lb_backend_state("standby", 1)[0]
    self.assertNotEqual(master_pod_ip, standby_pod_ip,
                        "Expected master and standby ips to be different")

    table_name, table_row_count = test_utils.create_table()
    logging.info("Checking table size on %s", standby_pod_name)
    retry_call(self.assert_table_size,
               fargs=[standby_pod_ip, table_name, table_row_count],
               tries=3,
               delay=3)
def upload_resource(self, entity, resource_name, resource):
    # If the resource is a file, populate the path where it was built.
    # If it's a custom image, it will be in Docker and this will be a no-op.
    retry_call(
        self.charm.attach,
        fargs=[
            entity,
            f"{resource_name}={resource}",
        ],
        delay=2,
        backoff=2,
        tries=15,
        exceptions=sh.ErrorReturnCode,
    )
def test_retry_call_with_fail_callback():
    def f():
        raise RuntimeError

    def cb(error):
        pass

    callback_mock = MagicMock(spec=cb)
    try:
        retry_call(f, fail_callback=callback_mock, tries=2)
    except RuntimeError:
        pass
    callback_mock.assert_called()
def run(self) -> str:
    startTime = time()

    # Ensure lastRunTime is initialized at the start of the run method, rather
    # than requiring all action methods to initialize it themselves (although
    # individual action methods can overwrite it with a successful run
    # timestamp upon completion if they want).
    # NOTE: this ensures we do not spam logs every 5 seconds trying to rerun
    # checks that throw an unhandled exception.
    self.state['lastRunLocal'] = datetime.utcnow()
    try:
        self.duration = 0
        self.success = False
        self.tracer.info("[%s] executing all actions of check" % self.fullName)
        self.tracer.debug("[%s] actions=%s" % (self.fullName, self.actions))
        for action in self.actions:
            methodName = METHODNAME_ACTION % action["type"]
            parameters = action.get("parameters", {})
            self.tracer.debug("[%s] calling action %s" % (self.fullName, methodName))
            method = getattr(self, methodName)
            tries = action.get("retries",
                               self.providerInstance.retrySettings["retries"])
            delay = action.get("delayInSeconds",
                               self.providerInstance.retrySettings["delayInSeconds"])
            backoff = action.get("backoffMultiplier",
                                 self.providerInstance.retrySettings["backoffMultiplier"])
            try:
                retry_call(method,
                           fkwargs=parameters,
                           tries=tries,
                           delay=delay,
                           backoff=backoff,
                           logger=self.tracer)
                self.success = True
            except Exception as e:
                self.tracer.error(
                    "[%s] error executing action %s, Exception %s, skipping remaining actions"
                    % (self.fullName, methodName, e))
                self.checkMessage = str(e)
                # Reraise the exception since we have no valid JSON result string to return.
                raise
    finally:
        self.duration = TimeUtils.getElapsedMilliseconds(startTime)

    return self.generateJsonString()
def launch_new_instances(self, group_name):  # pragma: no cover
    # step 1: wait for ec2 to create the instances
    try:
        logging.info(
            "Trying for maximum {0} minutes to allow for instances to be created.".format(
                self.calculate_max_minutes(self.creation_wait[0], self.creation_wait[1])))
        new_instance_ids = retry_call(self.gather_instance_info,
                                      fargs=[group_name],
                                      tries=self.creation_wait[0],
                                      delay=self.creation_wait[1],
                                      logger=logging)
    except Exception:
        logging.error(
            "There are no instances in the group with build number {0}. "
            "Please ensure the AMI was promoted.".format(self.build_number))
        group_name = self.get_autoscale_group_name()
        self.set_autoscale_instance_desired_count(
            self.calculate_autoscale_desired_instance_count(group_name, 'decrease'),
            group_name)
        exit(self.exit_error_code)

    # step 2: wait for the new instances to come up and be ready
    logging.info(
        "Waiting maximum {0} minutes for instances to be ready.".format(
            self.calculate_max_minutes(self.ready_wait[0], self.ready_wait[1])))
    self.wait_for_new_instances(new_instance_ids, self.ready_wait[0], self.ready_wait[1])

    # step 3: wait for the instance health checks to complete
    try:
        logging.info(
            "Trying for maximum {0} minutes to health-check all instances.".format(
                self.calculate_max_minutes(self.health_wait[0], self.health_wait[1])))
        retry_call(self.lb_healthcheck,
                   fargs=[new_instance_ids],
                   tries=self.health_wait[0],
                   delay=self.health_wait[1],
                   logger=logging)
    except Exception:
        logging.error('Load balancer healthcheck has exceeded the timeout threshold. Rolling back.')
        self.revert_deployment()
def cleanup(self):
    """Clean up this jailer context."""
    # pylint: disable=subprocess-run-check
    if self.jailer_id:
        shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)

    if self.netns:
        _ = run(
            'ip netns del {}'.format(self.netns),
            shell=True,
            stderr=PIPE
        )

    # Remove the cgroup folders associated with this microvm.
    # The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
    # because we can't remove it unless we're sure there's no other running
    # microVM.

    # Firecracker is interested in these 3 cgroups for the moment.
    controllers = ('cpu', 'cpuset', 'pids')
    for controller in controllers:
        # Obtain the tasks from each cgroup and wait on them before
        # removing the microvm's associated cgroup folder.
        try:
            retry_call(
                f=self._kill_crgoup_tasks,
                fargs=[controller],
                exceptions=TimeoutError,
                max_delay=5
            )
        except TimeoutError:
            pass

        # The files inside a cgroup aren't real, so they can't (and don't
        # need to) be removed; that is why 'rm -rf' and 'rmdir' fail. We only
        # need to remove the cgroup directories. The "-depth" argument tells
        # find to do a depth-first recursion, so that any sub cgroups are
        # removed first if they are there.
        back_cmd = r'-depth -type d -exec rmdir {} \;'
        cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(
            controller,
            FC_BINARY_NAME,
            self.jailer_id,
            back_cmd
        )
        # We do not need to know if it succeeded or not; after all, we are
        # trying to clean up resources created by the jailer itself, not
        # by the testing system.
        _ = run(cmd, shell=True, stderr=PIPE)
def destroy_webdriver(driver):
    """Destroy a driver"""
    # This is some very flaky code in selenium. Hence the retries
    # and catch-all exceptions.
    try:
        retry_call(driver.close, tries=2)
    except Exception:
        pass
    try:
        driver.quit()
    except Exception:
        pass
def up(run_name: str, local_registry_port: int, working_directory: str = None,
       namespace: str = None) -> Tuple[str, int]:
    try:
        docker_client = docker.from_env()
        # We have often seen problems connecting to the local Docker daemon
        # via its socket, so we retry the Docker call in case of such problems.
        # Original call without retry_call:
        # docker_client.images.build(path=working_directory, tag=f"127.0.0.1:{local_registry_port}/{run_name}")
        retry_call(f=docker_client.images.build,
                   fkwargs={
                       "path": working_directory,
                       "tag": f"127.0.0.1:{local_registry_port}/{run_name}"
                   },
                   exceptions=ConnectionError,
                   tries=DOCKER_CONNECTION_MAX_TRIES,
                   delay=DOCKER_CONNECTION_DELAY_SECONDS)
    except Exception as ex:
        # TODO: these exceptions should be reraised instead of caught here
        logger.exception(ex)
        return Texts.DOCKER_IMAGE_NOT_BUILT, 100

    try:
        # Retry the push for the same reason as the build above.
        # Original call without retry_call:
        # docker_client.images.push(repository=f"127.0.0.1:{local_registry_port}/{run_name}")
        retry_call(f=docker_client.images.push,
                   fkwargs={
                       "repository": f"127.0.0.1:{local_registry_port}/{run_name}"
                   },
                   exceptions=ConnectionError,
                   tries=DOCKER_CONNECTION_MAX_TRIES,
                   delay=DOCKER_CONNECTION_DELAY_SECONDS)
    except Exception as ex:
        logger.exception(ex)
        return Texts.DOCKER_IMAGE_NOT_SENT, 101

    try:
        dirs = os.listdir(f"{working_directory}/charts")
        helm.install_helm_chart(f"{working_directory}/charts/{dirs[0]}",
                                release_name=run_name,
                                tiller_namespace=namespace)
    except Exception as ex:
        logger.exception(ex)
        return Texts.APP_NOT_RELEASED, 102

    return "", 0
def get_requests(start_date, end_date, include_phone):
    """Get the problems registered in the referred time span.

    Dates must be strings in YYYY-MM-dd format (e.g. '2015-08-01').
    """
    phone_key = 'phone_key'
    if include_phone:
        phone_key = config.OPEN311_PHONE_KEY

    payload = {'start_date': start_date,
               'end_date': end_date,
               'phone_key': phone_key}

    r = None
    try:
        r = retry_call(requests.get,
                       fargs=[config.OPEN311_END_POINTS['requests'] + '.' +
                              config.OPEN311_RESPONSE_FORMATS['json']],
                       fkwargs={'params': payload, 'allow_redirects': False})
    except Exception:
        pass

    requests_list = []
    if not r:
        return requests_list

    for request in r.json():
        if u'code' in request and request.get('code') == 404:
            return requests_list
        requests_list.append(request)

    return requests_list
def confirm_lb_has_only_new_instances(self):
    try:
        logging.info(
            "Waiting maximum {0} minutes to terminate old instances.".format(
                self.calculate_max_minutes(self.only_new_wait[0], self.only_new_wait[1])))
        return retry_call(self.only_new_instances_check,
                          tries=self.only_new_wait[0],
                          delay=self.only_new_wait[1],
                          logger=logging)
    except Exception:
        logging.error("There are still old instances in the ELB. Please investigate.")
        exit(self.exit_error_code)
def cleanup(self):
    """Clean up this jailer context."""
    shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)

    if self.netns:
        _ = run(
            'ip netns del {}'.format(self.netns),
            shell=True,
            stderr=PIPE
        )

    # Remove the cgroup folders associated with this microvm.
    # The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
    # because we can't remove it unless we're sure there's no other running
    # microVM.

    # Firecracker is interested in these 3 cgroups for the moment.
    controllers = ('cpu', 'cpuset', 'pids')
    for controller in controllers:
        # Obtain the tasks from each cgroup and wait on them before
        # removing the microvm's associated cgroup folder.
        try:
            retry_call(
                f=self._kill_crgoup_tasks,
                fargs=[controller],
                exceptions=TimeoutError,
                max_delay=5
            )
        except TimeoutError:
            pass

        # The files inside a cgroup aren't real, so they can't (and don't
        # need to) be removed; that is why 'rm -rf' and 'rmdir' fail. We only
        # need to remove the cgroup directories. The "-depth" argument tells
        # find to do a depth-first recursion, so that any sub cgroups are
        # removed first if they are there.
        back_cmd = r'-depth -type d -exec rmdir {} \;'
        cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(
            controller,
            FC_BINARY_NAME,
            self.jailer_id,
            back_cmd
        )
        # We do not need to know if it succeeded or not; after all, we are
        # trying to clean up resources created by the jailer itself, not
        # by the testing system.
        _ = run(cmd, shell=True, stderr=PIPE)
def test_send_sms_and_email_via_api(profile, client):
    notification_id = send_notification_via_api(
        client, profile.sms_template_id, profile.mobile, 'sms')

    notification = retry_call(
        get_notification_by_id_via_api,
        fargs=[client, notification_id, 'delivered'],
        tries=Config.PROVIDER_RETRY_TIMES,
        delay=Config.PROVIDER_RETRY_INTERVAL
    )
    assert_notification_body(notification_id, notification)
def test_send_sms_and_email_via_api(profile, client):
    notification_id = send_notification_via_api(
        client, profile.email_template_id, profile.email, 'email')

    notification = retry_call(
        get_notification_by_id_via_api,
        fargs=[client, notification_id, ['sending', 'delivered']],
        tries=Config.NOTIFICATION_RETRY_TIMES,
        delay=Config.NOTIFICATION_RETRY_INTERVAL
    )
    assert_notification_body(notification_id, notification)
def test_retry_call_2():
    side_effect = [RuntimeError, RuntimeError, 3]
    f_mock = MagicMock(side_effect=side_effect)
    tries = 5
    result = None
    try:
        result = retry_call(f_mock, exceptions=RuntimeError, tries=tries)
    except RuntimeError:
        pass
    assert result == 3
    assert f_mock.call_count == len(side_effect)
def deliver_dashboard(schedule):
    """Given a schedule, deliver the dashboard as an email report."""
    dashboard = schedule.dashboard

    dashboard_url = _get_url_path(
        'Superset.dashboard',
        dashboard_id=dashboard.id,
    )

    # Create a driver, fetch the page, wait for the page to render
    driver = create_webdriver()
    window = config.get('WEBDRIVER_WINDOW')['dashboard']
    driver.set_window_size(*window)
    driver.get(dashboard_url)
    time.sleep(PAGE_RENDER_WAIT)

    # Set up a function to retry once for the element.
    # This is buggy in certain selenium versions with firefox driver
    get_element = getattr(driver, 'find_element_by_class_name')
    element = retry_call(
        get_element,
        fargs=['grid-container'],
        tries=2,
        delay=PAGE_RENDER_WAIT,
    )

    try:
        screenshot = element.screenshot_as_png
    except WebDriverException:
        # Some webdrivers do not support screenshots for elements.
        # In such cases, take a screenshot of the entire page.
        screenshot = driver.screenshot()  # pylint: disable=no-member
    finally:
        destroy_webdriver(driver)

    # Generate the email body and attachments
    email = _generate_mail_content(
        schedule,
        screenshot,
        dashboard.dashboard_title,
        dashboard_url,
    )

    subject = __(
        '%(prefix)s %(title)s',
        prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
        title=dashboard.dashboard_title,
    )

    _deliver_email(schedule, subject, email)
def retry_method(self, method, *args, **kwargs):
    if self.proxies:
        # FIXME: may be a little loud
        logger.debug("Using proxy %s for: %s", self.proxies["http"], args[0])

    return retry_call(getattr(super(RetryingSession, self), method),
                      fargs=args,
                      fkwargs=kwargs,
                      tries=3,
                      delay=5,
                      exceptions=(exceptions.ConnectionError,
                                  exceptions.ProxyError,
                                  exceptions.SSLError,
                                  exceptions.Timeout,
                                  exceptions.ConnectTimeout,
                                  exceptions.ReadTimeout,
                                  socket.timeout))
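# Hypothetical context sketch (not from the source): RetryingSession
# presumably subclasses requests.Session and routes the HTTP verbs through
# retry_method, e.g.:
#
#   class RetryingSession(requests.Session):
#       def get(self, url, **kwargs):
#           return self.retry_method('get', url, **kwargs)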
def send_daily_survey_replies():
    """Task to send daily survey answers as a PDF."""
    today = date.today()

    response = None
    try:
        response = retry_call(
            requests.get,
            fargs=['http://mopa.co.mz:8080/critical-points/' + today.strftime('%Y-%m-%d')],
            exceptions=ConnectTimeout,
            tries=3)
    except Exception as ex:
        ex_type, ex_obj, ex_tb = sys.exc_info()
        fname = os.path.split(ex_tb.tb_frame.f_code.co_filename)[1]
        current_app.logger.error(
            "Could not fetch daily survey answers required to generate report.\n"
            "Error message: {ex_msg}.\n"
            "Exception Type: {ex_type}.\n"
            "File name: {file_name}.\n"
            "Line No: {line_no}.\n"
            "Traceback: {traceback}".format(
                ex_msg=str(ex),
                ex_type=str(ex_type),
                file_name=str(fname),
                line_no=str(ex_tb.tb_lineno),
                traceback=traceback.format_exc()))
def request_with_retry(func, *args, **kwds):
    def request_wrapper():
        try:
            return func(*args, **kwds)
        except HTTPException as e:
            if isinstance(e, (Forbidden, NotFound)):
                raise
            status = e._raw.status_code  # XXX: accesses a private field
            raise ServerError("http error occurred with status={}".format(status))

    return retry_call(
        request_wrapper,
        exceptions=(ServerError, Timeout),
        delay=60,
        jitter=60,
        max_delay=60 * 5,
        logger=logger
    )
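# Hypothetical usage sketch (`api.fetch_user` and `user_id` are illustrative,
# not from the source). Converted ServerErrors and Timeouts are retried with
# waits of 60s, 120s, 180s, ... (the delay grows by the fixed 60s jitter each
# attempt), capped at 5 minutes:
#
#   user = request_with_retry(api.fetch_user, user_id)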
def test_admin(driver, base_url, client, profile, login_user):
    upload_csv_page = UploadCsvPage(driver)

    csv_sms_notification_id = send_notification_via_csv(profile, upload_csv_page, 'sms')
    csv_sms_notification = retry_call(
        get_notification_by_id_via_api,
        fargs=[client, csv_sms_notification_id, ['sending', 'delivered']],
        tries=Config.NOTIFICATION_RETRY_TIMES,
        delay=Config.NOTIFICATION_RETRY_INTERVAL
    )
    assert_notification_body(csv_sms_notification_id, csv_sms_notification)

    csv_email_notification_id = send_notification_via_csv(profile, upload_csv_page, 'email')
    csv_email_notification = retry_call(
        get_notification_by_id_via_api,
        fargs=[client, csv_email_notification_id, ['sending', 'delivered']],
        tries=Config.NOTIFICATION_RETRY_TIMES,
        delay=Config.NOTIFICATION_RETRY_INTERVAL
    )
    assert_notification_body(csv_email_notification_id, csv_email_notification)

    upload_csv_page.sign_out()
def get_locations_online(self):
    """Load the locations from the API and convert them into a python list."""
    if self.ONLINE_LOCATIONS:
        return self.ONLINE_LOCATIONS

    r = None
    try:
        r = retry_call(requests.get,
                       fargs=[config.OPEN311_BASE_URL + "locations.json"],
                       exceptions=ConnectTimeout,
                       tries=3)
    except Exception:
        pass

    if r and r.status_code == 200:
        z_json = str(r.text.decode("utf-8").encode("ascii", "ignore")) \
            .strip("'<>()\"` ").replace('\'', '\"')
        self.ONLINE_LOCATIONS = json.loads(z_json)
        return self.ONLINE_LOCATIONS
    else:
        return []
def test_retry_call_with_args():
    def f(value=0):
        if value < 0:
            return value
        else:
            raise RuntimeError

    return_value = -1
    result = None
    f_mock = MagicMock(spec=f, return_value=return_value)
    try:
        result = retry_call(f_mock, fargs=[return_value])
    except RuntimeError:
        pass
    assert result == return_value
    assert f_mock.call_count == 1
def test_retry_call_with_kwargs():
    def f(value=0):
        if value < 0:
            return value
        else:
            raise RuntimeError

    kwargs = {'value': -1}
    result = None
    f_mock = MagicMock(spec=f, return_value=kwargs['value'])
    try:
        result = retry_call(f_mock, fkwargs=kwargs)
    except RuntimeError:
        pass
    assert result == kwargs['value']
    assert f_mock.call_count == 1
def _get_slice_visualization(schedule):
    slc = schedule.slice

    # Create a driver, fetch the page, wait for the page to render
    driver = create_webdriver()
    window = config.get('WEBDRIVER_WINDOW')['slice']
    driver.set_window_size(*window)

    slice_url = _get_url_path(
        'Superset.slice',
        slice_id=slc.id,
    )

    driver.get(slice_url)
    time.sleep(PAGE_RENDER_WAIT)

    # Set up a function to retry once for the element.
    # This is buggy in certain selenium versions with firefox driver
    element = retry_call(
        driver.find_element_by_class_name,
        fargs=['chart-container'],
        tries=2,
        delay=PAGE_RENDER_WAIT,
    )

    try:
        screenshot = element.screenshot_as_png
    except WebDriverException:
        # Some webdrivers do not support screenshots for elements.
        # In such cases, take a screenshot of the entire page.
        screenshot = driver.screenshot()  # pylint: disable=no-member
    finally:
        destroy_webdriver(driver)

    # Generate the email body and attachments
    return _generate_mail_content(
        schedule,
        screenshot,
        slc.slice_name,
        slice_url,
    )
def send(self):
    """Issue an HTTP request to the Source Code Solutions endpoint, which in
    turn will forward the message to the destination number.

    PS: this must use GET; the `from` param must be in lower case.
    """
    # probably save before sending
    if self.direction == 'O' and self.sent_to == 'Mopa':
        raise Exception("Invalid addressee for outgoing message" + str(self))

    payload = {"to[]": self.sent_to,
               "message": self.text,
               "API_KEY": UX_SMS_API_KEY}
    response = None

    # Retry the request 3 times if a safe-to-retry ConnectTimeout exception is
    # thrown; trap and report other errors.
    try:
        response = retry_call(requests.post,
                              fargs=[UX_SMS_END_POINT],
                              fkwargs={"data": payload, 'allow_redirects': True},
                              exceptions=ConnectTimeout,
                              tries=3,
                              logger=current_app.logger)
    except Exception as ex:
        ex_type, ex_obj, ex_tb = sys.exc_info()
        fname = os.path.split(ex_tb.tb_frame.f_code.co_filename)[1]
        current_app.logger.error(
            "Error delivering SMS to SMSC.\n"
            "Error message: {ex_msg}.\n"
            "Exception Type: {ex_type}.\n"
            "File name: {file_name}.\n"
            "Line No: {line_no}.\n"
            "Traceback: {traceback}".format(
                ex_msg=str(ex),
                ex_type=str(ex_type),
                file_name=str(fname),
                line_no=str(ex_tb.tb_lineno),
                traceback=traceback.format_exc()))
        return
def test_send_csv(driver, profile, login_seeded_user, seeded_client, message_type):
    dashboard_page = DashboardPage(driver)
    dashboard_page.go_to_dashboard_for_service()

    template_id = profile.email_template_id if message_type == 'email' else profile.sms_template_id
    dashboard_stats_before = get_dashboard_stats(dashboard_page, message_type, template_id)

    upload_csv_page = UploadCsvPage(driver)
    notification_id = send_notification_via_csv(profile, upload_csv_page, message_type, seeded=True)

    notification = retry_call(
        get_notification_by_id_via_api,
        fargs=[seeded_client, notification_id, ['sending', 'delivered']],
        tries=Config.NOTIFICATION_RETRY_TIMES,
        delay=Config.NOTIFICATION_RETRY_INTERVAL
    )
    assert_notification_body(notification_id, notification)

    dashboard_page.go_to_dashboard_for_service()
    dashboard_stats_after = get_dashboard_stats(dashboard_page, message_type, template_id)
    assert_dashboard_stats(dashboard_stats_before, dashboard_stats_after)
def consume(self, topic, msg):
    print("Message of length {} to Kafka({})".format(len(msg), topic))
    msg = bytes(msg, "UTF-8")
    retry_call(self.producer.send_messages,
               fargs=[topic, msg],
               tries=3,
               delay=0.5)
def retry(req, cmd):
    """A simple wrapper around retry_call().

    Saves the caller from having to spell out all the retry parameters.
    """
    return retry_call(req, [cmd], tries=3, delay=1, backoff=2)
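# Hypothetical usage sketch (`conn.send_command` is illustrative, not from
# the source):
#
#   output = retry(conn.send_command, 'show version')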
def _retry(func, *args, **kargs):
    return retry_call(func,
                      fargs=args,
                      fkwargs=kargs,
                      delay=.5,
                      backoff=2,
                      max_delay=300,
                      logger=logging)
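# Hypothetical usage sketch (the URL is illustrative). With `tries` left at
# the `retry` package default of -1, this retries the call indefinitely,
# doubling the wait (0.5s, 1s, 2s, ...) up to the 300s cap:
#
#   _retry(requests.get, 'https://example.com/health', timeout=5)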