def _cleanup_vm():
    """Best-effort removal of the test VM; failures are logged, never raised."""
    try:
        mgmt = initialize_provider.mgmt
        if mgmt.does_vm_exist(name):
            mgmt.delete_vm(name)
            initialize_provider.refresh_provider_relationships()
    except Exception as e:
        logger.exception(e)
def _setup_provider(provider_key, request=None):
    # This function is dynamically "fixturized" to set up a specific provider,
    # skipping early when that provider has already failed in this session.
    def skip(provider_key, previous_fail=False):
        # Report the skip to artifactor (when a pytest request is available),
        # then raise pytest's skip exception with an explanatory message.
        if request:
            node = request.node
            name, location = get_test_idents(node)
            skip_data = {'type': 'provider', 'reason': provider_key}
            art_client.fire_hook('skip_test', test_location=location, test_name=name,
                                 skip_data=skip_data)
        if previous_fail:
            raise pytest.skip('Provider {} failed to set up previously in another test, '
                              'skipping test'.format(provider_key))
        else:
            raise pytest.skip('Provider {} failed to set up this time, '
                              'skipping test'.format(provider_key))
    # This function is dynamically "fixturized" to setup up a specific provider,
    # optionally skipping the provider setup if that provider has previously failed.
    if provider_key in _failed_providers:
        skip(provider_key, previous_fail=True)
    try:
        providers.setup_provider(provider_key)
    except Exception as ex:
        # Remember the failure so later tests against this provider skip fast.
        logger.error('Error setting up provider %s', provider_key)
        logger.exception(ex)
        _failed_providers.add(provider_key)
        skip(provider_key)
def get_registered_vm_files(provider_key):
    """Collect the config-file directories of all registered VMs on a provider.

    Returns:
        defaultdict(set): directory path -> set of VM names registered there,
        or None when the provider itself could not be processed.
    """
    try:
        print("{} processing all the registered files..".format(provider_key))
        vm_registered_files = defaultdict(set)
        provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            try:
                vm_file_path = provider.get_vm_config_files_path(vm_name)
                vm_directory_name = re.findall(r'\s(.*)/\w*', vm_file_path)
                # BUG FIX: the value is a defaultdict(set), but plain assignment
                # clobbered any previous VM sharing the same directory; add to
                # the set instead of replacing it.
                vm_registered_files[vm_directory_name[0]].add(vm_name)
            except Exception as e:
                logger.error(e)
                logger.error('Failed to get creation/boot time for {} on {}'.format(
                    vm_name, provider_key))
                continue
        print("\n**************************REGISTERED FILES ON {}***********************\n".format(
            provider_key))
        for k, v in vm_registered_files.items():
            print('FILE_NAME: {}\nVM_NAME: {}\n'.format(k, v))
        return vm_registered_files
    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def pytest_sessionfinish(self, exitstatus):
    """Collect, merge and report UI coverage results at end of session."""
    # Now master/standalone needs to move all the reports to an appliance for the source report
    if store.parallelizer_role != 'master':
        manager().collect()
    # for slaves, everything is done at this point
    if store.parallelizer_role == 'slave':
        return
    # on master/standalone, merge all the collected reports and bring them back
    manager().merge()
    try:
        # Publish the merged coverage percentage module-wide for other hooks.
        global ui_coverage_percent
        last_run = json.load(log_path.join('coverage', 'merged', '.last_run.json').open())
        ui_coverage_percent = last_run['result']['covered_percent']
        style = {'bold': True}
        # Colour the terminal line green above the 40% threshold, red below.
        if ui_coverage_percent > 40:
            style['green'] = True
        else:
            style['red'] = True
        store.write_line('UI Coverage Result: {}%'.format(ui_coverage_percent), **style)
    except Exception as ex:
        # Reporting is best-effort; never fail the session over it.
        logger.error('Error printing coverage report to terminal')
        logger.exception(ex)
def testing_instance(setup_provider, provider):
    """ Fixture to provision instance on the provider """
    instance = Instance.factory(random_vm_name('pwr-c'), provider)
    if not provider.mgmt.does_vm_exist(instance.name):
        instance.create_on_provider(allow_skip="default")
    elif instance.provider.type == "ec2" and \
            provider.mgmt.is_vm_state(instance.name, provider.mgmt.states['deleted']):
        # EC2 keeps terminated instances around for a while; rename the stale
        # one out of the way so a fresh instance can reuse the name.
        provider.mgmt.set_name(
            instance.name, 'test_terminated_{}'.format(fauxfactory.gen_alphanumeric(8)))
        instance.create_on_provider(allow_skip="default", find_in_cfme=True)
    provider.refresh_provider_relationships()
    # Make sure the instance shows up
    try:
        wait_for(lambda: instance.exists, fail_condition=False, num_sec=600, delay=15,
                 fail_func=provider.refresh_provider_relationships)
    except TimedOutError:
        pytest.fail('Failed to find instance in CFME after creating on provider: {}'
                    .format(instance.name))
    yield instance
    # Teardown: delete the instance from the provider; errors are only logged.
    logger.info('Fixture cleanup, deleting test instance: %s', instance.name)
    try:
        provider.mgmt.delete_vm(instance.name)
    except Exception:
        logger.exception('Exception when deleting testing_instance: %s', instance.name)
def scan_provider(provider_key, matchers, match_queue, scan_failure_queue):
    """
    Process the VMs on a given provider, comparing name and creation time.
    Append vms meeting criteria to vms_to_delete

    Args:
        provider_key (string): the provider key from yaml
        matchers (list): A list of regex objects with match() method
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age

    Returns:
        None: Uses the Queues to 'return' data
    """
    logger.info('%s: Start scan for vm text matches', provider_key)
    try:
        vm_list = get_mgmt(provider_key).list_vm()
    except Exception:  # noqa
        # Record the whole provider as a failure and bail out.
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%s: Exception listing vms', provider_key)
        return
    matched_names = [vm for vm in vm_list if match(matchers, vm)]
    for vm in matched_names:
        match_queue.put(VmProvider(provider_key, vm))
    unmatched_names = set(vm_list) - set(matched_names)
    logger.info('%s: NOT matching text filters: %s', provider_key, unmatched_names)
    logger.info('%s: MATCHED text filters: %s', provider_key, matched_names)
def run_command(self, command, timeout=RUNCMD_TIMEOUT):
    """Run a command over the SSH session and gather its combined output.

    Args:
        command: shell command to execute (a newline is appended).
        timeout: session timeout in seconds; falsy disables the timeout.

    Returns:
        SSHResult(exit_status, output); SSHResult(1, None) on SSHException,
        so tuple unpacking of the return still works on failure.
    """
    logger.info("Running command `{}`".format(command))
    template = '%s\n'
    command = template % command
    try:
        session = self.get_transport().open_session()
        if timeout:
            session.settimeout(float(timeout))
        session.exec_command(command)
        stdout = session.makefile()
        stderr = session.makefile_stderr()
        output = ''
        while True:
            # BUG FIX: recv_ready/recv_stderr_ready are *methods* on the
            # paramiko channel; without the call parentheses the bound-method
            # objects were always truthy and the checks were meaningless.
            if session.recv_ready():
                for line in stdout:
                    output += line
                    if self._streaming:
                        sys.stdout.write(line)
            if session.recv_stderr_ready():
                for line in stderr:
                    output += line
                    if self._streaming:
                        sys.stderr.write(line)
            if session.exit_status_ready():
                break
        exit_status = session.recv_exit_status()
        return SSHResult(exit_status, output)
    except paramiko.SSHException as exc:
        logger.exception(exc)
    # Returning two things so tuple unpacking the return works even if the ssh client fails
    return SSHResult(1, None)
def process_provider_vms(provider_key, matchers, delta, vms_to_delete): with lock: print '{} processing'.format(provider_key) try: now = datetime.datetime.now() with lock: # Known conf issue :) provider = get_mgmt(provider_key) for vm_name in provider.list_vm(): if not match(matchers, vm_name): continue try: vm_creation_time = provider.vm_creation_time(vm_name) except: logger.error('Failed to get creation/boot time for %s on %s' % ( vm_name, provider_key)) continue if vm_creation_time + delta < now: vm_delta = now - vm_creation_time with lock: vms_to_delete[provider_key].add((vm_name, vm_delta)) with lock: print '{} finished'.format(provider_key) except Exception as ex: with lock: # Print out the error message too because logs in the job get deleted print '{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)) logger.error('failed to process vms from provider %s', provider_key) logger.exception(ex)
def get_datastores_per_host(provider_key):
    """Print unregistered (orphaned) files per host on a provider.

    Builds the registered-VM file map, then walks every host's datastore URLs
    looking for files not backed by a registered VM. Output goes to stdout;
    failures are logged and swallowed.
    """
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        provider = get_mgmt(provider_key)
        vm_registered_files = get_registered_vm_files(provider_key)
        hosts = provider.list_host()
        # host name -> list of datastore URLs served by that host
        host_datastore_url = {host: provider.list_host_datastore_url(host) for host in hosts}
        unregistered_files = []
        print("\n*********************UNREGISTERED FILES ON: {}**********************\n".format(
            provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host in host_datastore_url:
            try:
                # Appends findings to unregistered_files and prints them.
                list_orphaned_files_per_host(host, host_datastore_url[host],
                                             provider_key, vm_registered_files,
                                             unregistered_files)
            except Exception as e:
                # One bad host shouldn't stop the scan of the others.
                logger.error(e)
                continue
    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def appliance_preupdate(old_version, appliance):
    """Requests appliance from sprout based on old_versions, edits partitions and adds
    repo file for update.

    BUG FIX: this docstring previously appeared *after* executable statements,
    making it a no-op string expression instead of the function's docstring.
    """
    series = appliance.version.series()
    update_url = "update_url_{}".format(series.replace('.', ''))
    usable = []
    sp = SproutClient.from_config()
    available_versions = set(sp.call_method('available_cfme_versions'))
    # Pick the newest available version in the requested old-version stream.
    for a in available_versions:
        if a.startswith(old_version):
            usable.append(Version(a))
    usable.sort(reverse=True)
    try:
        apps, pool_id = sp.provision_appliances(count=1, preconfigured=True,
                                                lease_time=180, version=str(usable[0]))
    except Exception as e:
        logger.exception("Couldn't provision appliance with following error:{}".format(e))
        raise SproutException('No provision available')
    apps[0].db.extend_partition()
    # Build the yum repo file for the update URL and push it to the appliance.
    urls = process_url(cfme_data['basic_info'][update_url])
    output = build_file(urls)
    with tempfile.NamedTemporaryFile('w') as f:
        f.write(output)
        f.flush()
        os.fsync(f.fileno())
        apps[0].ssh_client.put_file(
            f.name, '/etc/yum.repos.d/update.repo')
    yield apps[0]
    # Teardown: close the SSH session and return the appliance to sprout.
    apps[0].ssh_client.close()
    sp.destroy_pool(pool_id)
def process_provider_vms(provider_key, matchers, delta, vms_to_delete): with lock: print '%s processing' % provider_key try: now = datetime.datetime.now() provider = provider_factory(provider_key) for vm_name in provider.list_vm(): if not match(matchers, vm_name): continue try: vm_creation_time = provider.vm_creation_time(vm_name) except: logger.error('Failed to get creation/boot time for %s on %s' % ( vm_name, provider_key)) continue if vm_creation_time + delta < now: vm_delta = now - vm_creation_time with lock: vms_to_delete[provider_key].add((vm_name, vm_delta)) with lock: print '%s finished' % provider_key except Exception as ex: with lock: print '%s failed' % provider_key logger.error('failed to process vms from provider %s', provider_key) logger.exception(ex)
def cleanup_templates(api, edomain, days, max_templates):
    """Delete aged 'auto-tmp' templates from an export domain, in parallel.

    Args:
        api: RHEVM API client.
        edomain: export domain name.
        days: minimum age (days) for a template to be deleted.
        max_templates: cap on how many templates are deleted per run.

    Returns:
        False on failure; None otherwise (original behavior preserved).
    """
    try:
        templates = api.storagedomains.get(edomain).templates.list()
        thread_queue = []
        delete_templates = []
        # Loop-invariant values hoisted out of the per-template loop.
        delta = datetime.timedelta(days=days)
        now = datetime.datetime.now(pytz.utc)
        for template in templates:
            template_creation_time = template.get_creation_time().astimezone(pytz.utc)
            if template.get_name().startswith('auto-tmp'):
                if now > (template_creation_time + delta):
                    delete_templates.append(template)
        if not delete_templates:
            print("RHEVM: No old templates to delete in {}".format(edomain))
        # Delete at most max_templates, one daemon thread each, then wait.
        for delete_template in delete_templates[:max_templates]:
            thread = Thread(target=delete_edomain_templates,
                            args=(api, delete_template, edomain))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()
        for thread in thread_queue:
            thread.join()
    except Exception as e:
        logger.exception(e)
        return False
def _get_canvas_element(provider): try: canvas = provider.get_remote_console_canvas() except ItemNotFound: logger.exception('Could not find canvas element.') return False return canvas
def rename_vm(self, instance_name, new_name):
    """Rename an instance; return the name that is now in effect.

    On failure the rename is logged and the original name is returned.
    """
    target = self._find_instance_by_name(instance_name)
    try:
        target.update(new_name)
    except Exception as e:
        logger.exception(e)
        return instance_name
    return new_name
def stop(self):
    """Stop the previously started recordmydesktop process, if running.

    Sends SIGINT so the recorder finalizes its output, then reaps it.
    """
    if self.pid is not None:
        if process_running(self.pid):
            os.kill(self.pid, SIGINT)
            os.waitpid(self.pid, 0)
            logger.info("Recording finished")
            self.pid = None
        else:
            # BUG FIX: logger.exception() is only meaningful inside an
            # `except` block (it logs the active traceback); there is no
            # exception here, so use logger.error instead.
            logger.error("Could not find recordmydesktop process #%d" % self.pid)
def is_ovirt_engine_running(provider_mgmt):
    """Return True when the ovirt-engine service reports 'running' over SSH."""
    try:
        output = make_ssh_client(provider_mgmt).run_command('service ovirt-engine status')[1]
        return 'running' in output
    except Exception as e:
        logger.exception(e)
        return False
def connect(self):
    """Connect to the configured host and port (no-op when already connected)."""
    if not self.connected:
        try:
            self._sock.connect((self.domain, self.port))
        except socket.error as e:
            # Connection failed: log it and leave self.connected falsy.
            logger.exception(e)
        else:
            self.connected = 1
            logger.debug('TCPClient connect to {0}:{1} success.'.format(self.domain, self.port))
def rename_vm(self, vm_name, new_vm_name):
    """Rename a VM; return the name now in effect (old name on failure)."""
    vm = self._get_vm(vm_name)
    try:
        vm.set_name(new_vm_name)
        vm.update()
    except Exception as e:
        logger.exception(e)
        return vm_name
    return new_vm_name
def is_ovirt_engine_running(rhevm_ip, sshname, sshpass):
    """SSH to the RHEVM host and report whether ovirt-engine is running."""
    try:
        client = make_ssh_client(rhevm_ip, sshname, sshpass)
        status_output = client.run_command('service ovirt-engine status')[1]
        return 'running' in status_output
    except Exception as e:
        logger.exception(e)
        return False
def deploy_template(provider_key, vm_name, template_name=None, timeout=900, **deploy_args):
    """Deploy a VM/instance from a template on the given provider.

    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: seconds allowed for the provider-side deploy
        **deploy_args: extra deployment arguments; `allow_skip` controls which
            provider errors become pytest skips instead of failures.

    Returns:
        The deployed VM name (instance ID in case of EC2).
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        # BUG FIX: an `except` clause requires an exception class or *tuple*;
        # dict.keys() is not a tuple and would raise TypeError when one of
        # these exceptions actually fired.
        skip_exceptions = tuple(allow_skip.keys())
        callable_mapping = allow_skip
    elif isinstance(allow_skip, basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError, exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)
    deploy_args.update(vm_name=vm_name)
    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['small_template'])
        except KeyError:
            raise ValueError('small_template not defined for Provider {} in cfme_data.yaml'.format(
                provider_key))
    else:
        deploy_args.update(template=template_name)
    deploy_args.update(provider_crud.deployment_helper(deploy_args))
    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s",
                vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s", vm_name)  # instance ID in case of EC2
        except Exception as e:
            # FIX: corrected message grammar ("Could not provisioning" -> "provision")
            logger.error('Could not provision VM/instance %s (%s: %s)',
                         vm_name, type(e).__name__, str(e))
            _vm_cleanup(provider_crud.mgmt, vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        # A mapped callable can veto the skip for its exception type.
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
def inner():
    # Check out a wharf container slot, then delegate browser creation to the
    # parent factory; on connection refusal, recycle the container and re-raise.
    try:
        self.wharf.checkout()
        return super(WharfFactory, self).create(url_key)
    except urllib2.URLError as ex:
        # connection to selenium was refused for unknown reasons
        log.error('URLError connecting to selenium; recycling container. URLError:')
        write_line('URLError caused container recycle, see log for details', red=True)
        log.exception(ex)
        # Return the slot so another attempt can get a fresh container.
        self.wharf.checkin()
        raise
def vm_name(provider, small_template):
    """Yield the name of a freshly deployed alert-test VM; delete it afterwards."""
    name = "test_alerts_{}".format(fauxfactory.gen_alpha())
    try:
        name = deploy_template(provider.key, name,
                               template_name=small_template, allow_skip="default")
        yield name
    finally:
        # Best-effort cleanup; deletion failures are logged, not raised.
        try:
            mgmt = provider.mgmt
            if mgmt.does_vm_exist(name):
                mgmt.delete_vm(name)
        except Exception as e:
            logger.exception(e)
def back_up_default_domain(ssh_client):
    """Fixture: export the Default automate domain, restore it on teardown.

    The restore only runs when the initial export succeeded (rc == 0).
    """
    ssh_client.run_command("rm -f /tmp/Default_backup.yaml")
    rc = ssh_client.run_rake_command(
        "evm:automate:export DOMAIN=Default YAML_FILE=/tmp/Default_backup.yaml "
        "PREVIEW=false OVERWRITE=true")[0]
    yield
    if rc == 0:
        rc, stdout = ssh_client.run_rake_command(
            "evm:automate:import DOMAIN=Default YAML_FILE=/tmp/Default_backup.yaml PREVIEW=false")
        if rc != 0:
            # BUG FIXES: logger.exception() belongs inside an `except` block
            # (no exception is active here), and the message misspelled
            # "re-import" as "re-improt".
            logger.error("Could not re-import back the Default domain!: `{}`".format(stdout))
def quit(self):
    """Close the browser after draining queued cleanups; never raises."""
    # TODO: figure if we want to log the url key here
    log.info('closing browser')
    self._consume_cleanups()
    try:
        self.factory.close(self.browser)
    except Exception as exc:
        log.error('An exception happened during browser shutdown:')
        log.exception(exc)
    finally:
        # Always drop the reference so a new browser can be created.
        self.browser = None
def _is_alive(self):
    """Probe the browser; return False only when it looks genuinely dead."""
    log.debug("alive check")
    try:
        self.browser.current_url
    except UnexpectedAlertPresentException:
        # We shouldn't think that an Unexpected alert means the browser is dead
        return True
    except Exception:
        log.exception("browser in unknown state, considering dead")
        return False
    else:
        return True
def is_ovirt_engine_running(provider_mgmt):
    """Check the ovirt-engine service over SSH, trying systemd then sysV."""
    try:
        with make_ssh_client(provider_mgmt) as ssh_client:
            status = ssh_client.run_command('systemctl status ovirt-engine')[1]
            # fallback to sysV commands if necessary
            if 'command not found' in status:
                status = ssh_client.run_command('service ovirt-engine status')[1]
        return 'running' in status
    except Exception as e:
        logger.exception(e)
        return False
def login(user, submit_method=_js_auth_fn):
    """
    Login to CFME with the given username and password.
    Optionally, submit_method can be press_enter_after_password
    to use the enter key to login, rather than clicking the button.

    Args:
        user: The username to fill in the username field.
        password: The password to fill in the password field.
        submit_method: A function to call after the username and password have been input.

    Raises:
        RuntimeError: If the login fails, ie. if a flash message appears
    """
    if not user:
        username = conf.credentials['default']['username']
        password = conf.credentials['default']['password']
        cred = Credential(principal=username, secret=password)
        user = User(credential=cred)

    # BUG FIX: `is not` compared string *identity*, so two equal usernames in
    # distinct string objects forced a needless re-login; compare with `!=`.
    if not logged_in() or user.credential.principal != current_username():
        if logged_in():
            logout()
        # workaround for strange bug where we are logged out
        # as soon as we click something on the dashboard
        sel.sleep(1.0)

        logger.debug('Logging in as user %s', user.credential.principal)
        try:
            fill(form, {'username': user.credential.principal,
                        'password': user.credential.secret})
        except sel.InvalidElementStateException as e:
            logger.warning("Got an error. Details follow.")
            msg = str(e).lower()
            if "element is read-only" in msg:
                logger.warning("Got a read-only login form, will reload the browser.")
                # Reload browser
                quit()
                ensure_browser_open()
                sel.sleep(1.0)
                sel.wait_for_ajax()
                # And try filling the form again
                fill(form, {'username': user.credential.principal,
                            'password': user.credential.secret})
            else:
                logger.warning("Unknown error, reraising.")
                logger.exception(e)
                raise
        with sel.ajax_timeout(90):
            submit_method()
        flash.assert_no_errors()
        user.full_name = _full_name()
        store.user = user
def handle_alert(cancel=False, wait=30.0, squash=False, prompt=None, check_present=False): """Handles an alert popup. Args: cancel: Whether or not to cancel the alert. Accepts the Alert (False) by default. wait: Time to wait for an alert to appear. Default 30 seconds, can be set to 0 to disable waiting. squash: Whether or not to squash errors during alert handling. Default False prompt: If the alert is a prompt, specify the keys to type in here check_present: Does not squash :py:class:`selenium.common.exceptions.NoAlertPresentException` Returns: True if the alert was handled, False if exceptions were squashed, None if there was no alert. No exceptions will be raised if ``squash`` is True and ``check_present`` is False. Raises: utils.wait.TimedOutError: If the alert popup does not appear selenium.common.exceptions.NoAlertPresentException: If no alert is present when accepting or dismissing the alert. """ # throws timeout exception if not found try: if wait: WebDriverWait(browser(), wait).until(expected_conditions.alert_is_present()) popup = get_alert() answer = 'cancel' if cancel else 'ok' t = "alert" if prompt is None else "prompt" logger.info('Handling %s %s, clicking %s', t, popup.text, answer) if prompt is not None: logger.info("Typing in: %s", prompt) popup.send_keys(prompt) popup.dismiss() if cancel else popup.accept() # Should any problematic "double" alerts appear here, we don't care, just blow'em away. dismiss_any_alerts() wait_for_ajax() return True except NoAlertPresentException: if check_present: raise else: return None except Exception as e: logger.exception(e) if squash: return False else: raise
def clean_jenkins_job(self, jenkins_job):
    """Destroy any leftover sprout pools whose description matches this
    Jenkins job; failures are logged and ignored."""
    try:
        log.info(
            "Check if pool already exists for this %r Jenkins job", jenkins_job[0])
        jenkins_job_pools = self.client.find_pools_by_description(jenkins_job[0], partial=True)
        for pool in jenkins_job_pools:
            log.info("Destroying the old pool %s for %r job.", pool, jenkins_job[0])
            self.client.destroy_pool(pool)
    except Exception:
        # BUG FIX: the two adjacent string literals were concatenated with no
        # separator, logging "...can be ignoredproceeding..."; add punctuation.
        log.exception(
            "Exception occurred during old pool deletion, this can be ignored, "
            "proceeding to Request new pool")
def fire(signal):
    """
    Fires the signal, invoking all callbacks in the library for the signal.

    Args:
        signal: Name of signal to be invoked.
    """
    logger.info('Invoking callback for signal [%s]', signal)
    registered = _callback_library.get(signal, set())
    for callback in registered:
        # One failing callback must not prevent the rest from running.
        try:
            callback()
        except Exception as e:
            logger.exception(e)
def send(self, data, dtype='str', suffix=''):
    """Send *data* to the server and return its reply (None on socket error).

    When dtype is 'json' the payload is serialized with json.dumps before the
    optional *suffix* terminator is appended.
    """
    if dtype == 'json':
        # json.dumps converts the dict payload to a str for the wire.
        send_string = json.dumps(data) + suffix
    else:
        send_string = data + suffix
    # BUG FIX: was `self.connet()` — an AttributeError typo; the client
    # defines connect() (as the sibling implementation calls).
    self.connect()
    if self.connected:
        try:
            self._sock.send(send_string.encode())
            logger.debug('TCPClient Send {0}'.format(send_string))
        except socket.error as e:
            logger.exception(e)
        try:
            rec = self._sock.recv(self.max_receive).decode()
            if suffix:
                # Strip the protocol terminator from the reply.
                rec = rec[:-len(suffix)]
            logger.debug('TCPClient received {0}'.format(rec))
            return rec
        except socket.error as e:
            logger.exception(e)
def send(self, data, dtype='str', suffix=''):
    """Send data to the server and return its reply; returns None on error.

    When dtype is 'json' the payload is serialized with json.dumps before the
    optional *suffix* terminator is appended.
    """
    if dtype == 'json':
        send_string = json.dumps(data) + suffix
    else:
        send_string = data + suffix
    self.connect()
    if self.connected:
        try:
            self._sock.send(send_string.encode())
            logger.debug('TCPClient Send {0}'.format(send_string))
        except socket.error as e:
            # NOTE(review): a send failure is logged but the recv below is
            # still attempted — confirm that's intended.
            logger.exception(e)
        try:
            rec = self._sock.recv(self.max_receive).decode()
            if suffix:
                # Strip the protocol terminator from the reply.
                rec = rec[:-len(suffix)]
            logger.debug('TCPClient received {0}'.format(rec))
            return rec
        except socket.error as e:
            logger.exception(e)
def pool_manager(func, arg_list):
    """Create a process pool and join the processes via apply_async

    Notes:
        Use Manager.Queue for any queues in the arg_list tuples.
        BLOCKS by joining
        # TODO put this into some utility library and handle kwargs, take pool size arg

    Args:
        func (method): A function to parallel process
        arg_list (list): a list of arg tuples

    Returns:
        list of the return values from apply_async
    """
    # TODO increase pool size
    worker_pool = Pool(8)
    async_handles = [worker_pool.apply_async(func, args=args) for args in arg_list]
    worker_pool.close()
    worker_pool.join()
    # Check for exceptions since they're captured
    # Don't care about non-exception results since all non-exception results are in the queues
    outcomes = []
    for handle in async_handles:
        try:
            outcome = handle.get()
        except Exception as ex:
            outcome = ex
        finally:
            if isinstance(outcome, Exception):
                logger.exception('Exception during function call %s', func.__name__)
            outcomes.append(outcome)
    return outcomes
def send(self):
    """Compose and send the mail over SMTP-SSL (subject, from, to, cc, body,
    attachments); connection and auth failures are logged, not raised."""
    self.msg['Subject'] = self.title
    self.msg['From'] = self.sender
    self.msg['To'] = self.receiver
    self.msg['Cc'] = self.acc
    # Append CC addresses so they also receive the mail.
    self.receiver += ';' + self.acc if self.acc else ''
    # Mail body.
    if self.message:
        self.msg.attach(MIMEText(self.message))
    # Attachments: a list of paths or a single path string.
    if self.files:
        if isinstance(self.files, list):
            for f in self.files:
                self._attach_file(f)
        elif isinstance(self.files, str):
            self._attach_file(self.files)
    # Connect (SSL, port 465, app-password auth) and send.
    try:
        smtp_server = smtplib.SMTP_SSL(self.server, 465)
    # BUG FIX: `(gaierror and error)` evaluates to just `error`; the intent
    # was to catch either exception, i.e. the tuple `(gaierror, error)`.
    except (gaierror, error) as e:
        logger.exception('发送邮件失败,无法连接到SMTP服务器,检查网络以及SMTP服务器. %s', e)
    else:
        try:
            smtp_server.login(self.sender, self.password)
        except smtplib.SMTPAuthenticationError as e:
            logger.exception('用户名密码验证失败!%s', e)
        else:
            smtp_server.sendmail(self.sender, self.receiver.split(';'), self.msg.as_string())
        finally:
            smtp_server.quit()
            logger.info('发送邮件"{0}"成功! 收件人:{1}。如果没有收到邮件,请检查垃圾箱,'
                        '同时检查收件人地址是否正确'.format(self.title, self.receiver))
def send(self):
    """Compose and send the mail via SMTP with STARTTLS (HTML body plus
    attachments); connection and auth failures are logged, not raised."""
    self.msg['Subject'] = self.title
    self.msg['From'] = self.sender
    self.msg['To'] = self.receiver
    # Mail body (HTML wrapped in a multipart/alternative part).
    if self.message:
        msgAlternative = MIMEMultipart('alternative')
        self.msg.attach(msgAlternative)
        msgAlternative.attach(MIMEText(self.message, 'html', 'utf-8'))
    # Attachments: a list of paths or a single path string.
    if self.files:
        if isinstance(self.files, list):
            for f in self.files:
                self._attach_file(f)
        elif isinstance(self.files, str):
            self._attach_file(self.files)
    # Connect to the server and send.
    try:
        smtp_server = smtplib.SMTP(self.server)
    # BUG FIX: `(gaierror and error)` evaluates to just `error`; the intent
    # was to catch either exception, i.e. the tuple `(gaierror, error)`.
    except (gaierror, error) as e:
        logger.exception('发送邮件失败,无法连接到SMTP服务器,检查网络以及SMTP服务器. %s', e)
    else:
        try:
            smtp_server.ehlo()
            smtp_server.starttls()
            smtp_server.login(self.sender, self.password)
        except smtplib.SMTPAuthenticationError as e:
            logger.exception('用户名密码验证失败!%s', e)
        else:
            smtp_server.sendmail(self.sender, self.receiver.split(';'), self.msg.as_string())
        finally:
            smtp_server.quit()
            logger.info('发送邮件"{0}"成功! 收件人:{1}。如果没有收到邮件,请检查垃圾箱,'
                        '同时检查收件人地址是否正确'.format(self.title, self.receiver))
def send(self):
    """Compose and send the mail (body plus attachments) over plain SMTP;
    connection and auth failures are logged, not raised."""
    self.msg['Subject'] = self.title
    self.msg['From'] = self.sender
    self.msg['To'] = self.receiver
    # Mail body.
    if self.message:
        self.msg.attach(MIMEText(self.message))
    # Attachments: a list of paths or a single path string.
    if self.files:
        if isinstance(self.files, list):
            for f in self.files:
                self._attach_file(f)
        elif isinstance(self.files, str):
            self._attach_file(self.files)
    # Connect to the server.
    try:
        smtp_server = smtplib.SMTP(self.server)
    # BUG FIX: `(gaierror and error)` evaluates to just `error`; the intent
    # was to catch either exception, i.e. the tuple `(gaierror, error)`.
    except (gaierror, error) as e:
        logger.exception(
            'Send mail failed, cant\'t connect to server,check you network and smtp server'
        )
    else:
        try:
            smtp_server.login(self.sender, self.password)
        except smtplib.SMTPAuthenticationError as e:
            logger.exception('username or password wrong %s', e)
        else:
            smtp_server.sendmail(self.sender, self.receiver.split(';'), self.msg.as_string())
        finally:
            smtp_server.quit()
            logger.info(
                'send mail"{0}" success! receiver:{1}.if not receive mail,please check your trash box'
                ' and check receiver address'.format(
                    self.title, self.receiver))
def upload_qc2_file(ssh_client, image_url, template_name, export, provider):
    """Upload a qcow2 image into glance on an RHOS provider via SSH.

    Returns:
        str(result) of the glance command on success, False on any failure.
    """
    try:
        command = ['glance']
        command.append("--os-image-api-version 1")
        command.append("image-create")
        command.append("--copy-from {}".format(image_url))
        command.append("--name {}".format(template_name))
        command.append("--is-public true")
        command.append("--container-format bare")
        command.append("--disk-format qcow2")
        res_command = ' '.join(command)
        # `export` carries the OpenStack auth environment variables.
        res = '{} && {}'.format(export, res_command)
        result = ssh_client.run_command(res)
        if result.failed:
            logger.error("RHOS:%r ERROR while uploading qc2 file: %r", provider, result)
            return False
        return str(result)
    except Exception:
        # BUG FIX: logger.exception() requires a message argument; calling it
        # with none raised TypeError here and masked the original error.
        logger.exception("RHOS:%r exception while uploading qc2 file", provider)
        return False
def ping_pool(self): timeout = None # None - keep the half of the lease time try: self.client.prolong_appliance_pool_lease(self.pool, self.lease_time) except SproutException as e: log.exception( "Pool %s does not exist any more, disabling the timer.\n" "This can happen before the tests are shut down " "(last deleted appliance deleted the pool\n" "> The exception was: %s", self.pool, str(e)) self.pool = None # Will disable the timer in next reset call. except Exception as e: self.log.error( 'An unexpected error happened during interaction with Sprout:') self.log.exception(e) # Have a shorter timer now (1 min), because something is happening right now # WE have a reserve of half the lease time so that should be enough time to # solve any minor problems # Adding a 0-10 extra random sec just for sake of dispersing any possible "swarm" timeout = 60 + random.randint(0, 10) finally: self.reset_timer(timeout=timeout)
def check_proxy(proxy): """ 用于检查指定 代理IP 响应速度,匿名程度,支持协议类型 :param proxy: :return: 检查后的代理IP模型对象 """ #准备代理IP字典 proxies = { "http": f"http://{proxy.ip}:{proxy.port}", "https": f"https://{proxy.ip}:{proxy.port}" } try: # 测试该代理IP http, http_nick_type, http_speed = __check_http_proxies(proxies) https, https_nick_type, https_speed = __check_http_proxies( proxies, False) if http and https: proxy.protocol = 2 #包含http和https proxy.nick_type = http_nick_type proxy.speed = http_speed elif http: proxy.protocol = 0 #包含http proxy.nick_type = http_nick_type proxy.speed = http_speed elif https: proxy.protocol = 1 #包含https proxy.nick_type = https_nick_type proxy.speed = https_speed else: proxy.protocol = -1 proxy.nick_type = -1 proxy.speed = -1 return proxy except Exception as e: logger.exception(e)
def mail_send(self):
    """Send the HTML report file as both the mail body and an attachment
    over SMTP-SSL; all failures are printed, never raised."""
    try:
        # BUG FIX: the report file was opened twice and the second handle
        # (open(...).read()) was never closed; read it once with a context
        # manager and reuse the bytes for body and attachment.
        with open(self.files, 'rb') as f:
            mail_body = f.read()
        mail = MIMEMultipart()
        mail.attach(MIMEText(mail_body, _subtype='html', _charset='utf-8'))
        # Attach the same report as a downloadable file.
        att1 = MIMEText(mail_body, 'base64', 'utf-8')
        att1['Content-Type'] = 'application/octet-stream'
        att1[
            'Content-Disposition'] = 'attachment;filename="Test_report.html"'
        mail.attach(att1)
        mail['From'] = formataddr(['网易邮件', self.sender])
        mail['To'] = formataddr(['吕心劲', self.receiver])
        mail['subject'] = self.title
        try:
            server = smtplib.SMTP_SSL(self.server)  # sender's SMTP server, port 465
        except smtplib.SMTPAuthenticationError as e:
            logger.exception('发送邮件失败,无法连接到SMTP服务器,检查网络以及SMTP服务器. %s', e)
        else:
            try:
                server.login(self.sender, self.password)
            except smtplib.SMTPAuthenticationError as e:
                logger.exception('用户名密码验证失败!%s', e)
            else:
                server.sendmail(
                    self.sender, self.receiver, mail.as_string())
            finally:
                server.quit()
                logger.info('发送邮件"{0}"成功! 收件人:{1}。如果没有收到邮件,请检查垃圾箱,'
                            '同时检查收件人地址是否正确'.format(self.title, self.receiver))
    except Exception as e:
        print(format(e))
def send(self):
    """Compose and send a plain-text mail over SMTP; connection and auth
    failures are logged, not raised."""
    self.msg['Subject'] = self.title
    self.msg['From'] = self.sender
    self.msg['To'] = self.receiver
    if self.message:
        self.msg.attach(MIMEText(self.message))
    try:
        smtp_server = SMTP(self.server)
    # BUG FIX: `(gaierror and error)` evaluates to just `error`; the intent
    # was to catch either exception, i.e. the tuple `(gaierror, error)`.
    except (gaierror, error) as e:
        logger.exception('发送邮件失败,无法连接到SMTP服务器,检查网络以及SMTP服务器. %s', e)
    else:
        try:
            smtp_server.login(self.sender, self.password)
        except smtplib.SMTPAuthenticationError as e:
            logger.exception('用户名密码验证失败!%s', e)
        else:
            smtp_server.sendmail(self.sender, self.receiver.split(';'), self.msg.as_string())
        finally:
            smtp_server.quit()
            logger.info('发送邮件"{0}"成功! 收件人:{1}。如果没有收到邮件,请检查垃圾箱,'
                        '同时检查收件人地址是否正确'.format(self.title, self.receiver))
def send(self):
    """Compose and send the mail (body plus attachments) over plain SMTP;
    connection and auth failures are logged, not raised."""
    self.msg['Subject'] = self.title
    self.msg['From'] = self.sender
    self.msg['To'] = self.receiver
    # Mail body.
    if self.message:
        self.msg.attach(MIMEText(self.message))
    # Attachments: a list of paths or a single path string.
    if self.files:
        if isinstance(self.files, list):
            for f in self.files:
                self._attach_file(f)
        elif isinstance(self.files, str):
            self._attach_file(self.files)
    # Connect to the server and send.
    try:
        smtp_server = smtplib.SMTP(self.server)
    # BUG FIX: `(gaierror and error)` evaluates to just `error`; the intent
    # was to catch either exception, i.e. the tuple `(gaierror, error)`.
    except (gaierror, error) as e:
        logger.exception('发送邮件失败, 无法连接到SMTP服务器,检查网络以及SMTP服务器. %s', e)
    else:  # runs only when no exception occurred
        try:
            smtp_server.login(self.sender, self.password)
        except smtplib.SMTPAuthenticationError as e:
            logger.exception("用户名密码验证失败! %s", e)
        else:
            smtp_server.sendmail(self.sender, self.receiver.split(';'), self.msg.as_string())
        finally:
            smtp_server.quit()
            logger.info('发送邮件"{0}"成功! 收件人:{1}。如果没有收到邮件,请检查垃圾箱,'
                        '同时检查收件人地址是否正确'.format(self.title, self.receiver))
def send(self):
    """Compose and send the mail (body plus attachments) via smtp.qq.com;
    connection and auth failures are logged, not raised."""
    self.msg['Subject'] = self.title
    self.msg['From'] = self.sender
    self.msg['To'] = self.receiver
    # Mail body.
    if self.message:
        self.msg.attach(MIMEText(self.message))
    # Attachments: a list of paths or a single path string.
    if self.files:
        if isinstance(self.files, list):
            for f in self.files:
                self.attach_file(f)
        elif isinstance(self.files, str):
            self.attach_file(self.files)
    # Connect to the server and send.
    try:
        SMTP_server = smtplib.SMTP()
        SMTP_server.connect('smtp.qq.com')
    # BUG FIX: `(gaierror and error)` evaluates to just `error`; the intent
    # was to catch either exception, i.e. the tuple `(gaierror, error)`.
    except (gaierror, error) as e:
        logger.exception('发送邮件失败,无法连接到SMTP服务器,检查网络以及SMTP服务器:%s', e)
    else:
        try:
            SMTP_server.login(self.sender, self.password)
        except smtplib.SMTPAuthenticationError as e:
            logger.exception('用户名密码验证失败 %s', e)
        else:
            SMTP_server.sendmail(self.sender, self.receiver.split(';'), self.msg.as_string())
        finally:
            SMTP_server.quit()
            logger.info('发送邮件"{0}"成功,收件人:{1},如果没有收到邮件,'
                        '请检查垃圾箱,同时检查收件人地址是否正确'.format(
                            self.title, self.receiver))
def clean_jenkins_job(self, jenkins_job):
    """Destroy any existing sprout pool left over from this Jenkins job.

    Args:
        jenkins_job: sequence whose first element is the Jenkins job name.

    Any exception during lookup/destroy is logged and swallowed -- a stale
    pool is not fatal, we proceed to request a new one.
    """
    try:
        log.info("Check if pool already exists for this %r Jenkins job", jenkins_job[0])
        jenkins_job_pools = self.client.find_pools_by_description(
            jenkins_job[0], partial=True)
        for pool in jenkins_job_pools:
            # Some jobs have overlapping descriptions, sprout API doesn't support regex
            # job-name-12345 vs job-name-master-12345
            # the partial match alone will catch both of these; confirm the pool
            # description is an exact '<job-name><pool>' match before destroying.
            if self.client.get_pool_description(pool) == '{}{}'.format(
                    jenkins_job[0], pool):
                log.info("Destroying the old pool %s for %r job.", pool, jenkins_job[0])
                self.client.destroy_pool(pool)
            else:
                log.info(
                    'Skipped pool destroy due to potential pool description overlap: %r',
                    jenkins_job[0])
    except Exception:
        # BUG FIX: the two adjacent string literals had no separator and
        # rendered as "...ignoredproceeding..." in the log.
        log.exception(
            "Exception occurred during old pool deletion, this can be ignored; "
            "proceeding to Request new pool")
def delete_edomain_templates(api, template, edomain):
    """Delete a template from an export domain and wait until it is gone.

    Args:
        api: API for RHEVM.
        template: the template object to delete (provides name and creation time).
        edomain: Export domain of selected RHEVM provider.
    """
    with lock:
        # `lock` serializes console output across worker threads
        creation_time = template.get_creation_time().strftime("%d %B-%Y")
        name = template.get_name()
        print('Deleting {} created on {} ...'.format(name, creation_time))
    try:
        template.delete()
        print('waiting for {} to be deleted..'.format(name))
        # Poll until the template really disappears from the export domain
        wait_for(is_edomain_template_deleted, [api, name, edomain],
                 fail_condition=False, delay=5)
        print("RHEVM: successfully deleted the template {}".format(name))
    except Exception as e:
        with lock:
            # Print as well, because job logs get deleted
            print("RHEVM: Exception occurred while deleting the template {}".
                  format(name))
            logger.exception(e)
def delete_provider_vms(provider_key, vm_names):
    """Delete the named VMs on one provider.

    Args:
        provider_key: provider key to look up the mgmt system for.
        vm_names: iterable of VM names to delete.

    Per-VM failures are logged and skipped; console output is serialized
    with the shared `lock` because this runs from worker threads.
    """
    with lock:
        print('Deleting VMs from {} ...'.format(provider_key))
    try:
        with lock:
            provider = get_mgmt(provider_key)
    except Exception as e:
        with lock:
            print("Could not retrieve the provider {}'s mgmt system ({}: {})".format(
                provider_key, type(e).__name__, str(e)))
            logger.exception(e)
        # BUG FIX: without this return, `provider` is unbound below and the
        # loop raises NameError instead of reporting the real failure.
        return

    for vm_name in vm_names:
        with lock:
            print("Deleting {} from {}".format(vm_name, provider_key))
        try:
            provider.delete_vm(vm_name)
        except Exception as e:
            with lock:
                print('Failed to delete {} on {}'.format(vm_name, provider_key))
                logger.exception(e)
    with lock:
        print("{} is done!".format(provider_key))
def main(**kwargs):
    """Clone a template into a new appliance VM (or destroy an existing one).

    Keyword args drive the whole flow: ``provider``, ``template`` and
    ``vm_name`` are required; ``deploy`` switches to provider_data-based
    config, ``destroy`` deletes instead of cloning, ``configure`` runs
    appliance configuration after the clone.

    Returns:
        None on success, a nonzero int on failure (12: clone failed,
        10: VM not running / no IP / configuration failure), or the
        inverted result of ``destroy_vm`` when ``destroy`` is set.
    """
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy', None):
        # "deploy" mode: read everything from provider_data instead of cfme_data
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        provider_data = utils.conf.provider_data
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url', None),
             }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy', None):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster', None))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host', None) and kwargs.get('place_policy_aff', None):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = filter(lambda f: f in available_flavors, flavors)
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool', None) or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        # NOTE(review): `cred` is not defined in this function -- presumably a
        # module-level credentials mapping; confirm against the file header.
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])

    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'],
                                                    deploy_args['vm_name'],
                                                    kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('Clone failed')
        if kwargs.get('cleanup', None):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args['vm_name']],
                                  num_sec=1200, fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure', None):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy', None):
                app = IPAppliance(address=ip)
            else:
                app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy', None):
            # Grab the anaconda log for post-mortem before bailing out
            app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile', None) or kwargs.get('deploy', None):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
def process_provider_vms(provider_key, matchers, delta, vms_to_delete):
    """ Process the VMs on a given provider, comparing name and creation time.
    Append vms meeting criteria to vms_to_delete

    :param provider_key: string provider key
    :param matchers: matching strings
    :param delta: time delta
    :param vms_to_delete: the list of vms that should be deleted
    :return: modifies vms_to_delete
    """
    with lock:
        print('{} processing'.format(provider_key))
    try:
        now = datetime.datetime.now()
        with lock:
            # Known conf issue :)
            provider = get_mgmt(provider_key)
        vm_list = provider.list_vm()
        for vm_name in vm_list:
            try:
                if not match(matchers, vm_name):
                    continue

                # Powered-off VMware VMs report no boot time; fall back to the
                # modification time of the VM's config file on the datastore.
                if (isinstance(provider, VMWareSystem) and
                        provider.vm_status(vm_name) == 'poweredOff'):
                    hostname = provider.get_vm_host_name(vm_name)
                    vm_config_datastore = provider.get_vm_config_files_path(vm_name)
                    datastore_url = provider.get_vm_datastore_path(vm_name,
                                                                   vm_config_datastore)
                    vm_creation_time = get_vm_config_modified_time(
                        hostname, vm_name, datastore_url, provider_key)
                else:
                    vm_creation_time = provider.vm_creation_time(vm_name)

                if vm_creation_time is False:
                    # This VM must have some problem, include in report even though we can't delete
                    status = provider.vm_status(vm_name)
                    deleted_vms_list.append([
                        provider_key,
                        vm_name,
                        NULL,  # can't know age, failed getting creation date
                        status,
                        FAIL
                    ])
                    raise Exception  # the except block message is accurate in this case

                # Old enough to delete? Record the name and its age.
                if vm_creation_time + delta < now:
                    vm_delta = now - vm_creation_time
                    with lock:
                        vms_to_delete[provider_key].add((vm_name, vm_delta))
            except Exception as e:
                logger.error(e)
                logger.error('Failed to get creation/boot time for {} on {}'.format(
                    vm_name, provider_key))
                continue
        with lock:
            print('{} finished processing for matches'.format(provider_key))
    except Exception as ex:
        with lock:
            # Print out the error message too because logs in the job get deleted
            print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
            logger.error('failed to process vms from provider {}'.format(provider_key))
            logger.exception(ex)
def start(webdriver_name=None, base_url=None, **kwargs):
    """Starts a new web browser

    If a previous browser was open, it will be closed before starting the new browser

    Args:
        webdriver_name: The name of the selenium Webdriver to use. Default: 'Firefox'
        base_url: Optional, will use ``utils.conf.env['base_url']`` by default
        **kwargs: Any additional keyword arguments will be passed to the webdriver constructor
    """
    # Try to clean up an existing browser session if starting a new one
    if thread_locals.browser is not None:
        quit()

    browser_conf = conf.env.get('browser', {})

    if webdriver_name is None:
        # If unset, look to the config for the webdriver type
        # defaults to Firefox
        webdriver_name = browser_conf.get('webdriver', 'Firefox')
    webdriver_class = getattr(webdriver, webdriver_name)

    if base_url is None:
        base_url = store.base_url

    # Pull in browser kwargs from browser yaml
    browser_kwargs = browser_conf.get('webdriver_options', {})

    # Handle firefox profile for Firefox or Remote webdriver
    if webdriver_name == 'Firefox':
        browser_kwargs['firefox_profile'] = _load_firefox_profile()
    elif (webdriver_name == 'Remote' and
          browser_kwargs['desired_capabilities']['browserName'] == 'firefox'):
        browser_kwargs['browser_profile'] = _load_firefox_profile()

    # Update it with passed-in options/overrides
    browser_kwargs.update(kwargs)

    if webdriver_name != 'Remote' and 'desired_capabilities' in browser_kwargs:
        # desired_capabilities is only for Remote driver, but can sneak in
        del(browser_kwargs['desired_capabilities'])

    if webdriver_name == 'Remote' and 'webdriver_wharf' in browser_conf and not thread_locals.wharf:
        # Configured to use wharf, but it isn't configured yet; check out a webdriver container
        wharf = Wharf(browser_conf['webdriver_wharf'])
        # TODO: Error handling! :D
        wharf.checkout()
        atexit.register(wharf.checkin)
        thread_locals.wharf = wharf

        if browser_kwargs['desired_capabilities']['browserName'] == 'chrome':
            # chrome uses containers to sandbox the browser, and we use containers to
            # run chrome in wharf, so disable the sandbox if running chrome in wharf
            co = browser_kwargs['desired_capabilities'].get('chromeOptions', {})
            arg = '--no-sandbox'
            if 'args' not in co:
                co['args'] = [arg]
            elif arg not in co['args']:
                co['args'].append(arg)
            browser_kwargs['desired_capabilities']['chromeOptions'] = co

    if thread_locals.wharf:
        # Wharf is configured, make sure to use its command_executor
        wharf_config = thread_locals.wharf.config
        browser_kwargs['command_executor'] = wharf_config['webdriver_url']
        view_msg = 'tests can be viewed via vnc on display {}'.format(
            wharf_config['vnc_display'])
        logger.info('webdriver command executor set to %s', wharf_config['webdriver_url'])
        logger.info(view_msg)
        write_line(view_msg, cyan=True)

    try:
        # Retry the webdriver constructor up to 3 times on WebDriverException
        browser = tries(3, WebDriverException, webdriver_class, **browser_kwargs)
        browser.file_detector = UselessFileDetector()
        browser.maximize_window()
        browser.get(base_url)
        thread_locals.browser = browser
    except urllib2.URLError as ex:
        # connection to selenium was refused for unknown reasons
        if thread_locals.wharf:
            # If we're running wharf, try again with a new container
            logger.error(
                'URLError connecting to selenium; recycling container. URLError:'
            )
            # Plus, since this is a really weird thing that we need to figure out,
            # throw a message out to the terminal for visibility
            write_line(
                'URLError caused container recycle, see log for details', red=True)
            logger.exception(ex)
            thread_locals.wharf.checkin()
            thread_locals.wharf = None
            start(webdriver_name, base_url, **kwargs)
        else:
            # If we aren't running wharf, raise it
            raise
    return thread_locals.browser
def upload_template(client, hostname, username, password, provider, url, name,
                    provider_data, stream):
    """Upload an appliance OVA template to a vSphere provider.

    Runs ovftool (retried up to NUM_OF_TRIES_OVFTOOL times), optionally adds
    a disk, marks the VM as a template, and registers the template with
    trackerbot.  When ``provider_data`` is supplied, additionally deploys a
    test VM from the freshly-uploaded template via the ``clone_template``
    script.

    Returns:
        False on a recognized failure; otherwise falls through (implicitly
        None), logging progress along the way.
    """
    try:
        if provider_data:
            kwargs = make_kwargs_vsphere(provider_data, provider)
        else:
            kwargs = make_kwargs_vsphere(cfme_data, provider)
        kwargs['ovf_tool_username'] = credentials['host_default']['username']
        kwargs['ovf_tool_password'] = credentials['host_default']['password']
        if name is None:
            name = cfme_data['basic_info']['appliance_template']

        logger.info("VSPHERE:%r Start uploading Template: %r", provider, name)
        if not check_kwargs(**kwargs):
            return False
        # BUG FIX: the membership test was inverted -- the upload work must
        # run when the template is ABSENT; the else branch below logs that it
        # already exists.
        if name not in client.list_template():
            if kwargs.get('upload'):
                # Wrapper for ovftool - sometimes it just won't work
                for i in range(0, NUM_OF_TRIES_OVFTOOL):
                    logger.info("VSPHERE:%r ovftool try #%r", provider, i)
                    upload_result = upload_ova(hostname,
                                               username,
                                               password,
                                               name,
                                               kwargs.get('datastore'),
                                               kwargs.get('cluster'),
                                               kwargs.get('datacenter'),
                                               url,
                                               provider,
                                               kwargs.get('proxy'),
                                               kwargs.get('ovf_tool_client'),
                                               kwargs['ovf_tool_username'],
                                               kwargs['ovf_tool_password'])
                    if upload_result:
                        break
                else:
                    # for/else: every try failed
                    logger.error("VSPHERE:%r Ovftool failed upload after multiple tries",
                                 provider)
                    return
            if kwargs.get('disk'):
                if not add_disk(client, name, provider):
                    logger.error('"VSPHERE:%r FAILED adding disk to VM, exiting', provider)
                    return False
            if kwargs.get('template'):
                try:
                    client.mark_as_template(vm_name=name)
                    logger.info("VSPHERE:%r Successfully templatized machine", provider)
                except Exception:
                    logger.exception("VSPHERE:%r FAILED to templatize machine", provider)
                    return False
            if not provider_data:
                logger.info("VSPHERE:%r Adding template %r to trackerbot", provider, name)
                trackerbot.trackerbot_add_provider_template(stream, provider, name)
        else:
            logger.info("VSPHERE:%r template %r already exists", provider, name)

        if provider_data and name in client.list_template():
            # Smoke-test the template by deploying a throwaway VM from it
            logger.info("VSPHERE:%r Template and provider_data exist, Deploy %r",
                        provider, name)
            vm_name = 'test_{}_{}'.format(name, fauxfactory.gen_alphanumeric(8))
            deploy_args = {'provider': provider, 'vm_name': vm_name,
                           'template': name, 'deploy': True}
            getattr(__import__('clone_template'), "main")(**deploy_args)
    except Exception:
        logger.exception('VSPHERE:%r Exception during upload_template', provider)
        return False
    finally:
        logger.info("VSPHERE:%r End uploading Template: %r", provider, name)
def run_command(
        self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
        ensure_user=False):
    """Run a command over SSH.

    Args:
        command: The command. Supports taking dicts as version picking.
        timeout: Timeout after which the command execution fails.
        reraise: Does not muffle the paramiko exceptions in the log.
        ensure_host: Ensure that the command is run on the machine with the IP given, not any
            container or such that we might be using by default.
        ensure_user: Ensure that the command is run as the user we logged in, so in case we are
            not root, setting this to True will prevent from running sudo.

    Returns:
        A :py:class:`SSHResult` instance.
    """
    if isinstance(command, dict):
        command = version.pick(command)
    original_command = command
    uses_sudo = False
    logger.info("Running command %r", command)
    if self.is_pod and not ensure_host:
        # This command will be executed in the context of the host provider
        command = 'oc rsh {} bash -c {}'.format(self._container, quote(
            'source /etc/default/evm; ' + command))
        ensure_host = True
    elif self.is_container and not ensure_host:
        # Wrap the command so it runs inside the appliance container
        command = 'docker exec {} bash -c {}'.format(self._container, quote(
            'source /etc/default/evm; ' + command))

    if self.username != 'root' and not ensure_user:
        # We need sudo
        command = 'sudo -i bash -c {command}'.format(command=quote(command))
        uses_sudo = True

    if command != original_command:
        logger.info("> Actually running command %r", command)
    command += '\n'

    output = []
    try:
        session = self.get_transport().open_session()
        if uses_sudo:
            # We need a pseudo-tty for sudo
            session.get_pty()
        if timeout:
            session.settimeout(float(timeout))
        session.exec_command(command)
        stdout = session.makefile()
        stderr = session.makefile_stderr()
        while True:
            # NOTE(review): `recv_ready` / `recv_stderr_ready` are NOT called
            # (missing parentheses), so both conditions are always truthy.
            # The loop still drains the channel because iterating the
            # makefile() objects blocks until EOF -- confirm before "fixing"
            # by adding the call parens, as that changes the draining order.
            if session.recv_ready:
                for line in stdout:
                    output.append(line)
                    if self._streaming:
                        self.f_stdout.write(line)

            if session.recv_stderr_ready:
                for line in stderr:
                    output.append(line)
                    if self._streaming:
                        self.f_stderr.write(line)

            if session.exit_status_ready():
                break
        exit_status = session.recv_exit_status()
        return SSHResult(exit_status, ''.join(output))
    except paramiko.SSHException:
        if reraise:
            raise
        else:
            logger.exception('Exception happened during SSH call')
    except socket.timeout:
        logger.exception(
            "Command %r timed out. Output before it failed was:\n%r",
            command, ''.join(output))
        raise

    # Returning two things so tuple unpacking the return works even if the ssh client fails
    # Return whatever we have in the output
    return SSHResult(1, ''.join(output))
def force_navigate(page_name, _tries=0, *args, **kwargs):
    """force_navigate(page_name)

    Given a page name, attempt to navigate to that page no matter what breaks.

    Args:
        page_name: Name a page from the current :py:data:`ui_navigate.nav_tree` tree to
            navigate to.
        _tries: internal retry counter; callers should not pass this.
    """
    if _tries > 2:
        # Need at least three tries:
        # 1: login_admin handles an alert or CannotContinueWithNavigation appears.
        # 2: Everything should work. If not, NavigationError.
        raise exceptions.NavigationError(page_name)

    _tries += 1
    logger.debug('force_navigate to %s, try %d' % (page_name, _tries))
    # circular import prevention: cfme.login uses functions in this module
    from cfme import login
    # Import the top-level nav menus for convenience
    from cfme.web_ui import menu

    # browser fixture should do this, but it's needed for subsequent calls
    ensure_browser_open()

    # Clear any running "spinnies"
    try:
        execute_script('miqSparkleOff();')
    except:
        # miqSparkleOff undefined, so it's definitely off.
        pass

    # Set this to True in the handlers below to trigger a browser restart
    recycle = False

    # remember the current user, if any
    current_user = login.current_user()

    try:
        # What we'd like to happen...
        if not current_user:
            # default to admin user
            login.login_admin()
        else:
            # we recycled and want to log back in
            login.login(current_user.username, current_user.password)
        logger.info('Navigating to %s' % page_name)
        menu.nav.go_to(page_name, *args, **kwargs)
    except (KeyboardInterrupt, ValueError):
        # KeyboardInterrupt: Don't block this while navigating
        # ValueError: ui_navigate.go_to can't handle this page, give up
        raise
    except UnexpectedAlertPresentException:
        if _tries == 1:
            # There was an alert, accept it and try again
            handle_alert(wait=0)
            force_navigate(page_name, _tries, *args, **kwargs)
        else:
            # There was still an alert when we tried again, shoot the browser in the head
            logger.debug('Unxpected alert, recycling browser')
            recycle = True
    except (ErrorInResponseException, InvalidSwitchToTargetException):
        # Unable to switch to the browser at all, need to recycle
        logger.info('Invalid browser state, recycling browser')
        recycle = True
    except exceptions.CannotContinueWithNavigation as e:
        # The some of the navigation steps cannot succeed
        logger.info('Cannot continue with navigation due to: %s; Recycling browser' % str(e))
        recycle = True
    except (NoSuchElementException, InvalidElementStateException, WebDriverException):
        from cfme.web_ui import cfme_exception as cfme_exc  # To prevent circular imports
        # If the page is blocked, then recycle...
        if is_displayed("//div[@id='blocker_div']"):
            logger.warning("Page was blocked with blocker div, recycling.")
            recycle = True
        elif cfme_exc.is_cfme_exception():
            logger.exception(
                "CFME Exception before force_navigate started!: `{}`".format(
                    cfme_exc.cfme_exception_text()))
            recycle = True
        elif is_displayed("//body/div[@class='dialog' and ./h1 and ./p]"):
            # Rails exception detection
            logger.exception(
                "Rails exception before force_navigate started!: {}:{} at {}".format(
                    text("//body/div[@class='dialog']/h1").encode("utf-8"),
                    text("//body/div[@class='dialog']/p").encode("utf-8"),
                    current_url()))
            recycle = True
        elif elements("//ul[@id='maintab']/li[@class='inactive']") and not\
                elements("//ul[@id='maintab']/li[@class='active']/ul/li"):
            # If upstream and is the bottom part of menu is not displayed
            logger.exception("Detected glitch from BZ#1112574. HEADSHOT!")
            recycle = True
        else:
            logger.error(
                "Could not determine the reason for failing the navigation. Reraising."
            )
            raise

    if recycle:
        browser().quit()  # login.current_user() will be retained for next login
        logger.debug('browser killed on try %d' % _tries)
        # If given a "start" nav destination, it won't be valid after quitting the browser
        kwargs.pop("start", None)
        force_navigate(page_name, _tries, *args, **kwargs)
def run(**kwargs):
    """Upload a VHD and create an SCVMM template on every configured scvmm provider.

    Keyword args: ``image_url`` (source VHD), ``template_name`` (optional --
    derived from the URL basename when absent), ``upload``/``template``
    switches, and ``stream`` for trackerbot registration.
    """
    for provider in list_provider_keys("scvmm"):
        kwargs = make_kwargs_scvmm(cfme_data, provider,
                                   kwargs.get('image_url'), kwargs.get('template_name'))
        check_kwargs(**kwargs)
        mgmt_sys = cfme_data['management_systems'][provider]
        host_fqdn = mgmt_sys['hostname_fqdn']
        creds = credentials[mgmt_sys['credentials']]

        # For powershell to work, we need to extract the User Name from the Domain
        user = creds['username'].split('\\')
        if len(user) == 2:
            username_powershell = user[1]
        else:
            username_powershell = user[0]

        username_scvmm = creds['domain'] + "\\" + creds['username']

        scvmm_args = {
            "hostname": mgmt_sys['ipaddress'],
            "username": username_powershell,
            "password": creds['password'],
            "domain": creds['domain'],
            "provisioning": mgmt_sys['provisioning']
        }
        client = SCVMMSystem(**scvmm_args)

        url = kwargs.get('image_url')

        # Template name equals either user input of we extract the name from the url
        new_template_name = kwargs.get('template_name')
        if new_template_name is None:
            new_template_name = os.path.basename(url)[:-4]

        # BUG FIX: logger uses %-style lazy interpolation; the original '{}'
        # placeholders were never substituted into the messages.
        logger.info("SCVMM:%s Make Template out of the VHD %s", provider, new_template_name)

        # use_library is either user input or we use the cfme_data value
        library = kwargs.get('library', mgmt_sys['template_upload'].get('vhds', None))

        logger.info("SCVMM:%s Template Library: %s", provider, library)

        # The VHD name changed, match the template_name.
        new_vhd_name = new_template_name + '.vhd'

        network = mgmt_sys['template_upload'].get('network', None)
        os_type = mgmt_sys['template_upload'].get('os_type', None)
        cores = mgmt_sys['template_upload'].get('cores', None)
        ram = mgmt_sys['template_upload'].get('ram', None)

        # Uses PowerShell Get-SCVMTemplate to return a list of templates and aborts if exists.
        if not client.does_template_exist(new_template_name):
            if kwargs.get('upload'):
                logger.info("SCVMM:%s Uploading VHD image to Library VHD folder.", provider)
                upload_vhd(client, url, library, new_vhd_name)
            if kwargs.get('template'):
                logger.info("SCVMM:%s Make Template out of the VHD %s", provider,
                            new_template_name)
                make_template(
                    client,
                    host_fqdn,
                    new_template_name,
                    library,
                    network,
                    os_type,
                    username_scvmm,
                    cores,
                    ram
                )
            try:
                wait_for(lambda: client.does_template_exist(new_template_name),
                         fail_condition=False, delay=5)
                logger.info("SCVMM:%s template %s uploaded success", provider,
                            new_template_name)
                logger.info("SCVMM:%s Add template %s to trackerbot", provider,
                            new_template_name)
                # BUG FIX: register the name actually used for the template;
                # kwargs['template_name'] may be None when the name was
                # derived from the image URL.
                trackerbot.trackerbot_add_provider_template(kwargs.get('stream'),
                                                            provider,
                                                            new_template_name)
            except Exception:
                logger.exception("SCVMM:%s Exception verifying the template %s",
                                 provider, new_template_name)
        else:
            logger.info("SCVMM: A Template with that name already exists in the SCVMMLibrary")
def vm(request, provider, local_setup_provider, small_template, vm_name):
    """Fixture: deploy a small-template VM on the provider and return a VMWrapper.

    Builds provider-specific deploy kwargs, deploys the template, registers a
    finalizer that powers the VM off and deletes it, then waits for the VM's
    REST object to appear in CFME.  Skips the module's tests when deployment
    times out (the provider is likely overloaded).
    """
    if provider.type == "rhevm":
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.type == "virtualcenter":
        kwargs = {}
    elif provider.type == "openstack":
        kwargs = {}
        if 'small_template_flavour' in provider.data:
            kwargs = {"flavour_name": provider.data.get('small_template_flavour')}
    elif provider.type == "scvmm":
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}

    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=small_template,
            allow_skip="default",
            power_on=True,
            **kwargs)
    except TimedOutError as e:
        logger.exception(e)
        try:
            # Best-effort cleanup of the half-deployed VM
            provider.mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM {}!".format(vm_name))
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider.key, type(e).__name__, str(e)))

    @request.addfinalizer
    def _finalize():
        """if getting REST object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name {}".format(vm_name))
        if provider.mgmt.is_vm_suspended(vm_name):
            logger.info("Powering up VM {} to shut it down correctly.".format(vm_name))
            provider.mgmt.start_vm(vm_name)
        if provider.mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM {}".format(vm_name))
            provider.mgmt.stop_vm(vm_name)
        if provider.mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM {} in {}".format(
                vm_name, provider.mgmt.__class__.__name__))
            provider.mgmt.delete_vm(vm_name)

    # Make it appear in the provider
    provider.refresh_provider_relationships()

    # Get the REST API object
    api = wait_for(
        lambda: get_vm_object(vm_name),
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider, vm_name, api)
def _error_callback(self, exception):
    """Error callback: log the given exception with a full traceback."""
    try:
        # Re-raising makes the exception "active", so logger.exception can
        # record the complete traceback instead of just the message.
        raise exception
    except Exception as e:
        logger.exception(e)
def main():
    """CLI entry point: clone a template into a new VM on the given provider.

    Returns:
        None on success; a nonzero int on failure (12: clone failed,
        10: VM not running), or the inverted result of ``destroy_vm``
        when ``--destroy`` is passed -- POSIXy exit codes.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # required options (heh)
    parser.add_argument('--provider', help='provider key in cfme_data')
    parser.add_argument('--template', help='the name of the template to clone')
    parser.add_argument('--vm_name', help='the name of the VM to create')

    # generic options
    parser.add_argument('--destroy', dest='destroy', action='store_true',
                        help='Destroy the destination VM')
    parser.add_argument('--configure', default=False, action='store_true',
                        help='configure the VM after provisioning')
    parser.add_argument('--no-cleanup', default=True, action='store_false',
                        dest='cleanup', help="don't clean up the vm on clone failure")
    parser.add_argument('--log', dest='loglevel', default='WARNING',
                        help='Set the log level')
    parser.add_argument('--outfile', dest='outfile',
                        help='Write provisioning details to the named file', default='')

    # sub options organized for provider types
    rhev_parser = parser.add_argument_group('rhev')
    rhev_parser.add_argument('--cluster', default=None,
                             help='the name of the VM on which to act')
    rhev_parser.add_argument('--place_policy_host', default=None,
                             help='the host for the vm to start on')
    rhev_parser.add_argument('--place_policy_aff', default=None,
                             help='the affinity of the vm on a host')

    cloud_parser = parser.add_argument_group('cloud')
    cloud_parser.add_argument('--flavor', default=None, help='ec2/rhos flavor')

    openstack_parser = parser.add_argument_group('openstack')
    openstack_parser.add_argument('--floating-ip-pool', default=None,
                                  help='openstack floating ip pool to use')

    args = parser.parse_args()

    # get_mgmt validates, since it will explode without an existing key or type
    provider = get_mgmt(args.provider)
    provider_dict = cfme_data['management_systems'][args.provider]
    provider_type = provider_dict['type']

    # Used by the cloud provs
    flavors = cfme_data['appliance_provisioning']['default_flavors'].get(
        provider_type, [])

    logger.info('Connecting to {}'.format(args.provider))

    if args.destroy:
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, args.vm_name)

    deploy_args = {
        'vm_name': args.vm_name,
        'template': args.template,
    }

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', args.cluster)
        if cluster is None:
            raise Exception(
                '--cluster is required for rhev instances and default is not set'
            )
        deploy_args['cluster'] = cluster

        if args.place_policy_host and args.place_policy_aff:
            deploy_args['placement_policy_host'] = args.place_policy_host
            # BUG FIX: argparse stores this option as `place_policy_aff`;
            # `args.rhev_place_policy_aff` raised AttributeError.
            deploy_args['placement_policy_affinity'] = args.place_policy_aff
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            flavor = args.flavor or flavors[0]
        except IndexError:
            raise Exception(
                '--flavor is required for EC2 instances and default is not set'
            )
        deploy_args['instance_type'] = flavor
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = filter(lambda f: f in available_flavors, flavors)
        try:
            flavor = args.flavor or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [
            p.name for p in provider.api.floating_ip_pools.list()
        ]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = args.floating_ip_pool or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict[
                "allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']

    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(args.template, args.vm_name,
                                                    args.provider))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('Clone failed')
        if args.cleanup:
            logger.info('attempting to destroy {}'.format(args.vm_name))
            destroy_vm(provider, args.vm_name)
        return 12

    if provider.is_vm_running(args.vm_name):
        logger.info("VM {} is running".format(args.vm_name))
    else:
        logger.error("VM is not running")
        return 10

    ip, time_taken = wait_for(provider.get_ip_address, [args.vm_name],
                              num_sec=1200, fail_condition=None)
    logger.info('IP Address returned is {}'.format(ip))

    if args.configure:
        logger.info('Configuring appliance, this can take a while.')
        app = Appliance(args.provider, args.vm_name)
        app.configure()

    if args.outfile:
        with open(args.outfile, 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
def login(user, submit_method=_js_auth_fn):
    """Login to CFME with the given user.

    Optionally, submit_method can be press_enter_after_password
    to use the enter key to login, rather than clicking the button.

    Args:
        user: A ``User`` object to log in as; if falsy, a user is built from
            ``conf.credentials['default']``.
        submit_method: A function to call after the username and password
            have been input.

    Raises:
        RuntimeError: If the login fails, ie. if a flash message appears
    """
    if not user:
        # No user given - fall back to the default credentials from config.
        username = conf.credentials['default']['username']
        password = conf.credentials['default']['password']
        cred = Credential(principal=username, secret=password)
        user = User(credential=cred)
    # BUG FIX: the original compared the two username strings with `is not`
    # (object identity), which can be True even for equal strings depending on
    # interning. Use `!=` for value comparison.
    if not logged_in() or user.credential.principal != current_username():
        if logged_in():
            logout()
        # workaround for strange bug where we are logged out
        # as soon as we click something on the dashboard
        sel.sleep(1.0)
        logger.debug('Logging in as user %s', user.credential.principal)
        try:
            fill(
                form, {
                    'username': user.credential.principal,
                    'password': user.credential.secret
                })
        except sel.InvalidElementStateException as e:
            logger.warning("Got an error. Details follow.")
            msg = str(e).lower()
            if "element is read-only" in msg:
                logger.warning(
                    "Got a read-only login form, will reload the browser.")
                # Reload browser
                quit()
                ensure_browser_open()
                sel.sleep(1.0)
                sel.wait_for_ajax()
                # And try filling the form again
                fill(
                    form, {
                        'username': user.credential.principal,
                        'password': user.credential.secret
                    })
            else:
                logger.warning("Unknown error, reraising.")
                logger.exception(e)
                raise
        # Login can take a while; extend the AJAX wait for the submit.
        with sel.ajax_timeout(90):
            submit_method()
        flash.assert_no_errors()
        user.full_name = _full_name()
        store.user = user
@property def paramsMongo(self): return self.mongo[MONGO_DB_NAME][MONGO_PARAM_SET_NAME] @property def sessionMongo(self): return self.mongo[MONGO_DB_NAME][MONGO_SESSION_SET_NAME] @property def fpMongo(self): return self.mongo[MONGO_DB_NAME][MONGO_FP_SET_NAME] def initExtimeIndex(self): """初始化数据库:检查/创建控制过期时间的索引""" isindex = False # 判断集合以及过期时间索引是否存在 for index in self.sessionMongo.list_indexes(): if 'session_createtime' in index: isindex = True if not isindex: # 如果不存在 : 创建session集合的过期时间索引 self.sessionMongo.create_index([('session_createtime', 1)], expireAfterSeconds=SESSION_EXTIME) logger.info('session_set过期时间设置成功, 过期时间:{}'.format(SESSION_EXTIME)) try: singleMongo = SingleMongo() except Exception as e: logger.exception(e) raise Exception('<进程终止> redis ERROR')
def check_for_badness(self, fn, _tries, nav_args, *args, **kwargs):
    """Invoke navigation step ``fn`` defensively, recovering from known UI breakages.

    Before calling ``fn`` this normalizes browser state (MiqQE patch, sparkle
    overlay, blocker divs, open modal windows, missing jQuery, rails errors).
    After calling it, a broad set of exception handlers decides whether to
    recycle the browser and/or restart evmserverd, then re-enters navigation
    via ``self.go``.

    Args:
        fn: The navigation step callable to run.
        _tries: Remaining retry budget; passed back into ``self.go`` on retries.
        nav_args: Extra navigation kwargs merged over ``kwargs`` for ``self.go``.
        *args, **kwargs: Forwarded to ``fn``.

    Returns:
        Whatever ``fn`` returns when it succeeds; ``None`` when skipped.
    """
    if getattr(fn, '_can_skip_badness_test', False):
        # Step opted out of the badness checks entirely.
        # self.log_message('Op is a Nop! ({})'.format(fn.__name__))
        return
    if self.VIEW:
        self.view.flush_widget_cache()
    # Merge navigation args over the call kwargs for the retry entry point.
    go_kwargs = kwargs.copy()
    go_kwargs.update(nav_args)
    self.appliance.browser.open_browser()
    self.appliance.browser.widgetastic.dismiss_any_alerts()
    # check for MiqQE javascript patch on first try and patch the appliance if necessary
    if self.appliance.is_miqqe_patch_candidate and not self.appliance.miqqe_patch_applied:
        self.appliance.patch_with_miqqe()
        self.appliance.browser.quit_browser()
        _tries -= 1
        self.go(_tries, *args, **go_kwargs)
    br = self.appliance.browser
    try:
        # Turn off the "sparkle" spinner overlay if present.
        br.widgetastic.execute_script('miqSparkleOff();', silent=True)
    except:  # Diaper OK (mfalesni)
        # miqSparkleOff undefined, so it's definitely off.
        pass
    # Check if the page is blocked with blocker_div. If yes, let's headshot the browser right
    # here
    if (
            br.widgetastic.is_displayed("//div[@id='blocker_div' or @id='notification']")
            or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
        logger.warning("Page was blocked with blocker div on start of navigation, recycling.")
        self.appliance.browser.quit_browser()
        self.go(_tries, *args, **go_kwargs)
    # Check if modal window is displayed
    if (br.widgetastic.is_displayed(
            "//div[contains(@class, 'modal-dialog') and contains(@class, 'modal-lg')]")):
        logger.warning("Modal window was open; closing the window")
        br.widgetastic.click(
            "//button[contains(@class, 'close') and contains(@data-dismiss, 'modal')]")
    # Check if jQuery present
    try:
        br.widgetastic.execute_script("jQuery", silent=True)
    except Exception as e:
        if "jQuery" not in str(e):
            logger.error("Checked for jQuery but got something different.")
            logger.exception(e)
        # Restart some workers
        logger.warning("Restarting UI and VimBroker workers!")
        with self.appliance.ssh_client as ssh:
            # Blow off the Vim brokers and UI workers
            ssh.run_rails_command("\"(MiqVimBrokerWorker.all + MiqUiWorker.all).each &:kill\"")
        logger.info("Waiting for web UI to come back alive.")
        sleep(10)  # Give it some rest
        self.appliance.wait_for_web_ui()
        self.appliance.browser.quit_browser()
        self.appliance.browser.open_browser()
        self.go(_tries, *args, **go_kwargs)
    # Same with rails errors
    rails_e = get_rails_error()
    if rails_e is not None:
        logger.warning("Page was blocked by rails error, renavigating.")
        logger.error(rails_e)
        # RHEL7 top does not know -M and -a
        logger.debug('Top CPU consumers:')
        logger.debug(store.current_appliance.ssh_client.run_command(
            'top -c -b -n1 | head -30').output)
        logger.debug('Top Memory consumers:')
        logger.debug(store.current_appliance.ssh_client.run_command(
            'top -c -b -n1 -o "%MEM" | head -30').output)  # noqa
        logger.debug('Managed known Providers:')
        logger.debug(
            '%r', [prov.key for prov in store.current_appliance.managed_known_providers])
        self.appliance.browser.quit_browser()
        self.appliance.browser.open_browser()
        self.go(_tries, *args, **go_kwargs)
        # If there is a rails error past this point, something is really awful
    # Set this to True in the handlers below to trigger a browser restart
    recycle = False
    # Set this to True in handlers to restart evmserverd on the appliance
    # Includes recycling so you don't need to specify recycle = False
    restart_evmserverd = False
    from cfme import login
    try:
        # NOTE(review): ``fn.func_name`` is the Python 2 spelling; under
        # Python 3 this attribute is ``__name__`` - confirm target runtime.
        self.log_message(
            "Invoking {}, with {} and {}".format(fn.func_name, args, kwargs), level="debug")
        return fn(*args, **kwargs)
    except (KeyboardInterrupt, ValueError):
        # KeyboardInterrupt: Don't block this while navigating
        raise
    except UnexpectedAlertPresentException:
        if _tries == 1:
            # There was an alert, accept it and try again
            br.widgetastic.handle_alert(wait=0)
            self.go(_tries, *args, **go_kwargs)
        else:
            # There was still an alert when we tried again, shoot the browser in the head
            logger.debug('Unxpected alert, recycling browser')
            recycle = True
    except (ErrorInResponseException, InvalidSwitchToTargetException):
        # Unable to switch to the browser at all, need to recycle
        logger.info('Invalid browser state, recycling browser')
        recycle = True
    except exceptions.CFMEExceptionOccured as e:
        # We hit a Rails exception
        logger.info('CFME Exception occured')
        logger.exception(e)
        recycle = True
    except exceptions.CannotContinueWithNavigation as e:
        # The some of the navigation steps cannot succeed
        logger.info('Cannot continue with navigation due to: {}; '
                    'Recycling browser'.format(str(e)))
        recycle = True
    except (NoSuchElementException, InvalidElementStateException, WebDriverException,
            StaleElementReferenceException) as e:
        from cfme.web_ui import cfme_exception as cfme_exc  # To prevent circular imports
        # First check - if jquery is not found, there can be also another
        # reason why this happened so do not put the next branches in elif
        if isinstance(e, WebDriverException) and "jQuery" in str(e):
            # UI failed in some way, try recycling the browser
            logger.exception(
                "UI failed in some way, jQuery not found, (probably) recycling the browser.")
            recycle = True
        # If the page is blocked, then recycle...
        if (
                br.widgetastic.is_displayed("//div[@id='blocker_div' or @id='notification']")
                or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
            logger.warning("Page was blocked with blocker div, recycling.")
            recycle = True
        elif cfme_exc.is_cfme_exception():
            logger.exception("CFME Exception before force navigate started!: {}".format(
                cfme_exc.cfme_exception_text()))
            recycle = True
        elif br.widgetastic.is_displayed("//body/h1[normalize-space(.)='Proxy Error']"):
            # 502
            logger.exception("Proxy error detected. Killing browser and restarting evmserverd.")
            req = br.widgetastic.elements("/html/body/p[1]//a")
            req = br.widgetastic.text(req[0]) if req else "No request stated"
            reason = br.widgetastic.elements("/html/body/p[2]/strong")
            reason = br.widgetastic.text(reason[0]) if reason else "No reason stated"
            logger.info("Proxy error: {} / {}".format(req, reason))
            restart_evmserverd = True
        elif br.widgetastic.is_displayed("//body[./h1 and ./p and ./hr and ./address]"):
            # 503 and similar sort of errors
            title = br.widgetastic.text("//body/h1")
            body = br.widgetastic.text("//body/p")
            logger.exception("Application error {}: {}".format(title, body))
            sleep(5)  # Give it a little bit of rest
            recycle = True
        elif br.widgetastic.is_displayed("//body/div[@class='dialog' and ./h1 and ./p]"):
            # Rails exception detection
            logger.exception("Rails exception before force navigate started!: %r:%r at %r",
                br.widgetastic.text("//body/div[@class='dialog']/h1"),
                br.widgetastic.text("//body/div[@class='dialog']/p"),
                getattr(manager.browser, 'current_url', "error://dead-browser")
            )
            recycle = True
        elif br.widgetastic.elements("//ul[@id='maintab']/li[@class='inactive']") and not\
                br.widgetastic.elements("//ul[@id='maintab']/li[@class='active']/ul/li"):
            # If upstream and is the bottom part of menu is not displayed
            logger.exception("Detected glitch from BZ#1112574. HEADSHOT!")
            recycle = True
        elif not login.logged_in():
            # Session timeout or whatever like that, login screen appears.
            logger.exception("Looks like we are logged out. Try again.")
            recycle = True
        else:
            # Unknown failure mode - surface the exception to the caller.
            logger.error("Could not determine the reason for failing the navigation. " +
                " Reraising. Exception: {}".format(str(e)))
            logger.debug(store.current_appliance.ssh_client.run_command(
                'systemctl status evmserverd').output)
            raise
    if restart_evmserverd:
        logger.info("evmserverd restart requested")
        self.appliance.restart_evm_service()
        self.appliance.wait_for_web_ui()
        self.go(_tries, *args, **go_kwargs)
    if recycle or restart_evmserverd:
        self.appliance.browser.quit_browser()
        logger.debug('browser killed on try {}'.format(_tries))
        # If given a "start" nav destination, it won't be valid after quitting the browser
        self.go(_tries, *args, **go_kwargs)
def setup_a_provider(prov_class=None, prov_type=None, validate=True, check_existing=True,
                     required_keys=None):
    """Sets up a single provider robustly.

    Does some counter-badness measures.

    Args:
        prov_class: "infra", "cloud", "container" or "middleware"
        prov_type: "ec2", "virtualcenter" or any other valid type
        validate: Whether to validate the provider.
        check_existing: Whether to check if the provider already exists.
        required_keys: A set of required keys for the provider data to have

    Returns:
        Whatever ``setup_provider`` returns for the provider that was set up.

    Raises:
        Exception: If no provider matching the parameters could be set up.
    """
    if not required_keys:
        required_keys = []
    if prov_class in {'infra', 'cloud', 'container', 'middleware'}:
        if prov_class == "infra":
            potential_providers = list_infra_providers()
        elif prov_class == "cloud":
            potential_providers = list_cloud_providers()
        elif prov_class == 'container':
            potential_providers = list_container_providers()
        elif prov_class == 'middleware':
            potential_providers = list_middleware_providers()
        # else not required because guarded by if
        if prov_type:
            providers = [provider for provider in potential_providers
                         if providers_data[provider]['type'] == prov_type]
        else:
            providers = potential_providers
    else:
        # Unknown/unset class falls back to infra providers.
        providers = list_infra_providers()

    # Keep only providers whose data contains all of the required keys.
    providers = [provider for provider in providers
                 if all(key in providers_data[provider] for key in required_keys)]

    # Check if the provider was behaving badly in the history
    if problematic_providers:
        filtered_providers = [
            provider for provider in providers if provider not in problematic_providers]
        if not filtered_providers:
            # problematic_providers took all of the providers, so start over with clean list
            # (next chance for bad guys) and use the original list. This will then slow down a
            # little bit but make it more reliable.
            problematic_providers.clear()
            store.terminalreporter.write_line(
                "Reached the point where all possible providers for this case are marked as "
                "bad. Clearing the bad provider list for a fresh start and next chance.",
                yellow=True)
        else:
            providers = filtered_providers

    # If there is a provider that we want to specifically avoid ...
    # If there is only a single provider, then do not do any filtering
    # Specify `do_not_prefer` in provider's yaml to make it an object of avoidance.
    if len(providers) > 1:
        filtered_providers = [
            provider
            for provider in providers
            if not providers_data[provider].get("do_not_prefer", False)]
        if filtered_providers:
            # If our filtering yielded any providers, use them, otherwise do not bother with that
            providers = filtered_providers

    # If there is already a suitable provider, don't try to setup a new one.
    # BUG FIX: materialize filter() into a list so the shuffle and the list
    # concatenation below also work under Python 3, where filter() returns a
    # lazy iterator (no-op under Python 2).
    already_existing = list(filter(is_provider_setup, providers))
    random.shuffle(already_existing)  # Make the provider load more even by random choice.
    not_already_existing = list(filter(lambda x: not is_provider_setup(x), providers))
    random.shuffle(not_already_existing)  # Make the provider load more even by random choice.

    # So, make this one loop and it tries the existing providers first, then the nonexisting
    for provider in already_existing + not_already_existing:
        try:
            if provider in already_existing:
                store.terminalreporter.write_line(
                    "Trying to reuse provider {}\n".format(provider), green=True)
            else:
                store.terminalreporter.write_line(
                    "Trying to set up provider {}\n".format(provider), green=True)
            return setup_provider(provider, validate=validate, check_existing=check_existing)
        except Exception as e:
            # In case of a known provider error: mark it bad and try the next one.
            logger.exception(e)
            message = "Provider {} is behaving badly, marking it as bad. {}: {}".format(
                provider, type(e).__name__, str(e))
            logger.warning(message)
            store.terminalreporter.write_line(message + "\n", red=True)
            problematic_providers.add(provider)
            prov_object = get_crud(provider)
            if prov_object.exists:
                # Remove it in order to not explode on next calls
                prov_object.delete(cancel=False)
                prov_object.wait_for_delete()
                message = "Provider {} was deleted because it failed to set up.".format(provider)
                logger.warning(message)
                store.terminalreporter.write_line(message + "\n", red=True)
    else:
        # The loop either returned on success or exhausted every candidate.
        raise Exception("No providers could be set up matching the params")