def remove_label(self, name, silent_failure=False):
    """Remove a label from this resource by name (via ``oc label ... <name>-``).

    Args:
        name: name of the label to remove.
        silent_failure: when True, log a warning and return False on
            failure instead of raising.

    Returns:
        bool: True on success, False on silent failure.

    Raises:
        LabelNotFoundException: label not present and silent_failure is False.
    """
    json_content = self._get_json()
    labels = json_content['metadata'].get('labels', {})
    if name not in labels:
        # Reuse the .get() result here: the original indexed
        # json_content['metadata']['labels'] directly, which raised
        # KeyError instead of LabelNotFoundException when the resource
        # had no labels at all.
        failure_signature = 'Could not find label "{}", labels: {}' \
            .format(name, labels)
        if silent_failure:
            logger.warning(failure_signature)
            return False
        raise exceptions.LabelNotFoundException(failure_signature)
    self.provider.cli.run_command(
        'oc label {} {} {}-'.format(
            self._cli_resource_type,
            # Images are addressed by digest, everything else by name.
            ('sha256:{}'.format(self.sha256)
             if (self.__class__.__name__ == 'Image') else self.name),
            name
        )
    )
    return True
def test_suspend(
        self, provider, vm_name, verify_vm_running, mgmt_sys_api_clients,
        register_event, load_vm_details):
    """Test suspend operation from a vm details page.

    Verify the vm transitions to suspended, that the "last power state
    change" timestamp updates, and that the backing provider reports the
    vm as suspended.
    """
    vm_details = load_vm_details
    # Precondition: vm must be reported as powered on before suspending.
    vm_details.wait_for_vm_state_change('on', 10)
    state_chg_time = vm_details.last_pwr_state_change
    register_event(get_sys_type(provider), "vm", vm_name,
                   ["vm_suspend_req", "vm_suspend"])
    vm_details.power_button.suspend()
    try:
        vm_details.wait_for_vm_state_change('suspended', 10)
    except TimedOutError:
        # Known product bug: state may not refresh without a manual
        # relationship refresh.
        logger.warning('working around bz977489 by clicking the refresh button')
        vm_details.config_button.refresh_relationships()
        vm_details.wait_for_vm_state_change('suspended', 5)
    Assert.equal(vm_details.power_state, 'suspended', "power state incorrect")
    # The timestamp must have moved forward from the value captured above.
    Assert.not_equal(vm_details.last_pwr_state_change, state_chg_time,
                     "last state chg time failed to update")
    # Cross-check against the provider's management API.
    Assert.true(mgmt_sys_api_clients[provider].is_vm_suspended(vm_name),
                "vm not suspended")
def cleanup_host():
    """Best-effort teardown of the provisioned host.

    Powers the host off via IPMI, removes it from the provider cluster, and
    deletes whichever UI host record exists (provisioning may have renamed
    it). Any failure is logged and swallowed so teardown never fails a test.
    """
    try:
        logger.info('Cleaning up host %s on provider %s'
                    % (prov_host_name, provider_crud.key))
        mgmt_system = provider_crud.get_mgmt_system()
        host_list = mgmt_system.list_host()
        if host_provisioning['ip_addr'] in host_list:
            # Wait until the host is usable before trying to evict it.
            wait_for(mgmt_system.is_host_connected, [host_provisioning['ip_addr']])
            mgmt_system.remove_host_from_cluster(host_provisioning['ip_addr'])

        ipmi = test_host.get_ipmi()
        ipmi.power_off()

        # During host provisioning, the host name gets changed from what's
        # specified at creation time. If host provisioning succeeds, the
        # original name is reverted to; otherwise the changed names are
        # retained upon failure.
        renamed_host_name1 = "{} ({})".format('IPMI', host_provisioning['ipmi_address'])
        renamed_host_name2 = "{} ({})".format('VMware ESXi', host_provisioning['ip_addr'])
        host_list_ui = host.get_all_hosts()
        if host_provisioning['hostname'] in host_list_ui:
            test_host.delete(cancel=False)
            host.wait_for_host_delete(test_host)
        elif renamed_host_name1 in host_list_ui:
            host_renamed_obj1 = host.Host(name=renamed_host_name1)
            host_renamed_obj1.delete(cancel=False)
            host.wait_for_host_delete(host_renamed_obj1)
        elif renamed_host_name2 in host_list_ui:
            host_renamed_obj2 = host.Host(name=renamed_host_name2)
            host_renamed_obj2.delete(cancel=False)
            host.wait_for_host_delete(host_renamed_obj2)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). The mgmt_sys classes raise
        # plain Exception, so this is as narrow as we can get.
        logger.warning('Failed to clean up host %s on provider %s'
                       % (prov_host_name, provider_crud.key))
def collect_diskinfo(self):
    """Collect usage statistics for each mounted disk partition.

    Each distinct device is reported once. Partitions mounted under the
    docklet volume path are additionally recorded per-container in the
    global ``workercinfo`` map (under ``disk_use``).

    Returns:
        list of dicts with keys: device, mountpoint, total, used, free,
        percent. Partitions whose usage query fails are skipped (logged).
    """
    global workercinfo
    parts = psutil.disk_partitions()
    setval = []
    devices = {}
    for part in parts:
        # deal with each partition; `devices` dedupes multiple mounts of
        # the same device so each device is reported only once
        if not part.device in devices:
            devices[part.device] = 1
            diskval = {}
            diskval['device'] = part.device
            diskval['mountpoint'] = part.mountpoint
            try:
                usage = psutil.disk_usage(part.mountpoint)
                diskval['total'] = usage.total
                diskval['used'] = usage.used
                diskval['free'] = usage.free
                diskval['percent'] = usage.percent
                if(part.mountpoint.startswith('/opt/docklet/local/volume')):
                    # the mountpoint indicates that the data is the disk
                    # usage information of a container; the container name
                    # is the last path component
                    names = re.split('/', part.mountpoint)
                    container = names[len(names)-1]
                    if not container in workercinfo.keys():
                        workercinfo[container] = {}
                    workercinfo[container]['disk_use'] = diskval
                setval.append(diskval)  # make a list
            except Exception as err:
                # e.g. permission/stat errors on some mountpoints; skip them
                logger.warning(traceback.format_exc())
                logger.warning(err)
    #print(output)
    #print(diskparts)
    return setval
def cleanup_vm(vm_name, provider):
    """Best-effort deletion of a VM on the provider; failures are logged.

    Args:
        vm_name: name of the VM to delete.
        provider: provider object exposing ``mgmt.delete_vm`` and ``key``.
    """
    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt). The mgmt_sys classes raise plain Exception.
        logger.warning('Failed to clean up VM %s on provider %s',
                       vm_name, provider.key)
def cleanup_vm(vm_name, provider_key, provider_mgmt):
    """Best-effort deletion of the provisioned VM; failures are logged.

    The provisioned VM carries a '_0001' suffix added by the provisioning
    workflow, hence the name mangling here.

    Args:
        vm_name: base VM name used during provisioning.
        provider_key: provider key, used only for log messages.
        provider_mgmt: management system exposing ``delete_vm``.
    """
    try:
        logger.info('Cleaning up VM %s on provider %s' % (vm_name, provider_key))
        provider_mgmt.delete_vm(vm_name + "_0001")
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt). The mgmt_sys classes raise plain Exception.
        logger.warning('Failed to clean up VM %s on provider %s'
                       % (vm_name, provider_key))
def dialog():
    """Fixture: create a service dialog with one text-box element, yield it,
    then delete it on teardown (tolerating the dialog already being gone).
    """
    dialog_name = "dialog_" + fauxfactory.gen_alphanumeric()
    element_data = dict(
        ele_label="ele_" + fauxfactory.gen_alphanumeric(),
        ele_name=fauxfactory.gen_alphanumeric(),
        ele_desc="my ele desc",
        choose_type="Text Box",
        default_text_box="default value"
    )
    service_dialog = ServiceDialog(
        label=dialog_name, description="my dialog",
        submit=True, cancel=True,
        tab_label="tab_" + fauxfactory.gen_alphanumeric(), tab_desc="my tab desc",
        box_label="box_" + fauxfactory.gen_alphanumeric(), box_desc="my box desc")
    service_dialog.create(element_data)
    flash.assert_success_message('Dialog "{}" was added'.format(dialog_name))
    yield service_dialog
    # fixture cleanup
    try:
        service_dialog.delete()
    except (NoSuchElementException, TimeoutException):
        # BUG FIX: the original `except NoSuchElementException or
        # TimeoutException:` evaluated the `or` first, so TimeoutException
        # was never caught. A tuple catches both as intended.
        logger.warning('test_catalog_item: dialog yield fixture cleanup, dialog "{}" not '
                       'found'.format(dialog_name))
def test_suspend(self, test_vm, verify_vm_running, soft_assert, register_event, bug):
    """Tests suspend

    Suspends the VM from the CFME UI and verifies both the UI state and the
    provider-side state; also checks "Last Boot Time" stays unchanged
    (except on RHEV, where a product bug makes it unreliable).

    Metadata:
        test_flag: power_control, provision
    """
    # Precondition: VM must be on before we can suspend it.
    test_vm.wait_for_vm_state_change(
        desired_state=test_vm.STATE_ON, timeout=720, from_details=True)
    last_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    register_event(
        test_vm.provider.type,
        "vm", test_vm.name, ["vm_suspend_req", "vm_suspend"])
    test_vm.power_control_from_cfme(option=test_vm.SUSPEND, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Suspend initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider})
    # SCVMM needs an explicit provider refresh for state to propagate.
    if_scvmm_refresh_provider(test_vm.provider)
    try:
        test_vm.wait_for_vm_state_change(
            desired_state=test_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError as e:
        # Known RHEV product bug: suspended state may never show; tolerate it.
        if test_vm.provider.type == "rhevm":
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise e
    soft_assert(
        test_vm.provider.mgmt.is_vm_suspended(
            test_vm.name), "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if test_vm.provider.type != "rhevm":
        new_last_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
def get_search_filter_spec(self, *args, **kwargs):
    """Build a search filter spec, patching in a datastore traversal spec.

    A datastore traversal spec is missing from this method in psphere.
    psav has opened a PR to add it, but until it gets merged we'll need to
    come behind psphere and add it in just like his PR does:
    https://github.com/jkinred/psphere/pull/18/files

    Returns:
        The (possibly augmented) property filter spec from psphere.
    """
    pfs = super(_PsphereClient, self).get_search_filter_spec(*args, **kwargs)
    select_sets = pfs.objectSet[0].selectSet
    missing_ss = 'datacenter_datastore_traversal_spec'
    ss_names = [ss.name for ss in select_sets]
    if missing_ss not in ss_names:
        logger.trace('Injecting %s into psphere search filter spec', missing_ss)
        # pull out the folder traversal spec traversal specs
        fts_ts = pfs.objectSet[0].selectSet[0]
        # and get the select set from the traversal spec
        fts_ss = fts_ts.selectSet[0]
        # add ds selection spec to folder traversal spec
        dsss = self.create('SelectionSpec', name=missing_ss)
        fts_ts.selectSet.append(dsss)
        # add ds traversal spec to search filter object set select spec
        dsts = self.create('TraversalSpec')
        dsts.name = 'datacenter_datastore_traversal_spec'
        dsts.type = 'Datacenter'
        dsts.path = 'datastoreFolder'
        dsts.selectSet = [fts_ss]
        select_sets.append(dsts)
    else:
        # Idempotent: skip injection if a previous call already added it.
        logger.warning('%s already in psphere search filer spec, not adding it',
                       missing_ss)
    return pfs
def run(self):
    """Master-side collector loop.

    Every 2 seconds, polls each worker over RPC for host/container/network
    stats and merges them into the global ``monitor_hosts`` and
    ``monitor_vnodes`` maps; also triggers network billing per user.
    Runs until ``self.thread_stop`` is set.
    """
    global monitor_hosts
    global monitor_vnodes
    while not self.thread_stop:
        # Mark all known hosts not-running; live ones are overwritten below,
        # so hosts that dropped out of get_nodeips() stay marked down.
        for worker in monitor_hosts.keys():
            monitor_hosts[worker]['running'] = False
        workers = self.nodemgr.get_nodeips()
        for worker in workers:
            try:
                ip = worker
                workerrpc = self.nodemgr.ip_to_rpc(worker)
                # fetch data
                # NOTE(review): eval() on the RPC payload assumes the
                # worker response is trusted repr() data — confirm.
                info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
                #logger.info(info[0])
                # store data in monitor_hosts and monitor_vnodes
                # info layout: [0] host stats, [1] per-container stats,
                # [2] per-user net stats
                monitor_hosts[ip] = info[0]
                for container in info[1].keys():
                    owner = get_owner(container)
                    if not owner in monitor_vnodes.keys():
                        monitor_vnodes[owner] = {}
                    monitor_vnodes[owner][container] = info[1][container]
                for user in info[2].keys():
                    if not user in monitor_vnodes.keys():
                        continue
                    else:
                        monitor_vnodes[user]['net_stats'] = info[2][user]
                        self.net_billings(user, info[2][user]['bytes_total'])
            except Exception as err:
                # One failing worker must not kill the collector loop.
                logger.warning(traceback.format_exc())
                logger.warning(err)
        time.sleep(2)
        #logger.info(History.query.all())
        #logger.info(VNode.query.all())
    return
def param_check(metafunc, argvalues):
    """Helper function to check if parametrizing is necessary

    If argvalues is empty, the test being represented by metafunc will be
    skipped in collection. See usage in :py:func:`parametrize`

    Args:
        metafunc: metafunc objects from pytest_generate_tests
        argvalues: argvalues list for use in metafunc.parametrize

    Returns:
        True if this test should be parametrized
    """
    # Guard clause: anything truthy in argvalues means we can parametrize.
    if any(argvalues):
        return True
    # module and class are optional, but function isn't
    mod_name = getattr(metafunc.module, '__name__', None)
    cls_name = getattr(metafunc.cls, '__name__', None)
    fn_name = metafunc.function.__name__
    full_name = '.'.join(filter(None, (mod_name, cls_name, fn_name)))
    logger.warning('Parametrization for %s yielded no values, skipping' % full_name)
def instance(setup_providers, provider_key, provider_mgmt, provisioning, provider_crud):
    """Fixture: provision an instance from an image, yield it, then try to
    delete the backing VM on the provider during teardown.
    """
    # tries to delete the VM that gets created here
    vm_name = 'test_image_prov_%s' % generate_random_string()
    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s'
            % (image, vm_name, provider_crud.key))
    instance = prov.Instance(
        name=vm_name,
        email='*****@*****.**',
        first_name='Image',
        last_name='Provisioner',
        notes=note,
        instance_type=provisioning['instance_type'],
        availability_zone=provisioning['availability_zone'],
        security_groups=[provisioning['security_group']],
        provider_mgmt=provider_mgmt,
        provider=provider_crud,
        guest_keypair="shared",
        template=prov.Template(image))
    instance.create()
    yield instance
    try:
        logger.info('Cleaning up VM %s on provider %s' % (vm_name, provider_key))
        provider_mgmt.delete_vm(vm_name)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt). The mgmt_sys classes raise plain Exception.
        logger.warning('Failed to clean up VM %s on provider %s'
                       % (vm_name, provider_key))
def test_start_from_suspend(
        self, testing_vm, verify_vm_suspended, soft_assert):
    """Tests start from suspend

    Powers the VM on from a suspended state and verifies the UI reaches
    the 'on' state, the "Last Boot Time" refreshes, and the provider
    reports the VM running.

    Metadata:
        test_flag: power_control, provision
    """
    try:
        testing_vm.provider.refresh_provider_relationships()
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError:
        # Known RHEV product bug: suspended state may never show; tolerate it.
        if testing_vm.provider.one_of(RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise
    last_boot_time = testing_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False,
                                       from_details=True)
    flash.assert_message_contain("Start initiated")
    # SCVMM needs an explicit provider refresh for state to propagate.
    if_scvmm_refresh_provider(testing_vm.provider)
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    # Boot timestamp must move forward once the VM actually starts.
    wait_for_last_boot_timestamp_refresh(testing_vm, last_boot_time, timeout=600)
    soft_assert(
        testing_vm.provider.mgmt.is_vm_running(testing_vm.name), "vm not running")
def checkskip(metafunc, argvalues):
    """Helper function to check if parametrizing yielded results

    If argvalues is empty, the test being represented by metafunc will be
    skipped in collection.

    Args:
        metafunc: metafunc objects from pytest_generate_tests
        argvalues: argvalues list for use in metafunc.parametrize

    Returns:
        True if this test should be skipped due to empty argvalues
    """
    if not argvalues:
        # module and class are optional, but function isn't
        modname = getattr(metafunc.module, '__name__', None)
        classname = getattr(metafunc.cls, '__name__', None)
        funcname = metafunc.function.__name__
        test_name = '.'.join(filter(None, (modname, classname, funcname)))
        logger.warning('Parametrization for %s yielded no values, skipping' % test_name)
        # Raising pytest.skip in collection halts future fixture evaluation,
        # and preemptively filters this test out of the test results
        # NOTE(review): pytest.skip(...) raises Skipped immediately, so the
        # trailing call on metafunc.function and the `return True` below
        # appear unreachable — confirm the intended control flow.
        pytest.skip(msg="Parametrize yielded no values")(metafunc.function)
        return True
def template(provider, provisioning, dialog_name):
    """Fixture: build an orchestration template appropriate for the provider,
    optionally create a service dialog from it, yield it, then delete it on
    teardown (tolerating it already being gone).
    """
    template_type = provisioning['stack_provisioning']['template_type']
    # Azure uses a fixed, well-known template name; others get a random one.
    if provider.type == 'azure':
        template_name = 'azure-single-vm-from-user-image'
    else:
        template_name = fauxfactory.gen_alphanumeric()
    template = OrchestrationTemplate(template_type=template_type,
                                     template_name=template_name)
    if provider.type == "ec2":
        method = AWS_TEMPLATE.replace('CloudFormation', random_desc())
    elif provider.type == "openstack":
        method = HEAT_TEMPLATE.replace('Simple', random_desc())
    template.create(method)
    if provider.type != "azure":
        template.create_service_dialog_from_template(dialog_name,
                                                     template.template_name)
    yield template
    try:
        template.delete()
    except CandidateNotFound as ex:
        # NOTE(review): ex.message is a Python 2 idiom — verify runtime.
        logger.warning('Exception deleting template fixture, continuing: {}'.format(ex.message))
def fill(self, value):
    """Fill this calendar widget with *value* (a ``date`` or anything
    stringifiable), then fire the appropriate change event so listeners
    react as if the user had picked the date.

    Returns:
        True (always reports that a fill was performed).
    """
    if isinstance(value, date):
        date_str = value.strftime('%m/%d/%Y')
    else:
        date_str = str(value)
    self.move_to()
    # need to write to a readonly field: resort to evil
    if self.browser.get_attribute("ng-model", self) is not None:
        # Angular-backed calendars are not supported by this path yet.
        # self.set_angularjs_value(self, date_str)
        raise NotImplementedError
    else:
        self.browser.set_attribute("value", date_str, self)
    # Now when we set the value, we need to simulate a change event.
    # The attribute distinguishes the new vs. old datepicker widget.
    if self.browser.get_attribute("data-date-autoclose", self):
        # New one
        script = "$(arguments[0]).trigger('changeDate');"
    else:
        # Old one
        script = "$(arguments[0]).change();"
    try:
        self.browser.execute_script(script, self.browser.element(self))
    except WebDriverException as e:
        # Event handlers on the page may throw; log but don't fail the fill.
        logger.warning(
            "An exception was raised during handling of the Cal #{}'s change event:\n{}"
            .format(self.name, str(e)))
    self.browser.plugin.ensure_page_safe()
    return True
def _cleanup_templates():
    """Best-effort teardown: delete the stack's VM from the provider,
    logging (and continuing) on any failure.
    """
    try:
        stack_data['vm_name'].delete_from_provider()
    except Exception as ex:
        # NOTE(review): ex.message is a Python 2 idiom — verify runtime.
        logger.warning('Exception while checking/deleting stack, continuing: {}'
                       .format(ex.message))
def test_contents(appliance, soft_assert):
    """Test title of each document.

    For each known documentation link, downloads the PDF and soft-asserts
    that the expected title/product strings appear on its title page.
    """
    view = navigate_to(appliance.server, 'Documentation')
    cur_ver = appliance.version
    for doc_type, title in doc_titles.items():
        doc_widget = getattr(view.links, doc_type, None)
        if not doc_widget:
            logger.warning('Skipping contents check for document: "{}: {}", no widget to read'
                           .format(doc_type, title))
            # BUG FIX: the original fell through here and dereferenced
            # doc_widget (None) on the next line, raising AttributeError
            # instead of skipping as the log message promises.
            continue
        href = view.browser.get_attribute(attr='href',
                                          locator=doc_widget.link.locator)
        data = requests.get(href, verify=False)
        pdf_titlepage_text_low = pdf_get_text(StringIO(data.content), [0]).lower()
        # don't include the word 'guide'
        expected = [title]
        if cur_ver == version.LATEST:
            expected.append('manageiq')
        else:
            expected.append('cloudforms')
            maj_min = '{}.{}'.format(cur_ver.version[0], cur_ver.version[1])
            expected.append(version.get_product_version(maj_min))
        for exp_str in expected:
            soft_assert(exp_str in pdf_titlepage_text_low,
                        "{} not in {}".format(exp_str, pdf_titlepage_text_low))
def cleanup_vm(vm_name, provider_key, provider_mgmt):
    """Best-effort deletion of a VM on the provider; failures are logged
    with the triggering exception and swallowed.
    """
    try:
        logger.info('Cleaning up VM {} on provider {}'.format(vm_name, provider_key))
        provider_mgmt.delete_vm(vm_name)
    except Exception as err:
        logger.warning('Failed to clean up VM {} on provider {}: {}'.format(vm_name,
            provider_key, str(err)))
def test_labels_remove(provider, soft_assert, random_labels):
    """Remove the labels added by the random_labels fixture and verify,
    after a provider refresh, that each successfully-added label no longer
    appears in the UI.
    """
    # Removing the labels (only those whose addition succeeded earlier).
    for instance, label_name, label_value, results_status, _ in random_labels:
        if results_status:
            instance.remove_label(label_name)
        else:
            logger.warning('Cannot remove label ({} = {}) for {} {}. (failed to add it previously)'
                           .format(label_name, label_value,
                                   instance.__class__.__name__, instance.name))
    provider.refresh_provider_relationships()
    # Verify that the labels removed successfully from UI:
    for instance, label_name, label_value, results_status, _ in random_labels:
        if results_status:
            soft_assert(
                # Poll until the label disappears; silent_failure=True makes
                # wait_for return falsy on timeout instead of raising.
                wait_for(
                    lambda: not check_labels_in_ui(instance, label_name, label_value),
                    num_sec=120, delay=10,
                    fail_func=instance.summary.reload,
                    message='Verifying label ({} = {}) for {} {} removed'
                            .format(label_name, label_value,
                                    instance.__class__.__name__, instance.name),
                    silent_failure=True),
                'Label ({} = {}) for {} {} found in UI (but should be removed).'
                .format(label_name, label_value,
                        instance.__class__.__name__, instance.name)
            )
def test_suspend(self, provider_init, test_vm, verify_vm_running, soft_assert,
                 register_event):
    """Suspend the VM from CFME and verify UI + provider state; also check
    "Last Boot Time" stays unchanged (skipped on RHEV due to a product bug).
    """
    # Precondition: VM must be on before we can suspend it.
    test_vm.wait_for_vm_state_change(
        desired_state=Vm.STATE_ON, timeout=720, from_details=True)
    last_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    register_event(
        test_vm.provider_crud.get_yaml_data()['type'],
        "vm", test_vm.name, ["vm_suspend_req", "vm_suspend"])
    test_vm.power_control_from_cfme(option=Vm.SUSPEND, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Suspend initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider_crud})
    try:
        test_vm.wait_for_vm_state_change(
            desired_state='suspended', timeout=600, from_details=True)
    except TimedOutError:
        # Known product bug: state may not refresh without a manual
        # relationship refresh.
        logger.warning('working around bz977489 by clicking the refresh button')
        test_vm.refresh_relationships()
        test_vm.wait_for_vm_state_change(
            desired_state=Vm.STATE_SUSPENDED, timeout=300, from_details=True)
    soft_assert(
        test_vm.provider_crud.get_mgmt_system().is_vm_suspended(
            test_vm.name), "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not isinstance(test_vm.provider_crud, RHEVMProvider):
        new_last_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: " + new_last_boot_time + " should == orig: " + last_boot_time)
def run(self):
    """Worker-side collector loop.

    Every ``self.interval`` seconds, collects net stats and per-container
    stats into the global ``workercinfo``/``workerinfo`` maps, plus a
    running/total container count. The container name list is refreshed
    every 5th iteration. Runs until ``self.thread_stop`` is set (or after
    one pass when ``self.test`` is on).
    """
    global workercinfo
    global workerinfo
    cnt = 0
    while not self.thread_stop:
        self.collect_net_stats()
        containers = self.list_container()
        countR = 0
        conlist = []
        for container in containers:
            # collect data of each container; list_container may yield an
            # empty trailing entry, which is skipped here
            if not container == '':
                conlist.append(container)
                if not container in workercinfo.keys():
                    workercinfo[container] = {}
                try:
                    success = self.collect_containerinfo(container)
                    if(success):
                        countR += 1
                except Exception as err:
                    # One failing container must not kill the loop.
                    logger.warning(traceback.format_exc())
                    logger.warning(err)
        # -1 compensates for the empty trailing entry mentioned above.
        containers_num = len(containers)-1
        concnt = {}
        concnt['total'] = containers_num
        concnt['running'] = countR
        workerinfo['containers'] = concnt
        time.sleep(self.interval)
        if cnt == 0:
            # update containers list on the worker each 5 times
            workerinfo['containerslist'] = conlist
        cnt = (cnt+1)%5
        if self.test:
            break
    return
def patch_file(self, local_path, remote_path, md5=None):
    """ Patches a single file on the appliance

    Args:
        local_path: Path to patch (diff) file
        remote_path: Path to file to be patched (on the appliance)
        md5: MD5 checksum of the original file to check if it has changed

    Returns:
        True if changes were applied, False if patching was not necessary

    Note:
        If there is a .bak file present and the file-to-be-patched was
        not patched by the current patch-file, it will be used to restore
        it first. Recompiling assets and restarting appropriate services
        might be required.
    """
    logger.info('Patching {remote_path}'.format(remote_path=remote_path))
    # Upload diff to the appliance
    diff_remote_path = os_path.join('/tmp/', os_path.basename(remote_path))
    self.put_file(local_path, diff_remote_path)
    # If already patched with current file, exit
    # (a reverse dry-run succeeding means the patch is already applied)
    logger.info('Checking if already patched')
    rc, out = self.run_command(
        'patch {} {} -f --dry-run -R'.format(remote_path, diff_remote_path))
    if rc == 0:
        return False
    # If we have a .bak file available, it means the file is already patched
    # by some older patch; in that case, replace the file-to-be-patched by
    # the .bak first
    logger.info("Checking if {}.bak is available".format(remote_path))
    rc, out = self.run_command('test -e {}.bak'.format(remote_path))
    if rc == 0:
        logger.info(
            "{}.bak found; using it to replace {}".format(remote_path, remote_path))
        rc, out = self.run_command('mv {}.bak {}'.format(remote_path, remote_path))
        if rc != 0:
            raise Exception(
                "Unable to replace {} with {}.bak".format(remote_path, remote_path))
    else:
        logger.info("{}.bak not found".format(remote_path))
    # If not patched and there's MD5 checksum available, check it
    if md5:
        logger.info("MD5 sum check in progress for {remote_path}".format(
            remote_path=remote_path))
        rc, out = self.run_command('md5sum -c - <<< "{} {}"'.format(md5, remote_path))
        if rc == 0:
            logger.info('MD5 sum check result: file not changed')
        else:
            # A drifted file is only logged; patching proceeds regardless.
            logger.warning('MD5 sum check result: file has been changed!')
    # Create the backup and patch
    rc, out = self.run_command(
        'patch {} {} -f -b -z .bak'.format(remote_path, diff_remote_path))
    if rc != 0:
        raise Exception("Unable to patch file {}: {}".format(remote_path, out))
    return True
def _wait_for_state_refresh():
    """Return True once the instance's 'State Changed On' detail differs
    from the previously-captured ``state_change_time`` (closure variable).
    """
    try:
        navigate_to(instance, 'Details')
        current = instance.get_detail(
            properties=("Power Management", "State Changed On"))
        return state_change_time != current
    except NameError:
        # NOTE(review): NameError here presumably covers the closure name
        # not being bound yet — confirm against the enclosing scope.
        logger.warning('NameError caught while waiting for state change, continuing')
        return False
def get_containerslist(self):
    """Return the 'containerslist' entry of self.info, or {} on any failure."""
    try:
        return self.info['containerslist']
    except Exception as err:
        logger.warning(traceback.format_exc())
        logger.warning(err)
        return {}
def get_osinfo(self):
    """Return the 'osinfo' entry of self.info, or {} on any failure."""
    try:
        return self.info['osinfo']
    except Exception as err:
        logger.warning(traceback.format_exc())
        logger.warning(err)
        return {}
def get_cpuconfig(self):
    """Return the 'cpuconfig' entry of self.info, or {} on any failure."""
    try:
        return self.info['cpuconfig']
    except Exception as err:
        logger.warning(traceback.format_exc())
        logger.warning(err)
        return {}
def get_concpuinfo(self):
    """Return the 'concpupercent' entry of self.info, or {} on any failure."""
    try:
        return self.info['concpupercent']
    except Exception as err:
        logger.warning(traceback.format_exc())
        logger.warning(err)
        return {}
def get_user_net_stats(owner):
    """Return the cached network stats for *owner* from the global
    monitor_vnodes map, or {} when unavailable.
    """
    global monitor_vnodes
    try:
        return monitor_vnodes[owner]['net_stats']
    except Exception as err:
        logger.warning(traceback.format_exc())
        logger.warning(err)
        return {}
def get_basic_info(self):
    """Return this container's 'basic_info' from the global monitor_vnodes
    map (keyed by owner then container id), or {} when unavailable.
    """
    global monitor_vnodes
    try:
        return monitor_vnodes[self.owner][self.con_id]['basic_info']
    except Exception as err:
        logger.warning(traceback.format_exc())
        logger.warning(err)
        return {}
def finish_task(self, task_idx, running_time, billing):
    """Mark a task finished, persist it, and release dependent tasks.

    Updates in-memory status counters and the Batchtask/Batchjob DB rows,
    then removes this task from its dependents' dependency sets; dependents
    whose dependencies are all satisfied move to 'scheduling'.

    Args:
        task_idx: index of the finished task within this job.
        running_time: task runtime (seconds) to persist.
        billing: billing units to add to the task and the job.

    Returns:
        list of [task_name, task_config, job_priority] entries for newly
        schedulable dependent tasks (empty when none).
    """
    if task_idx not in self.tasks.keys():
        logger.error('Task_idx %s not in job. user:%s job_name:%s job_id:%s'
                     % (task_idx, self.user, self.job_name, self.job_id))
        return []
    logger.debug("Task(idx:%s) of BatchJob(id:%s) has finished(running_time=%d,billing=%d). Update dependency..."
                 % (task_idx, self.job_id, running_time, billing))
    old_status = self.tasks[task_idx]['status']
    if old_status == 'stopping':
        logger.info("Task(idx:%s) of BatchJob(id:%s) has been stopped."
                    % (task_idx, self.job_id))
        # NOTE(review): bare `return` yields None here while every other
        # path returns a list — confirm callers tolerate this.
        return
    self.tasks_cnt[old_status] -= 1
    self.tasks[task_idx]['status'] = 'finished'
    # Persist the task row: status, retry count, runtime, billing.
    self.tasks[task_idx]['db'] = Batchtask.query.get(
        self.tasks[task_idx]['id'])
    self.tasks[task_idx]['db'].status = 'finished'
    self.tasks[task_idx]['db'].tried_times += 1
    self.tasks[task_idx]['db'].running_time = running_time
    self.tasks[task_idx]['db'].end_time = datetime.now()
    self.tasks[task_idx]['db'].billing = billing
    self.tasks[task_idx]['db'].failed_reason = ""
    self.job_db = Batchjob.query.get(self.job_id)
    self.job_db.billing += billing
    self.tasks_cnt['finished'] += 1
    if task_idx not in self.dependency_out.keys():
        # No dependents: just refresh the job status and log.
        self._update_job_status()
        self.log_status()
        return []
    ret_tasks = []
    for out_idx in self.dependency_out[task_idx]:
        try:
            self.tasks[out_idx]['dependency'].remove(task_idx)
        except Exception as err:
            # Dependency already removed (or inconsistent state): log and
            # keep processing the remaining dependents.
            logger.warning(traceback.format_exc())
            continue
        if (self.tasks[out_idx]['status'] == 'pending'
                and len(self.tasks[out_idx]['dependency']) == 0):
            # All dependencies satisfied: promote to 'scheduling'.
            self.tasks_cnt['pending'] -= 1
            self.tasks_cnt['scheduling'] += 1
            self.tasks[out_idx]['status'] = 'scheduling'
            self.tasks[out_idx]['db'] = Batchtask.query.get(
                self.tasks[out_idx]['id'])
            self.tasks[out_idx]['db'].status = 'scheduling'
            task_name = self.job_id + '_' + out_idx
            ret_tasks.append([
                task_name, self.tasks[out_idx]['config'], self.job_priority
            ])
    self._update_job_status()
    self.log_status()
    return ret_tasks
def insert_one(self, proxy):
    """2.1 Insert the proxy into MongoDB unless its IP is already stored."""
    # The proxy's IP serves as the MongoDB primary key (_id).
    if self.proxies.count_documents({'_id': proxy.ip}) == 0:
        document = proxy.__dict__
        document['_id'] = proxy.ip
        self.proxies.insert_one(document)
        logger.info("插入新的代理{}".format(proxy))
    else:
        logger.warning("已经存在的代理:{}".format(proxy))
def insert_one(self, proxy):
    """Insert the proxy into MongoDB unless its IP is already stored."""
    # proxy.ip serves as the MongoDB primary key (_id).
    existing = self.proxies.count_documents({'_id': proxy.ip})
    if existing != 0:
        logger.warning(f"已存在的代理: {proxy}")
        return
    document = proxy.__dict__
    document['_id'] = proxy.ip
    self.proxies.insert_one(document)
    logger.info(f"插入新代理: {proxy}")
def vm_name(provider):
    """Fixture: yield a random service-catalog VM name, then try to delete
    any VM created under that name during teardown.
    """
    # also tries to delete the VM that gets made with this name
    vm_name = 'test_servicecatalog-{}'.format(fauxfactory.gen_alphanumeric())
    yield vm_name
    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt). The mgmt_sys classes raise plain Exception.
        logger.warning('Failed to clean up VM %s on provider %s',
                       vm_name, provider.key)
def _finished():
    """Teardown: remove the VM and its REST service, tolerating either
    having already been removed by the test itself.
    """
    try:
        a_provider.mgmt.delete_vm(vm_name)
    except Exception:
        # vm can be deleted/retired by test
        logger.warning("Failed to delete vm '{}'.".format(vm_name))
    try:
        service = rest_api.collections.services.get(name=rest_service.name)
        service.action.delete()
    except ValueError:
        # service can be deleted by test
        logger.warning("Failed to delete service '{}'.".format(rest_service.name))
def tenant(provider, setup_provider):
    """Fixture: create a Tenant object with a random name, yield it, then
    attempt deletion on teardown (failures logged, never raised).
    """
    new_tenant = Tenant(name=fauxfactory.gen_alphanumeric(8), provider=provider)
    yield new_tenant
    try:
        new_tenant.delete()
    except Exception:
        logger.warning(
            'Exception while attempting to delete tenant fixture, continuing')
def insert_one(self, proxy):
    """Insert the proxy unless one with the same IP already exists."""
    # The IP is used as the primary key (_id).
    already_stored = self.proxies.count_documents({'_id': proxy.ip}) != 0
    if already_stored:
        logger.warning("已经存在代理:{}".format(proxy))
    else:
        document = proxy.__dict__
        document['_id'] = proxy.ip
        self.proxies.insert_one(document)
        logger.info("插入新的代理:{}".format(proxy))
def html_parser(content):
    """Parse HTML (or raw script text) and extract candidate link URLs.

    Pulls URLs from <script src>, <a href>, <form action> and <iframe src>
    tags; when the content has no <body> (treated as a script), also regex-
    matches absolute URLs and quoted absolute paths. Heavily minified
    scripts (fewer than 5 line breaks) are skipped as likely obfuscated.

    Args:
        content: HTML or script source text.

    Returns:
        list of {"type": "link", "url": <str>} dicts (possibly empty).
    """
    result_list = []
    try:
        soup = BeautifulSoup(content, "html.parser")
        script_tag_list = soup.find_all('script')
        for script_tag in script_tag_list:
            if script_tag.get('src'):
                result_list.append({"type": "link", "url": script_tag.get('src')})
        link_tag_list = soup.find_all('a')
        for link_tag in link_tag_list:
            if link_tag.get('href'):
                result_list.append({"type": "link", "url": link_tag.get('href')})
        form_tag_list = soup.find_all('form')
        for form_tag in form_tag_list:
            if form_tag.get('action'):
                result_list.append({"type": "link", "url": form_tag.get('action')})
        iframe_tag_list = soup.find_all('iframe')
        for iframe_tag in iframe_tag_list:
            if iframe_tag.get('src'):
                result_list.append({"type": "link", "url": iframe_tag.get('src')})
        # for script
        if not soup.body:
            line_break = content.count('\n')
            if line_break < 5:
                # 混淆代码不扫描 (skip likely-obfuscated/minified code)
                return result_list
            # match absolute URLs, then quoted absolute paths.
            # Patterns made raw strings to avoid invalid-escape warnings
            # (`\/`, `\-` etc.); the regex semantics are unchanged.
            for url in match_content(r'(((ht|f)tps?):\/\/)?[\w\-]+(\.[\w\-]+)+([\w\-.,@?^=%&:/~+#]*[\w\-@?^=%&/~+#])?', content):
                result_list.append({"type": "link", "url": url})
            for url in match_content(r'(?<=(\"|\'|\`))\/[a-zA-Z0-9_?&=\/\-\#\.]*(?=(\"|\'|\`))', content):
                result_list.append({"type": "link", "url": url})
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt); the traceback is still logged.
        logger.warning('[AST] something error, {}'.format(traceback.format_exc()))
    return result_list
def vm_name(provider):
    """Fixture: yield a random 'scat' VM name, then try to delete any VM
    created under that name during teardown.
    """
    # also tries to delete the VM that gets made with this name
    vm_name = random_vm_name('scat')
    yield vm_name
    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt). The mgmt_sys classes raise plain Exception.
        logger.warning('Failed to clean up VM %s on provider %s',
                       vm_name, provider.key)
def insert_one(self, proxy):
    """2.1 Insert the proxy into MongoDB unless its IP is already stored.

    Args:
        proxy: proxy object; its ``ip`` attribute is used as the primary
            key (_id) and its ``__dict__`` as the stored document.
    """
    count = self.proxies.count_documents({'_id': proxy.ip})
    if count == 0:
        # `doc` instead of `dict`: the original shadowed the builtin.
        doc = proxy.__dict__
        doc['_id'] = proxy.ip
        self.proxies.insert_one(doc)
        # Log level fixed: a successful insert is informational, not a
        # warning (consistent with the sibling insert_one implementations).
        logger.info('已插入代理IP:{}'.format(proxy))
    else:
        logger.warning('已存在代理IP:{}'.format(proxy))
def get_status(self):
    """Return 'RUNNING' when self.info reports the node as running,
    'STOPPED' otherwise (including on any lookup failure).
    """
    try:
        running = self.info['running']
    except Exception as err:
        logger.warning(traceback.format_exc())
        logger.warning(err)
        running = False
    return 'RUNNING' if running else 'STOPPED'
def find_quadicon(
        self, do_not_navigate=False, mark=False, refresh=True,
        from_any_provider=False, use_search=True):
    """Find and return a quadicon belonging to a specific vm

    Args:
        do_not_navigate: stay on the current page (disables search, which
            needs navigation).
        mark: also tick the quadicon's checkbox when found.
        refresh: refresh the browser page before looking.
        from_any_provider: Whether to look for it anywhere (root of the
            tree). Useful when looking up archived or orphaned VMs
        use_search: narrow the listing via the quick-search box.

    Returns: :py:class:`cfme.web_ui.Quadicon` instance

    Raises: VmOrInstanceNotFound, TemplateNotFound
    """
    quadicon = Quadicon(self.name, self.quadicon_type)
    if not do_not_navigate:
        if from_any_provider:
            sel.force_navigate(self.ALL_LIST_LOCATION)
        elif self.is_vm:
            self.provider.load_all_provider_vms()
        else:
            self.provider.load_all_provider_templates()
        toolbar.select('Grid View')
    else:
        # Search requires navigation, we shouldn't use it then
        use_search = False
    if refresh:
        sel.refresh()
    # No pagination controls means the listing is empty.
    if not paginator.page_controls_exist():
        if self.is_vm:
            raise VmOrInstanceNotFound("VM '{}' not found in UI!".format(self.name))
        else:
            raise TemplateNotFound("Template '{}' not found in UI!".format(self.name))
    # this is causing some issues in 5.5.0.9, commenting out for a bit
    # paginator.results_per_page(1000)
    if use_search:
        try:
            if not search.has_quick_search_box():
                # We don't use provider-specific page (vm_templates_provider_branch) here
                # as those don't list archived/orphaned VMs
                if self.is_vm:
                    sel.force_navigate(self.provider.instances_page_name)
                else:
                    sel.force_navigate(self.provider.templates_page_name)
            search.normal_search(self.name)
        except Exception as e:
            # Search failure is non-fatal: fall back to paging through all.
            logger.warning("Failed to use search: %s", str(e))
    for page in paginator.pages():
        if sel.is_displayed(quadicon, move_to=True):
            if mark:
                sel.check(quadicon.checkbox())
            return quadicon
    else:
        # for/else: reached only if no page contained the quadicon.
        raise VmOrInstanceNotFound("VM '{}' not found in UI!".format(self.name))
def insert_one(self, proxy):
    """Insert the proxy unless one with the same IP already exists."""
    # The IP is used as the primary key (_id).
    if self.proxies.count_documents({'_id': proxy.ip}) != 0:
        logger.warning("代理已经存在{}".format(proxy))
        return
    document = proxy.__dict__
    document['_id'] = proxy.ip
    self.proxies.insert_one(document)
    logger.info("新代理插入成功{}".format(proxy))
def clean_text(self, text):
    """Replace every character not present in the alphabet with ' '.

    Characters found in ``self.dict`` (the alphabet) are kept as-is;
    anything else is logged and replaced by a single space.

    Args:
        text: input string to sanitize.

    Returns:
        The sanitized string, same length as *text*.
    """
    # Accumulate into a list and join once: the original used `+=` on a
    # string inside the loop, which is quadratic on long inputs.
    pieces = []
    for char in text:
        if char in self.dict:
            pieces.append(char)
        else:
            logger.warning(f"alphabet has no {char}")
            pieces.append(' ')
    return ''.join(pieces)
def get_all_pattern_strings_from_a_url(url, pattern):
    """Fetch *url* and return every match of *pattern* in its UTF-8 body.

    Args:
        url: URL to download.
        pattern: regular-expression pattern passed to re.findall.

    Returns:
        list of matches; empty when the URL cannot be fetched.
    """
    logger.info("Start to get all object names from a url.")
    matches = []
    try:
        response = urllib.request.urlopen(url)
        body = response.read().decode('utf-8')
        matches = re.compile(pattern).findall(body)
    except urllib.error.URLError as err:
        # Network/DNS errors are logged; an empty result is returned.
        logger.warning(err)
    logger.info("End to get all object names.")
    return matches
def test_vm(small_template, provider):
    """Fixture: create a 'retire' VM from the small template on the
    provider, yield it, then delete it from the provider on teardown
    (failures logged, never raised).
    """
    vm = VM.factory(random_vm_name('retire'), provider,
                    template_name=small_template)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield vm
    try:
        if provider.mgmt.does_vm_exist(vm.name):
            provider.mgmt.delete_vm(vm.name)
    except Exception:
        logger.warning('Failed to delete vm from provider: {}'.format(vm.name))
def scale_out_cluster(self,clustername,username, image,user_info, setting):
    """Add one container node to an existing cluster.

    Picks a random worker, allocates a user IP, creates the container over
    RPC, starts it if the cluster is already running, appends the new host
    to the cluster's hosts file and persists the grown cluster to the DB.

    :param clustername: name of the target cluster
    :param username: owner of the cluster
    :param image: image description dict (``image['name']`` is recorded)
    :param user_info: JSON string; ``data.id`` is used as the uid
    :param setting: container resource settings, serialized to the worker
    :return: ``[True, clusterinfo]`` on success, ``[False, message]`` on any failure
    """
    if not self.is_cluster(clustername,username):
        return [False, "cluster:%s not found" % clustername]
    workers = self.nodemgr.get_nodeips()
    if (len(workers) == 0):
        logger.warning("no workers to start containers, scale out failed")
        return [False, "no workers are running"]
    image_json = json.dumps(image)
    # Allocate an IP from the user's CIDR before touching any worker.
    [status, result] = self.networkmgr.acquire_userips_cidr(username)
    gateway = self.networkmgr.get_usergw(username)
    #vlanid = self.networkmgr.get_uservlanid(username)
    self.networkmgr.printpools()
    if not status:
        return [False, result]
    ip = result[0]
    [status, clusterinfo] = self.get_clusterinfo(clustername,username)
    clusterid = clusterinfo['clusterid']
    clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
    hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts"
    cid = clusterinfo['nextcid']
    # Random worker selection — no load-aware placement here.
    workerip = workers[random.randint(0, len(workers)-1)]
    oneworker = self.nodemgr.ip_to_rpc(workerip)
    lxc_name = username + "-" + str(clusterid) + "-" + str(cid)
    hostname = "host-" + str(cid)
    proxy_server_ip = clusterinfo['proxy_server_ip']
    proxy_public_ip = clusterinfo['proxy_public_ip']
    uid = json.loads(user_info)["data"]["id"]
    [success, message] = oneworker.create_container(lxc_name, proxy_public_ip, username, uid, json.dumps(setting), clustername, clusterid, str(cid), hostname, ip, gateway, image_json)
    if success is False:
        # Roll back the IP allocation if container creation failed.
        self.networkmgr.release_userips(username, ip)
        logger.info("create container failed, so scale out failed")
        return [False, message]
    if clusterinfo['status'] == "running":
        # Cluster is live: bring the new node up immediately.
        self.networkmgr.check_usergre(username, uid, workerip, self.nodemgr, self.distributedgw=='True')
        oneworker.start_container(lxc_name)
        oneworker.start_services(lxc_name, ["ssh"]) # TODO: need fix
        namesplit = lxc_name.split('-')
        portname = namesplit[1] + '-' + namesplit[2]
        oneworker.recover_usernet(portname, uid, proxy_server_ip, workerip==proxy_server_ip)
    logger.info("scale out success")
    # Append the new node to the cluster hosts file (name resolution inside containers).
    hostfile = open(hostpath, 'a')
    hostfile.write(ip.split("/")[0] + "\t" + hostname + "\t" + hostname + "." + clustername + "\n")
    hostfile.close()
    [success,vcluster] = self.get_vcluster(clustername,username)
    if not success:
        return [False, "Fail to write info."]
    # Persist the grown cluster: bump the next container id and size.
    vcluster.nextcid = int(clusterinfo['nextcid']) + 1
    vcluster.size = int(clusterinfo['size']) + 1
    vcluster.containers.append(Container(lxc_name,hostname,ip,workerip,image['name'],datetime.datetime.now(),setting))
    #{'containername':lxc_name, 'hostname':hostname, 'ip':ip, 'host':workerip, 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting})
    db.session.add(vcluster)
    db.session.commit()
    return [True, clusterinfo]
def insert_one(self, proxy):
    """Insert one proxy document, using its IP as the ``_id`` primary key.

    Skips (and warns about) proxies whose IP is already stored.
    """
    lookup = {'_id': proxy.ip}
    if self.proxies.count_documents(lookup) == 0:
        payload = proxy.__dict__
        payload['_id'] = proxy.ip
        self.proxies.insert_one(payload)
        logger.info('插入新的代理:{}'.format(proxy))
    else:
        logger.warning("已经存在的代理:{}".format(proxy))
def insert_one(self, proxy):
    """Insert a proxy document if its IP is not already in the collection.

    The proxy's IP is used as the MongoDB primary key (``_id``).
    """
    count = self.proxies.count_documents({'_id': proxy.ip})
    if count == 0:
        # Renamed from `dict`: never shadow the builtin type name.
        record = proxy.__dict__
        record['_id'] = proxy.ip
        self.proxies.insert_one(record)
        logger.info(f"插入新的代理{proxy}")
    else:
        logger.warning(f"已存在代理:{proxy}")
def find_quadicon(
        self, do_not_navigate=False, mark=False, refresh=True, from_any_provider=False,
        use_search=True):
    """Find and return a quadicon belonging to a specific vm

    Navigates (via ``navigate_to``) to the VM/template grid unless told not
    to, optionally filters with the quick-search box, then scans each page
    for the quadicon matching ``self.name``.

    Args:
        do_not_navigate: Assume the right page is loaded; disables search too,
            since search itself needs navigation.
        mark: Also tick the quadicon's checkbox once found.
        refresh: Refresh the browser before looking.
        from_any_provider: Whether to look for it anywhere (root of the tree). Useful when
            looking up archived or orphaned VMs
        use_search: Use the quick-search box to filter by name.

    Returns: :py:class:`cfme.web_ui.Quadicon` instance
    Raises: VmOrInstanceNotFound
    """
    quadicon = Quadicon(self.name, self.quadicon_type)
    if not do_not_navigate:
        if from_any_provider:
            # TODO implement as navigate_to when cfme.infra.virtual_machines has destination
            navigate_to(self, 'All')
        elif self.is_vm:
            # NOTE(review): both branches use the same destination today; kept
            # split to mirror the VM/template distinction used elsewhere.
            navigate_to(self, 'AllForProvider', use_resetter=False)
        else:
            navigate_to(self, 'AllForProvider', use_resetter=False)
        toolbar.select('Grid View')
    else:
        # Search requires navigation, we shouldn't use it then
        use_search = False
    if refresh:
        sel.refresh()
    if not paginator.page_controls_exist():
        # Empty listing -> the entity cannot be present at all.
        if self.is_vm:
            raise VmOrInstanceNotFound("VM '{}' not found in UI!".format(self.name))
        else:
            raise TemplateNotFound("Template '{}' not found in UI!".format(self.name))

    paginator.results_per_page(1000)
    if use_search:
        try:
            if not search.has_quick_search_box():
                # TODO rework search for archived/orphaned VMs
                if self.is_vm:
                    navigate_to(self, 'AllForProvider', use_resetter=False)
                else:
                    navigate_to(self, 'AllForProvider', use_resetter=False)
            search.normal_search(self.name)
        except Exception as e:
            # Best-effort: fall back to paging through everything below.
            logger.warning("Failed to use search: %s", str(e))
    for page in paginator.pages():
        if sel.is_displayed(quadicon, move_to=True):
            if mark:
                sel.check(quadicon.checkbox())
            return quadicon
    else:
        # for/else: reached only when no page contained the quadicon.
        raise VmOrInstanceNotFound("VM '{}' not found in UI!".format(self.name))
def insert_one(self, proxy):
    """Insert a single proxy document into the collection.

    ``proxy.ip`` becomes the document's ``_id`` (primary key); duplicates
    are detected first and only logged.
    """
    exists = self.proxies.count_documents({"_id": proxy.ip}) > 0
    if exists:
        logger.warning("已经存在的代理IP:{}".format(proxy.ip))
        return
    # Convert the proxy object into a plain dict for MongoDB.
    doc = proxy.__dict__
    doc["_id"] = proxy.ip
    self.proxies.insert_one(doc)
    logger.info("插入代理IP:{}".format(proxy.ip))
def _read_xml(self, filename): """ Read XML :param filename: :return: """ path = os.path.join(self.rules_path, filename) try: tree = eT.parse(path) return tree.getroot() except Exception as e: logger.warning('parse xml failed ({file})'.format(file=path)) return None
def insert_one(self, proxy):
    """Store a proxy in MongoDB, keyed by its IP, unless already present."""
    # Duplicate check: the IP serves as the collection's _id.
    if self.proxies.count_documents({'_id': proxy.ip}):
        logger.warning('已存在代理:{}'.format(proxy))
    else:
        entry = proxy.__dict__
        entry['_id'] = proxy.ip
        self.proxies.insert_one(entry)
        logger.info('插入新代理:{}'.format(proxy))
def dismiss_any_alerts():
    """Keep dismissing browser alerts until none remain.

    Covers the case where dismissing one alert immediately pops another.
    """
    try:
        while is_alert_present():
            popup = get_alert()
            logger.warning("Dismissing additional alert with text: %s", popup.text)
            popup.dismiss()
    except NoAlertPresentException:
        # is_alert_present should be reliable, but guard against the race anyway.
        pass
def run_command(self, *args, **kwargs):
    """Run an SSH command via ``self.ssh_client``, logging command and outcome.

    The logged output is truncated to ``self.log_line_limit`` characters;
    the full result object is returned to the caller untouched.

    :return: result object from ``ssh_client.run_command``
    """
    logger.info('{} - Running SSH Command#{} : {}'.format(
        self.hostname, self._command_counter, args[0]))
    results = self.ssh_client.run_command(*args, **kwargs)
    # BUG FIX: the original used max(limit, len(results)), which always
    # selected the full output and never truncated. Slicing by the limit
    # alone caps the logged text correctly (slices tolerate over-length).
    results_short = results[:self.log_line_limit]
    if results.success:
        logger.info('{} - Command#{} - Succeed: {}'.format(
            self.hostname, self._command_counter, results_short))
    else:
        logger.warning('{} - Command#{} - Failed: {}'.format(
            self.hostname, self._command_counter, results_short))
    self._command_counter += 1
    return results
def catalog_item(dialog, catalog):
    """Yield-fixture: build a generic catalog item, delete it on teardown."""
    item = CatalogItem(
        item_type="Generic",
        name='test_item_' + fauxfactory.gen_alphanumeric(),
        description="my catalog item",
        display_in=True,
        catalog=catalog,
        dialog=dialog)
    yield item
    # fixture cleanup — the item may already be gone; that's fine.
    try:
        item.delete()
    except NoSuchElementException:
        logger.warning('test_catalog_item: catalog_item yield fixture cleanup, catalog item "{}" '
                       'not found'.format(item.name))
def catalog():
    """Yield-fixture: create a catalog with a random name, delete it after."""
    catalog_name = "test_cat_" + fauxfactory.gen_alphanumeric()
    created = Catalog(name=catalog_name, description="my catalog")
    created.create()
    yield created
    # fixture cleanup — tolerate the catalog having been removed already.
    try:
        created.delete()
    except NoSuchElementException:
        logger.warning(
            'test_catalog_item: catalog yield fixture cleanup, catalog "{}" not '
            'found'.format(catalog_name))
def find_elements(self, *args):
    """Find all elements matching the locator, with a smart wait.

    Waits up to 30 seconds for the elements to become visible, pauses one
    extra second, then returns the matches; returns None on any failure.
    """
    try:
        waiter = WebDriverWait(self.driver, 30)
        waiter.until(EC.visibility_of_all_elements_located(args))
        time.sleep(1)
        return self.driver.find_elements(*args)
    except Exception as err:
        # Broad catch is deliberate: lookup failure is logged, not raised.
        logger.warning("元素查找失败:%s" % err)
        return None
def get_content(self, filepath):
    """Return the text content of a file known to the pre-parse cache.

    :param filepath: path, resolved via ``self.get_path`` and normalized
    :return: file content string, or ``False`` if the file was never pre-parsed
    """
    filepath = os.path.normpath(self.get_path(filepath))
    if filepath in self.pre_result:
        # `with` guarantees the handle is closed even if read() raises
        # (the original leaked the handle on a read error).
        with codecs.open(filepath, 'r+', encoding='utf-8', errors='ignore') as f:
            return f.read()
    logger.warning("[AST] file {} parser not found...".format(filepath))
    return False
def get_child_files(self, filepath):
    """Return the pre-parsed child-file list for ``filepath``, or ``False``.

    The pre-parse cache may be keyed by the normalized path itself or by
    the path joined onto ``self.target_directory``; both keys are tried in
    that order. The join is computed once instead of three times as before.
    """
    filepath = os.path.normpath(filepath)
    joined = os.path.join(self.target_directory, filepath)
    for key in (filepath, joined):
        if key in self.pre_result and "child_files" in self.pre_result[key]:
            return self.pre_result[key]['child_files']
    logger.warning("[AST] file {} object or child files not found...".format(filepath))
    return False