def coverage_report_jenkins(jenkins_url, jenkins_jobs, jenkins_user, jenkins_token,
                            appliance_ip, appliance_version, wave_size):
    """Aggregate coverage data from jenkins job(s) and upload to sonarqube"""
    if appliance_ip is None and appliance_version is None:
        raise ValueError('Must specify either --appliance-ip or --find-appliance')
    if appliance_ip is not None and appliance_version is not None:
        raise ValueError('--appliance-ip and --find-appliance are mutually exclusive options')

    # Find an appliance using sprout if asked to do so:
    if appliance_version is not None:
        # TODO: Upstream support
        group = 'downstream-{}z'.format(''.join(appliance_version.split('.')[:2]))
        sprout = SproutClient.from_config()
        logger.info('requesting an appliance from sprout for %s/%s', group, appliance_version)
        pool_id = sprout.request_appliances(
            group,
            version=appliance_version,
            lease_time=env.sonarqube.scanner_lease)
        logger.info('Requested pool %s', pool_id)
        result = None
        try:
            while not result or not (result['fulfilled'] and result['finished']):
                result = sprout.request_check(pool_id)
            appliance_ip = result['appliances'][0]['ip_address']
            logger.info('Received an appliance with IP address: %s', appliance_ip)
            with IPAppliance(hostname=appliance_ip) as appliance:
                exit(aggregate_coverage(
                    appliance,
                    jenkins_url,
                    jenkins_user,
                    jenkins_token,
                    jenkins_jobs,
                    wave_size))
        finally:
            with diaper:
                sprout.destroy_pool(pool_id)
    else:
        # Use an existing appliance.
        with IPAppliance(hostname=appliance_ip) as appliance:
            exit(aggregate_coverage(
                appliance,
                jenkins_url,
                jenkins_user,
                jenkins_token,
                jenkins_jobs,
                wave_size))

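# A minimal, hypothetical call to coverage_report_jenkins above (URL, job names,
# credentials, and wave size are illustrative; exactly one of appliance_ip /
# appliance_version must be given, since the two are mutually exclusive):
#
#   coverage_report_jenkins('https://jenkins.example.com', ['cfme-coverage-job'],
#                           'jenkins-user', 'jenkins-token',
#                           appliance_ip='10.0.0.1', appliance_version=None, wave_size=10)
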
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--address',
                        help='hostname or ip address of target appliance', default=None)
    parser.add_argument('--sdk_url', help='url to download sdk pkg',
                        default=cfme_data.get("basic_info", {}).get("netapp_sdk_url"))
    parser.add_argument('--restart', help='restart evmserverd after installation ' +
                        '(required for proper operation)', action="store_true")

    args = parser.parse_args()

    if not args.address:
        appliance = get_or_create_current_appliance()
    else:
        appliance = IPAppliance(address=args.address)
    print('Address: {}'.format(appliance.address))
    print('SDK URL: {}'.format(args.sdk_url))
    print('Restart: {}'.format(args.restart))

    appliance.install_netapp_sdk(sdk_url=args.sdk_url, reboot=args.restart, log_callback=log)

def test_ipappliance_use_baseurl():
    ip_a = IPAppliance()
    ip_a_parsed = urlparse(ip_a.url)
    env_parsed = urlparse(store.base_url)
    assert (ip_a_parsed.scheme, ip_a_parsed.netloc) == (env_parsed.scheme, env_parsed.netloc)
    assert ip_a.address in store.base_url

def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', nargs='?', default=None,
                        help='hostname or ip address of target appliance')
    parser.add_argument('db_address',
                        help='hostname or ip address of external database')
    parser.add_argument('--database', default='vmdb_production',
                        help='name of the external database')
    parser.add_argument('--region', default=0, type=int,
                        help='region to assign to the new DB')
    parser.add_argument('--username', default=credentials['database']['username'],
                        help='username for external database')
    parser.add_argument('--password', default=credentials['database']['password'],
                        help='password for external database')
    args = parser.parse_args()

    print('Initializing Appliance External DB')
    ip_a = IPAppliance(hostname=args.address)
    status, out = ip_a.db.enable_external(args.db_address, args.region, args.database,
                                          args.username, args.password)

    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')

def call_appliance(ip_address, action, args, kwargs):
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    target_obj = IPAppliance(hostname=ip_address)
    fields_to_traverse, action = action.split('.')[:-1], action.split('.')[-1]

    # Iterate over non-callables, such as appliance.db
    for field in fields_to_traverse:
        try:
            target_obj = getattr(target_obj, field)
        except AttributeError:
            raise Exception(
                'Field "{}" not found for object "{}"'.format(field, target_obj))

    try:
        call = getattr(target_obj, action)
    except AttributeError:
        raise Exception('Action "{}" not found'.format(action))

    # The final obj may or may not be a callable
    if not callable(call):
        return call
    else:
        try:
            argspec = inspect.getargspec(call)
        except TypeError:
            return call(*args, **kwargs)
        else:
            if argspec.keywords is not None or 'log_callback' in argspec.args:
                kwargs['log_callback'] = generate_log_callback(ip_address)
            return call(*args, **kwargs)

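# Minimal usage sketch for call_appliance above (the IP address and dotted action
# paths are hypothetical; a live appliance is required, so the calls are shown as
# comments):
#
#   call_appliance('10.0.0.1', 'db.enable_internal', [0], {})  # traverses appliance.db, then calls enable_internal(0)
#   call_appliance('10.0.0.1', 'version', [], {})              # a non-callable attribute is returned as-is
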
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--address',
                        help='hostname or ip address of target appliance', default=None)
    parser.add_argument('--vddk_url', help='url to download vddk pkg')
    parser.add_argument('--reboot', help='reboot after installation ' +
                        '(required for proper operation)', action="store_true")
    parser.add_argument('--force', help='force installation if version detected',
                        action="store_true")

    args = parser.parse_args()
    if not args.address:
        appliance = get_or_create_current_appliance()
    else:
        appliance = IPAppliance(hostname=urlparse(args.address).netloc)
    appliance.install_vddk(
        reboot=args.reboot, force=args.force, vddk_url=args.vddk_url, log_callback=log)

def get_appliance(appliance_ip):
    """Return an IPAppliance for the given address, or the current appliance when none is given."""
    from cfme.utils.appliance import IPAppliance, get_or_create_current_appliance
    if not appliance_ip:
        app = get_or_create_current_appliance()
    else:
        app = IPAppliance(hostname=appliance_ip)
    return app

def collection_appliance(self):
    # if parallelized, this is decided in sessionstart and written to the conf
    if store.parallelizer_role == 'slave':
        from cfme.utils.appliance import IPAppliance
        return IPAppliance(conf['.ui-coverage']['collection_appliance'])
    else:
        # otherwise, coverage only happens on one appliance
        return store.current_appliance

def get_appliance(appliance_ip):
    """Return an IPAppliance for the given address, or the first appliance from the env config when none is given."""
    from cfme.utils.appliance import IPAppliance, load_appliances_from_config, stack
    if not appliance_ip:
        app = load_appliances_from_config(env)[0]
    else:
        app = IPAppliance(hostname=appliance_ip)
    stack.push(app)  # ensure safety from bad code, phase out later
    return app

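# Hypothetical usage of the helper above (the address is illustrative):
#
#   app = get_appliance('10.0.0.1')  # wrap the given address in an IPAppliance
#   app = get_appliance(None)        # fall back to the first appliance from the env config
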
def test_ipappliance_use_baseurl(appliance):
    if isinstance(appliance, DummyAppliance):
        pytest.xfail("Dummy appliance can't provide base_url")
    ip_a = IPAppliance()
    ip_a_parsed = urlparse(ip_a.url)
    env_parsed = urlparse(store.base_url)
    assert (ip_a_parsed.scheme, ip_a_parsed.netloc) == (env_parsed.scheme, env_parsed.netloc)
    assert ip_a.address in store.base_url

def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', nargs='?', default=None,
                        help='hostname or ip address of target appliance')
    parser.add_argument('--with_ssl', help='update for ssl connections', action="store_true")
    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    return ip_a.loosen_pgssl()

def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', nargs="?", default=None,
                        help='hostname or ip address of target appliance')
    args = parser.parse_args()
    ip_a = IPAppliance(hostname=args.address)
    ip_a.fix_ntp_clock()
    print("Time was set")

def setup_replication_env(cfme_version, provider, lease, sprout_poolid, desc):
    """Multi appliance setup with multi region and replication from remote to global"""
    lease_time = tot_time(lease)
    required_app_count = 2
    sprout_client = SproutClient.from_config()
    if sprout_poolid:
        if sprout_client.call_method('pool_exists', sprout_poolid):
            sprout_pool = sprout_client.call_method('request_check', sprout_poolid)
            if len(sprout_pool['appliances']) >= required_app_count:
                print("Processing pool...")
                apps = []
                for app in sprout_pool['appliances']:
                    apps.append(IPAppliance(app['ip_address']))
                sprout_client.set_pool_description(sprout_poolid, desc)
            else:
                sys.exit("Pool does not meet the minimum size requirements!")
        else:
            sys.exit("Pool not found!")
    else:
        print("Provisioning appliances")
        apps, request_id = provision_appliances(
            count=required_app_count, cfme_version=cfme_version,
            provider=provider, lease_time=lease_time)
        print("Appliance pool lease time is {}".format(lease))
        sprout_client.set_pool_description(request_id, desc)

    print("Configuring replicated environment")
    ip0 = apps[0].hostname
    ip1 = apps[1].hostname
    opt = '5' if cfme_version >= "5.8" else '8'

    command_set0 = ('ap', '', opt, '1', '1', 'y', '1', 'n', '99', pwd,
                    TimedCommand(pwd, 360), '')
    apps[0].appliance_console.run_commands(command_set0)
    apps[0].wait_for_evm_service()
    apps[0].wait_for_web_ui()
    print("Global region appliance provisioned and configured {}".format(ip0))

    command_set1 = ('ap', '', opt, '2', ip0, '', pwd, '', '1', 'y', '1', 'n', '1', pwd,
                    TimedCommand(pwd, 360), '')
    apps[1].appliance_console.run_commands(command_set1)
    apps[1].wait_for_evm_service()
    apps[1].wait_for_web_ui()
    print("Remote region appliance provisioned and configured {}".format(ip1))

    print("Setup - Replication on remote appliance")
    apps[1].set_pglogical_replication(replication_type=':remote')
    print("Setup - Replication on global appliance")
    apps[0].set_pglogical_replication(replication_type=':global')
    apps[0].add_pglogical_replication_subscription(apps[1].hostname)
    print("Done!")

def provision_appliances(self, count=1, preconfigured=False, version=None, stream=None,
                         provider=None, provider_type=None, lease_time=60, ram=None, cpu=None,
                         **kwargs):
    # provisioning may take more time than expected in some cases
    wait_time = kwargs.get('wait_time', 900)
    # If we specify version, stream is ignored because we will get that specific version
    if version:
        stream = get_stream(version)
    # If we specify stream but not version, sprout will give us the latest version of that stream
    elif stream:
        pass
    # If we don't specify either, we will get the same version as the current appliance
    else:
        stream = get_stream(current_appliance.version)
        version = current_appliance.version.vstring
    request_id = self.call_method(
        'request_appliances', preconfigured=preconfigured, version=version,
        provider_type=provider_type, group=stream, provider=provider, lease_time=lease_time,
        ram=ram, cpu=cpu, count=count, **kwargs)
    wait_for(
        lambda: self.call_method('request_check', str(request_id))['finished'],
        num_sec=wait_time,
        message='provision {} appliance(s) from sprout'.format(count))
    data = self.call_method('request_check', str(request_id))
    logger.debug(data)
    appliances = []
    for appliance in data['appliances']:
        app_args = {
            'hostname': appliance['ip_address'],
            'project': appliance['project'],
            'container': appliance['container'],
            'db_host': appliance['db_host']
        }
        appliances.append(IPAppliance(**app_args))
    return appliances, request_id

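# Hedged usage sketch for provision_appliances above, assuming (as its use of
# self.call_method suggests) that it is a method of SproutClient; the version, count,
# and lease time are illustrative:
#
#   client = SproutClient.from_config()
#   appliances, request_id = client.provision_appliances(count=2, version='5.10.0.20',
#                                                        lease_time=180)
#   ...
#   client.destroy_pool(request_id)  # release the sprout pool when done
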
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', help='hostname or ip address of target appliance')
    # A flag must be an optional argument; a positional argument combined with
    # action="store_true" would always evaluate to True.
    parser.add_argument('--start', action="store_true", default=False, help='Start Merkyl?')
    args = parser.parse_args()
    ip_a = IPAppliance(hostname=args.hostname)
    ip_a.deploy_merkyl(args.start)

def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', nargs='?', default=None,
                        help='hostname or ip address of target appliance')
    parser.add_argument('source', nargs='?', default='ManageIQ',
                        help='Source Domain name')
    parser.add_argument('dest', nargs='?', default='Default',
                        help='Destination Domain name')
    args = parser.parse_args()
    ip_a = IPAppliance(hostname=args.hostname)
    status, out = ip_a.clone_domain(args.source, args.dest)
    return status

def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', nargs='?', default=None,
                        help='hostname or ip address of target appliance')
    args = parser.parse_args()
    ip_a = IPAppliance(hostname=args.address)
    status = ip_a.precompile_assets()
    if status == 0:
        ip_a.evmserverd.restart()
        print("EVM service restarted, UI should be available shortly")
    return status

def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('url', nargs='?', default=None,
                        help='URL of target appliance, e.g. "https://ip_or_host/"')
    parser.add_argument('--num-sec', default=600, type=int, dest='num_sec',
                        help='Maximum number of seconds to wait before giving up, '
                             'default 600 (10 minutes)')
    args = parser.parse_args()
    if args.url:
        ip_a = IPAppliance.from_url(args.url)
    else:
        ip_a = IPAppliance()
    result = ip_a.wait_for_web_ui(timeout=args.num_sec)
    if not result:
        return 1

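# Hypothetical command-line invocations of the script above (the script file name and
# URL are illustrative):
#
#   python wait_for_appliance_ui.py https://10.0.0.1/ --num-sec 900
#   python wait_for_appliance_ui.py   # no URL: wait on the appliance from the environment config
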
def temp_extdb_pod_appliance(appliance, provider, extdb_template, template_tags,
                             create_external_database, appliance_data):
    db_host, db_name = create_external_database
    project = 'test-pod-extdb-{t}'.format(t=fauxfactory.gen_alphanumeric().lower())
    provision_data = {
        'template': extdb_template['name'],
        'tags': template_tags,
        'vm_name': project,
        'template_params': {
            'DATABASE_IP': db_host,
            'DATABASE_NAME': db_name
        },
        'running_pods': set(provider.mgmt.required_project_pods) - {'postgresql'}
    }
    try:
        data = provider.mgmt.deploy_template(**provision_data)
        params = appliance_data.copy()
        params['db_host'] = data['external_ip']
        params['project'] = project
        params['hostname'] = data['url']

        def is_api_available(appliance):
            try:
                return appliance.rest_api.collections.providers.all
            except Exception:
                pass

        with IPAppliance(**params) as appliance:
            # framework will try to work with the default appliance if the browser
            # restarts w/o this workaround
            appliance.is_pod = True
            stack.push(appliance)
            holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
            holder.held_appliance = appliance
            # workaround: the appliance looks ready but the api may still return errors
            wait_for(is_api_available, func_args=[appliance], num_sec=30)
            yield appliance
            stack.pop()
    finally:
        if provider.mgmt.does_vm_exist(project):
            provider.mgmt.delete_vm(project)

def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    parser.add_argument('--region', default=0, type=int,
                        help='region to assign to the new DB')
    args = parser.parse_args()
    print('Initializing Appliance Internal DB')
    ip_a = IPAppliance(args.address)
    status, out = ip_a.db.enable_internal(args.region)

    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')

def call_appliance(ip_address, action, args, kwargs):
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    appliance = IPAppliance(ip_address)
    try:
        call = getattr(appliance, action)
    except AttributeError:
        raise Exception('Action "{}" not found'.format(action))
    if isinstance(getattr(type(appliance), action), property):
        return call
    else:
        try:
            argspec = inspect.getargspec(call)
        except TypeError:
            return call(*args, **kwargs)
        else:
            if argspec.keywords is not None or 'log_callback' in argspec.args:
                kwargs['log_callback'] = generate_log_callback(ip_address)
            return call(*args, **kwargs)

def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    parser.add_argument("-u", "--url", help="url(s) to use for update",
                        dest="urls", action="append")
    parser.add_argument("-c", "--cleanup", help="Whether to cleanup /etc/yum.repos.d before start",
                        dest="cleanup", action="store_true")
    parser.add_argument("--no_wait_ui", help="Whether to NOT wait for UI after reboot",
                        dest="no_wait_ui", action="store_false")
    parser.add_argument('--reboot', help='reboot after installation ' +
                        '(required for proper operation)', action="store_true", default=False)

    args = parser.parse_args()
    ip_a = IPAppliance(hostname=args.address)
    # Don't reboot here, so we can print updates to the console when we do
    res = ip_a.update_rhel(*args.urls, reboot=False, streaming=True, cleanup=args.cleanup)

    if res.rc == 0:
        if args.reboot:
            print('Rebooting')
            ip_a.reboot(wait_for_miq_ready=args.no_wait_ui)
        print('Appliance update complete')

    return res.rc

def provision_appliances(self, count=1, preconfigured=False, version=None, stream=None,
                         provider=None, provider_type=None, lease_time=120, ram=None, cpu=None):
    # If we specify version, stream is ignored because we will get that specific version
    if version:
        stream = get_stream(version)
    # If we specify stream but not version, sprout will give us the latest version of that stream
    elif stream:
        pass
    # If we don't specify either, we will get the same version as the current appliance
    else:
        stream = get_stream(current_appliance.version)
        version = current_appliance.version.vstring
    request_id = self.call_method(
        'request_appliances', preconfigured=preconfigured, version=version,
        provider_type=provider_type, group=stream, provider=provider, lease_time=lease_time,
        ram=ram, cpu=cpu, count=count)
    wait_for(
        lambda: self.call_method('request_check', str(request_id))['finished'],
        num_sec=300,
        message='provision {} appliance(s) from sprout'.format(count))
    data = self.call_method('request_check', str(request_id))
    logger.debug(data)
    appliances = []
    for appliance in data['appliances']:
        appliances.append(IPAppliance(hostname=appliance['ip_address']))
    return appliances, request_id

def claim_appliance_and_aggregate(jenkins_url, jenkins_jobs, version, jenkins_user, jenkins_token,
                                  wave_size):
    # TODO: Upstream support
    group = 'downstream-' + ''.join(version.split('.')[:2]) + 'z'
    sprout = SproutClient.from_config()
    logger.info('requesting an appliance from sprout for %s/%s', group, version)
    pool_id = sprout.request_appliances(group, version=version,
                                        lease_time=env.sonarqube.scanner_lease)
    logger.info('Requested pool %s', pool_id)
    result = None
    try:
        while not result or not (result['fulfilled'] and result['finished']):
            result = sprout.request_check(pool_id)
        appliance_ip = result['appliances'][0]['ip_address']
        logger.info('Received an appliance with IP address: %s', appliance_ip)
        with IPAppliance(hostname=appliance_ip) as appliance:
            exit(aggregate_coverage(appliance, jenkins_url, jenkins_user, jenkins_token,
                                    jenkins_jobs, wave_size))
    finally:
        with diaper:
            sprout.destroy_pool(pool_id)

def pytest_configure(config):
    if config.getoption("appliances"):
        return
    if not config.getoption('--use-sprout'):
        return

    provision_request = SproutProvisioningRequest.from_config(config)

    mgr = config._sprout_mgr = SproutManager()
    requested_appliances = mgr.request_appliances(provision_request)
    config.option.appliances[:] = []
    appliances = config.option.appliances
    # Push an appliance to the stack to have a proper reference for test collection
    # FIXME: this is a bad hack based on the need for control of collection partitioning
    appliance_stack.push(
        IPAppliance(address=requested_appliances[0]["ip_address"]))
    log.info("Appliances were provided:")
    for appliance in requested_appliances:
        url = "https://{}/".format(appliance["ip_address"])
        appliances.append(url)
        log.info("- %s is %s", url, appliance['name'])

    mgr.reset_timer()
    # Set the base_url for collection purposes on the first appliance
    conf.runtime["env"]["base_url"] = appliances[0]
    # Retrieve and print the template_name for Jenkins to pick up
    template_name = requested_appliances[0]["template_name"]
    conf.runtime["cfme_data"]["basic_info"]["appliance_template"] = template_name
    log.info("appliance_template: %s", template_name)
    with project_path.join('.appliance_template').open('w') as template_file:
        template_file.write('export appliance_template="{}"'.format(template_name))
    log.info("Sprout setup finished.")

    config.pluginmanager.register(ShutdownPlugin())

def setup_multiregion_env(cfme_version, provider_type, provider, lease, sprout_poolid, desc,
                          remote_nodes, add_prov):
    """Multi appliance setup with multi region and replication from remote to global"""
    lease_time = tot_time(lease)
    provider_type = None if provider else provider_type
    sprout_client = SproutClient.from_config()

    required_app_count = 1  # global app
    required_app_count += remote_nodes

    if sprout_poolid:
        if sprout_client.call_method('pool_exists', sprout_poolid):
            sprout_pool = sprout_client.call_method('request_check', sprout_poolid)
            if len(sprout_pool['appliances']) >= required_app_count:
                print("Processing pool...")
                apps = []
                for app in sprout_pool['appliances']:
                    apps.append(IPAppliance(app['ip_address']))
                sprout_client.set_pool_description(sprout_poolid, desc)
            else:
                sys.exit("Pool does not meet the minimum size requirements!")
        else:
            sys.exit("Pool not found!")
    else:
        print("Provisioning appliances")
        apps, request_id = provision_appliances(
            count=required_app_count, cfme_version=cfme_version,
            provider_type=provider_type, provider=provider, lease_time=lease_time)
        print("Appliance pool lease time is {}".format(lease))
        sprout_client.set_pool_description(request_id, desc)
        print("Appliances Provisioned")
    print("Configuring Replicated Environment")
    global_app = apps[0]
    gip = global_app.hostname

    remote_apps = apps[1:]

    print("Global Appliance Configuration")
    app_creds = {
        "username": credentials["database"]["username"],
        "password": credentials["database"]["password"],
        "sshlogin": credentials["ssh"]["username"],
        "sshpass": credentials["ssh"]["password"],
    }
    app_params = dict(region=99, dbhostname='localhost', username=app_creds['username'],
                      password=app_creds['password'], dbname='vmdb_production',
                      dbdisk=global_app.unpartitioned_disks[0])
    global_app.appliance_console_cli.configure_appliance_internal(**app_params)
    global_app.evmserverd.wait_for_running()
    global_app.wait_for_web_ui()
    print("Done: Global @ {}".format(gip))

    for num, app in enumerate(remote_apps):
        region_n = str((num + 1) * 10)
        print("Remote Appliance Configuration")
        app_params = dict(region=region_n, dbhostname='localhost',
                          username=app_creds['username'], password=app_creds['password'],
                          dbname='vmdb_production', dbdisk=app.unpartitioned_disks[0],
                          fetch_key=gip, sshlogin=app_creds['sshlogin'],
                          sshpass=app_creds['sshpass'])
        app.appliance_console_cli.configure_appliance_internal_fetch_key(**app_params)
        app.evmserverd.wait_for_running()
        app.wait_for_web_ui()
        print("Done: Remote @ {}, region: {}".format(app.hostname, region_n))

        print("Configuring Replication")
        print("Setup - Replication on remote appliance")
        app.set_pglogical_replication(replication_type=':remote')
    print("Setup - Replication on global appliance")
    global_app.set_pglogical_replication(replication_type=':global')
    for app in remote_apps:
        global_app.add_pglogical_replication_subscription(app.hostname)
    random.shuffle(remote_apps)
    if add_prov:
        for app, prov_id in zip(cycle(remote_apps), add_prov):
            stack.push(app)
            prov = get_crud(prov_id)
            print("Adding provider {} to appliance {}".format(prov_id, app.hostname))
            prov.create_rest()
            stack.pop()
    print("Done!")

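# Hedged invocation sketch for setup_multiregion_env above (version, provider type,
# lease string, and description are illustrative; the lease format is whatever
# tot_time() accepts in this repo):
#
#   setup_multiregion_env(cfme_version='5.10', provider_type='rhevm', provider=None,
#                         lease='1w', sprout_poolid=None, desc='multi-region replication test',
#                         remote_nodes=2, add_prov=[])
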
def test_ipappliance_from_address():
    address = '1.2.3.4'
    ip_a = IPAppliance(address)
    assert ip_a.address == address
    assert ip_a.url == 'https://{}/'.format(address)

def test_ipappliance_managed_providers(infra_provider):
    ip_a = IPAppliance()
    assert infra_provider in ip_a.managed_known_providers

def temp_pod_ansible_appliance(provider, appliance_data, template_tags):
    tags = template_tags
    params = appliance_data.copy()
    project = 'test-pod-ansible-{t}'.format(t=fauxfactory.gen_alphanumeric().lower())
    try:
        with ssh.SSHClient(hostname=params['openshift_creds']['hostname'],
                           username=params['openshift_creds']['ssh']['username'],
                           password=params['openshift_creds']['ssh']['password'],
                           oc_username=params['openshift_creds']['username'],
                           oc_password=params['openshift_creds']['password'],
                           project=project,
                           is_pod=True) as ssh_client:
            # copy the ansible configuration file to the openshift server
            fulfilled_config = ansible_config.format(
                host=provider.provider_data['hostname'],
                subdomain=provider.provider_data['base_url'],
                proj=project,
                app_ui_url=tags['cfme-openshift-app-ui']['url'],
                app_ui_tag=tags['cfme-openshift-app-ui']['tag'],
                app_url=tags['cfme-openshift-app']['url'],
                app_tag=tags['cfme-openshift-app']['tag'],
                ansible_url=tags['cfme-openshift-embedded-ansible']['url'],
                ansible_tag=tags['cfme-openshift-embedded-ansible']['tag'],
                httpd_url=tags['cfme-openshift-httpd']['url'],
                httpd_tag=tags['cfme-openshift-httpd']['tag'],
                memcached_url=tags['cfme-openshift-memcached']['url'],
                memcached_tag=tags['cfme-openshift-memcached']['tag'],
                db_url=tags['cfme-openshift-postgresql']['url'],
                db_tag=tags['cfme-openshift-postgresql']['tag'])
            logger.info("ansible config file:\n {conf}".format(conf=fulfilled_config))
            with tempfile.NamedTemporaryFile('w') as f:
                f.write(fulfilled_config)
                f.flush()
                os.fsync(f.fileno())
                remote_file = os.path.join('/tmp', f.name)
                ssh_client.put_file(f.name, remote_file, ensure_host=True)

            # run ansible deployment
            ansible_cmd = ('/usr/bin/ansible-playbook -v -i {inventory_file} '
                           '/usr/share/ansible/openshift-ansible/playbooks/'
                           'openshift-management/config.yml').format(inventory_file=remote_file)
            cmd_result = ssh_client.run_command(ansible_cmd, ensure_host=True)
            logger.info(u"deployment result: {result}".format(result=cmd_result.output))
            ssh_client.run_command('rm -f {f}'.format(f=remote_file))

            assert cmd_result.success
            # retrieve data of the created appliance
            assert provider.mgmt.is_vm_running(project), "Appliance was not deployed correctly"
            params['db_host'] = provider.mgmt.expose_db_ip(project)
            params['project'] = project
            params['hostname'] = provider.mgmt.get_appliance_url(project)
            # create an instance of the appliance
            with IPAppliance(**params) as appliance:
                # framework will try to work with the default appliance if the browser
                # restarts w/o this workaround
                holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
                holder.held_appliance = appliance
                yield appliance
    finally:
        if provider.mgmt.does_vm_exist(project):
            provider.mgmt.delete_vm(project)

parser = argparse.ArgumentParser()
parser.add_argument('jenkins_url')
parser.add_argument('jenkins_job_name')
parser.add_argument('version')
parser.add_argument('--jenkins-user', default=None)
parser.add_argument('--jenkins-token', default=None)
args = parser.parse_args()

# TODO: Upstream support
group = 'downstream-' + ''.join(args.version.split('.')[:2]) + 'z'
sprout = SproutClient.from_config()
logger.info('requesting an appliance from sprout for %s/%s', group, args.version)
pool_id = sprout.request_appliances(group, version=args.version,
                                    lease_time=env.sonarqube.scanner_lease)
logger.info('Requested pool %s', pool_id)
result = None
try:
    while not result or not (result['fulfilled'] and result['finished']):
        result = sprout.request_check(pool_id)
    appliance_ip = result['appliances'][0]['ip_address']
    logger.info('received an appliance with IP address: %s', appliance_ip)
    with IPAppliance(hostname=appliance_ip) as appliance:
        exit(coverage_report_jenkins(appliance, args.jenkins_url, args.jenkins_user,
                                     args.jenkins_token, args.jenkins_job_name))
finally:
    with diaper:
        sprout.destroy_pool(pool_id)