def pytest_configure(config):
    if store.parallelizer_role == 'master' or trackerbot.conf.get('url') is None:
        return

    # A further optimization here is to make the calls to trackerbot per provider
    # and perhaps only pull the providers that are needed, however that will need
    # to ensure that the tests that just randomly use providers adhere to the filters
    # which may be too tricky right now.
    count = 0
    if not config.getoption('use_template_cache'):
        store.terminalreporter.line("Loading templates from trackerbot...", green=True)
        provider_templates = trackerbot.provider_templates(trackerbot.api())
        for provider in list_provider_keys():
            TEMPLATES[provider] = provider_templates.get(provider, [])
            config.cache.set('miq-trackerbot/{}'.format(provider), TEMPLATES[provider])
            count += len(TEMPLATES[provider])
    else:
        store.terminalreporter.line("Using templates from cache...", green=True)
        provider_templates = None
        for provider in list_provider_keys():
            templates = config.cache.get('miq-trackerbot/{}'.format(provider), None)
            if templates is None:
                store.terminalreporter.line(
                    "Loading templates for {} from source as not in cache".format(
                        provider), green=True)
                if not provider_templates:
                    provider_templates = trackerbot.provider_templates(trackerbot.api())
                templates = provider_templates.get(provider, [])
                config.cache.set('miq-trackerbot/{}'.format(provider), templates)
            count += len(templates)
            TEMPLATES[provider] = templates
    store.terminalreporter.line(" Loaded {} templates successfully!".format(count), green=True)
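# --- Hedged usage sketch (not part of the plugin above) ---
# One way a fixture could consume the TEMPLATES cache that pytest_configure populates.
# The fixture name and the assumption that a `provider` fixture with a `.key` attribute
# exists are illustrative, not existing hooks in this repo.
import pytest


@pytest.fixture
def cached_template_names(provider):
    """Return the trackerbot template names cached for this provider key."""
    return TEMPLATES.get(provider.key, [])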
def extdb_template(appliance):
    try:
        api = trackerbot.api()
        stream = get_stream(appliance.version.vstring)
        template_data = trackerbot.latest_template(api, stream)
        return api.template(template_data['latest_template'] + '-extdb').get()
    except BaseException:
        pytest.skip("trackerbot is unreachable")
def template_tags(appliance):
    try:
        api = trackerbot.api()
        stream = get_stream(appliance.version.vstring)
        template_data = trackerbot.latest_template(api, stream)
        template = api.template(template_data['latest_template']).get()
        # TODO: fix this in trackerbot by adding appropriate serialization to Template
        tags = json.loads(template['custom_data'].replace("'", '"').replace('u"', '"'))
        return tags['TAGS']
    except BaseException:
        pytest.skip("trackerbot is unreachable")
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url'),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    yaml_flavor = [
        provider_dict.get('sprout', {}).get('flavor_name') or
        provider_dict.get('provisioning', {}).get('instance_type') or
        provider_dict.get('template_upload', {}).get('flavor_name')
    ]  # None if none of them are set

    logger.info('Connecting to %s', kwargs['provider'])

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
            flavor = kwargs.get('flavor', 'c3.xlarge')
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': False,
            'ssh_pwauth': True
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        logger.info("Available flavors on provider: %s", available_flavors)
        generic_flavors = [f for f in yaml_flavor if f in available_flavors]

        try:
            # TODO py3 filter needs next() instead of indexing
            flavor = (kwargs.get('flavor', yaml_flavor) or generic_flavors[0])
        except IndexError:
            raise Exception('flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        logger.info('Selected flavor: %s', flavor)

        deploy_args['flavor_name'] = flavor

        network_name = (kwargs.get('network_name') or
                        provider_dict.get('sprout', {}).get('network_name'))
        logger.info('Selected Network: %s', network_name)
        if network_name is not None:
            deploy_args['network_name'] = network_name

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            logger.info('Selected floating ip pool: %s', floating_ip_pool)
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(
            provider=kwargs['provider'], template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
        deploy_args["tags"] = yaml.safe_load(raw_tags)['TAGS']

    # Do it!
    try:
        logger.info(
            'Cloning %s to %s on %s',
            deploy_args['template'], deploy_args['vm_name'], kwargs['provider']
        )
        # TODO: change after openshift wrapanapi refactor
        output = None  # 'output' is only used for openshift providers
        if isinstance(provider, Openshift):
            output = provider.deploy_template(**deploy_args)
        else:
            template = provider.get_template(deploy_args['template'])
            template.deploy(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('template deploy failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy %s', deploy_args['vm_name'])
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    # TODO: change after openshift wrapanapi refactor
    if isinstance(provider, Openshift):
        if provider.is_vm_running(deploy_args['vm_name']):
            logger.info('VM %s is running', deploy_args['vm_name'])
        else:
            logger.error('VM %s is not running', deploy_args['vm_name'])
            return 10
    else:
        vm_mgmt = provider.get_vm(deploy_args['vm_name'])
        vm_mgmt.ensure_state(VmState.RUNNING, timeout='5m')
        if provider_type == 'gce':
            try:
                attach_gce_disk(vm_mgmt)
            except Exception:
                logger.exception("Failed to attach db disk")
                destroy_vm(provider, deploy_args['vm_name'])
                return 10

    if provider_type == 'openshift':
        vm_ip = output['url']
    else:
        try:
            vm_ip, _ = wait_for(
                find_pingable,
                func_args=[vm_mgmt],
                fail_condition=None,
                delay=5,
                num_sec=300
            )
        except TimedOutError:
            msg = 'Timed out waiting for reachable depot VM IP'
            logger.exception(msg)
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=vm_ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'container': 'cloudforms-0',
                        'hostname': vm_ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'ec2':
                wait_for(
                    cloud_init_done, func_args=[app], num_sec=600, handle_exception=True, delay=5)
            if provider_type == 'gce':
                app.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            result = ssh_client.run_command('find /root/anaconda-post.log')
            if result.success:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
            destroy_vm(app.provider.mgmt, deploy_args['vm_name'])
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # todo: to get rid of those scripts in jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances': [
                        {
                            'project': output['project'],
                            'db_host': output['external_ip'],
                            'hostname': vm_ip,
                            'container': 'cloudforms-0',
                            'openshift_creds': {
                                'hostname': provider_dict['hostname'],
                                'username': ocp_creds['username'],
                                'password': ocp_creds['password'],
                                'ssh': {
                                    'username': ssh_creds['username'],
                                    'password': ssh_creds['password'],
                                }
                            },
                        },
                    ],
                }
            else:
                output_data = {
                    'appliances': [{'hostname': vm_ip}]
                }
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(yaml_data)
import logging

import fauxfactory
import pika
import requests
from slumber.exceptions import HttpClientError

import dockerbot
from cfme.utils.appliance import Appliance
from cfme.utils.conf import docker as docker_conf
from cfme.utils.log import setup_logger
from cfme.utils.trackerbot import api

token = docker_conf.get('gh_token')
owner = docker_conf.get('gh_owner')
repo = docker_conf.get('gh_repo')

tapi = api()

CONT_LIMIT = docker_conf.get('workers')
DEBUG = docker_conf.get('debug', False)

logger, _ = setup_logger(logging.getLogger('prt'))

# Disable pika logs
logging.getLogger("pika").propagate = False


def send_message_to_bot(msg):
    required_fields = set(['rabbitmq_url', 'gh_queue', 'gh_channel', 'gh_message_type'])
    if not required_fields.issubset(docker_conf.viewkeys()):
        logger.warn("Skipping - docker.yaml doesn't have {}".format(required_fields))
def main(trackerbot_url, mark_usable=None, selected_provider=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = (set(list_provider_keys())
                     if not selected_provider
                     else set(selected_provider))
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data.management_systems[provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
                        args=(provider_key, template_providers, unresponsive_providers,
                              thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id'] for pt in trackerbot.depaginate(
            api, api.providertemplate.get())['objects']
    ]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        template_info = TemplateName.parse_template(template_name)

        # Don't want sprout templates
        if template_info.group_name in ('sprout', 'rhevm-internal'):
            logger.info('Ignoring %s from group %s', template_name, template_info.group_name)
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(template_info.group_name, stream=template_info.stream)
        try:
            template = trackerbot.Template(template_name, group, template_info.datestamp)
        except ValueError:
            logger.exception('Failure parsing provider %s template: %s',
                             provider_key, template_name)
            continue

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                logger.info('Template %s already tracked for provider %s',
                            template_name, provider_key)
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                logger.info(
                    'Added %s template %s on provider %s (datestamp: %s)',
                    template_info.group_name, template_name, provider_key,
                    template_info.datestamp)
            except SlumberHttpBaseException:
                logger.exception('%s: exception marking template %s', provider, template)

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        key, template_name = pt['provider']['key'], pt['template']['name']
        if key not in template_providers[template_name] and key not in unresponsive_providers:
            if key in all_providers:
                logger.info("Cleaning up template %s on %s", template_name, key)
                trackerbot.delete_provider_template(api, key, template_name)
            else:
                logger.info("Skipping template cleanup %s on unknown provider %s",
                            template_name, key)

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers']:
            logger.info("Deleting template %s (no providers)", template['name'])
            api.template(template['name']).delete()
        help='mark template as not usable (templates are marked usable by default)')
    parse_mark.add_argument(
        '-d', '--diagnose', dest='diagnose', action='store_true', default=False,
        help='attempt to diagnose an unusable template and submit the result')

    parse_retest = subs.add_parser('retest', help='flag a tested template for retesting')
    parse_retest.set_defaults(func=retest)
    parse_retest.add_argument('provider_key')
    parse_retest.add_argument('template')

    args = parser.parse_args()
    api = trackerbot.api(args.trackerbot_url)
    func_map = {
        get: lambda: get(api, args.request_type),
        latest: lambda: latest(api, args.stream, args.provider_key),
        mark: lambda: mark(api, args.provider_key, args.template, args.usable, args.diagnose),
        retest: lambda: retest(api, args.provider_key, args.template),
    }
    sys.exit(func_map[args.func]())
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url'),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to %s', kwargs['provider'])

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
            flavor = kwargs.get('flavor', 'c3.xlarge')
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': False,
            'ssh_pwauth': True
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        logger.info("Available flavors on provider: %s", available_flavors)
        generic_flavors = list(filter(lambda f: f in available_flavors, flavors))

        try:
            flavor = (kwargs.get('flavor') or
                      provider_dict.get('sprout', {}).get('flavor_name') or
                      generic_flavors[0])
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        logger.info('Selected flavor: %s', flavor)

        deploy_args['flavor_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(
            provider=kwargs['provider'], template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
        deploy_args["tags"] = yaml.safe_load(raw_tags)['TAGS']

    # Do it!
    try:
        logger.info(
            'Cloning %s to %s on %s',
            deploy_args['template'], deploy_args['vm_name'], kwargs['provider']
        )
        # TODO: change after openshift wrapanapi refactor
        output = None  # 'output' is only used for openshift providers
        if isinstance(provider, Openshift):
            output = provider.deploy_template(**deploy_args)
        else:
            template = provider.get_template(deploy_args['template'])
            template.deploy(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('template deploy failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy %s', deploy_args['vm_name'])
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    # TODO: change after openshift wrapanapi refactor
    if isinstance(provider, Openshift):
        if provider.is_vm_running(deploy_args['vm_name']):
            logger.info('VM %s is running', deploy_args['vm_name'])
        else:
            logger.error('VM %s is not running', deploy_args['vm_name'])
            return 10
    else:
        vm = provider.get_vm(deploy_args['vm_name'])
        vm.ensure_state(VmState.RUNNING, timeout='5m')
        if provider_type == 'gce':
            try:
                attach_gce_disk(vm)
            except Exception:
                logger.exception("Failed to attach db disk")
                destroy_vm(provider, deploy_args['vm_name'])
                return 10

    if provider_type == 'openshift':
        ip = output['url']
    else:
        try:
            ip, _ = wait_for(lambda: vm.ip, num_sec=1200, fail_condition=None)
            logger.info('IP Address returned is %s', ip)
        except Exception as e:
            logger.exception(e)
            logger.error('IP address not returned')
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'container': 'cloudforms-0',
                        'hostname': ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'ec2':
                wait_for(
                    cloud_init_done, func_args=[app], num_sec=600, handle_exception=True, delay=5)
            if provider_type == 'gce':
                app.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            result = ssh_client.run_command('find /root/anaconda-post.log')
            if result.success:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
            destroy_vm(app.provider, deploy_args['vm_name'])
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # todo: to get rid of those scripts in jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances': [
                        {
                            'project': output['project'],
                            'db_host': output['external_ip'],
                            'hostname': ip,
                            'container': 'cloudforms-0',
                            'openshift_creds': {
                                'hostname': provider_dict['hostname'],
                                'username': ocp_creds['username'],
                                'password': ocp_creds['password'],
                                'ssh': {
                                    'username': ssh_creds['username'],
                                    'password': ssh_creds['password'],
                                }
                            },
                        },
                    ],
                }
            else:
                output_data = {
                    'appliances': [{'hostname': ip}]
                }
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(yaml_data)
import logging
import traceback

import dockerbot
import json
import requests
import pika
from cfme.utils.conf import docker as docker_conf
from cfme.utils.appliance import Appliance
from cfme.utils.trackerbot import api
from cfme.utils.log import setup_logger
from slumber.exceptions import HttpClientError

token = docker_conf['gh_token']
owner = docker_conf['gh_owner']
repo = docker_conf['gh_repo']

tapi = api()

CONT_LIMIT = docker_conf['workers']
DEBUG = docker_conf.get('debug', False)

logger = setup_logger(logging.getLogger('prt'))

# Disable pika logs
logging.getLogger("pika").propagate = False


def send_message_to_bot(msg):
    required_fields = set(['rabbitmq_url', 'gh_queue', 'gh_channel', 'gh_message_type'])
    if not required_fields.issubset(docker_conf.viewkeys()):
def main(trackerbot_url, mark_usable=None, selected_provider=None, **kwargs):
    tb_api = trackerbot.api(trackerbot_url)

    all_providers = set(
        selected_provider or
        [key for key, data in cfme_data.management_systems.items()
         if 'disabled' not in data.get('tags', [])])

    bad_providers = manager.Queue()

    # starmap the list of provider_keys into templates_on_provider
    # return is list of ProvTemplate tuples
    with ThreadPool(8) as pool:
        mgmt_templates = pool.starmap(
            templates_on_provider,
            ((provider_key, bad_providers) for provider_key in all_providers)
        )

    # filter out the misbehaving providers
    bad_provider_keys = []
    while not bad_providers.empty():
        bad_provider_keys.append(bad_providers.get())
    logger.warning('Filtering out providers that failed template query: %s', bad_provider_keys)
    working_providers = set([key for key in all_providers if key not in bad_provider_keys])

    # Flip mgmt_templates into dict keyed on template name, listing providers
    # [
    #   {prov1: [t1, t2]},
    #   {prov2: [t1, t3]},
    # ]
    #
    # mgmt_providertemplates should look like:
    # {
    #   t1: [prov1, prov2],
    #   t2: [prov1],
    #   t3: [prov2]
    # }
    mgmt_providertemplates = defaultdict(list)
    # filter out any empty results from pulling mgmt_templates
    for prov_templates in [mt for mt in mgmt_templates if mt is not None]:
        # expecting one key (provider), one value (list of templates)
        for prov_key, templates in prov_templates.items():
            for template in templates:
                mgmt_providertemplates[template].append(prov_key)

    logger.debug('DEBUG: template_providers: %r', mgmt_providertemplates)
    logger.debug('DEBUG: working_providers: %r', working_providers)

    usable = {'usable': mark_usable} if mark_usable is not None else {}

    # init these outside conditions/looping to be safe in reporting
    ignored_providertemplates = defaultdict(list)
    tb_pts_to_add = list()
    tb_pts_to_delete = list()
    tb_templates_to_delete = list()

    # ADD PROVIDERTEMPLATES
    # add all parseable providertemplates from what is actually on providers
    for template_name, provider_keys in mgmt_providertemplates.items():
        # drop empty names, or sprout groups
        # go over templates pulled from provider mgmt interfaces,
        if template_name.strip() == '':
            logger.info('Ignoring empty name template on providers %s', provider_keys)
            continue

        template_info = TemplateName.parse_template(template_name)
        template_group = template_info.group_name

        # Don't want sprout templates, or templates that aren't parsable cfme/MIQ
        if template_group in GROUPS_TO_IGNORE:
            ignored_providertemplates[template_group].append(template_name)
            continue

        tb_pts_to_add = [
            (template_group,
             provider_key,
             template_name,
             None,  # custom_data
             usable)
            for provider_key in provider_keys
        ]

        logger.info('Threading add providertemplate records to trackerbot for %s', template_name)
        with ThreadPool(8) as pool:
            # thread for each template, passing the list of providers with the template
            add_results = pool.starmap(
                trackerbot.add_provider_template,
                tb_pts_to_add
            )

        if not all([True if result in [None, True] else False for result in add_results]):
            # ignore results that are None, warn for any false results from adding
            logger.warning('Trackerbot providertemplate add failed, see logs')

    for group, names in ignored_providertemplates.items():
        logger.info('Skipped group [%s] templates %r', group, names)

    # REMOVE PROVIDERTEMPLATES
    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    logger.info('Querying providertemplate records from Trackerbot for ones to delete')
    pts = trackerbot.depaginate(
        tb_api,
        tb_api.providertemplate.get(provider_in=working_providers)
    )['objects']

    for pt in pts:
        key = pt['provider']['key']
        pt_name, pt_group = pt['template']['name'], pt['template']['group']['name']
        if pt_group in GROUPS_TO_IGNORE or key not in mgmt_providertemplates[pt_name]:
            logger.info("Marking trackerbot providertemplate for delete: %s::%s", key, pt_name)
            tb_pts_to_delete.append(ProvTemplate(key, pt_name))

    with ThreadPool(8) as pool:
        # thread for each delete_provider_template call
        pool.starmap(
            trackerbot.delete_provider_template,
            ((tb_api, pt.provider_key, pt.template_name) for pt in tb_pts_to_delete))

    # REMOVE TEMPLATES
    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(tb_api, tb_api.template.get())['objects']:
        template_name = template['name']
        if not template['providers'] and template_name.strip():
            logger.info("Deleting trackerbot template %s (no providers)", template_name)
            tb_templates_to_delete.append(template_name)
            tb_api.template(template_name).delete()

    # WRITE REPORT
    with open(kwargs.get('outfile'), 'a') as report:
        add_header = '##### ProviderTemplate records added: #####\n'
        del_header = '##### ProviderTemplate records deleted: #####\n'

        report.write(add_header)
        add_message = tabulate(
            sorted([(ptadd[0], ptadd[1], ptadd[2]) for ptadd in tb_pts_to_add],
                   key=lambda ptadd: ptadd[0]),
            headers=['Group', 'Provider', 'Template'],
            tablefmt='orgtbl'
        )
        report.write('{}\n\n'.format(add_message))

        report.write(del_header)
        del_message = tabulate(
            sorted([(ptdel.provider_key, ptdel.template_name) for ptdel in tb_pts_to_delete],
                   key=lambda ptdel: ptdel[0]),
            headers=['Provider', 'Template'],
            tablefmt='orgtbl'
        )
        report.write(del_message)

    logger.info('%s %s', add_header, add_message)
    logger.info('%s %s', del_header, del_message)

    return 0
def main(trackerbot_url, mark_usable=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = set(list_provider_keys())
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data['management_systems'][provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
                        args=(provider_key, template_providers, unresponsive_providers,
                              thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id'] for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        group_name, datestamp, stream = trackerbot.parse_template(template_name)

        # Don't want sprout templates
        if group_name in ('sprout', 'rhevm-internal'):
            print('Ignoring {} from group {}'.format(template_name, group_name))
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(group_name, stream=stream)
        template = trackerbot.Template(template_name, group, datestamp)

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                print('Template {} already tracked for provider {}'.format(
                    template_name, provider_key))
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                print('Added {} template {} on provider {} (datestamp: {})'.format(
                    group_name, template_name, provider_key, datestamp))
            except SlumberHttpBaseException as ex:
                print("{}\t{}".format(ex.response.status_code, ex.content))

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        provider_key, template_name = pt['provider']['key'], pt['template']['name']
        if provider_key not in template_providers[template_name] \
                and provider_key not in unresponsive_providers:
            if provider_key in all_providers:
                print("Cleaning up template {} on {}".format(template_name, provider_key))
                trackerbot.delete_provider_template(api, provider_key, template_name)
            else:
                print("Skipping template cleanup {} on unknown provider {}".format(
                    template_name, provider_key))

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers']:
            print("Deleting template {} (no providers)".format(template['name']))
            api.template(template['name']).delete()
def trackerbot():
    return api(trackerbot_url=settings.HUBBER_URL.rstrip('/') + '/api/')
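# Hedged usage sketch: the helper above returns the same slumber client that
# trackerbot.api() returns elsewhere in this repo, so it can be queried the same
# way, e.g. fetching a single template record ('some-template-name' is a placeholder):
#
#     tb = trackerbot()
#     record = tb.template('some-template-name').get()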
        lines = report.readlines()
        template_missing = list(filter(lambda x: "MISSING" in x, lines))
        template_passed = list(filter(lambda x: "PASSED" in x, lines))
        template_failed = list(filter(lambda x: "FAILED" in x, lines))
        if template_failed:
            status = "FAILED"
        if template_missing and not (template_passed or template_failed):
            report.close()
            sys.exit("Template is MISSING....Please verify uploads....")
        print("template_tester_results report generated: {}".format(status))
    else:
        print("No Templates tested on: {}".format(datetime.datetime.now()))


if __name__ == '__main__':
    args = parse_cmd_line()
    trackerbot_api = args.trackerbot_url or cfme_data.get('trackerbot', {}).get('api', None)
    if not trackerbot_api:
        sys.exit('No trackerbot URL provided or in cfme_data.yaml')
    if not args.stream or not args.appliance_template:
        sys.exit("stream and appliance_template "
                 "cannot be None, specify the stream as --stream <stream-name> "
                 "and template as --template <template-name>")
    generate_html_report(trackerbot.api(trackerbot_api), args.stream, args.output,
                         args.appliance_template)
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url'),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
            flavor = kwargs.get('flavor', 'c3.xlarge')
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': 0,
            'ssh_pwauth': 1
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = list(filter(lambda f: f in available_flavors, flavors))
        try:
            flavor = kwargs.get('flavor') or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(
            provider=kwargs['provider'], template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
        deploy_args["tags"] = yaml.safe_load(
            raw_tags.replace("u'", '"').replace("'", '"'))['TAGS']

    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'],
                                                    deploy_args['vm_name'],
                                                    kwargs['provider']))
        output = provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('provider.deploy_template failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    if provider_type == 'openshift':
        ip = output['url']
    else:
        try:
            ip, _ = wait_for(provider.get_ip_address, [deploy_args['vm_name']],
                             num_sec=1200, fail_condition=None)
            logger.info('IP Address returned is {}'.format(ip))
        except Exception as e:
            logger.exception(e)
            logger.error('IP address not returned')
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'hostname': ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # todo: to get rid of those scripts in jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances': [
                        {
                            'project': output['project'],
                            'db_host': output['external_ip'],
                            'hostname': ip,
                            'openshift_creds': {
                                'hostname': provider_dict['hostname'],
                                'username': ocp_creds['username'],
                                'password': ocp_creds['password'],
                                'ssh': {
                                    'username': ssh_creds['username'],
                                    'password': ssh_creds['password'],
                                }
                            },
                        },
                    ],
                }
            else:
                output_data = {
                    'appliances': [{'hostname': ip}]
                }
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(yaml_data)
def main(trackerbot_url, mark_usable=None, selected_provider=None, **kwargs):
    tb_api = trackerbot.api(trackerbot_url)

    all_providers = set(selected_provider or [
        key for key, data in cfme_data.management_systems.items()
        if 'disabled' not in data.get('tags', [])
    ])

    bad_providers = manager.Queue()

    # starmap the list of provider_keys into templates_on_provider
    # return is list of ProvTemplate tuples
    with ThreadPool(8) as pool:
        mgmt_templates = pool.starmap(
            templates_on_provider,
            ((provider_key, bad_providers) for provider_key in all_providers))

    # filter out the misbehaving providers
    bad_provider_keys = []
    while not bad_providers.empty():
        bad_provider_keys.append(bad_providers.get())
    logger.warning('Filtering out providers that failed template query: %s', bad_provider_keys)
    working_providers = {key for key in all_providers if key not in bad_provider_keys}

    # Flip mgmt_templates into dict keyed on template name, listing providers
    # [
    #   {prov1: [t1, t2]},
    #   {prov2: [t1, t3]},
    # ]
    #
    # mgmt_providertemplates should look like:
    # {
    #   t1: [prov1, prov2],
    #   t2: [prov1],
    #   t3: [prov2]
    # }
    mgmt_providertemplates = defaultdict(list)
    # filter out any empty results from pulling mgmt_templates
    for prov_templates in [mt for mt in mgmt_templates if mt is not None]:
        # expecting one key (provider), one value (list of templates)
        for prov_key, templates in prov_templates.items():
            for template in templates:
                mgmt_providertemplates[template].append(prov_key)

    logger.debug('DEBUG: template_providers: %r', mgmt_providertemplates)
    logger.debug('DEBUG: working_providers: %r', working_providers)

    usable = {'usable': mark_usable} if mark_usable is not None else {}

    # init these outside conditions/looping to be safe in reporting
    ignored_providertemplates = defaultdict(list)
    tb_pts_to_add = list()
    tb_pts_to_delete = list()
    tb_templates_to_delete = list()

    # ADD PROVIDERTEMPLATES
    # add all parseable providertemplates from what is actually on providers
    for template_name, provider_keys in mgmt_providertemplates.items():
        # drop empty names, or sprout groups
        # go over templates pulled from provider mgmt interfaces,
        if template_name.strip() == '':
            logger.info('Ignoring empty name template on providers %s', provider_keys)
            continue

        template_info = TemplateName.parse_template(template_name)
        template_group = template_info.group_name

        # Don't want sprout templates, or templates that aren't parsable cfme/MIQ
        if template_group in GROUPS_TO_IGNORE:
            ignored_providertemplates[template_group].append(template_name)
            continue

        tb_pts_to_add = [
            (
                template_group,
                provider_key,
                template_name,
                None,  # custom_data
                usable)
            for provider_key in provider_keys
        ]

        logger.info(
            'Threading add providertemplate records to trackerbot for %s', template_name)
        with ThreadPool(8) as pool:
            # thread for each template, passing the list of providers with the template
            add_results = pool.starmap(trackerbot.add_provider_template, tb_pts_to_add)

        if not all([True if result in [None, True] else False for result in add_results]):
            # ignore results that are None, warn for any false results from adding
            logger.warning('Trackerbot providertemplate add failed, see logs')

    for group, names in ignored_providertemplates.items():
        logger.info('Skipped group [%s] templates %r', group, names)

    # REMOVE PROVIDERTEMPLATES
    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    logger.info(
        'Querying providertemplate records from Trackerbot for ones to delete')
    pts = trackerbot.depaginate(
        tb_api,
        tb_api.providertemplate.get(provider_in=working_providers))['objects']

    for pt in pts:
        key = pt['provider']['key']
        pt_name, pt_group = pt['template']['name'], pt['template']['group']['name']
        if pt_group in GROUPS_TO_IGNORE or key not in mgmt_providertemplates[pt_name]:
            logger.info(
                "Marking trackerbot providertemplate for delete: %s::%s", key, pt_name)
            tb_pts_to_delete.append(ProvTemplate(key, pt_name))

    with ThreadPool(8) as pool:
        # thread for each delete_provider_template call
        pool.starmap(trackerbot.delete_provider_template,
                     ((tb_api, pt.provider_key, pt.template_name)
                      for pt in tb_pts_to_delete))

    # REMOVE TEMPLATES
    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(tb_api, tb_api.template.get())['objects']:
        template_name = template['name']
        if not template['providers'] and template_name.strip():
            logger.info("Deleting trackerbot template %s (no providers)", template_name)
            tb_templates_to_delete.append(template_name)
            tb_api.template(template_name).delete()

    # WRITE REPORT
    with open(kwargs.get('outfile'), 'a') as report:
        add_header = '##### ProviderTemplate records added: #####\n'
        del_header = '##### ProviderTemplate records deleted: #####\n'

        report.write(add_header)
        add_message = tabulate(
            sorted([(ptadd[0], ptadd[1], ptadd[2]) for ptadd in tb_pts_to_add],
                   key=lambda ptadd: ptadd[0]),
            headers=['Group', 'Provider', 'Template'],
            tablefmt='orgtbl')
        report.write(f'{add_message}\n\n')

        report.write(del_header)
        del_message = tabulate(
            sorted([(ptdel.provider_key, ptdel.template_name) for ptdel in tb_pts_to_delete],
                   key=lambda ptdel: ptdel[0]),
            headers=['Provider', 'Template'],
            tablefmt='orgtbl')
        report.write(del_message)

    logger.info('%s %s', add_header, add_message)
    logger.info('%s %s', del_header, del_message)

    return 0
        report.seek(0, 0)
        lines = report.readlines()
        template_missing = list(filter(lambda x: "MISSING" in x, lines))
        template_passed = list(filter(lambda x: "PASSED" in x, lines))
        template_failed = list(filter(lambda x: "FAILED" in x, lines))
        if template_failed:
            status = "FAILED"
        if template_missing and not (template_passed or template_failed):
            report.close()
            sys.exit("Template is MISSING....Please verify uploads....")
        print("template_tester_results report generated: {}".format(status))
    else:
        print("No Templates tested on: {}".format(datetime.datetime.now()))


if __name__ == '__main__':
    args = parse_cmd_line()
    trackerbot_api = args.trackerbot_url or cfme_data.get('trackerbot', {}).get('api', None)
    if not trackerbot_api:
        sys.exit('No trackerbot URL provided or in cfme_data.yaml')
    if not args.stream or not args.appliance_template:
        sys.exit("stream and appliance_template "
                 "cannot be None, specify the stream as --stream <stream-name> "
                 "and template as --template <template-name>")
    generate_html_report(trackerbot.api(trackerbot_api), args.stream, args.output,
                         args.appliance_template)
        help='get a template based on provider type')

    parse_latest = subs.add_parser('latest', help='get the latest usable template for a provider')
    parse_latest.set_defaults(func=latest)
    parse_latest.add_argument('stream', help='template stream (e.g. upstream, downstream-52z)')
    parse_latest.add_argument('provider_key', nargs='?', default=None)

    parse_mark = subs.add_parser('mark', help='mark a tested template')
    parse_mark.set_defaults(func=mark)
    parse_mark.add_argument('provider_key')
    parse_mark.add_argument('template')
    parse_mark.add_argument(
        '-n', '--not-usable', dest='usable', action='store_false', default=True,
        help='mark template as not usable (templates are marked usable by default)')
    parse_mark.add_argument(
        '-d', '--diagnose', dest='diagnose', action='store_true', default=False,
        help='attempt to diagnose an unusable template and submit the result')

    parse_retest = subs.add_parser('retest', help='flag a tested template for retesting')
    parse_retest.set_defaults(func=retest)
    parse_retest.add_argument('provider_key')
    parse_retest.add_argument('template')

    args = parser.parse_args()
    api = trackerbot.api(args.trackerbot_url)
    func_map = {
        get: lambda: get(api, args.request_type),
        latest: lambda: latest(api, args.stream, args.provider_key),
        mark: lambda: mark(api, args.provider_key, args.template, args.usable, args.diagnose),
        retest: lambda: retest(api, args.provider_key, args.template),
    }
    sys.exit(func_map[args.func]())
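# Hedged invocation examples for the CLI above (the script name is illustrative,
# and any trackerbot URL option defined elsewhere in the parser is omitted):
#
#     python template_tracker_cli.py latest downstream-52z
#     python template_tracker_cli.py mark some-provider-key some-template-name --not-usable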
def main(trackerbot_url, mark_usable=None, selected_provider=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = (set(list_provider_keys())
                     if not selected_provider
                     else set(selected_provider))
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data.management_systems[provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
                        args=(provider_key, template_providers, unresponsive_providers,
                              thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id'] for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        template_info = TemplateName.parse_template(template_name)

        # it turned out that some providers like ec2 may have templates w/o names.
        # this is easy protection against such issue.
        if not template_name.strip():
            logger.warn('Ignoring template w/o name on provider %s', provider_key)
            continue

        # Don't want sprout templates
        if template_info.group_name in ('sprout', 'rhevm-internal'):
            logger.info('Ignoring %s from group %s', template_name, template_info.group_name)
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(template_info.group_name, stream=template_info.stream)
        try:
            template = trackerbot.Template(template_name, group, template_info.datestamp)
        except ValueError:
            logger.exception('Failure parsing provider %s template: %s',
                             provider_key, template_name)
            continue

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                logger.info('Template %s already tracked for provider %s',
                            template_name, provider_key)
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                logger.info('Added %s template %s on provider %s (datestamp: %s)',
                            template_info.group_name, template_name, provider_key,
                            template_info.datestamp)
            except SlumberHttpBaseException:
                logger.exception('%s: exception marking template %s', provider, template)

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        key, template_name = pt['provider']['key'], pt['template']['name']
        if key not in template_providers[template_name] and key not in unresponsive_providers:
            if key in all_providers:
                logger.info("Cleaning up template %s on %s", template_name, key)
                trackerbot.delete_provider_template(api, key, template_name)
            else:
                logger.info("Skipping template cleanup %s on unknown provider %s",
                            template_name, key)

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers'] and template['name'].strip():
            logger.info("Deleting template %s (no providers)", template['name'])
            api.template(template['name']).delete()