def prepare_template_verify_version(self, template_id):
    template = Template.objects.get(id=template_id)
    template.set_status("Verifying version.")
    appliance = CFMEAppliance.from_provider(
        template.provider_name, template.name, container=template.container)
    appliance.ipapp.wait_for_ssh()
    try:
        true_version = appliance.version
    except Exception as e:
        template.set_status("Some SSH error happened during appliance version check.")
        self.retry(args=(template_id,), exc=e, countdown=20, max_retries=5)
    supposed_version = Version(template.version)
    if true_version is None or true_version.vstring == 'master':
        return
    if true_version != supposed_version:
        # Check if the difference is not just in the suffixes, which can be the case ...
        t = str(true_version)
        s = str(supposed_version)
        if supposed_version.version == true_version.version or t.startswith(s):
            # The two have same version but different suffixes, apply the suffix to the template obj
            # OR also a case - when the supposed version is incomplete so we will use the detected
            # version.
            with transaction.atomic():
                template.version = t
                template.save(update_fields=['version'])
                if template.parent_template is not None:
                    # In case we have a parent template, update the version there too.
                    if template.version != template.parent_template.version:
                        pt = template.parent_template
                        pt.version = template.version
                        pt.save(update_fields=['version'])
            return  # no need to continue with spamming process
        # SPAM SPAM SPAM!
        with transaction.atomic():
            mismatch_in_db = MismatchVersionMailer.objects.filter(
                provider=template.provider,
                template_name=template.original_name,
                supposed_version=supposed_version,
                actual_version=true_version)
            if not mismatch_in_db:
                mismatch = MismatchVersionMailer(
                    provider=template.provider,
                    template_name=template.original_name,
                    supposed_version=supposed_version,
                    actual_version=true_version)
                mismatch.save()
                # Run the task to mail the problem
                from .maintainance import mailer_version_mismatch
                mailer_version_mismatch.delay()
        raise Exception("Detected version mismatch!")
    template.set_status("Version verification is over")

def call_appliance(provider_name, vm_name, action, *args):
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    appliance = Appliance.from_provider(provider_name, vm_name)
    try:
        call = getattr(appliance, action)
    except AttributeError:
        raise Exception('Action "{}" not found'.format(action))
    if isinstance(getattr(type(appliance), action), property):
        return call
    else:
        return call(*process_args(args))

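# Hedged usage sketch for call_appliance() above (not part of the original script).
# The wrapper name and the example arguments are assumptions; only the dispatch
# behaviour is taken from the code: a property-backed attribute is returned as-is,
# while a regular method is invoked with the processed CLI arguments.
def example_cli(argv):
    provider_name, vm_name, action = argv[1:4]
    result = call_appliance(provider_name, vm_name, action, *argv[4:])
    # properties come back as plain values; method calls return whatever the method returns
    if result is not None:
        print(result)

# e.g. example_cli(['prog', 'my-rhos-provider', 'my-appliance-vm', 'reboot'])
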
def configure_docker_template(self, template_id, pull_url):
    template = Template.objects.get(id=template_id)
    template.set_status("Waiting for SSH.")
    appliance = CFMEAppliance.from_provider(
        template.provider_name, template.name, container=template.container)
    appliance.ipapp.wait_for_ssh()
    with appliance.ipapp.ssh_client as ssh:
        template.set_status("Setting the pull URL.")
        ssh.run_command(
            'echo "export CFME_URL={}" > /etc/cfme_pull_url'.format(pull_url), ensure_host=True)
        template.set_status("Pulling the {}.".format(pull_url))
        ssh.run_command('docker pull {}'.format(pull_url), ensure_host=True)
        template.set_status('Pulling finished.')

def prepare_template_configure(self, template_id):
    template = Template.objects.get(id=template_id)
    template.set_status("Customization started.")
    appliance = CFMEAppliance.from_provider(
        template.provider_name, template.name, container=template.container)
    try:
        appliance.configure(
            log_callback=lambda s: template.set_status("Customization progress: {}".format(s)),
            on_openstack=template.provider.provider_data.get('type', None) == 'openstack')
    except Exception as e:
        template.set_status("Could not properly configure the CFME. Retrying.")
        self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
    else:
        template.set_status("Template configuration was done.")

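# Hedged sketch (not from the project): the prepare_template_* functions above follow the
# Celery bound-task pattern -- `self` is the task instance, self.retry() re-enqueues the same
# args after `countdown` seconds, and callers enqueue work with .delay(), mirroring
# mailer_version_mismatch.delay(). The `app` instance, broker URL and task name below are
# assumptions for illustration only; the project supplies its own task decorator.
from celery import Celery

app = Celery('sprout_tasks', broker='redis://localhost:6379/0')  # assumed broker


@app.task(bind=True)
def flaky_step(self, template_id):
    """Toy bound task showing the self.retry() shape used by the tasks above."""
    try:
        raise RuntimeError('transient SSH hiccup')  # stand-in for a failing configure step
    except RuntimeError as e:
        # same retry signature as prepare_template_configure above
        self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)


# a caller would enqueue it asynchronously:
# flaky_step.delay(42)
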
def vm_reaper():
    """Iterates through each task in the db which has not yet been cleaned and runs the reaper

    This function iterates through each task. If the task is either failed or passed, i.e. the
    task has completed, then the VM is cleaned up and then the docker container. If both of
    these operations occur, then the cleanup is set to True.
    """
    tasks = tapi.task().get(cleanup=False, limit=0)['objects']
    for task in tasks:
        if task['result'] in ["failed", "passed", "invalid"]:
            vm_cleanup = False
            docker_cleanup = False

            if task['provider'] == "Sprout" and task['vm_name'] == "Sprout":
                vm_cleanup = True
            else:
                if task['provider'] and task['vm_name']:
                    logger.info('Cleaning up {} on {}'.format(task['vm_name'], task['provider']))
                    if task['vm_name'] == "None":
                        vm_cleanup = True
                    else:
                        appliance = Appliance.from_provider(task['provider'], task['vm_name'])
                        try:
                            if appliance.does_vm_exist():
                                logger.info("Destroying {}".format(appliance.vm_name))
                                appliance.destroy()
                            vm_cleanup = True
                        except Exception:
                            logger.info('Exception occurred cleaning up')

            containers = dockerbot.dc.containers(all=True)
            for container in containers:
                if task['tid'] in container['Names'][0]:
                    logger.info('Cleaning up docker container {}'.format(container['Id']))
                    dockerbot.dc.remove_container(container['Id'], force=True)
                    docker_cleanup = True
                    break
            else:
                docker_cleanup = True

            if docker_cleanup and vm_cleanup:
                tapi.task(task['tid']).patch({'cleanup': True})

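# Hedged illustration of the record shape vm_reaper() above expects from the trackerbot
# task API. Only the keys ('tid', 'result', 'provider', 'vm_name', 'cleanup') come from
# the code; every value here is made up.
EXAMPLE_TASK_RECORD = {
    'tid': 'a1b2c3',             # also matched against the docker container name
    'result': 'failed',          # only 'failed' / 'passed' / 'invalid' tasks are reaped
    'provider': 'rhos-example',
    'vm_name': 'dockerbot-a1b2c3',
    'cleanup': False,            # patched to True once both VM and container are gone
}
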
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url'),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
            flavor = kwargs.get('flavor', 'c3.xlarge')
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': 0,
            'ssh_pwauth': 1
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = filter(lambda f: f in available_flavors, flavors)
        try:
            flavor = kwargs.get('flavor') or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])

    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(
            deploy_args['template'], deploy_args['vm_name'], kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('provider.deploy_template failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args['vm_name']],
                                  num_sec=1200, fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)

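# Hedged sketch of a CLI entry point for main() above; the real script presumably ships its
# own parser, so every flag name below is an assumption derived only from the kwargs that
# main() reads (provider, vm_name, template, configure, destroy, cleanup, outfile, flavor,
# cluster, ...). The point illustrated is the POSIX-style return codes (0 ok, 10/12 failures).
import argparse
import sys


def parse_cmd_line():
    parser = argparse.ArgumentParser(description='Clone a template into a fresh appliance VM')
    parser.add_argument('--provider', required=True, help='provider key from the yaml config')
    parser.add_argument('--template', required=True, help='name of the template to clone')
    parser.add_argument('--vm_name', required=True, help='name of the VM to create')
    parser.add_argument('--configure', action='store_true', help='configure the appliance after cloning')
    parser.add_argument('--destroy', action='store_true', help='destroy the named VM instead of cloning')
    parser.add_argument('--cleanup', action='store_true', help='destroy the VM if the deploy fails')
    parser.add_argument('--outfile', default=None, help='file to write the appliance IP address to')
    parser.add_argument('--flavor', default=None, help='flavor / instance type override')
    parser.add_argument('--cluster', default=None, help='cluster to deploy into (rhevm only)')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_cmd_line()
    # main() returns None on success and 10/12 on failure, so hand the value to sys.exit
    sys.exit(main(**vars(args)))
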
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url'),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    yaml_flavor = [
        provider_dict.get('sprout', {}).get('flavor_name') or
        provider_dict.get('provisioning', {}).get('instance_type') or
        provider_dict.get('template_upload', {}).get('flavor_name')
    ]  # None if none of them are set

    logger.info('Connecting to %s', kwargs['provider'])

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
            flavor = kwargs.get('flavor', 'c3.xlarge')
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': False,
            'ssh_pwauth': True
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        logger.info("Available flavors on provider: %s", available_flavors)
        generic_flavors = [f for f in yaml_flavor if f in available_flavors]

        try:
            # TODO py3 filter needs next() instead of indexing
            flavor = (kwargs.get('flavor', yaml_flavor) or generic_flavors[0])
        except IndexError:
            raise Exception('flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        logger.info('Selected flavor: %s', flavor)

        deploy_args['flavor_name'] = flavor

        network_name = (kwargs.get('network_name') or
                        provider_dict.get('sprout', {}).get('network_name'))
        logger.info('Selected Network: %s', network_name)
        if network_name is not None:
            deploy_args['network_name'] = network_name

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            logger.info('Selected floating ip pool: %s', floating_ip_pool)
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(
            provider=kwargs['provider'], template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
        deploy_args["tags"] = yaml.safe_load(raw_tags)['TAGS']

    # Do it!
    try:
        logger.info(
            'Cloning %s to %s on %s',
            deploy_args['template'], deploy_args['vm_name'], kwargs['provider']
        )
        # TODO: change after openshift wrapanapi refactor
        output = None  # 'output' is only used for openshift providers
        if isinstance(provider, Openshift):
            output = provider.deploy_template(**deploy_args)
        else:
            template = provider.get_template(deploy_args['template'])
            template.deploy(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('template deploy failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy %s', deploy_args['vm_name'])
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    # TODO: change after openshift wrapanapi refactor
    if isinstance(provider, Openshift):
        if provider.is_vm_running(deploy_args['vm_name']):
            logger.info('VM %s is running', deploy_args['vm_name'])
        else:
            logger.error('VM %s is not running', deploy_args['vm_name'])
            return 10
    else:
        vm_mgmt = provider.get_vm(deploy_args['vm_name'])
        vm_mgmt.ensure_state(VmState.RUNNING, timeout='5m')
        if provider_type == 'gce':
            try:
                attach_gce_disk(vm_mgmt)
            except Exception:
                logger.exception("Failed to attach db disk")
                destroy_vm(provider, deploy_args['vm_name'])
                return 10

    if provider_type == 'openshift':
        vm_ip = output['url']
    else:
        try:
            vm_ip, _ = wait_for(
                find_pingable,
                func_args=[vm_mgmt],
                fail_condition=None,
                delay=5,
                num_sec=300
            )
        except TimedOutError:
            msg = 'Timed out waiting for reachable depot VM IP'
            logger.exception(msg)
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=vm_ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'container': 'cloudforms-0',
                        'hostname': vm_ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'ec2':
                wait_for(
                    cloud_init_done, func_args=[app], num_sec=600, handle_exception=True, delay=5)
            if provider_type == 'gce':
                app.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            result = ssh_client.run_command('find /root/anaconda-post.log')
            if result.success:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
            destroy_vm(app.provider.mgmt, deploy_args['vm_name'])
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # todo: to get rid of those scripts in jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances': [
                        {
                            'project': output['project'],
                            'db_host': output['external_ip'],
                            'hostname': vm_ip,
                            'container': 'cloudforms-0',
                            'openshift_creds': {
                                'hostname': provider_dict['hostname'],
                                'username': ocp_creds['username'],
                                'password': ocp_creds['password'],
                                'ssh': {
                                    'username': ssh_creds['username'],
                                    'password': ssh_creds['password'],
                                },
                            },
                        },
                    ],
                }
            else:
                output_data = {
                    'appliances': [{'hostname': vm_ip}]
                }
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(yaml_data)

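# Hedged example of the YAML that the openshift branch above writes to the outfile.
# The keys come from the output_data structure in the code (yaml.safe_dump sorts them
# alphabetically); every value here is made up.
EXAMPLE_OPENSHIFT_OUTFILE = """\
appliances:
- container: cloudforms-0
  db_host: 10.0.0.12
  hostname: cfme.apps.example.com
  openshift_creds:
    hostname: openshift.example.com
    password: '...'
    ssh:
      password: '...'
      username: root
    username: admin
  project: cfme-project-1
"""
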
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url'),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to %s', kwargs['provider'])

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
            flavor = kwargs.get('flavor', 'c3.xlarge')
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': False,
            'ssh_pwauth': True
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        logger.info("Available flavors on provider: %s", available_flavors)
        generic_flavors = filter(lambda f: f in available_flavors, flavors)

        try:
            flavor = (kwargs.get('flavor') or
                      provider_dict.get('sprout', {}).get('flavor_name') or
                      generic_flavors[0])
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        logger.info('Selected flavor: %s', flavor)

        deploy_args['flavor_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(
            provider=kwargs['provider'], template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
        deploy_args["tags"] = yaml.safe_load(raw_tags)['TAGS']

    # Do it!
    try:
        logger.info(
            'Cloning %s to %s on %s',
            deploy_args['template'], deploy_args['vm_name'], kwargs['provider']
        )
        # TODO: change after openshift wrapanapi refactor
        output = None  # 'output' is only used for openshift providers
        if isinstance(provider, Openshift):
            output = provider.deploy_template(**deploy_args)
        else:
            template = provider.get_template(deploy_args['template'])
            template.deploy(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('template deploy failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy %s', deploy_args['vm_name'])
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    # TODO: change after openshift wrapanapi refactor
    if isinstance(provider, Openshift):
        if provider.is_vm_running(deploy_args['vm_name']):
            logger.info('VM %s is running', deploy_args['vm_name'])
        else:
            logger.error('VM %s is not running', deploy_args['vm_name'])
            return 10
    else:
        vm = provider.get_vm(deploy_args['vm_name'])
        vm.ensure_state(VmState.RUNNING, timeout='5m')
        if provider_type == 'gce':
            try:
                attach_gce_disk(vm)
            except Exception:
                logger.exception("Failed to attach db disk")
                destroy_vm(provider, deploy_args['vm_name'])
                return 10

    if provider_type == 'openshift':
        ip = output['url']
    else:
        try:
            ip, _ = wait_for(lambda: vm.ip, num_sec=1200, fail_condition=None)
            logger.info('IP Address returned is %s', ip)
        except Exception as e:
            logger.exception(e)
            logger.error('IP address not returned')
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'container': 'cloudforms-0',
                        'hostname': ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'ec2':
                wait_for(
                    cloud_init_done, func_args=[app], num_sec=600, handle_exception=True, delay=5)
            if provider_type == 'gce':
                app.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            result = ssh_client.run_command('find /root/anaconda-post.log')
            if result.success:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
            destroy_vm(app.provider, deploy_args['vm_name'])
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # todo: to get rid of those scripts in jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances': [
                        {
                            'project': output['project'],
                            'db_host': output['external_ip'],
                            'hostname': ip,
                            'container': 'cloudforms-0',
                            'openshift_creds': {
                                'hostname': provider_dict['hostname'],
                                'username': ocp_creds['username'],
                                'password': ocp_creds['password'],
                                'ssh': {
                                    'username': ssh_creds['username'],
                                    'password': ssh_creds['password'],
                                },
                            },
                        },
                    ],
                }
            else:
                output_data = {
                    'appliances': [{'hostname': ip}]
                }
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(yaml_data)
