def template(provider, provisioning, setup_provider):
    template_type = provisioning['stack_provisioning']['template_type']
    template_name = fauxfactory.gen_alphanumeric()
    template = OrchestrationTemplate(template_type=template_type,
                                     template_name=template_name)

    if provider.type == "ec2":
        data_file = load_data_file(str(orchestration_path.join('aws_vm_template.json')))
    elif provider.type == "openstack":
        data_file = load_data_file(str(orchestration_path.join('openstack_vm_template.data')))
    elif provider.type == "azure":
        data_file = load_data_file(str(orchestration_path.join('azure_vm_template.json')))

    template.create(data_file.read().replace('CFMETemplateName', template_name))

    if provider.type == "azure":
        dialog_name = "azure-single-vm-from-user-image"
    else:
        dialog_name = "dialog_" + fauxfactory.gen_alphanumeric()
    if provider.type != "azure":
        template.create_service_dialog_from_template(dialog_name, template.template_name)

    yield template, dialog_name

def get_template_from_config(template_config_name):
    """
    Convenience function to grab the details for a template from the yamls
    and create the template if it does not already exist.
    """
    template_config = conf.cfme_data.get('customization_templates', {})[template_config_name]

    script_data = load_data_file(str(project_path.join(template_config['script_file'])),
                                 replacements=template_config['replacements'])
    script_data = script_data.read()
    appliance = get_or_create_current_appliance()
    collection = appliance.collections.customization_templates
    customization_template = collection.instantiate(name=template_config['name'],
                                                    description=template_config['description'],
                                                    image_type=template_config['image_type'],
                                                    script_type=template_config['script_type'],
                                                    script_data=script_data)
    if not customization_template.exists():
        return collection.create(name=template_config['name'],
                                 description=template_config['description'],
                                 image_type=template_config['image_type'],
                                 script_type=template_config['script_type'],
                                 script_data=script_data)
    return customization_template

def get_template_from_config(template_config_name, create=False):
    """
    Convenience function to grab the details for a template from the yamls
    and optionally create the template if it does not already exist.
    """
    template_config = conf.cfme_data.get('customization_templates', {})[template_config_name]

    script_data = load_data_file(str(project_path.join(template_config['script_file'])),
                                 replacements=template_config['replacements'])
    script_data = script_data.read()
    appliance = get_or_create_current_appliance()
    collection = appliance.collections.customization_templates
    kwargs = {
        'name': template_config['name'],
        'description': template_config['description'],
        'image_type': template_config['image_type'],
        'script_type': template_config['script_type'],
        'script_data': script_data
    }
    customization_template = collection.instantiate(**kwargs)
    if create and not customization_template.exists():
        return collection.create(**kwargs)
    return customization_template

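# A minimal usage sketch for the variant above (hedged): it assumes the cfme_data
# YAML contains a 'customization_templates' section; the key 'rhel_pxe_template'
# is a hypothetical example name, not one defined by this code.
ct = get_template_from_config('rhel_pxe_template', create=True)
print(ct.name, ct.script_type)
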
def __call__(self, filename, replacements=None):
    if filename.startswith('/'):
        complete_path = data_path_for_filename(filename.strip('/'), self.base_path)
    else:
        complete_path = data_path_for_filename(filename, self.base_path, self.testmod_path)

    seen_data_files.add(complete_path)

    return load_data_file(complete_path, replacements)

def enable_internal(self, region=0, key_address=None, db_password=None, ssh_password=None):
    """Enables internal database

    Args:
        region: Region number of the CFME appliance.
        key_address: Address of CFME appliance where key can be fetched.

    Note:
        If key_address is None, a new encryption key is generated for the appliance.
    """
    self.logger.info('Enabling internal DB (region {}) on {}.'.format(region, self.address))
    self.address = self.appliance.address
    clear_property_cache(self, 'client')

    client = self.ssh_client

    # Defaults
    db_password = db_password or conf.credentials['database']['password']
    ssh_password = ssh_password or conf.credentials['ssh']['password']

    if self.appliance.has_cli:
        # use the cli
        if key_address:
            status, out = client.run_command(
                'appliance_console_cli --region {0} --internal --fetch-key {1} -p {2} -a {3}'
                .format(region, key_address, db_password, ssh_password)
            )
        else:
            status, out = client.run_command(
                'appliance_console_cli --region {} --internal --force-key -p {}'
                .format(region, db_password)
            )
    else:
        # no cli, use the enable internal db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': region,
            'postgres_version': self.postgres_version
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # send rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        status, out = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    return status, out

def template(provider, provisioning, setup_provider):
    template_type = provisioning['stack_provisioning']['template_type']
    template_name = fauxfactory.gen_alphanumeric()
    template = OrchestrationTemplate(template_type=template_type,
                                     template_name=template_name)

    file = provisioning['stack_provisioning']['data_file']
    data_file = load_data_file(str(orchestration_path.join(file)))
    template.create(data_file.read().replace('CFMETemplateName', template_name))

    dialog_name = "dialog_" + fauxfactory.gen_alphanumeric()
    template.create_service_dialog_from_template(dialog_name, template.template_name)

    return template, dialog_name

def template(provider, provisioning, dialog_name, stack):
    template_type = provisioning['stack_provisioning']['template_type']
    template_name = fauxfactory.gen_alphanumeric()
    template = OrchestrationTemplate(template_type=template_type,
                                     template_name=template_name)

    file = provisioning['stack_provisioning']['data_file']
    data_file = load_data_file(str(orchestration_path.join(file)))
    template.create(data_file.read().replace('CFMETemplateName', template_name))
    template.create_service_dialog_from_template(dialog_name, template.template_name)

    yield template

    if stack.exists:
        stack.retire_stack()
    if template.exists:
        template.delete()

def template(appliance, provider, provisioning, dialog_name):
    template_group = provisioning['stack_provisioning']['template_type']
    template_type = provisioning['stack_provisioning']['template_type_dd']
    template_name = fauxfactory.gen_alphanumeric(start="temp_")
    file = provisioning['stack_provisioning']['data_file']
    data_file = load_data_file(str(orchestration_path.join(file)))
    content = data_file.read().replace('CFMETemplateName', template_name)
    collection = appliance.collections.orchestration_templates
    template = collection.create(template_group=template_group,
                                 template_name=template_name,
                                 template_type=template_type,
                                 description="my template",
                                 content=content)
    template.create_service_dialog_from_template(dialog_name)

    yield template

    if template.exists:
        template.delete()

def get_template_from_config(template_config_name):
    """
    Convenience function to grab the details for a template from the yamls.
    """
    template_config = conf.cfme_data.get('customization_templates', {})[template_config_name]

    script_data = load_data_file(str(project_path.join(template_config['script_file'])),
                                 replacements=template_config['replacements'])
    script_data = script_data.read()
    return CustomizationTemplate(name=template_config['name'],
                                 description=template_config['description'],
                                 image_type=template_config['image_type'],
                                 script_type=template_config['script_type'],
                                 script_data=script_data)

def template(appliance, provider, provisioning, dialog_name, stack):
    template_group = provisioning['stack_provisioning']['template_type']
    template_type = provisioning['stack_provisioning']['template_type_dd']
    template_name = fauxfactory.gen_alphanumeric()
    file = provisioning['stack_provisioning']['data_file']
    data_file = load_data_file(str(orchestration_path.join(file)))
    content = data_file.read().replace('CFMETemplateName', template_name)
    collection = appliance.collections.orchestration_templates
    template = collection.create(template_group=template_group,
                                 template_name=template_name,
                                 template_type=template_type,
                                 description="my template",
                                 content=content)
    template.create_service_dialog_from_template(dialog_name)

    yield template

    if stack.exists:
        stack.retire_stack()
    if template.exists:
        template.delete()

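# A minimal sketch of a test consuming the fixture above (hedged): it assumes the
# function is registered with @pytest.fixture and that the 'dialog_name' and
# 'stack' fixtures it requests exist in the test module; the test name is hypothetical.
def test_orchestration_template_created(template):
    # The fixture yields a created orchestration template; teardown (stack
    # retirement and template deletion) runs after the test finishes.
    assert template.exists
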
def get_template_from_config(template_config_name):
    """
    Convenience function to grab the details for a template from the yamls.
    """
    template_config = conf.cfme_data.get('customization_templates', {})[template_config_name]

    script_data = load_data_file(str(project_path.join(template_config['script_file'])),
                                 replacements=template_config['replacements'])
    script_data = script_data.read()
    appliance = get_or_create_current_appliance()
    collection = appliance.collections.customization_templates
    return collection.instantiate(name=template_config['name'],
                                  description=template_config['description'],
                                  image_type=template_config['image_type'],
                                  script_type=template_config['script_type'],
                                  script_data=script_data)

def get_template_from_config(template_config_name, create=False, appliance=None):
    """
    Convenience function to grab the details for a template from the yamls
    and optionally create the template if it does not already exist.
    """
    assert appliance is not None
    template_config = conf.cfme_data.get('customization_templates', {})[template_config_name]

    script_data = load_data_file(str(project_path.join(template_config['script_file'])),
                                 replacements=template_config['replacements'])
    script_data = script_data.read()
    collection = appliance.collections.customization_templates
    kwargs = {
        'name': template_config['name'],
        'description': template_config['description'],
        'image_type': template_config['image_type'],
        'script_type': template_config['script_type'],
        'script_data': script_data
    }
    customization_template = collection.instantiate(**kwargs)
    if create and not customization_template.exists():
        return collection.create(**kwargs)
    return customization_template

def enable_external(self, db_address, region=0, db_name=None, db_username=None,
                    db_password=None, key_address=None):
    """Enables external database

    Args:
        db_address: Address of the external database
        region: Number of region to join
        db_name: Name of the external DB
        db_username: Username to access the external DB
        db_password: Password to access the external DB
        key_address: Address of the host from which to get the key

    Returns a tuple of (exitstatus, script_output) for reporting, if desired
    """
    self.logger.info('Enabling external DB (db_address {}, region {}) on {}.'.format(
        db_address, region, self.appliance.hostname))

    # default
    db_name = db_name or 'vmdb_production'
    db_username = db_username or conf.credentials['database']['username']
    db_password = db_password or conf.credentials['database']['password']

    appliance_client = self.appliance.ssh_client

    if self.appliance.has_cli:
        if not appliance_client.is_pod:
            # copy v2 key
            rand_filename = f"/tmp/v2_key_{fauxfactory.gen_alphanumeric()}"
            master_client = appliance_client(hostname=key_address)
            master_client.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
            appliance_client.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")

        # enable external DB with cli
        cmd = (f'appliance_console_cli --hostname {db_address}'
               f' --dbname {db_name} --username {db_username} --password {db_password}')
        result = appliance_client.run_command(cmd)
    else:
        # no cli, use the enable external db script
        # TODO: add key_address
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'host': db_address,
            'region': region,
            'database': db_name,
            'username': db_username,
            'password': db_password
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # Init SSH client and send rb file over to /tmp
        remote_file = f'/tmp/{fauxfactory.gen_alphanumeric()}'
        appliance_client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        result = appliance_client.run_command(f'ruby {remote_file}')
        appliance_client.run_command(f'rm {remote_file}')

    if result.failed:
        self.logger.error('error enabling external db')
        self.logger.error(result.output)
        msg = ('Appliance {} failed to enable external DB running on {}'
               .format(self.appliance.hostname, db_address))
        self.logger.error(msg)
        from cfme.utils.appliance import ApplianceException
        raise ApplianceException(msg)

    return result.rc, result.output

def enable_internal(self, region=0, key_address=None, db_password=None,
                    ssh_password=None, db_disk=None):
    """Enables internal database

    Args:
        region: Region number of the CFME appliance.
        key_address: Address of CFME appliance where key can be fetched.
        db_disk: Path of the db disk for --dbdisk appliance_console_cli.
            If not specified it tries to load it from the appliance.

    Note:
        If key_address is None, a new encryption key is generated for the appliance.
    """
    self.logger.info(f'Enabling internal DB (region {region}) on {self.address}.')
    self.address = self.appliance.hostname
    clear_property_cache(self, 'client')

    client = self.ssh_client

    # Defaults
    db_password = db_password or conf.credentials['database']['password']
    ssh_password = ssh_password or conf.credentials['ssh']['password']

    if not db_disk:
        # See if there's any unpartitioned disks on the appliance
        try:
            db_disk = self.appliance.unpartitioned_disks[0]
            self.logger.info("Using unpartitioned disk for DB at %s", db_disk)
        except IndexError:
            db_disk = None

    db_mounted = False
    if not db_disk:
        # If we still don't have a db disk to use, see if a db disk/partition has already
        # been created & mounted (such as by us in self.create_db_lvm)
        result = client.run_command("mount | grep $APPLIANCE_PG_MOUNT_POINT | cut -f1 -d' '")
        if "".join(str(result).split()):  # strip all whitespace to see if we got a real result
            self.logger.info("Using pre-mounted DB disk at %s", result)
            db_mounted = True

    if not db_mounted and not db_disk:
        self.logger.warning('Failed to find a mounted DB disk, or a free unpartitioned disk.')

    if self.appliance.has_cli:
        base_command = f'appliance_console_cli --region {region}'
        # use the cli
        if key_address:
            command_options = ('--internal --fetch-key {key} -p {db_pass} -a {ssh_pass}'
                               .format(key=key_address, db_pass=db_password,
                                       ssh_pass=ssh_password))
        else:
            command_options = f'--internal --force-key -p {db_password}'

        if db_disk:
            # make sure the dbdisk is unmounted, RHOS ephemeral disks come up mounted
            result = client.run_command(f'umount {db_disk}')
            if not result.success:
                self.logger.warning(f'umount non-zero return, output was: {result}')
            command_options = ' '.join([command_options, f'--dbdisk {db_disk}'])

        result = client.run_command(' '.join([base_command, command_options]))
        if result.failed or 'failed' in result.output.lower():
            raise Exception(f'Could not set up the database:\n{result.output}')
    else:
        # no cli, use the enable internal db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': region,
            'postgres_svcname': self.service_name,
            'postgres_prefix': self.pg_prefix,
            'db_mounted': str(db_mounted),
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # send rb file over to /tmp
        remote_file = f'/tmp/{fauxfactory.gen_alphanumeric()}'
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        result = client.run_command(f'ruby {remote_file}')
        client.run_command(f'rm {remote_file}')

    self.logger.info('Output from appliance db configuration: %s', result.output)

    return result.rc, result.output

def enable_external(self, db_address, region=0, db_name=None, db_username=None,
                    db_password=None):
    """Enables external database

    Args:
        db_address: Address of the external database
        region: Number of region to join
        db_name: Name of the external DB
        db_username: Username to access the external DB
        db_password: Password to access the external DB

    Returns a tuple of (exitstatus, script_output) for reporting, if desired
    """
    self.logger.info('Enabling external DB (db_address {}, region {}) on {}.'.format(
        db_address, region, self.address))
    # reset the db address and clear the cached db object if we have one
    self.address = db_address
    clear_property_cache(self, 'client')

    # default
    db_name = db_name or 'vmdb_production'
    db_username = db_username or conf.credentials['database']['username']
    db_password = db_password or conf.credentials['database']['password']

    client = self.ssh_client

    if self.appliance.has_cli:
        # copy v2 key
        master_client = client(hostname=self.address)
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        master_client.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        client.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")

        # enable external DB with cli
        status, out = client.run_command(
            'appliance_console_cli '
            '--hostname {0} --region {1} --dbname {2} --username {3} --password {4}'
            .format(self.address, region, db_name, db_username, db_password))
    else:
        # no cli, use the enable external db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'host': self.address,
            'region': region,
            'database': db_name,
            'username': db_username,
            'password': db_password
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # Init SSH client and send rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        status, out = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    if status != 0:
        self.logger.error('error enabling external db')
        self.logger.error(out)
        msg = ('Appliance {} failed to enable external DB running on {}'
               .format(self.appliance.hostname, db_address))
        self.logger.error(msg)
        from . import ApplianceException
        raise ApplianceException(msg)

    return status, out

def enable_internal(self, region=0, key_address=None, db_password=None,
                    ssh_password=None, db_disk=None):
    """Enables internal database

    Args:
        region: Region number of the CFME appliance.
        key_address: Address of CFME appliance where key can be fetched.
        db_disk: Path of the db disk for --dbdisk appliance_console_cli.
            If not specified it tries to load it from the appliance.

    Note:
        If key_address is None, a new encryption key is generated for the appliance.
    """
    # self.logger.info('Enabling internal DB (region {}) on {}.'.format(region, self.address))
    self.address = self.appliance.hostname
    clear_property_cache(self, 'client')

    client = self.ssh_client

    # Defaults
    db_password = db_password or conf.credentials['database']['password']
    ssh_password = ssh_password or conf.credentials['ssh']['password']

    if not db_disk:
        try:
            db_disk = self.appliance.unpartitioned_disks[0]
        except IndexError:
            db_disk = None
            self.logger.warning(
                'Failed to set --dbdisk from the appliance. On 5.9.0.3+ it will fail.')

    # make sure the dbdisk is unmounted, RHOS ephemeral disks come up mounted
    result = client.run_command('umount {}'.format(db_disk))
    if not result.success:
        self.logger.warning('umount non-zero return, output was: {}'.format(result))

    if self.appliance.has_cli:
        base_command = 'appliance_console_cli --region {}'.format(region)
        # use the cli
        if key_address:
            command_options = ('--internal --fetch-key {key} -p {db_pass} -a {ssh_pass}'
                               .format(key=key_address, db_pass=db_password,
                                       ssh_pass=ssh_password))
        else:
            command_options = '--internal --force-key -p {db_pass}'.format(db_pass=db_password)

        if db_disk:
            command_options = ' '.join([command_options, '--dbdisk {}'.format(db_disk)])

        status, out = client.run_command(' '.join([base_command, command_options]))
        if status != 0 or 'failed' in out.lower():
            raise Exception('Could not set up the database:\n{}'.format(out))
    else:
        # no cli, use the enable internal db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': region,
            'postgres_version': self.postgres_version
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # send rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        status, out = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    self.logger.info('Output from appliance db configuration: %s', out)

    return status, out

def enable_external(self, db_address, region=0, db_name=None, db_username=None,
                    db_password=None):
    """Enables external database

    Args:
        db_address: Address of the external database
        region: Number of region to join
        db_name: Name of the external DB
        db_username: Username to access the external DB
        db_password: Password to access the external DB

    Returns a tuple of (exitstatus, script_output) for reporting, if desired
    """
    self.logger.info('Enabling external DB (db_address {}, region {}) on {}.'
                     .format(db_address, region, self.address))
    # reset the db address and clear the cached db object if we have one
    self.address = db_address
    clear_property_cache(self, 'client')

    # default
    db_name = db_name or 'vmdb_production'
    db_username = db_username or conf.credentials['database']['username']
    db_password = db_password or conf.credentials['database']['password']

    client = self.ssh_client

    if self.appliance.has_cli:
        if not client.is_pod:
            # copy v2 key
            master_client = client(hostname=self.address)
            rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
            master_client.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
            client.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")

        # enable external DB with cli
        result = client.run_command(
            'appliance_console_cli '
            '--hostname {0} --region {1} --dbname {2} --username {3} --password {4}'.format(
                self.address, region, db_name, db_username, db_password
            )
        )
    else:
        # no cli, use the enable external db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'host': self.address,
            'region': region,
            'database': db_name,
            'username': db_username,
            'password': db_password
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # Init SSH client and send rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        result = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    if result.failed:
        self.logger.error('error enabling external db')
        self.logger.error(result.output)
        msg = ('Appliance {} failed to enable external DB running on {}'
               .format(self.appliance.hostname, db_address))
        self.logger.error(msg)
        from . import ApplianceException
        raise ApplianceException(msg)

    return result.rc, result.output

def enable_internal(self, region=0, key_address=None, db_password=None,
                    ssh_password=None, db_disk=None):
    """Enables internal database

    Args:
        region: Region number of the CFME appliance.
        key_address: Address of CFME appliance where key can be fetched.
        db_disk: Path of the db disk for --dbdisk appliance_console_cli.
            If not specified it tries to load it from the appliance.

    Note:
        If key_address is None, a new encryption key is generated for the appliance.
    """
    # self.logger.info('Enabling internal DB (region {}) on {}.'.format(region, self.address))
    self.address = self.appliance.hostname
    clear_property_cache(self, 'client')

    client = self.ssh_client

    # Defaults
    db_password = db_password or conf.credentials['database']['password']
    ssh_password = ssh_password or conf.credentials['ssh']['password']

    if not db_disk:
        # See if there's any unpartitioned disks on the appliance
        try:
            db_disk = self.appliance.unpartitioned_disks[0]
            self.logger.info("Using unpartitioned disk for DB at %s", db_disk)
        except IndexError:
            db_disk = None

    db_mounted = False
    if not db_disk:
        # If we still don't have a db disk to use, see if a db disk/partition has already
        # been created & mounted (such as by us in self.create_db_lvm)
        result = client.run_command("mount | grep $APPLIANCE_PG_MOUNT_POINT | cut -f1 -d' '")
        if "".join(str(result).split()):  # strip all whitespace to see if we got a real result
            self.logger.info("Using pre-mounted DB disk at %s", result)
            db_mounted = True

    if not db_mounted and not db_disk:
        self.logger.warning('Failed to find a mounted DB disk, or a free unpartitioned disk.')

    if self.appliance.has_cli:
        base_command = 'appliance_console_cli --region {}'.format(region)
        # use the cli
        if key_address:
            command_options = ('--internal --fetch-key {key} -p {db_pass} -a {ssh_pass}'
                               .format(key=key_address, db_pass=db_password,
                                       ssh_pass=ssh_password))
        else:
            command_options = '--internal --force-key -p {db_pass}'.format(db_pass=db_password)

        if db_disk:
            # make sure the dbdisk is unmounted, RHOS ephemeral disks come up mounted
            result = client.run_command('umount {}'.format(db_disk))
            if not result.success:
                self.logger.warning('umount non-zero return, output was: {}'.format(result))
            command_options = ' '.join([command_options, '--dbdisk {}'.format(db_disk)])

        result = client.run_command(' '.join([base_command, command_options]))
        if result.failed or 'failed' in result.output.lower():
            raise Exception('Could not set up the database:\n{}'.format(result.output))
    else:
        # no cli, use the enable internal db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': region,
            'postgres_version': self.postgres_version,
            'db_mounted': str(db_mounted),
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # send rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        result = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    self.logger.info('Output from appliance db configuration: %s', result.output)

    return result.rc, result.output

def enable_internal(self, region=0, key_address=None, db_password=None,
                    ssh_password=None, db_disk=None):
    """Enables internal database

    Args:
        region: Region number of the CFME appliance.
        key_address: Address of CFME appliance where key can be fetched.
        db_disk: Path of the db disk for --dbdisk appliance_console_cli.
            If not specified it tries to load it from the appliance.

    Note:
        If key_address is None, a new encryption key is generated for the appliance.
    """
    # self.logger.info('Enabling internal DB (region {}) on {}.'.format(region, self.address))
    self.address = self.appliance.hostname
    clear_property_cache(self, 'client')

    client = self.ssh_client

    # Defaults
    db_password = db_password or conf.credentials['database']['password']
    ssh_password = ssh_password or conf.credentials['ssh']['password']

    if not db_disk:
        try:
            db_disk = self.appliance.unpartitioned_disks[0]
        except IndexError:
            db_disk = None
            self.logger.warning(
                'Failed to set --dbdisk from the appliance. On 5.9.0.3+ it will fail.')

    # make sure the dbdisk is unmounted, RHOS ephemeral disks come up mounted
    result = client.run_command('umount {}'.format(db_disk))
    if not result.success:
        self.logger.warning('umount non-zero return, output was: {}'.format(result))

    if self.appliance.has_cli:
        base_command = 'appliance_console_cli --region {}'.format(region)
        # use the cli
        if key_address:
            command_options = ('--internal --fetch-key {key} -p {db_pass} -a {ssh_pass}'
                               .format(key=key_address, db_pass=db_password,
                                       ssh_pass=ssh_password))
        else:
            command_options = '--internal --force-key -p {db_pass}'.format(db_pass=db_password)

        if db_disk:
            command_options = ' '.join([command_options, '--dbdisk {}'.format(db_disk)])

        result = client.run_command(' '.join([base_command, command_options]))
        if result.failed or 'failed' in result.output.lower():
            raise Exception('Could not set up the database:\n{}'.format(result.output))
    else:
        # no cli, use the enable internal db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': region,
            'postgres_version': self.postgres_version
        }

        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # send rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        result = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    self.logger.info('Output from appliance db configuration: %s', result.output)

    return result.rc, result.output

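# A minimal caller sketch for the method above (hedged): it assumes an appliance
# object that exposes this database helper as `appliance.db`, and the '/dev/vdb'
# disk path is a hypothetical example, not a value defined by this code.
rc, output = appliance.db.enable_internal(region=0, db_disk='/dev/vdb')
if rc != 0:
    print(output)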