def set_yaml_config(config_name, data_dict, hostname=None):
    """Given a yaml name, dictionary and hostname, set the configuration yaml on the server

    The configuration yamls must be inserted into the DB using the ruby console, so this function
    uses SSH, not the database. It makes sense to be included here as a counterpart to
    :py:func:`get_yaml_config`

    Args:
        config_name: Name of the yaml configuration file
        data_dict: Dictionary with data to set/change
        hostname: Hostname/address of the server that we want to set up (default ``None``)

    Note:
        If hostname is set to ``None``, the default server set up for this session will be
        used. See :py:class:``utils.ssh.SSHClient`` for details of the default setup.

    Warning:
        Manually editing the config yamls is potentially dangerous. Furthermore, the rails
        runner doesn't return useful information on the outcome of the set request, so errors
        that arise from the newly loading config file will go unreported.

    Usage:
        # Update the appliance name, for example
        vmbd_yaml = get_yaml_config('vmdb')
        vmdb_yaml['server']['name'] = 'EVM IS AWESOME'
        set_yaml_config('vmdb', vmdb_yaml, '1.2.3.4')

    """
    # CFME does a lot of things when loading a configfile, so
    # let their native conf loader handle the job
    # If hostname is defined, connect to the specified server
    if hostname is not None:
        _ssh_client = SSHClient(hostname=hostname)
    # Else, connect to the default one set up for this session
    else:
        _ssh_client = store.current_appliance.ssh_client

    # Build & send new config
    temp_yaml = NamedTemporaryFile()
    dest_yaml = '/tmp/conf.yaml'
    yaml.dump(data_dict, temp_yaml, default_flow_style=False)
    # BUGFIX: flush before copying the tempfile by name; without this the
    # buffered yaml data may not be on disk yet and an empty/partial config
    # would be uploaded to the appliance
    temp_yaml.flush()
    _ssh_client.put_file(temp_yaml.name, dest_yaml)

    # Build and send ruby script rendered from the load-config template
    dest_ruby = '/tmp/load_conf.rb'
    ruby_template = data_path.join('utils', 'cfmedb_load_config.rbt')
    ruby_replacements = {
        'config_name': config_name,
        'config_file': dest_yaml
    }
    temp_ruby = load_data_file(ruby_template.strpath, ruby_replacements)
    _ssh_client.put_file(temp_ruby.name, dest_ruby)

    # Run it, then notify listeners that server config changed
    _ssh_client.run_rails_command(dest_ruby)
    fire('server_details_changed')
    fire('server_config_changed')
def main(): parser = argparse.ArgumentParser( epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance') parser.add_argument('db_address', help='hostname or ip address of external database') parser.add_argument('--database', default='vmdb_production', help='name of the external database') parser.add_argument('--region', default=0, type=int, help='region to assign to the new DB') parser.add_argument('--username', default=credentials['database']['username'], help='username for external database') parser.add_argument('--password', default=credentials['database']['password'], help='password for external database') args = parser.parse_args() ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], 'hostname': args.address } rbt_repl = { 'miq_lib': '/var/www/miq/lib', 'host': args.db_address, 'database': args.database, 'region': args.region, 'username': args.username, 'password': args.password } # Find and load our rb template with replacements base_path = os.path.dirname(__file__) rbt = datafile.data_path_for_filename('enable-external-db.rbt', base_path) rb = datafile.load_data_file(rbt, rbt_repl) # Init SSH client and sent rb file over to /tmp remote_file = '/tmp/%s' % generate_random_string() client = SSHClient(**ssh_kwargs) client.put_file(rb.name, remote_file) # Run the rb script, clean it up when done print 'Initializing Appliance External DB' status, out = client.run_command('ruby %s' % remote_file) client.run_command('rm %s' % remote_file) if status != 0: print 'Enabling DB failed with error:' print out sys.exit(1) else: print 'DB Enabled, evm watchdog should start the UI shortly.'
def fix_merkyl_workaround():
    """Workaround around merkyl not opening an iptables port for communication"""
    client = SSHClient()
    # Only act when merkyl's init script is actually installed on the appliance
    merkyl_installed = client.run_command('test -f /etc/init.d/merkyl').rc == 0
    if merkyl_installed:
        logger.info('Rudely overwriting merkyl init.d on appliance;')
        bundled_initd = data_path.join("bundles").join("merkyl").join("merkyl")
        client.put_file(bundled_initd.strpath, "/etc/init.d/merkyl")
        client.run_command("service merkyl restart")
def main():
    """Install, enable and start the SNMP listener on the target appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    # Init SSH client
    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address
    }
    client = SSHClient(**ssh_kwargs)
    snmp_path = scripts_data_path.join("snmp")

    # Copy the listener script pair over
    print("Copying files")
    client.put_file(snmp_path.join("snmp_listen.rb").strpath, "/root/snmp_listen.rb")
    client.put_file(snmp_path.join("snmp_listen.sh").strpath, "/root/snmp_listen.sh")

    # Make sure the listener comes back after a reboot
    print("Enabling after startup")
    enabled = client.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0
    if not enabled:
        client.run_command(
            "echo 'cd /root/ && ./snmp_listen.sh start' >> /etc/rc.local")
    assert client.run_command(
        "grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0, "Could not enable!"

    # Start it now
    print("Starting listener")
    assert client.run_command(
        "cd /root/ && ./snmp_listen.sh start")[0] == 0, "Could not start!"

    # Punch a hole in iptables if one isn't there already
    print("Opening the port in iptables")
    port_open = client.run_command(
        "grep '--dport 8765' /etc/sysconfig/iptables")[0] == 0
    if not port_open:
        # append after the 5432 entry
        client.run_command(
            "sed -i '/--dport 5432/a -A INPUT -p tcp -m tcp --dport 8765 -j ACCEPT' "
            "/etc/sysconfig/iptables")
        client.run_command("systemctl restart iptables")

    # Check if accessible
    try:
        requests.get("http://{}:8765/".format(args.address))
    except requests.exceptions.ConnectionError:
        print("Could not detect running listener!")
        exit(2)
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance') parser.add_argument('db_address', help='hostname or ip address of external database') parser.add_argument('--database', default='vmdb_production', help='name of the external database') parser.add_argument('--region', default=0, type=int, help='region to assign to the new DB') parser.add_argument('--username', default=credentials['database']['username'], help='username for external database') parser.add_argument('--password', default=credentials['database']['password'], help='password for external database') args = parser.parse_args() ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], 'hostname': args.address } rbt_repl = { 'miq_lib': '/var/www/miq/lib', 'host': args.db_address, 'database': args.database, 'region': args.region, 'username': args.username, 'password': args.password } # Find and load our rb template with replacements base_path = os.path.dirname(__file__) rbt = datafile.data_path_for_filename( 'enable-external-db.rbt', base_path) rb = datafile.load_data_file(rbt, rbt_repl) # Init SSH client and sent rb file over to /tmp remote_file = '/tmp/%s' % generate_random_string() client = SSHClient(**ssh_kwargs) client.put_file(rb.name, remote_file) # Run the rb script, clean it up when done print 'Initializing Appliance External DB' status, out = client.run_command('ruby %s' % remote_file) client.run_command('rm %s' % remote_file) if status != 0: print 'Enabling DB failed with error:' print out sys.exit(1) else: print 'DB Enabled, evm watchdog should start the UI shortly.'
def main(): parser = argparse.ArgumentParser( epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance') parser.add_argument('-R', '--reverse', help='flag to indicate the patch should be undone', action='store_true', default=False, dest='reverse') args = parser.parse_args() # Find the patch file patch_file_name = data_path_for_filename('ajax_wait.diff', scripts_path.strpath) # Set up temp dir tmpdir = mkdtemp() atexit.register(shutil.rmtree, tmpdir) source = '/var/www/miq/vmdb/public/javascripts/application.js' target = os.path.join(tmpdir, 'application.js') # Init SSH client ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], 'hostname': args.address } client = SSHClient(**ssh_kwargs) print 'retriving appliance.js from appliance' client.get_file(source, target) os.chdir(tmpdir) # patch, level 4, patch direction (default forward), ignore whitespace, don't output rejects direction = '-N -R' if args.reverse else '-N' exitcode = subprocess.call('patch -p4 %s -l -r- < %s' % (direction, patch_file_name), shell=True) if exitcode == 0: # Put it back after successful patching. print 'replacing appliance.js on appliance' client.put_file(target, source) else: print 'not changing appliance' return exitcode
def main():
    """Deploy and start the SNMP trap listener on the target appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    # Init SSH client; alias run_command since it's used repeatedly below
    client = SSHClient(
        username=credentials['ssh']['username'],
        password=credentials['ssh']['password'],
        hostname=args.address)
    run = client.run_command
    snmp_path = scripts_data_path.join("snmp")

    # Copy
    print("Copying files")
    client.put_file(snmp_path.join("snmp_listen.rb").strpath, "/root/snmp_listen.rb")
    client.put_file(snmp_path.join("snmp_listen.sh").strpath, "/root/snmp_listen.sh")

    # Enable after startup
    print("Enabling after startup")
    if run("grep 'snmp_listen[.]sh' /etc/rc.local")[0] != 0:
        run("echo 'cd /root/ && ./snmp_listen.sh start' >> /etc/rc.local")
    assert run("grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0, "Could not enable!"

    # Run!
    print("Starting listener")
    assert run("cd /root/ && ./snmp_listen.sh start")[0] == 0, "Could not start!"

    # Open the port if not opened
    print("Opening the port in iptables")
    if run("grep '--dport 8765' /etc/sysconfig/iptables")[0] != 0:
        # append after the 5432 entry
        run("sed -i '/--dport 5432/a -A INPUT -p tcp -m tcp --dport 8765 -j ACCEPT' "
            "/etc/sysconfig/iptables")
        run("service iptables restart")

    # Check if accessible
    try:
        requests.get("http://{}:8765/".format(args.address))
    except requests.exceptions.ConnectionError:
        print("Could not detect running listener!")
        exit(2)
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance') parser.add_argument('-R', '--reverse', help='flag to indicate the patch should be undone', action='store_true', default=False, dest='reverse') args = parser.parse_args() # Find the patch file patch_file_name = data_path_for_filename('ajax_wait.diff', scripts_path.strpath) # Set up temp dir tmpdir = mkdtemp() atexit.register(shutil.rmtree, tmpdir) source = '/var/www/miq/vmdb/public/javascripts/application.js' target = os.path.join(tmpdir, 'application.js') # Init SSH client ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], 'hostname': args.address } client = SSHClient(**ssh_kwargs) print 'retriving appliance.js from appliance' client.get_file(source, target) os.chdir(tmpdir) # patch, level 4, patch direction (default forward), ignore whitespace, don't output rejects direction = '-N -R' if args.reverse else '-N' exitcode = subprocess.call('patch -p4 %s -l -r- < %s' % (direction, patch_file_name), shell=True) if exitcode == 0: # Put it back after successful patching. print 'replacing appliance.js on appliance' client.put_file(target, source) else: print 'not changing appliance' return exitcode
class UiCoveragePlugin(object):
    """py.test plugin that gathers and reports rails UI code coverage from an appliance.

    On configure: installs the simplecov gems and a coverage hook on the
    appliance, restarts EVM, and kicks off a baseline "thing toucher".
    On unconfigure: merges the per-process coverage reports on the appliance,
    copies them back locally, and prints a summary to the terminal reporter.
    """

    def __init__(self):
        # All appliance interaction goes through one SSH session
        self.ssh_client = SSHClient()

    # trylast so that terminalreporter's been configured before ui-coverage
    @pytest.mark.trylast
    def pytest_configure(self, config):
        # Eventually, the setup/teardown work for coverage should be handled by
        # utils.appliance.Appliance to make multi-appliance support easy
        self.reporter = config.pluginmanager.getplugin('terminalreporter')
        self.reporter.write_sep('-', 'Setting up UI coverage reporting')

        self.install_simplecov()
        self.install_coverage_hook()
        self.restart_evm()
        self.touch_all_the_things()
        check_appliance_ui(base_url())

    def pytest_unconfigure(self, config):
        self.reporter.write_sep(
            '-', 'Waiting for coverage to finish and collecting reports')
        self.stop_touching_all_the_things()
        self.merge_reports()
        self.collect_reports()
        self.print_report()

    def install_simplecov(self):
        """Copy the Gemfile and bundle the coverage gems; return True on success."""
        logger.info('Installing coverage gems on appliance')
        self.ssh_client.put_file(gemfile.strpath, rails_root.strpath)
        x, out = self.ssh_client.run_command(
            'cd {}; bundle'.format(rails_root))
        return x == 0

    def install_coverage_hook(self):
        """Install coverage_hook.rb and require it from preinitializer.rb exactly once."""
        logger.info('Installing coverage hook on appliance')
        # Put the coverage hook in the miq lib path
        self.ssh_client.put_file(
            coverage_hook.strpath,
            rails_root.join('..', 'lib', coverage_hook.basename).strpath)
        replacements = {
            'require': r"require 'coverage_hook'",
            'config': rails_root.join('config').strpath
        }
        # grep/echo to try to add the require line only once
        # This goes in preinitializer after the miq lib path is set up,
        # which makes it so ruby can actually require the hook
        command_template = (
            'cd {config};'
            'grep -q "{require}" preinitializer.rb || echo -e "\\n{require}" >> preinitializer.rb'
        )
        x, out = self.ssh_client.run_command(
            command_template.format(**replacements))
        return x == 0

    def restart_evm(self, rude=True):
        """Restart EVM so the coverage hook gets loaded; return True on success.

        This is rude by default (issuing a kill -9 on ruby procs), since the
        most common use-case will be to set up coverage on a freshly
        provisioned appliance in a jenkins run.
        """
        logger.info('Restarting EVM to enable coverage reporting')
        if rude:
            x, out = self.ssh_client.run_command(
                'killall -9 ruby; service evmserverd start')
        else:
            # BUGFIX: was self.ssh_client.run_comment(...), which raised
            # AttributeError whenever rude=False was used
            x, out = self.ssh_client.run_command('service evmserverd restart')
        return x == 0

    def touch_all_the_things(self):
        """Start the async 'thing toucher' that establishes baseline coverage."""
        # log typo fixed: 'overage' -> 'coverage'
        logger.info('Establishing baseline coverage by requiring ALL THE THINGS')
        # send over the thing toucher
        self.ssh_client.put_file(
            thing_toucher.strpath,
            rails_root.join(thing_toucher.basename).strpath)
        # start it in an async process so we can go on testing while this takes place
        self._thing_toucher_proc = Process(
            target=_thing_toucher_mp_handler, args=[self.ssh_client])
        self._thing_toucher_proc.start()

    def stop_touching_all_the_things(self):
        """Block until the baseline generator finishes; True if it exited cleanly."""
        logger.info('Waiting for baseline coverage generator to finish')
        # block while the thing toucher is still running
        self._thing_toucher_proc.join()
        return self._thing_toucher_proc.exitcode == 0

    def merge_reports(self):
        """Run the merger script on the appliance; return True on success."""
        logger.info("Merging coverage reports on appliance")
        # install the merger script
        self.ssh_client.put_file(
            coverage_merger.strpath,
            rails_root.join(coverage_merger.basename).strpath)
        # don't async this one since it's happening in unconfigure
        # merge/clean up the coverage reports
        x, out = self.ssh_client.run_rails_command('coverage_merger.rb')
        return x == 0

    def collect_reports(self):
        """Copy the merged coverage directory from the appliance into the local log path."""
        coverage_dir = log_path.join('coverage')
        # clean out old coverage dir if it exists
        if coverage_dir.check():
            coverage_dir.remove(rec=True, ignore_errors=True)
        # Then ensure the empty dir exists
        coverage_dir.ensure(dir=True)
        # then copy the remote coverage dir into it
        logger.info("Collecting coverage reports to {}".format(
            coverage_dir.strpath))
        logger.info("Report collection can take several minutes")
        self.ssh_client.get_file(
            rails_root.join('coverage').strpath,
            log_path.strpath, recursive=True)

    def print_report(self):
        """Print the covered percentage to the terminal reporter (best-effort)."""
        try:
            last_run = json.load(
                log_path.join('coverage', '.last_run.json').open())
            coverage = last_run['result']['covered_percent']
            # TODO: Make the happy vs. sad coverage color configurable, and
            # set it to something good once we know what good is
            style = {'bold': True}
            if coverage > 40:
                style['green'] = True
            else:
                style['red'] = True
            self.reporter.line('UI Coverage Result: {}%'.format(coverage), **style)
        except KeyboardInterrupt:
            # don't block this, so users can cancel out
            raise
        except Exception:
            # narrowed from a bare except so SystemExit and friends propagate
            logger.error(
                'Error printing coverage report to terminal, traceback follows')
            logger.error(traceback.format_exc())
class UiCoveragePlugin(object):
    """py.test plugin providing appliance-side UI (rails) coverage collection.

    Setup happens in pytest_configure (gems, hook, EVM restart, coverage
    baseline); teardown in pytest_unconfigure (merge, collect, report).
    """

    def __init__(self):
        # Single SSH session used for all appliance setup/teardown work
        self.ssh_client = SSHClient()

    # trylast so that terminalreporter's been configured before ui-coverage
    @pytest.mark.trylast
    def pytest_configure(self, config):
        # Eventually, the setup/teardown work for coverage should be handled by
        # utils.appliance.Appliance to make multi-appliance support easy
        self.reporter = config.pluginmanager.getplugin('terminalreporter')
        self.reporter.write_sep('-', 'Setting up UI coverage reporting')

        self.install_simplecov()
        self.install_coverage_hook()
        self.restart_evm()
        self.touch_all_the_things()
        check_appliance_ui(base_url())

    def pytest_unconfigure(self, config):
        self.reporter.write_sep('-', 'Waiting for coverage to finish and collecting reports')
        self.stop_touching_all_the_things()
        self.merge_reports()
        self.collect_reports()
        self.print_report()

    def install_simplecov(self):
        """Install the coverage gems via bundler; return True on success."""
        logger.info('Installing coverage gems on appliance')
        self.ssh_client.put_file(gemfile.strpath, rails_root.strpath)
        x, out = self.ssh_client.run_command('cd {}; bundle'.format(rails_root))
        return x == 0

    def install_coverage_hook(self):
        """Place the coverage hook and require it from preinitializer.rb (idempotent)."""
        logger.info('Installing coverage hook on appliance')
        # Put the coverage hook in the miq lib path
        self.ssh_client.put_file(
            coverage_hook.strpath,
            rails_root.join('..', 'lib', coverage_hook.basename).strpath
        )
        replacements = {
            'require': r"require 'coverage_hook'",
            'config': rails_root.join('config').strpath
        }
        # grep/echo to try to add the require line only once
        # This goes in preinitializer after the miq lib path is set up,
        # which makes it so ruby can actually require the hook
        command_template = (
            'cd {config};'
            'grep -q "{require}" preinitializer.rb || echo -e "\\n{require}" >> preinitializer.rb'
        )
        x, out = self.ssh_client.run_command(command_template.format(**replacements))
        return x == 0

    def restart_evm(self, rude=True):
        """Restart EVM to pick up the coverage hook; return True on success.

        This is rude by default (issuing a kill -9 on ruby procs), since the
        most common use-case will be to set up coverage on a freshly
        provisioned appliance in a jenkins run.
        """
        logger.info('Restarting EVM to enable coverage reporting')
        if rude:
            x, out = self.ssh_client.run_command('killall -9 ruby; service evmserverd start')
        else:
            # BUGFIX: previously called the nonexistent run_comment(), so the
            # polite restart path always raised AttributeError
            x, out = self.ssh_client.run_command('service evmserverd restart')
        return x == 0

    def touch_all_the_things(self):
        """Launch the background 'thing toucher' to build baseline coverage."""
        # fixed log typo 'overage' -> 'coverage'
        logger.info('Establishing baseline coverage by requiring ALL THE THINGS')
        # send over the thing toucher
        self.ssh_client.put_file(
            thing_toucher.strpath,
            rails_root.join(thing_toucher.basename).strpath
        )
        # start it in an async process so we can go on testing while this takes place
        self._thing_toucher_proc = Process(
            target=_thing_toucher_mp_handler, args=[self.ssh_client])
        self._thing_toucher_proc.start()

    def stop_touching_all_the_things(self):
        """Join the thing-toucher process; True if it exited with code 0."""
        logger.info('Waiting for baseline coverage generator to finish')
        # block while the thing toucher is still running
        self._thing_toucher_proc.join()
        return self._thing_toucher_proc.exitcode == 0

    def merge_reports(self):
        """Merge per-process coverage reports on the appliance; True on success."""
        logger.info("Merging coverage reports on appliance")
        # install the merger script
        self.ssh_client.put_file(
            coverage_merger.strpath,
            rails_root.join(coverage_merger.basename).strpath
        )
        # don't async this one since it's happening in unconfigure
        # merge/clean up the coverage reports
        x, out = self.ssh_client.run_rails_command('coverage_merger.rb')
        return x == 0

    def collect_reports(self):
        """Pull the merged coverage directory down into the local log path."""
        coverage_dir = log_path.join('coverage')
        # clean out old coverage dir if it exists
        if coverage_dir.check():
            coverage_dir.remove(rec=True, ignore_errors=True)
        # Then ensure the empty dir exists
        coverage_dir.ensure(dir=True)
        # then copy the remote coverage dir into it
        logger.info("Collecting coverage reports to {}".format(coverage_dir.strpath))
        logger.info("Report collection can take several minutes")
        self.ssh_client.get_file(
            rails_root.join('coverage').strpath,
            log_path.strpath,
            recursive=True
        )

    def print_report(self):
        """Best-effort: report the covered percentage on the terminal."""
        try:
            last_run = json.load(log_path.join('coverage', '.last_run.json').open())
            coverage = last_run['result']['covered_percent']
            # TODO: Make the happy vs. sad coverage color configurable, and
            # set it to something good once we know what good is
            style = {'bold': True}
            if coverage > 40:
                style['green'] = True
            else:
                style['red'] = True
            self.reporter.line('UI Coverage Result: {}%'.format(coverage), **style)
        except KeyboardInterrupt:
            # don't block this, so users can cancel out
            raise
        except Exception:
            # narrowed from a bare except: only real errors get swallowed/logged
            logger.error('Error printing coverage report to terminal, traceback follows')
            logger.error(traceback.format_exc())