def test_write_pidfile_not_called(getpid, write_to_disk):
    """
    The pidfile must not be written when InsightsClient is constructed
    from within a phase (setup_logging=True).
    """
    InsightsClient(setup_logging=True)
    getpid.assert_not_called()
    write_to_disk.assert_not_called()
def test_upload_500_retry(_, upload_archive):
    """Upload is attempted exactly `retries` times when the server errors."""
    # Hack to prevent client from parsing args to py.test
    saved_argv = sys.argv
    sys.argv = []
    try:
        retries = 3
        config = InsightsConfig(logging_file='/tmp/insights.log', retries=retries)
        client = InsightsClient(config)
        client.upload('/tmp/insights.tar.gz')
        upload_archive.assert_called()
        assert upload_archive.call_count == retries
    finally:
        sys.argv = saved_argv
def test_upload_412_write_unregistered_file(_, upload_archive, write_unregistered_file):
    """A 412 upload response records the unregistration date reported by the API."""
    # Hack to prevent client from parsing args to py.test
    saved_argv = sys.argv
    sys.argv = []
    try:
        config = InsightsConfig(logging_file='/tmp/insights.log', retries=3)
        client = InsightsClient(config)
        with pytest.raises(RuntimeError):
            client.upload('/tmp/insights.tar.gz')
        unregistered_at = upload_archive.return_value.json()["unregistered_at"]
        write_unregistered_file.assert_called_once_with(unregistered_at)
    finally:
        sys.argv = saved_argv
def _f():
    # Phase wrapper: build config + client, set up logging/auto-config, run the
    # wrapped phase callable, then translate the outcome into a process exit code.
    try:
        config = InsightsConfig().load_all()
    except ValueError as e:
        # Bad configuration: print the error and exit with the "kill bad" code.
        sys.stderr.write('ERROR: ' + str(e) + '\n')
        sys.exit(constants.sig_kill_bad)
    client = InsightsClient(config)
    client.set_up_logging()
    if config.debug:
        logger.info("Core path: %s", os.path.dirname(__file__))
    try_auto_configuration(config)
    try:
        # NOTE(review): `func` is a free variable captured from an enclosing
        # decorator scope not visible in this chunk — presumably the phase
        # function being wrapped; confirm against the surrounding definition.
        func(client, config)
    except Exception:
        logger.exception("Fatal error")
        sys.exit(1)
    else:
        sys.exit()  # Exit gracefully
def test_write_pidfile(getpid, write_to_disk):
    """
    The pidfile is written when InsightsClient is constructed initially
    (setup_logging=False).
    """
    InsightsClient(setup_logging=False)
    getpid.assert_called_once()
    write_to_disk.assert_called_with(
        InsightsConstants.pidfile, content=str(getpid.return_value))
def test_copy_to_output_dir_exists_and_not_empty(os_, shutil_, _copy_soscleaner_files):
    """
    Writing to an existing and non-empty directory is NOT performed.
    Due to the check in config.py this should never happen, but just to be safe.
    """
    config = InsightsConfig(output_dir='dest')
    client = InsightsClient(config)
    # copytree fails because the destination already exists...
    shutil_.copytree.side_effect = [OSError(17, 'File exists')]
    # ...and the destination is non-empty, so no per-file copy is attempted
    os_.listdir.return_value = ['test']
    client.copy_to_output_dir('src')
    os_.listdir.assert_called_once_with(config.output_dir)
    shutil_.copytree.assert_called_once_with('src', config.output_dir)
    shutil_.copyfile.assert_not_called()
def test_copy_to_output_dir_exists_and_empty_err_during_copy(
        os_, shutil_, _copy_soscleaner_files):
    '''
    Test that when writing to an existing but empty directory,
    if an error occurs, we bail out before finishing.
    '''
    config = InsightsConfig(output_dir='dest')
    client = InsightsClient(config)
    # The side_effect lists below are consumed in call order, so their
    # ordering encodes the exact sequence of events inside copy_to_output_dir.
    # raise file exists error first, then raise nothing for "b" and "c" below
    shutil_.copytree.side_effect = [OSError(17, 'File exists'), None, None]
    # raise an unknown error for "a"
    shutil_.copyfile.side_effect = [OSError(19, '???'), None, None]
    # returns empty list for destination, file list for source
    os_.listdir.side_effect = [[], ['a', 'b', 'c']]
    # os.path.join called 6 times, once for each file per src and dest
    os_.path.join.side_effect = [
        os.path.join('src', 'a'),
        os.path.join(config.output_dir, 'a'),
        os.path.join('src', 'b'),
        os.path.join(config.output_dir, 'b'),
        os.path.join('src', 'c'),
        os.path.join(config.output_dir, 'c')
    ]
    # 'a' is file, 'b', 'c' are dirs
    os_.path.isfile.side_effect = [True, False, False]
    # a is file so the check for 'a' does not fall through to the elif
    os_.path.isdir.side_effect = [True, True]
    client.copy_to_output_dir('src')
    os_.listdir.assert_has_calls([call(config.output_dir), call('src')])
    os_.path.isfile.assert_has_calls([call('src/a')])
    # a is file so the check for 'a' does not fall through to the elif
    os_.path.isdir.assert_not_called()
    # initial (failed) copy is part of the calls
    shutil_.copytree.assert_has_calls([call('src', config.output_dir)])
    # only 'a' is attempted; the copyfile error aborts before 'b' and 'c'
    shutil_.copyfile.assert_has_calls(
        [call(os.path.join('src', 'a'), os.path.join(config.output_dir, 'a'))])
    _copy_soscleaner_files.assert_not_called()
def collect_and_output():
    """Run a collection and either stream it to stdout or upload it."""
    client = InsightsClient()
    tar_file = client.collect(
        analyze_image_id=config["analyze_image_id"],
        analyze_file=config["analyze_file"],
        analyze_mountpoint=config["analyze_mountpoint"])
    if not tar_file:
        sys.exit(constants.sig_kill_bad)
    if config['to_stdout']:
        # stream the archive straight to stdout instead of uploading
        with open(tar_file, 'rb') as tar_content:
            shutil.copyfileobj(tar_content, sys.stdout)
    else:
        response = None
        if not config['no_upload']:
            response = client.upload(tar_file)
        else:
            logger.info('Archive saved at %s', tar_file)
        if response and config["to_json"]:
            print(json.dumps(response))
    sys.exit()
def get_version_info():
    """Get the insights client and core versions for archival."""
    from insights.client import InsightsClient
    rpm_query = 'rpm -q --qf "%{VERSION}-%{RELEASE}" insights-client'
    return {
        'core_version': InsightsClient().version(),
        'client_version': run_command_get_output(rpm_query)['output'],
    }
def _main(): """ attempt to update with current, fallback to rpm attempt to collect and upload with new, then current, then rpm if an egg fails a phase never try it again """ if not all([insights_uid, insights_gid, insights_grpid]): sys.exit("User and/or group 'insights' not found. Exiting.") validated_eggs = list(filter(gpg_validate, [STABLE_EGG, RPM_EGG])) if not validated_eggs: sys.exit("No GPG-verified eggs can be found") sys.path = validated_eggs + sys.path try: # flake8 complains because these imports aren't at the top import insights from insights.client import InsightsClient from insights.client.phase.v1 import get_phases # handle client instantation here so that it isn't done multiple times in __init__ client = InsightsClient(True, False) # read config, but dont setup logging config = client.get_conf() # handle log rotation here instead of core if os.path.isfile(config['logging_file']): log_handler = logging.handlers.RotatingFileHandler( config['logging_file'], backupCount=3) log_handler.doRollover() # we now have access to the clients logging mechanism instead of using print client.set_up_logging() logging.root.debug("Loaded initial egg: %s", os.path.dirname(insights.__file__)) # check for insights user/group if not (insights_uid or insights_gid): log("WARNING: 'insights' user not found. Using root to run all phases" ) # check if the user is in the insights group # make sure they are not root in_insights_group = insights_grpid in curr_user_grps if not in_insights_group and os.geteuid() != 0: log("ERROR: user not in 'insights' group AND not root. Exiting.") return if config["version"]: from insights_client.constants import InsightsConstants as constants print("Client: %s" % constants.version) print("Core: %s" % client.version()) return for p in get_phases(): run_phase(p, client) except KeyboardInterrupt: sys.exit('Aborting.')
def test_write_pidfile(get_parent_process, getpid, write_to_disk):
    """
    Test writing of the pidfile and ppidfile when InsightsClient is called
    initially (when setup_logging=False).
    """
    InsightsClient(setup_logging=False)
    getpid.assert_called_once()
    # BUG FIX: the original built this list by *invoking* the write_to_disk
    # mock (recording bogus calls instead of building expectations) and then
    # used `has_calls`, which is just an auto-created no-op Mock attribute —
    # so the test asserted nothing. Use mock.call objects with
    # assert_has_calls to actually verify both writes.
    calls = [
        call(InsightsConstants.pidfile, content=str(getpid.return_value)),
        call(InsightsConstants.ppidfile, content=get_parent_process.return_value)
    ]
    write_to_disk.assert_has_calls(calls)
def _main(): """ attempt to update with current, fallback to rpm attempt to collect and upload with new, then current, then rpm if an egg fails a phase never try it again """ if os.getuid() != 0: sys.exit('Insights client must be run as root.') # sort rpm and stable eggs after verification validated_eggs = sorted_eggs( list(filter(gpg_validate, [STABLE_EGG, RPM_EGG]))) # if ENV_EGG was specified and it's valid, add that to front of sys.path # so it can be loaded initially. keep it in its own var so we don't # pass it to run_phase where we load it again if gpg_validate(ENV_EGG): valid_env_egg = [ENV_EGG] else: valid_env_egg = [] if not validated_eggs and not valid_env_egg: sys.exit("No GPG-verified eggs can be found") # ENV egg comes first sys.path = valid_env_egg + validated_eggs + sys.path try: # flake8 complains because these imports aren't at the top import insights from insights.client import InsightsClient from insights.client.phase.v1 import get_phases # handle client instantation here so that it isn't done multiple times in __init__ client = InsightsClient(True, False) # read config, but dont setup logging config = client.get_conf() # handle log rotation here instead of core if os.path.isfile(config['logging_file']): log_handler = logging.handlers.RotatingFileHandler( config['logging_file'], backupCount=3) log_handler.doRollover() # we now have access to the clients logging mechanism instead of using print client.set_up_logging() logging.root.debug("Loaded initial egg: %s", os.path.dirname(insights.__file__)) if config["version"]: from insights_client.constants import InsightsConstants as constants print("Client: %s" % constants.version) print("Core: %s" % client.version()) return for p in get_phases(): run_phase(p, client, validated_eggs) except KeyboardInterrupt: sys.exit('Aborting.')
def test_reg_check_unregistered_unreachable():
    """After unregistering, an unreachable API yields 'unreachable' status."""
    # unregister the machine first
    config = InsightsConfig(unregister=True)
    client = InsightsClient(config)
    client.connection = FakeConnection(registered=True)
    client.session = True
    assert client.unregister() is True
    # reset config and try to check registration against a dead API
    config.unregister = False
    client.connection = FakeConnection(registered=False)
    assert client.get_registation_status()['unreachable'] is True
    assert client.register() is None
    for reg_file in constants.registered_files:
        assert os.path.isfile(reg_file) is False
    for unreg_file in constants.unregistered_files:
        assert os.path.isfile(unreg_file) is True
def test_checkin_error():
    """An exception raised by the connection's checkin propagates to the caller."""
    config = InsightsConfig()
    client = InsightsClient(config)
    client.session = True
    client.connection = Mock(**{"checkin.side_effect": Exception})
    with raises(Exception):
        client.checkin()
    client.connection.checkin.assert_called_once_with()
def test_reg_check_unregistered():
    """A machine the API reports as unregistered fails the registration check."""
    config = InsightsConfig()
    client = InsightsClient(config)
    client.connection = FakeConnection(registered='unregistered')
    client.session = True
    # test function and integration in .register()
    assert client.get_registation_status()['status'] is False
    assert client.register() is False
    for reg_file in constants.registered_files:
        assert os.path.isfile(reg_file) is False
    for unreg_file in constants.unregistered_files:
        assert os.path.isfile(unreg_file) is True
def main():
    """
    Entry point: run startup handling, then collect and (optionally) upload.

    If handle_startup() returns a non-None value, startup handled everything:
    print it (unless it is a bare success/failure boolean) and stop.
    """
    compile_config()
    set_up_logging()
    v = handle_startup()
    if v is not None:
        # IDIOM FIX: `type(v) != bool` replaced with isinstance(); behavior
        # is identical for bool (it cannot be subclassed) but this is the
        # canonical Python type check.
        if not isinstance(v, bool):
            print(v)
        return
    client = InsightsClient()
    client.update_rules()
    tar = client.collect(check_timestamp=False,
                         image_id=(config["image_id"] or config["only"]),
                         tar_file=config["tar_file"],
                         mountpoint=config["mountpoint"])
    if not config['no_upload']:
        client.upload(tar)
    else:
        print('Archive saved to ' + tar)
def test_force_reregister():
    """Forced re-registration regenerates the machine-id and registration files."""
    config = InsightsConfig(reregister=True)
    client = InsightsClient(config)
    client.connection = FakeConnection(registered=None)
    client.session = True
    # initialize comparisons
    old_machine_id = None
    new_machine_id = None
    # register first
    assert client.register() is True
    for reg_file in constants.registered_files:
        assert os.path.isfile(reg_file) is True
    # snapshot the .registered mtimes and machine-id so we can prove
    # that re-registration regenerates them
    old_reg_file1_ts = os.path.getmtime(constants.registered_files[0])
    old_reg_file2_ts = os.path.getmtime(constants.registered_files[1])
    old_machine_id = generate_machine_id()
    # wait to allow for timestamp difference
    time.sleep(3)
    # reregister with new machine-id
    client.connection = FakeConnection(registered=True)
    config.reregister = True
    assert client.register() is True
    new_machine_id = generate_machine_id()
    new_reg_file1_ts = os.path.getmtime(constants.registered_files[0])
    new_reg_file2_ts = os.path.getmtime(constants.registered_files[1])
    assert new_machine_id != old_machine_id
    assert new_reg_file1_ts != old_reg_file1_ts
    assert new_reg_file2_ts != old_reg_file2_ts
def test_get_diagnosis_offline():
    """get_diagnosis returns None when the client is configured offline."""
    config = InsightsConfig()
    config.offline = True
    client = InsightsClient(config)
    assert client.get_diagnosis() is None
from insights.client import InsightsClient

# Exercise the non-legacy (platform) upload path with an explicit
# content type for the payload archive.
client = InsightsClient()
client.config.legacy_upload = False
client.upload(payload='test.tar.gz',
              content_type='application/vnd.redhat.advisor.test+tgz')
def update():
    """Refresh the insights-core egg and the collection rules."""
    client = InsightsClient()
    client.update()
    client.update_rules()