def pre_update(client, config):
    if config.version:
        logger.info(constants.version)
        sys.exit(constants.sig_kill_ok)

    # validate the remove file
    if config.validate:
        if validate_remove_file(config.remove_file):
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    # handle cron stuff
    if config.enable_schedule:
        # enable automatic scheduling
        logger.debug('Updating config...')
        updated = get_scheduler(config).set_daily()
        if updated:
            logger.info('Automatic scheduling for Insights has been enabled.')
        sys.exit(constants.sig_kill_ok)

    if config.disable_schedule:
        # disable automatic scheduling
        updated = get_scheduler(config).remove_scheduling()
        if updated:
            logger.info('Automatic scheduling for Insights has been disabled.')
        if not config.register:
            sys.exit(constants.sig_kill_ok)

    # delete someday
    if config.analyze_container:
        logger.debug('Not scanning host.')
        logger.debug('Scanning image ID, tar file, or mountpoint.')

    # test the insights connection
    if config.test_connection:
        logger.info("Running Connection Tests...")
        rc = client.test_connection()
        if rc == 0:
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    if config.support:
        support = InsightsSupport(config)
        support.collect_support_info()
        sys.exit(constants.sig_kill_ok)

    if config.diagnosis:
        remediation_id = None
        if config.diagnosis is not True:
            remediation_id = config.diagnosis
        resp = client.get_diagnosis(remediation_id)
        if not resp:
            sys.exit(constants.sig_kill_bad)
        print(json.dumps(resp))
        sys.exit(constants.sig_kill_ok)
def pre_update():
    if config['version']:
        logger.info(constants.version)
        sys.exit(constants.sig_kill_ok)

    # validate the remove file
    if config['validate']:
        if validate_remove_file():
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    # handle cron stuff
    if config['enable_schedule'] and config['disable_schedule']:
        logger.error(
            'Conflicting options: --enable-schedule and --disable-schedule')
        sys.exit(constants.sig_kill_bad)

    if config['enable_schedule']:
        # enable automatic scheduling
        logger.debug('Updating config...')
        updated = get_scheduler().set_daily()
        if updated:
            logger.info('Automatic scheduling for Insights has been enabled.')
        sys.exit(constants.sig_kill_ok)

    if config['disable_schedule']:
        # disable automatic scheduling
        updated = get_scheduler().remove_scheduling()
        if updated:
            logger.info('Automatic scheduling for Insights has been disabled.')
        if not config['register']:
            sys.exit(constants.sig_kill_ok)

    if config['container_mode']:
        logger.debug('Not scanning host.')
        logger.debug('Scanning image ID, tar file, or mountpoint.')

    # test the insights connection
    if config['test_connection']:
        logger.info("Running Connection Tests...")
        pconn = client.get_connection()
        rc = pconn.test_connection()
        if rc == 0:
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    if config['support']:
        support = InsightsSupport()
        support.collect_support_info()
        sys.exit(constants.sig_kill_ok)
def test_failed_removal():
    """
    Just verifying that trying to remove scheduling does not raise an exception
    """
    target = tempfile.mktemp()
    with tempfile.NamedTemporaryFile() as source:
        schedule = sched.get_scheduler(source.name, target)
        schedule.remove_scheduling()
def test_set_daily():
    target = tempfile.mktemp()
    with tempfile.NamedTemporaryFile() as source:
        schedule = sched.get_scheduler(source.name, target)
        assert not schedule.active
        assert schedule.set_daily()
        assert schedule.active
        schedule.remove_scheduling()
        assert not schedule.active
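# The two tests above exercise the whole scheduler contract: an `active`
# attribute, `set_daily()` returning truthy on success, and
# `remove_scheduling()` undoing it. The in-memory stub below is a hypothetical
# illustration of that contract (it is not one of the real scheduler classes);
# it could stand in for a scheduler in unit tests that must not touch cron.
class _FakeScheduler(object):
    def __init__(self):
        self.active = False

    def set_daily(self):
        # mirror the behavior the tests expect: become active, report success
        self.active = True
        return True

    def remove_scheduling(self):
        self.active = False
        return True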
def enable_delayed_registration(config):
    '''
    Write a marker file to let the client know that it should
    attempt to register when it runs
    '''
    logger.debug('Writing to %s', constants.register_marker_file)
    write_to_disk(constants.register_marker_file)
    job = get_scheduler(config)
    job.set_daily()
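# Hypothetical sketch (not in the original source) of how the marker written by
# enable_delayed_registration() might be consumed on the next run: register if
# the marker exists, then delete it so the attempt happens only once. The
# function name is made up; constants.register_marker_file, write_to_disk
# (with delete=True) and client.register() are taken from the surrounding code.
def _consume_delayed_registration_sketch(client):
    import os
    if os.path.isfile(constants.register_marker_file):
        logger.debug('Delayed registration marker found; attempting to register.')
        client.register()
        # clear the marker so registration is only attempted once
        write_to_disk(constants.register_marker_file, delete=True)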
def post_update(client, config):
    # create a machine id first thing. we'll need it for all uploads
    logger.debug('Machine ID: %s', client.get_machine_id())
    logger.debug("CONFIG: %s", config)
    if config.status:
        reg_check = client.get_registration_status()
        for msg in reg_check['messages']:
            logger.info(msg)
        if reg_check['status']:
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    # put this first to avoid conflicts with register
    if config.unregister:
        if client.unregister():
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    if config.offline:
        logger.debug('Running client in offline mode. Bypassing registration.')
        return

    if config.analyze_container:
        logger.debug(
            'Running client in container mode. Bypassing registration.')
        return

    if config.display_name and not config.register:
        # setting display name independent of registration
        if client.set_display_name(config.display_name):
            if 'display_name' in config._cli_opts:
                # only exit on success if it was invoked from command line
                sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    reg = client.register()
    if reg is None:
        # API unreachable
        logger.info('Running connection test...')
        client.test_connection()
        sys.exit(constants.sig_kill_bad)
    elif reg is False:
        # unregistered
        sys.exit(constants.sig_kill_bad)

    if config.register:
        if (not config.disable_schedule and
                get_scheduler(config).set_daily()):
            logger.info('Automatic scheduling for Insights has been enabled.')
def post_update(client, config): logger.debug("CONFIG: %s", config) if config['status']: reg_check = registration_check(client.get_connection()) for msg in reg_check['messages']: logger.info(msg) sys.exit(constants.sig_kill_ok) # put this first to avoid conflicts with register if config['unregister']: pconn = client.get_connection() if pconn.unregister(): sys.exit(constants.sig_kill_ok) else: sys.exit(constants.sig_kill_bad) # force-reregister -- remove machine-id files and registration files # before trying to register again new = False if config['reregister']: new = True config['register'] = True delete_registered_file() delete_unregistered_file() write_to_disk(constants.machine_id_file, delete=True) logger.debug('Machine-id: %s', generate_machine_id(new)) if config['register']: registration = client.try_register() if registration is None: logger.info('Running connection test...') client.test_connection() sys.exit(constants.sig_kill_bad) if (not config['disable_schedule'] and get_scheduler(config).set_daily()): logger.info('Automatic scheduling for Insights has been enabled.') # check registration before doing any uploads # only do this if we are not running in container mode # Ignore if in offline mode if not config["analyze_container"]: if not config['register'] and not config['offline']: msg, is_registered = client._is_client_registered() if not is_registered: logger.error(msg) sys.exit(constants.sig_kill_bad)
def post_update(client, config): logger.debug("CONFIG: %s", config) if config.status: reg_check = client.get_registration_status() for msg in reg_check['messages']: logger.info(msg) sys.exit(constants.sig_kill_ok) # put this first to avoid conflicts with register if config.unregister: if client.unregister(): sys.exit(constants.sig_kill_ok) else: sys.exit(constants.sig_kill_bad) if config.offline: logger.debug('Running client in offline mode. Bypassing registration.') return if config.analyze_container: logger.debug( 'Running client in container mode. Bypassing registration.') return if config.display_name and not config.register: # setting display name independent of registration if client.set_display_name(config.display_name): sys.exit(constants.sig_kill_ok) else: sys.exit(constants.sig_kill_bad) reg = client.register() if reg is None: # API unreachable logger.info('Running connection test...') client.test_connection() sys.exit(constants.sig_kill_bad) elif reg is False: # unregistered sys.exit(constants.sig_kill_bad) if config.register: if (not config.disable_schedule and get_scheduler(config).set_daily()): logger.info('Automatic scheduling for Insights has been enabled.')
def post_update(client, config):
    # create a machine id first thing. we'll need it for all uploads
    logger.debug('Machine ID: %s', client.get_machine_id())
    logger.debug("CONFIG: %s", config)
    print_egg_versions()

    # -------delete everything below this line-------
    if config.legacy_upload:
        if config.status:
            reg_check = client.get_registration_status()
            for msg in reg_check['messages']:
                logger.info(msg)
            if reg_check['status']:
                sys.exit(constants.sig_kill_ok)
            else:
                sys.exit(constants.sig_kill_bad)

        # put this first to avoid conflicts with register
        if config.unregister:
            if client.unregister():
                sys.exit(constants.sig_kill_ok)
            else:
                sys.exit(constants.sig_kill_bad)

        if config.offline:
            logger.debug(
                'Running client in offline mode. Bypassing registration.')
            return

        if config.analyze_container:
            logger.debug(
                'Running client in container mode. Bypassing registration.')
            return

        if config.display_name and not config.register:
            # setting display name independent of registration
            if client.set_display_name(config.display_name):
                if 'display_name' in config._cli_opts:
                    # only exit on success if it was invoked from command line
                    sys.exit(constants.sig_kill_ok)
            else:
                sys.exit(constants.sig_kill_bad)

        reg = client.register()
        if reg is None:
            # API unreachable
            logger.info('Running connection test...')
            client.test_connection()
            sys.exit(constants.sig_kill_bad)
        elif reg is False:
            # unregistered
            sys.exit(constants.sig_kill_bad)

        if config.register:
            if (not config.disable_schedule and
                    get_scheduler(config).set_daily()):
                logger.info(
                    'Automatic scheduling for Insights has been enabled.')
        return
    # -------delete everything above this line-------

    if config.offline:
        logger.debug('Running client in offline mode. Bypassing registration.')
        return

    # --payload short circuits registration check
    if config.payload:
        logger.debug('Uploading a specified archive. Bypassing registration.')
        return

    # check registration status before anything else
    reg_check = client.get_registration_status()
    if reg_check is None:
        sys.exit(constants.sig_kill_bad)

    # --status
    if config.status:
        if reg_check:
            logger.info('This host is registered.')
            sys.exit(constants.sig_kill_ok)
        else:
            logger.info('This host is unregistered.')
            sys.exit(constants.sig_kill_bad)

    # put this first to avoid conflicts with register
    if config.unregister:
        if reg_check:
            logger.info('Unregistering this host from Insights.')
            if client.unregister():
                get_scheduler(config).remove_scheduling()
                sys.exit(constants.sig_kill_ok)
            else:
                sys.exit(constants.sig_kill_bad)
        else:
            logger.info(
                'This host is not registered, unregistration is not applicable.'
            )
            sys.exit(constants.sig_kill_bad)

    # halt here if unregistered
    if not reg_check and not config.register:
        logger.info('This host has not been registered. '
                    'Use --register to register this host.')
        sys.exit(constants.sig_kill_bad)

    # --force-reregister, clear machine-id
    if config.reregister:
        reg_check = False
        client.clear_local_registration()

    # --register was called
    if config.register:
        # don't actually need to make a call to register() since
        # system creation and upload are a single event on the platform
        if reg_check:
            logger.info('This host has already been registered.')
        if (not config.disable_schedule and
                get_scheduler(config).set_daily()):
            logger.info('Automatic scheduling for Insights has been enabled.')

    # set --display-name independent of register
    # only do this if set from the CLI. normally display_name is sent on upload
    if 'display_name' in config._cli_opts and not config.register:
        if client.set_display_name(config.display_name):
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)
def pre_update(client, config):
    if config.version:
        logger.info(constants.version)
        sys.exit(constants.sig_kill_ok)

    # validate the remove file
    if config.validate:
        try:
            validate_remove_file(config)
            sys.exit(constants.sig_kill_ok)
        except RuntimeError as e:
            logger.error(e)
            sys.exit(constants.sig_kill_bad)

    # handle cron stuff
    if config.enable_schedule:
        # enable automatic scheduling
        logger.debug('Updating config...')
        updated = get_scheduler(config).set_daily()
        if updated:
            logger.info('Automatic scheduling for Insights has been enabled.')
        sys.exit(constants.sig_kill_ok)

    if config.disable_schedule:
        # disable automatic scheduling
        updated = get_scheduler(config).remove_scheduling()
        if updated:
            logger.info('Automatic scheduling for Insights has been disabled.')
        if not config.register:
            sys.exit(constants.sig_kill_ok)

    # test the insights connection
    if config.test_connection:
        logger.info("Running Connection Tests...")
        rc = client.test_connection()
        if rc == 0:
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    if config.support:
        support = InsightsSupport(config)
        support.collect_support_info()
        sys.exit(constants.sig_kill_ok)

    if config.diagnosis:
        remediation_id = None
        if config.diagnosis is not True:
            remediation_id = config.diagnosis
        resp = client.get_diagnosis(remediation_id)
        if not resp:
            sys.exit(constants.sig_kill_bad)
        print(json.dumps(resp))
        sys.exit(constants.sig_kill_ok)

    if config.checkin:
        checkin_success = client.checkin()
        if checkin_success:
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)
def test_get_scheduler_cron():
    target = tempfile.mktemp()
    config = InsightsConfig()
    with tempfile.NamedTemporaryFile() as source:
        schedule = sched.get_scheduler(config, source.name, target)
        assert isinstance(schedule, sched.InsightsSchedulerCron)
def test_get_scheduler_systemd():
    config = InsightsConfig()
    schedule = sched.get_scheduler(config, "no cron")
    assert isinstance(schedule, sched.InsightsSchedulerSystemd)
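# Taken together, the two tests above pin down the dispatch rule of
# sched.get_scheduler(): an existing cron source path (the NamedTemporaryFile
# case) yields an InsightsSchedulerCron, while a missing one ("no cron") falls
# back to InsightsSchedulerSystemd. The sketch below only illustrates that
# selection; it returns the class rather than an instance so as not to guess
# at constructor signatures, and it is not the actual implementation.
def _pick_scheduler_class_sketch(source):
    import os
    # cron source present -> cron-based scheduling; otherwise systemd timers
    if os.path.exists(source):
        return sched.InsightsSchedulerCron
    return sched.InsightsSchedulerSystemd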