def test_write_to_disk():
    """Round-trip a small payload through util.write_to_disk.

    Covers three behaviors: writing content creates the file, the file
    contains exactly what was written, and ``delete=True`` removes the
    file and returns ``None``.
    """
    content = 'boop'
    filename = '/tmp/testing'
    util.write_to_disk(filename, content=content)
    assert os.path.isfile(filename)
    with open(filename, 'r') as f:
        result = f.read()
    assert result == 'boop'
    # BUG FIX: the original line was a bare expression
    # (`util.write_to_disk(...) is None`) whose result was discarded,
    # so the delete contract was never actually checked.
    assert util.write_to_disk(filename, delete=True) is None
    # Deleting must actually remove the file from disk.
    assert not os.path.isfile(filename)
def enable_delayed_registration(config):
    """Arm delayed registration for the next client run.

    Drops the register marker file (so the client knows it should try
    to register on its next invocation) and turns on the daily job via
    the scheduler built from *config*.
    """
    marker = constants.register_marker_file
    logger.debug('Writing to %s', marker)
    write_to_disk(marker)
    scheduler = get_scheduler(config)
    scheduler.set_daily()
def post_update(client, config):
    """Handle registration-related actions after client update.

    Processes, in order: --status, --unregister, --reregister, and
    --register, each of which may terminate the process via sys.exit
    with a kill-ok/kill-bad signal constant. Finally, when not running
    in container-analysis or offline mode, verifies the client is
    registered before any upload work proceeds.

    NOTE(review): `config` is presumably a dict-like parsed options
    object with boolean flags ('status', 'unregister', 'reregister',
    'register', 'disable_schedule', 'analyze_container', 'offline') —
    confirm against the caller.
    """
    logger.debug("CONFIG: %s", config)
    # --status: report registration state and exit successfully.
    if config['status']:
        reg_check = registration_check(client.get_connection())
        for msg in reg_check['messages']:
            logger.info(msg)
        sys.exit(constants.sig_kill_ok)

    # put this first to avoid conflicts with register
    if config['unregister']:
        pconn = client.get_connection()
        if pconn.unregister():
            sys.exit(constants.sig_kill_ok)
        else:
            sys.exit(constants.sig_kill_bad)

    # force-reregister -- remove machine-id files and registration files
    # before trying to register again
    new = False
    if config['reregister']:
        new = True
        # Reregister implies a fresh register pass below.
        config['register'] = True
        delete_registered_file()
        delete_unregistered_file()
        write_to_disk(constants.machine_id_file, delete=True)
    # `new` controls whether a fresh machine-id is generated.
    logger.debug('Machine-id: %s', generate_machine_id(new))

    if config['register']:
        registration = client.try_register()
        # None here means registration failed; run a connection test
        # for diagnostics, then exit with a failure signal.
        if registration is None:
            logger.info('Running connection test...')
            client.test_connection()
            sys.exit(constants.sig_kill_bad)
        # On successful registration, enable the daily schedule unless
        # the user disabled scheduling.
        if (not config['disable_schedule'] and
                get_scheduler(config).set_daily()):
            logger.info('Automatic scheduling for Insights has been enabled.')

    # check registration before doing any uploads
    # only do this if we are not running in container mode
    # Ignore if in offline mode
    if not config["analyze_container"]:
        if not config['register'] and not config['offline']:
            msg, is_registered = client._is_client_registered()
            if not is_registered:
                logger.error(msg)
                sys.exit(constants.sig_kill_bad)