def try_register():
    """Attempt to register this host, consulting the API first.

    Returns:
        None  -- container mode bypass, or the API was unreachable
        True  -- the API says this host is already registered
        tuple -- (reg_check, message, hostname, group, display_name)
                 on a fresh, successful registration
    """
    # Image/container analysis runs never register.
    if config["analyze_container"]:
        logger.info(
            "Running client in Container mode. Bypassing registration.")
        return

    # Ask the API for the current registration status of this host.
    reg_check = registration_check(get_connection())
    if reg_check['status']:
        logger.info('This host has already been registered.')
        # Regenerate the local .registered marker file.
        write_to_disk(constants.registered_file)
        return True
    if reg_check['unreachable']:
        logger.error(reg_check['messages'][1])
        return None

    # Not registered yet and the API is reachable -- register now.
    message, hostname, group, display_name = register()

    # Log a success line tailored to which options were supplied.
    if config['display_name'] is not None:
        logger.info('Successfully registered host %s as %s in group %s',
                    hostname, display_name, group)
    elif config['group'] is not None:
        logger.info('Successfully registered host %s in group %s',
                    hostname, group)
    else:
        logger.info('Successfully registered host %s', hostname)

    if message:
        logger.info(message)
    return reg_check, message, hostname, group, display_name
def register(self):
    """
    Register this machine with the Insights service.

    Returns:
        tuple: (message, hostname, group, display_name). On a connection
        failure the first element is an error message and the remaining
        elements are empty strings.
    """
    write_to_disk(constants.unregistered_file, delete=True)
    client_hostname = determine_hostname()
    # This will undo a blacklist
    logger.debug("API: Create system")
    system = self.create_system(new_machine_id=False)
    if not system:
        return ('Could not reach the Insights service to register.',
                '', '', '')
    # If we get a 409, we know we need to generate a new machine-id
    if system.status_code == 409:
        system = self.create_system(new_machine_id=True)
    self.handle_fail_rcs(system)
    logger.debug("System: %s", system.json())
    message = system.headers.get("x-rh-message", "")
    write_to_disk(constants.registered_file)
    # Do grouping
    if config['group'] is not None:
        self.do_group()
    # Display registration success message to STDOUT and logs
    if system.status_code == 201:
        try:
            system_json = system.json()
            machine_id = system_json["machine_id"]
            account_number = system_json["account_number"]
            logger.info("You successfully registered %s to account %s.",
                        machine_id, account_number)
        except (ValueError, KeyError):
            # Fix: the original bare `except:` handler called
            # system.json() again, which would re-raise the very decode
            # error being handled. Catch only the expected failures
            # (invalid JSON -> ValueError, missing key -> KeyError) and
            # log the raw payload instead of re-parsing it.
            logger.debug('Received invalid JSON on system registration.')
            logger.debug(
                'API still indicates valid registration with 201 status code.'
            )
            logger.debug(system)
            logger.debug(system.text)
    if config['group'] is not None:
        return (message, client_hostname,
                config['group'], config['display_name'])
    elif config['display_name'] is not None:
        return (message, client_hostname, "None", config['display_name'])
    else:
        return (message, client_hostname, "None", "")
def handle_registration():
    """
    Orchestrate registration, honoring the reregister/register config flags.

    Returns:
        None when registration could not be attempted (API unreachable),
        otherwise a dict:
            {'success': bool (registration confirmed by the API),
             'machine-id': uuid from generate_machine_id(),
             'registration': return value of try_register()}

    (Fix: the previous docstring documented keys 'response' and 'code'
    that were never part of the returned dict.)
    """
    # force-reregister -- remove machine-id files and registration files
    # before trying to register again
    new = False
    if config['reregister']:
        logger.debug('Re-register set, forcing registration.')
        new = True
        config['register'] = True
        write_to_disk(constants.registered_file, delete=True)
        write_to_disk(constants.unregistered_file, delete=True)
        write_to_disk(constants.machine_id_file, delete=True)
    logger.debug('Machine-id: %s', generate_machine_id(new))
    logger.debug('Trying registration.')
    registration = try_register()
    if registration is None:
        return None
    # Message from the registration check is not surfaced here; only the
    # boolean status is used.
    msg, is_registered = _is_client_registered()
    return {
        'success': is_registered,
        'machine-id': generate_machine_id(),
        'registration': registration
    }
def _is_client_registered():
    """Check registration status against the API and sync local markers.

    Returns:
        tuple: (message, bool). The bool is True only when the API
        confirms this machine is registered; the message guides the
        user when it is not.
    """
    # Container/image analysis runs bypass the registration check.
    if config['analyze_container']:
        return ('Client running in container/image mode. '
                'Bypassing registration check'), False

    msg_notyet = 'This machine has not yet been registered.'
    msg_unreg = 'This machine has been unregistered.'
    msg_doreg = 'Use --register to register this machine.'
    msg_rereg = 'Use --register if you would like to re-register this machine.'

    # Ask the API for the authoritative registration status.
    reg_check = registration_check(get_connection())

    if reg_check['status']:
        # API confirms registration -- make the local markers agree.
        if not os.path.isfile(constants.registered_file):
            write_to_disk(constants.registered_file)
        # delete any stray unregistered marker
        write_to_disk(constants.unregistered_file, delete=True)
        return '', True

    if reg_check['unreg_date']:
        # System was unregistered from the UI; record the date locally.
        write_unregistered_file(reg_check['unreg_date'])
        return '\n'.join([msg_unreg, msg_rereg]), False

    # No record of this system upstream -- clear any stale local markers.
    write_to_disk(constants.registered_file, delete=True)
    write_to_disk(constants.unregistered_file, delete=True)
    return '\n'.join([msg_notyet, msg_doreg]), False
def upload(tar_file, collection_duration=None):
    """Upload the collected archive, retrying up to config['retries'] times.

    :param tar_file: path of the archive to upload
    :param collection_duration: duration value forwarded to the API's
        upload_archive call (opaque here; semantics defined by the API)
    :returns: parsed JSON body of a successful upload response, or None
        if every attempt failed
    """
    logger.info('Uploading Insights data.')
    pconn = get_connection()
    api_response = None
    for tries in range(config['retries']):
        upload = pconn.upload_archive(
            tar_file, collection_duration,
            cluster=generate_machine_id(
                docker_group=config['container_mode']))
        if upload.status_code in (200, 201):
            api_response = json.loads(upload.text)
            machine_id = generate_machine_id()

            # Write to last upload file
            # NOTE(review): upload.text.encode('utf-8') yields bytes, but
            # the file is opened in text mode ('w') -- this combination
            # fails on Python 3. Presumably this runs under Python 2;
            # confirm before porting.
            with open(constants.last_upload_results_file, 'w') as handler:
                handler.write(upload.text.encode('utf-8'))
            write_to_disk(constants.lastupload_file)

            # Write to ansible facts directory
            if os.path.isdir(constants.insights_ansible_facts_dir):
                insights_facts = {}
                insights_facts['last_upload'] = api_response
                # Detect the management connection in use, preferring
                # Satellite 6, then Satellite 5, else RHSM.
                sat6 = _try_satellite6_configuration()
                sat5 = None
                if not sat6:
                    sat5 = _try_satellite5_configuration()
                if sat6:
                    connection = 'sat6'
                elif sat5:
                    connection = 'sat5'
                else:
                    connection = 'rhsm'
                insights_facts['conf'] = {
                    'machine-id': machine_id,
                    'connection': connection
                }
                with open(constants.insights_ansible_facts_file,
                          'w') as handler:
                    handler.write(json.dumps(insights_facts))
            account_number = config.get('account_number')
            if account_number:
                logger.info(
                    "Successfully uploaded report from %s to account %s."
                    % (machine_id, account_number))
            else:
                logger.info(
                    "Successfully uploaded report for %s." % (machine_id))
            break
        elif upload.status_code == 412:
            # 412: delegate to the connection's failure handler; note the
            # loop retries immediately (no sleep) after this branch.
            pconn.handle_fail_rcs(upload)
        else:
            logger.error("Upload attempt %d of %d failed! Status Code: %s",
                         tries + 1, config['retries'], upload.status_code)
            if tries + 1 != config['retries']:
                # Not the last attempt: wait before retrying.
                logger.info("Waiting %d seconds then retrying",
                            constants.sleep_time)
                time.sleep(constants.sleep_time)
            else:
                logger.error("All attempts to upload have failed!")
                logger.error("Please see %s for additional information",
                             config['logging_file'])
    return api_response
# -*- coding: utf-8 -*-
# Fix: the previous cookie `# -*- utf-8 -*-` lacks the `coding:` token,
# so Python never recognized it as an encoding declaration (PEP 263).
"""Fetch the BBC 'most read' page and persist the parsed headlines."""
import requests
from datetime import datetime

import utilities as util

most_read_url = 'https://www.bbc.co.uk/news/popular/read'
dst_dir = '/home/jerome/Documents/bbc_most_read_monitor/data/bbc_most_read'
# Seconds since the epoch. datetime.timestamp() is portable, unlike the
# platform-specific strftime('%s') used previously (fails on Windows).
time_of_request = int(datetime.now().timestamp())
timeout = 15  # seconds
# Throws an error if it times out (that's a good thing)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get(most_read_url, timeout=timeout, headers=headers)
if 200 <= r.status_code < 300:  # request successful
    list_of_headline_dcts = util.parse_headlines(r.text, time_of_request)
    util.write_to_disk(list_of_headline_dcts, dst_dir, time_of_request)
    util.write_to_sql(list_of_headline_dcts)
else:
    raise RuntimeError('Request failed: response status code {}.'.format(
        r.status_code))