Code example #1
def _do_upload(pconn, tar_file, logging_name, collection_duration, rc=0):
    # do the upload
    logger.info('Uploading Insights data for %s, this may take a few minutes', logging_name)
    for tries in range(InsightsClient.options.retries):
        upload = pconn.upload_archive(tar_file, collection_duration,
                                      cluster=generate_machine_id(
                                          docker_group=InsightsClient.options.container_mode))
        if upload.status_code == 201:
            write_lastupload_file()
            machine_id = generate_machine_id()
            try:
                logger.info("You successfully uploaded a report from %s to account %s." %
                            (machine_id, InsightsClient.account_number))
            except:
                # the account number may not be set; skip the message rather than fail
                pass
            logger.info("Upload completed successfully!")
            break
        elif upload.status_code == 412:
            pconn.handle_fail_rcs(upload)
        else:
            logger.error("Upload attempt %d of %d failed! Status Code: %s",
                         tries + 1, InsightsClient.options.retries, upload.status_code)
            if tries + 1 != InsightsClient.options.retries:
                logger.info("Waiting %d seconds then retrying",
                            constants.sleep_time)
                time.sleep(constants.sleep_time)
            else:
                logger.error("All attempts to upload have failed!")
                logger.error("Please see %s for additional information",
                             constants.default_log_file)
                rc = 1
    return rc
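Every example on this page calls generate_machine_id, whose implementation is not shown here. For orientation, the sketch below shows what such a helper plausibly does, inferred only from the call sites on this page: return a cached UUID, creating and persisting one if needed. The file path, the new flag (force a fresh id, as --reregister does) and the docker_group flag (a separate id for the Docker host group) are assumptions, not the actual implementation.

import os
import uuid

def generate_machine_id(new=False, docker_group=False):
    # Sketch only -- the path and flag semantics below are assumptions
    machine_id_file = '/etc/redhat-access-insights/machine-id'  # hypothetical location
    if docker_group:
        machine_id_file += '-docker'  # hypothetical: distinct id for the docker group
    if new and os.path.isfile(machine_id_file):
        os.remove(machine_id_file)  # force a fresh id, as --reregister would
    if not os.path.isfile(machine_id_file):
        with open(machine_id_file, 'w') as f:
            f.write(str(uuid.uuid4()))
    with open(machine_id_file, 'r') as f:
        return f.read().strip()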
Code example #2
File: client.py Project: sagaraivale/insights-core
def handle_registration():
    """
        returns (json): {'success': bool,
                        'machine-id': uuid from API,
                        'response': response from API,
                        'code': http code}
    """
    # force-reregister -- remove machine-id files and registration files
    # before trying to register again
    new = False
    if config['reregister']:
        logger.debug('Re-register set, forcing registration.')
        new = True
        config['register'] = True
        write_to_disk(constants.registered_file, delete=True)
        write_to_disk(constants.unregistered_file, delete=True)
        write_to_disk(constants.machine_id_file, delete=True)
    logger.debug('Machine-id: %s', generate_machine_id(new))

    logger.debug('Trying registration.')
    registration = try_register()
    if registration is None:
        return None
    msg, is_registered = _is_client_registered()

    return {
        'success': is_registered,
        'machine-id': generate_machine_id(),
        'registration': registration
    }
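A caller would typically branch on the returned dict. A hypothetical call site, using only the keys built in the return statement above (logger assumed in scope, as in the surrounding examples):

result = handle_registration()
if result is None:
    logger.error('Registration could not be attempted.')
elif result['success']:
    logger.info('Registered with machine-id %s', result['machine-id'])
else:
    logger.error('Registration failed: %s', result['registration'])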
Code example #3
File: __init__.py Project: alikins/insights-client
def _create_metadata_json(archives):
    metadata = {
        'display_name': archives[-1]['display_name'],
        'product': 'Docker',
        'system_id': generate_machine_id(docker_group=True),
        'systems': []
    }

    # host archive is appended to the end of the targets array,
    #   so it will always be the last one (index -1)
    docker_links = []
    c_i_links = container_image_links()
    for a in archives:
        system = {}
        if a['type'] == 'host':
            system['links'] = docker_links
        else:
            docker_links.append({
                'system_id': a['system_id'],
                'type': a['type']
            })
            system['links'] = [{
                'system_id': archives[-1]['system_id'],
                'type': 'host'
            }]
            if a['docker_id'] in c_i_links:
                system['links'].extend(c_i_links[a['docker_id']])
            system['docker_id'] = a['docker_id']
        system['display_name'] = a['display_name']
        system['product'] = a['product']
        system['system_id'] = a['system_id']
        system['type'] = a['type']
        metadata['systems'].append(system)
    return metadata
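For a hypothetical input of one container archive followed by the host archive (the host is always last, per the comment above), the returned structure looks roughly like this; all ids are made up, and any extra image links from container_image_links() are omitted:

{'display_name': 'host.example.com',
 'product': 'Docker',
 'system_id': '<docker-group machine id>',
 'systems': [
     {'display_name': 'my-container',
      'product': 'Docker',
      'system_id': 'container-id-1',
      'type': 'container',
      'docker_id': 'abc123',
      'links': [{'system_id': 'host-id-1', 'type': 'host'}]},
     {'display_name': 'host.example.com',
      'product': 'Docker',
      'system_id': 'host-id-1',
      'type': 'host',
      'links': [{'system_id': 'container-id-1', 'type': 'container'}]}]}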
Code example #4
File: connection.py Project: matysek/insights-client
    def api_registration_check(self):
        '''
        Check registration status through API
        '''
        machine_id = generate_machine_id()
        try:
            res = self.session.get(self.api_url + '/v1/systems/' + machine_id)
        except requests.ConnectionError:
            return False
        # check the 'unregistered_at' key of the response
        try:
            unreg_status = json.loads(res.content)['unregistered_at']
        except KeyError:
            # no record of this machine, machine was never registered
            # empty json object
            return None
        except ValueError:
            # bad response, no json object
            return False
        if unreg_status is None:
            # unregistered_at = null, means this machine IS registered
            return True
        else:
            # machine has been unregistered, this is a timestamp
            return unreg_status
Code example #5
    def do_group(self):
        """
        Do grouping on register
        """
        group_id = config['group']
        systems = {'machine_id': generate_machine_id()}
        self.group_systems(group_id, systems)
Code example #6
    def create_system(self, new_machine_id=False):
        """
        Create the machine via the API
        """
        client_hostname = determine_hostname()
        machine_id = generate_machine_id(new_machine_id)

        branch_info = self.branch_info()
        if not branch_info:
            return False

        remote_branch = branch_info['remote_branch']
        remote_leaf = branch_info['remote_leaf']

        data = {
            'machine_id': machine_id,
            'remote_branch': remote_branch,
            'remote_leaf': remote_leaf,
            'hostname': client_hostname
        }
        if config['display_name'] is not None:
            data['display_name'] = config['display_name']
        data = json.dumps(data)
        post_system_url = self.api_url + '/v1/systems'
        logger.debug("POST System: %s", post_system_url)
        logger.debug(data)
        net_logger.info("POST %s", post_system_url)
        return self.session.post(post_system_url,
                                 headers={'Content-Type': 'application/json'},
                                 data=data)
Code example #7
File: connection.py Project: alikins/insights-client
    def upload_archive(self, data_collected, duration, cluster=None):
        """
        Do an HTTPS Upload of the archive
        """
        file_name = os.path.basename(data_collected)
        try:
            import magic
            m = magic.open(magic.MAGIC_MIME)
            m.load()
            mime_type = m.file(data_collected)
        except ImportError:
            magic = None
            logger.debug(
                'python-magic not installed, using backup function...')
            from utilities import magic_plan_b
            mime_type = magic_plan_b(data_collected)

        files = {'file': (file_name, open(data_collected, 'rb'), mime_type)}

        if cluster:
            upload_url = self.upload_url + '/' + cluster
        else:
            upload_url = self.upload_url + '/' + generate_machine_id()

        logger.debug("Uploading %s to %s", data_collected, upload_url)

        headers = {'x-rh-collection-time': duration}
        upload = self.session.post(upload_url, files=files, headers=headers)

        logger.debug("Upload status: %s %s %s", upload.status_code,
                     upload.reason, upload.text)
        logger.debug("Upload duration: %s", upload.elapsed)
        return upload
Code example #8
File: connection.py Project: matysek/insights-client
    def upload_archive(self, data_collected, duration, cluster=None):
        """
        Do an HTTPS Upload of the archive
        """
        file_name = os.path.basename(data_collected)
        try:
            import magic
            m = magic.open(magic.MAGIC_MIME)
            m.load()
            mime_type = m.file(data_collected)
        except ImportError:
            magic = None
            logger.debug('python-magic not installed, using backup function...')
            from utilities import magic_plan_b
            mime_type = magic_plan_b(data_collected)

        files = {
            'file': (file_name, open(data_collected, 'rb'), mime_type)}

        if cluster:
            upload_url = self.upload_url + '/' + cluster
        else:
            upload_url = self.upload_url + '/' + generate_machine_id()

        logger.debug("Uploading %s to %s", data_collected, upload_url)

        headers = {'x-rh-collection-time': duration}
        upload = self.session.post(upload_url, files=files, headers=headers)

        logger.debug("Upload status: %s %s %s",
                     upload.status_code, upload.reason, upload.text)
        logger.debug("Upload duration: %s", upload.elapsed)
        return upload
Code example #9
File: __init__.py Project: alikins/insights-client
def _do_upload(pconn, tar_file, logging_name, collection_duration, rc=0):
    # do the upload
    logger.info('Uploading Insights data for %s, this may take a few minutes',
                logging_name)
    for tries in range(InsightsClient.options.retries):
        upload = pconn.upload_archive(
            tar_file,
            collection_duration,
            cluster=generate_machine_id(
                docker_group=InsightsClient.options.container_mode))
        if upload.status_code == 201:
            write_lastupload_file()
            logger.info("Upload completed successfully!")
            break
        elif upload.status_code == 412:
            pconn.handle_fail_rcs(upload)
        else:
            logger.error("Upload attempt %d of %d failed! Status Code: %s",
                         tries + 1, InsightsClient.options.retries,
                         upload.status_code)
            if tries + 1 != InsightsClient.options.retries:
                logger.info("Waiting %d seconds then retrying",
                            constants.sleep_time)
                time.sleep(constants.sleep_time)
            else:
                logger.error("All attempts to upload have failed!")
                logger.error("Please see %s for additional information",
                             constants.default_log_file)
                rc = 1
    return rc
Code example #10
    def api_registration_check(self):
        '''
        Check registration status through API
        '''
        logger.debug('Checking registration status...')
        machine_id = generate_machine_id()
        try:
            res = self.session.get(self.api_url + '/v1/systems/' + machine_id,
                                   timeout=10)
        except requests.ConnectionError:
            # can't connect, run connection test
            logger.error('Connection timed out. Running connection test...')
            self.test_connection()
            return False
        # had to do a quick bugfix changing this around,
        #   which makes the None-False-True dichotomy seem f*****g weird
        #   TODO: reconsider what gets returned, probably this:
        #       True for registered
        #       False for unregistered
        #       None for system 404
        try:
            # check the 'unregistered_at' key of the response
            unreg_status = json.loads(res.content).get('unregistered_at',
                                                       'undefined')
        except ValueError:
            # bad response, no json object
            return False
        if unreg_status == 'undefined':
            # key not found, machine not yet registered
            return None
        elif unreg_status is None:
            # unregistered_at = null, means this machine IS registered
            return True
        else:
            # machine has been unregistered, this is a timestamp
            return unreg_status
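Given the contract spelled out in the comments above (None for never registered, False for an error, True for registered, a timestamp string for unregistered), a caller must branch with identity checks rather than plain truthiness, because a timestamp string is also truthy. A hypothetical call site:

status = conn.api_registration_check()
if status is None:
    logger.info('This machine has never been registered.')
elif status is False:
    logger.error('Could not determine registration status.')
elif status is True:
    logger.info('This machine is registered.')
else:
    # any other value is an unregistration timestamp
    logger.info('This machine was unregistered at %s.', status)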
Code example #11
    def do_group(self, group_id):
        """
        Do grouping on register
        """
        api_group_id = None
        headers = {'Content-Type': 'application/json'}
        group_path = self.api_url + '/v1/groups'
        group_get_path = group_path + ('?display_name=%s' % group_id)

        logger.debug("GET group: %s", group_get_path)
        get_group = self.session.get(group_get_path)
        logger.debug("GET group status: %s", get_group.status_code)
        if get_group.status_code == 200:
            api_group_id = get_group.json()['id']

        if get_group.status_code == 404:
            # Group does not exist, POST to create
            logger.debug("POST group")
            data = json.dumps({'display_name': group_id})
            post_group = self.session.post(group_path,
                                           headers=headers,
                                           data=data)
            logger.debug("POST group status: %s", post_group.status_code)
            logger.debug("POST Group: %s", post_group.json())
            self.handle_fail_rcs(post_group)
            api_group_id = post_group.json()['id']

        logger.debug("PUT group")
        data = json.dumps({'machine_id': generate_machine_id()})
        put_group = self.session.put(group_path +
                                     ('/%s/systems' % api_group_id),
                                     headers=headers,
                                     data=data)
        logger.debug("PUT group status: %d", put_group.status_code)
        logger.debug("PUT Group: %s", put_group.json())
Code example #12
File: __init__.py Project: matysek/insights-client
def _create_metadata_json(archives):
    metadata = {'display_name': archives[-1]['display_name'],
                'product': 'Docker',
                'system_id': generate_machine_id(docker_group=True),
                'systems': []}

    # host archive is appended to the end of the targets array,
    #   so it will always be the last one (index -1)
    docker_links = []
    for a in archives:
        system = {}
        if a['type'] == 'host':
            system['links'] = docker_links
        else:
            docker_links.append({
                'system_id': a['system_id'],
                'type': a['type']
            })
            system['links'] = [{'system_id': archives[-1]['system_id'],
                                'type': 'host'}]
            system['docker_id'] = a['docker_id']
        system['display_name'] = a['display_name']
        system['product'] = a['product']
        system['system_id'] = a['system_id']
        system['type'] = a['type']
        metadata['systems'].append(system)
    return metadata
Code example #13
File: connection.py Project: JoySnow/insights-client
    def do_group(self):
        """
        Do grouping on register
        """
        group_id = InsightsClient.options.group
        systems = {'machine_id': generate_machine_id()}
        self.group_systems(group_id, systems)
Code example #14
    def check_registration(self):
        """
        Check if we were unregistered
        """
        registration_url = self.api_url + '/v1/systems/' + generate_machine_id()
        logger.debug("Checking registration status: %s", registration_url)
        system_info = self.session.get(registration_url)

        if system_info.status_code == 404:
            # This system hasn't been registered; this is its first upload
            return
        elif system_info.status_code == 200:
            system_info = system_info.json()
            logger.debug("System info: %s", json.dumps(system_info))
        else:
            self.handle_fail_rcs(system_info)
            logger.error("Could not check blacklist")
            sys.exit(1)

        try:
            if system_info['unregistered_at']:
                write_unregistered_file(system_info['unregistered_at'])
            else:
                logger.debug("This machine is registered")
        except LookupError:
            logger.debug("This machine is registered")
Code example #15
    def create_system(self, new_machine_id=False):
        """
        Create the machine via the API
        """
        client_hostname = determine_hostname()
        machine_id = generate_machine_id(new_machine_id)

        try:
            branch_info = self.branch_info()
            remote_branch = branch_info['remote_branch']
            remote_leaf = branch_info['remote_leaf']

        except LookupError:
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error("See %s for more information",
                         constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error("See %s for more information",
                         constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        data = {
            'machine_id': machine_id,
            'remote_branch': remote_branch,
            'remote_leaf': remote_leaf,
            'hostname': client_hostname
        }
        if InsightsClient.config.get(APP_NAME, 'display_name') is not None:
            data['display_name'] = InsightsClient.config.get(
                APP_NAME, 'display_name')
        if InsightsClient.options.display_name is not None:
            data['display_name'] = InsightsClient.options.display_name
        data = json.dumps(data)
        headers = {'Content-Type': 'application/json'}
        post_system_url = self.api_url + '/v1/systems'
        logger.debug("POST System: %s", post_system_url)
        logger.debug(data)
        system = None
        try:
            system = self.session.post(post_system_url,
                                       headers=headers,
                                       data=data)
            logger.debug("POST System status: %d", system.status_code)
        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)
        return system
Code example #16
    def create_system(self, new_machine_id=False):
        """
        Create the machine via the API
        """
        client_hostname = determine_hostname()
        machine_id = generate_machine_id(new_machine_id)

        try:
            branch_info = self.branch_info()
            remote_branch = branch_info['remote_branch']
            remote_leaf = branch_info['remote_leaf']

        except LookupError:
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error(
                "See %s for more information", constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error(
                "See %s for more information", constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        data = {'machine_id': machine_id,
                'remote_branch': remote_branch,
                'remote_leaf': remote_leaf,
                'hostname': client_hostname}
        if InsightsClient.config.get(APP_NAME, 'display_name') is not None:
            data['display_name'] = InsightsClient.config.get(APP_NAME, 'display_name')
        if InsightsClient.options.display_name is not None:
            data['display_name'] = InsightsClient.options.display_name
        data = json.dumps(data)
        headers = {'Content-Type': 'application/json'}
        post_system_url = self.api_url + '/v1/systems'
        logger.debug("POST System: %s", post_system_url)
        logger.debug(data)
        system = None
        try:
            system = self.session.post(post_system_url,
                                       headers=headers,
                                       data=data)
            logger.debug("POST System status: %d", system.status_code)
        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)
        return system
Code example #17
    def unregister(self):
        """
        Unregister this system from the insights service
        """
        machine_id = generate_machine_id()
        try:
            logger.debug("Unregistering %s", machine_id)
            self.session.delete(self.api_url + "/v1/systems/" + machine_id)
            logger.info("Successfully unregistered from the Red Hat Access Insights Service")
            write_unregistered_file()
        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error("Could not unregister this system")
Code example #18
    def upload_archive(self, data_collected, duration, cluster=None):
        """
        Do an HTTPS Upload of the archive
        """
        file_name = os.path.basename(data_collected)
        try:
            from insights.contrib import magic
            m = magic.open(magic.MAGIC_MIME)
            m.load()
            mime_type = m.file(data_collected)
        except ImportError:
            magic = None
            logger.debug(
                'python-magic not installed, using backup function...')
            from utilities import magic_plan_b
            mime_type = magic_plan_b(data_collected)

        files = {'file': (file_name, open(data_collected, 'rb'), mime_type)}

        if config["analyze_container"]:
            logger.debug('Uploading container, image, mountpoint or tarfile.')
            upload_url = self.upload_url
        elif cluster:
            logger.debug('Uploading cluster/host.')
            upload_url = self.upload_url + '/' + cluster
        else:
            logger.debug('Uploading a host.')
            upload_url = self.upload_url + '/' + generate_machine_id()

        logger.debug("Uploading %s to %s", data_collected, upload_url)

        headers = {'x-rh-collection-time': duration}
        net_logger.info("POST %s", upload_url)
        upload = self.session.post(upload_url, files=files, headers=headers)

        logger.debug("Upload status: %s %s %s", upload.status_code,
                     upload.reason, upload.text)
        if upload.status_code in (200, 201):
            the_json = json.loads(upload.text)
        else:
            logger.error("Upload archive failed with status code  %s",
                         upload.status_code)
            return upload
        try:
            config['account_number'] = the_json["upload"]["account_number"]
        except:
            config['account_number'] = None
        logger.debug("Upload duration: %s", upload.elapsed)
        return upload
Code example #19
    def upload_archive(self, data_collected):
        """
        Do an HTTPS Upload of the archive
        """
        file_name = os.path.basename(data_collected)
        files = {'file': (file_name, open(data_collected, 'rb'))}

        upload_url = self.upload_url + '/' + generate_machine_id()
        logger.debug("Uploading %s to %s", data_collected, upload_url)
        upload = self.session.post(upload_url, files=files)

        logger.debug("Upload status: %s %s %s", upload.status_code,
                     upload.reason, upload.text)
        logger.debug("Upload duration: %s", upload.elapsed)
        return upload.status_code
Code example #20
File: connection.py Project: JoySnow/insights-client
    def unregister(self):
        """
        Unregister this system from the insights service
        """
        machine_id = generate_machine_id()
        try:
            logger.debug("Unregistering %s", machine_id)
            self.session.delete(self.api_url + "/v1/systems/" + machine_id)
            logger.info(
                "Successfully unregistered from the Red Hat Insights Service")
            write_unregistered_file()
            InsightsSchedule().remove_scheduling()
        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error("Could not unregister this system")
Code example #21
    def create_system(self, new_machine_id=False):
        """
        Create the machine via the API
        """
        client_hostname = determine_hostname()
        machine_id = generate_machine_id(new_machine_id)
        data = {'machine_id': machine_id, 'hostname': client_hostname}
        data = json.dumps(data)
        headers = {'Content-Type': 'application/json'}
        post_system_url = self.api_url + '/v1/systems'
        logger.debug("POST System: %s", post_system_url)
        logger.debug(data)
        system = self.session.post(post_system_url,
                                   headers=headers,
                                   data=data)
        logger.debug("POST System status: %d", system.status_code)
        return system
Code example #22
def _create_metadata_json(archives):
    metadata = {
        'display_name': archives[-1]['display_name'],
        'product': 'Docker',
        'system_id': generate_machine_id(docker_group=True),
        'systems': []
    }

    # host archive is appended to the end of the targets array,
    #   so it will always be the last one (index -1)
    docker_links = []
    c_i_links = container_image_links()
    for a in archives:
        system = {}
        if a['type'] == 'host':
            system['links'] = docker_links
        else:
            docker_links.append({
                'system_id': a['system_id'],
                'type': a['type']
            })
            system['links'] = [{
                'system_id': archives[-1]['system_id'],
                'type': 'host'
            }]
            if a['docker_id'] in c_i_links:
                system['links'].extend(c_i_links[a['docker_id']])
            system['docker_id'] = a['docker_id']
        system['display_name'] = a['display_name']
        system['product'] = a['product']
        system['system_id'] = a['system_id']
        system['type'] = a['type']
        metadata['systems'].append(system)

    # merge additional metadata that can be passed in from the config file, --from-file
    if InsightsClient.options.from_file:
        stdin_config = {}
        with open(InsightsClient.options.from_file, 'r') as f:
            stdin_config = json.load(f)
        if 'metadata' in stdin_config:
            new_metadata = metadata.copy()
            new_metadata.update(stdin_config['metadata'])
            metadata = new_metadata

    return metadata
Code example #23
    def upload_archive(self, data_collected, duration):
        """
        Do an HTTPS Upload of the archive
        """
        file_name = os.path.basename(data_collected)
        import magic
        m = magic.open(magic.MAGIC_MIME_TYPE)
        m.load()
        files = {'file': (file_name, open(data_collected, 'rb'), m.file(data_collected))}

        upload_url = self.upload_url + '/' + generate_machine_id()
        logger.debug("Uploading %s to %s", data_collected, upload_url)

        headers = {'x-rh-collection-time': duration}
        upload = self.session.post(upload_url, files=files, headers=headers)

        logger.debug("Upload status: %s %s %s",
                     upload.status_code, upload.reason, upload.text)
        logger.debug("Upload duration: %s", upload.elapsed)
        return upload
Code example #24
def _create_metadata_json(archives):
    metadata = {'display_name': archives[-1]['display_name'],
                'product': 'Docker',
                'system_id': generate_machine_id(docker_group=True),
                'systems': []}

    # host archive is appended to the end of the targets array,
    #   so it will always be the last one (index -1)
    docker_links = []
    c_i_links = container_image_links()
    for a in archives:
        system = {}
        if a['type'] == 'host':
            system['links'] = docker_links
        else:
            docker_links.append({
                'system_id': a['system_id'],
                'type': a['type']
            })
            system['links'] = [{'system_id': archives[-1]['system_id'],
                                'type': 'host'}]
            if a['docker_id'] in c_i_links:
                system['links'].extend(c_i_links[a['docker_id']])
            system['docker_id'] = a['docker_id']
        system['display_name'] = a['display_name']
        system['product'] = a['product']
        system['system_id'] = a['system_id']
        system['type'] = a['type']
        metadata['systems'].append(system)

    # merge additional metadata that can be passed in from the config file, --from-file
    if InsightsClient.options.from_file:
        stdin_config = {}
        with open(InsightsClient.options.from_file, 'r') as f:
            stdin_config = json.load(f)
        if 'metadata' in stdin_config:
            new_metadata = metadata.copy()
            new_metadata.update(stdin_config['metadata'])
            metadata = new_metadata

    return metadata
Code example #25
    def api_registration_check(self):
        '''
        Check registration status through API
        '''
        logger.debug('Checking registration status...')
        machine_id = generate_machine_id()
        try:
            url = self.api_url + '/v1/systems/' + machine_id
            net_logger.info("GET %s", url)
            res = self.session.get(url, timeout=10)
        except requests.ConnectionError:
            # can't connect, run connection test
            logger.error('Connection timed out. Running connection test...')
            self.test_connection()
            return False
        # had to do a quick bugfix changing this around,
        #   which makes the None-False-True dichotomy seem f*****g weird
        #   TODO: reconsider what gets returned, probably this:
        #       True for registered
        #       False for unregistered
        #       None for system 404
        try:
            # check the 'unregistered_at' key of the response
            unreg_status = json.loads(res.content).get('unregistered_at',
                                                       'undefined')
            # set the global account number
            config['account_number'] = json.loads(res.content).get(
                'account_number', 'undefined')
        except ValueError:
            # bad response, no json object
            return False
        if unreg_status == 'undefined':
            # key not found, machine not yet registered
            return None
        elif unreg_status is None:
            # unregistered_at = null, means this machine IS registered
            return True
        else:
            # machine has been unregistered, this is a timestamp
            return unreg_status
Code example #26
File: __init__.py Project: iphands/insights-client
def handle_startup(options, config):
    """
    Handle startup options
    """

    if options.version:
        print constants.version
        sys.exit()

    if options.validate:
        validate_remove_file()
        sys.exit()

    # Generate /etc/machine-id if it does not exist
    new = False
    if options.reregister:
        new = True
        options.register = True
    logger.debug("Machine-ID: " + generate_machine_id(new))

    # Disable GPG verification
    if options.no_gpg:
        logger.warn("WARNING: GPG VERIFICATION DISABLED")
        config.set(APP_NAME, 'gpg', 'False')

    # Log config except the password
    # and proxy as it might have a pw as well
    for item, value in config.items(APP_NAME):
        if item != 'password' and item != 'proxy':
            logger.debug("%s:%s", item, value)

    if config.getboolean(APP_NAME, 'auto_update'):
        options.update = True

    if config.getboolean(APP_NAME, 'auto_config'):
        # Try to discover if we are connected to a satellite or not
        try_auto_configuration(config)

    # Set the schedule
    if not options.no_schedule and not config.getboolean(
            APP_NAME, 'no_schedule'):
        InsightsSchedule()

    # Test connection, useful for proxy debug
    if options.test_connection:
        pconn = InsightsConnection(config)
        pconn.test_connection()

    if options.unregister:
        pconn = InsightsConnection(config)
        pconn.unregister()
        sys.exit()

    # Handle registration, grouping, and display name
    if options.register:
        opt_group = options.group
        message, hostname, opt_group, display_name = register(config, options)
        if options.display_name is None and options.group is None:
            logger.info('Successfully registered %s', hostname)
        elif options.display_name is None:
            logger.info('Successfully registered %s in group %s', hostname, opt_group)
        else:
            logger.info('Successfully registered %s as %s in group %s', hostname, display_name,
                        opt_group)

        logger.info(message)

    # Collect debug/log information
    if options.support:
        support = InsightsSupport(config)
        support.collect_support_info()
        sys.exit(0)

    # Just check registration status
    if options.status:
        support = InsightsSupport(config)
        reg_check = support.registration_check()
        logger.info('\n'.join(reg_check))
        sys.exit(0)

    # Set offline mode for OSP use
    offline_mode = False
    if (options.offline and options.from_stdin) or options.no_upload:
        offline_mode = True

    # First startup, no .registered or .unregistered
    # Ignore if in offline mode
    if (not os.path.isfile(constants.registered_file) and
       not os.path.isfile(constants.unregistered_file) and
       not options.register and not offline_mode):
        logger.error('This machine has not yet been registered.')
        logger.error('Use --register to register this machine.')
        logger.error("Exiting")
        sys.exit(1)

    # Check for .unregistered file
    if (os.path.isfile(constants.unregistered_file) and
       not options.register and not offline_mode):
        logger.error("This machine has been unregistered.")
        logger.error("Use --register if you would like to re-register this machine.")
        logger.error("Exiting")
        sys.exit(1)
Code example #27
File: client.py Project: sagaraivale/insights-core
def upload(tar_file, collection_duration=None):
    logger.info('Uploading Insights data.')
    pconn = get_connection()
    api_response = None
    for tries in range(config['retries']):
        upload = pconn.upload_archive(
            tar_file,
            collection_duration,
            cluster=generate_machine_id(docker_group=config['container_mode']))

        if upload.status_code in (200, 201):
            api_response = json.loads(upload.text)
            machine_id = generate_machine_id()

            # Write to last upload file
            with open(constants.last_upload_results_file, 'w') as handler:
                handler.write(upload.text.encode('utf-8'))
            write_to_disk(constants.lastupload_file)

            # Write to ansible facts directory
            if os.path.isdir(constants.insights_ansible_facts_dir):
                insights_facts = {}
                insights_facts['last_upload'] = api_response

                sat6 = _try_satellite6_configuration()
                sat5 = None
                if not sat6:
                    sat5 = _try_satellite5_configuration()

                if sat6:
                    connection = 'sat6'
                elif sat5:
                    connection = 'sat5'
                else:
                    connection = 'rhsm'

                insights_facts['conf'] = {
                    'machine-id': machine_id,
                    'connection': connection
                }
                with open(constants.insights_ansible_facts_file,
                          'w') as handler:
                    handler.write(json.dumps(insights_facts))

            account_number = config.get('account_number')
            if account_number:
                logger.info(
                    "Successfully uploaded report from %s to account %s." %
                    (machine_id, account_number))
            else:
                logger.info("Successfully uploaded report for %s." %
                            (machine_id))
            break

        elif upload.status_code == 412:
            pconn.handle_fail_rcs(upload)
        else:
            logger.error("Upload attempt %d of %d failed! Status Code: %s",
                         tries + 1, config['retries'], upload.status_code)
            if tries + 1 != config['retries']:
                logger.info("Waiting %d seconds then retrying",
                            constants.sleep_time)
                time.sleep(constants.sleep_time)
            else:
                logger.error("All attempts to upload have failed!")
                logger.error("Please see %s for additional information",
                             config['logging_file'])
    return api_response
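When the ansible facts directory exists, the facts file written above ends up with roughly this shape (values hypothetical; 'connection' is one of 'rhsm', 'sat5' or 'sat6', and 'last_upload' is whatever JSON the upload endpoint returned):

{
    'last_upload': {},  # the parsed upload response goes here
    'conf': {
        'machine-id': 'dcc6a7f6-0000-0000-0000-000000000000',  # hypothetical
        'connection': 'rhsm'
    }
}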
Code example #28
def _main():
    """
    Main entry point
    Parse cmdline options
    Parse config file
    Call data collector
    """
    global logger
    sys.excepthook = handle_exception

    parser = optparse.OptionParser()
    set_up_options(parser)
    options, args = parser.parse_args()
    if len(args) > 0:
        parser.error("Unknown arguments: %s" % args)
        sys.exit(1)

    if options.version:
        print constants.version
        sys.exit()

    if options.validate:
        validate_remove_file()
        sys.exit()

    config = parse_config_file()
    logger, handler = set_up_logging(config, options)

    # Defer logging till it's ready
    logger.debug('invoked with args: %s', options)
    logger.debug("Version: " + constants.version)
    # Generate /etc/machine-id if it does not exist
    new = False
    if options.reregister:
        new = True
        options.register = True
    logger.debug("Machine-ID: " + generate_machine_id(new))

    # Disable GPG verification
    if options.no_gpg:
        logger.warn("WARNING: GPG VERIFICATION DISABLED")
        config.set(APP_NAME, 'gpg', 'False')

    # Log config except the password
    # and proxy as it might have a pw as well
    for item, value in config.items(APP_NAME):
        if item != 'password' and item != 'proxy':
            logger.debug("%s:%s", item, value)

    if config.getboolean(APP_NAME, 'auto_update'):
        options.update = True

    if config.getboolean(APP_NAME, 'auto_config'):
        # Try to discover if we are connected to a satellite or not
        try_auto_configuration(config)

    # Set the schedule
    InsightsSchedule(options)

    # Test connection, useful for proxy debug
    if options.test_connection:
        pconn = InsightsConnection(config)
        pconn.test_connection()

    if options.unregister:
        pconn = InsightsConnection(config)
        pconn.unregister()
        sys.exit()

    # Handle registration, grouping, and display name
    if options.register:
        opt_group = options.group
        hostname, opt_group, display_name = register(config, options)
        if options.display_name is None and options.group is None:
            logger.info('Successfully registered %s', hostname)
        elif options.display_name is None:
            logger.info('Successfully registered %s in group %s', hostname, opt_group)
        else:
            logger.info('Successfully registered %s as %s in group %s', hostname, display_name, opt_group)

    # Check for .unregistered file
    if os.path.isfile(constants.unregistered_file):
        logger.error("This machine has been unregistered")
        logger.error("Use --register if you would like to re-register this machine")
        logger.error("Exiting")
        sys.exit(1)

    # do work
    collect_data_and_upload(config, options)
    handler.doRollover()
Code example #29
def _main():
    """
    Main entry point
    Parse cmdline options
    Parse config file
    Call data collector
    """
    global logger
    sys.excepthook = handle_exception

    parser = optparse.OptionParser()
    set_up_options(parser)
    options, args = parser.parse_args()
    if len(args) > 0:
        parser.error("Unknown arguments: %s" % args)
        sys.exit(1)

    if options.satellite_group and not options.register:
        parser.error("--satellite-group must be used with --register")

    if options.version:
        print constants.version
        sys.exit()

    if options.daily and options.weekly:
        parser.error("options --daily and --weekly are mutually exclusive")

    config = parse_config_file()
    logger, handler = set_up_logging(config, options.verbose)

    # Defer logging till it's ready
    logger.debug('invoked with args: %s', options)
    logger.debug("Version: " + constants.version)
    # Generate /etc/machine-id if it does not exist
    new = False
    if options.regenerate:
        new = True
    logger.debug("Machine-ID: " + generate_machine_id(new))

    # Disable GPG verification
    if options.no_gpg:
        logger.warn("GPG VERIFICATION DISABLED")
        config.set(APP_NAME, 'gpg', 'False')

    # Log config except the password
    # and proxy as it might have a pw as well
    for item, value in config.items(APP_NAME):
        if item != 'password' and item != 'proxy':
            logger.debug("%s:%s",  item, value)

    if config.getboolean(APP_NAME, 'auto_update'):
        options.update = True

    if config.getboolean(APP_NAME, 'auto_config'):
        # Try to discover if we are connected to a satellite or not
        try_auto_configuration(config)

    # Set the schedule
    InsightsSchedule(options)

    # Test connection, useful for proxy debug
    if options.test_connection:
        pconn = InsightsConnection(config)
        pconn._test_connection()

    # Handle registration and grouping, this is mostly a no-op
    if options.register:
        opt_group = options.group
        if options.satellite_group:
            opt_group = get_satellite_group()
        hostname, opt_group = register(config, opt_group)
        logger.info('Successfully registered %s in group %s', hostname, opt_group)

    # Check for .unregistered file
    if os.path.isfile(constants.unregistered_file):
        logger.error("This machine has been unregistered")
        logger.error("Use --register if you would like to re-register this machine")
        logger.error("Exiting")
        sys.exit(1)

    # If we are not just setting the schedule, do work son
    if not options.schedule:
        collect_data_and_upload(config, options)
        handler.doRollover()
Code example #30
File: client.py Project: sagaraivale/insights-core
def get_machine_id():
    return generate_machine_id()
Code example #31
File: __init__.py Project: matysek/insights-client
def handle_startup():
    """
    Handle startup options
    """
    # ----do X and exit options----
    # show version and exit
    if InsightsClient.options.version:
        print constants.version
        sys.exit()

    if (InsightsClient.options.container_mode and
       not InsightsClient.options.run_here and
       insights_client_container_is_available()):
        sys.exit(run_in_container())

    if InsightsClient.options.validate:
        validate_remove_file()
        sys.exit()

    if InsightsClient.options.enable_schedule and InsightsClient.options.disable_schedule:
        logger.error('Conflicting options: --enable-schedule and --disable-schedule')
        sys.exit(1)

    if InsightsClient.options.enable_schedule:
        # enable automatic scheduling
        InsightsSchedule(container_mode=InsightsClient.options.container_mode)
        InsightsClient.config.set(APP_NAME, 'no_schedule', False)
        logger.info('Automatic scheduling for Insights has been enabled.')
        sys.exit()

    if InsightsClient.options.disable_schedule:
        # disable automatic scheduling
        InsightsSchedule(set_cron=False).remove_scheduling()
        InsightsClient.config.set(APP_NAME, 'no_schedule', True)
        logger.info('Automatic scheduling for Insights has been disabled.')
        sys.exit()

    # do auto_config here, for connection-related 'do X and exit' options
    if InsightsClient.config.getboolean(APP_NAME, 'auto_config') and not InsightsClient.options.offline:
        # Try to discover if we are connected to a satellite or not
        try_auto_configuration()

    if InsightsClient.options.test_connection:
        pconn = InsightsConnection()
        rc = pconn.test_connection()
        sys.exit(rc)

    if InsightsClient.options.status:
        reg_check = registration_check()
        logger.info('\n'.join(reg_check['messages']))
        # exit with !status, 0 for True, 1 for False
        sys.exit(not reg_check['status'])

    if InsightsClient.options.support:
        support = InsightsSupport()
        support.collect_support_info()
        sys.exit()

    # ----config options----
    # log the config
    # ignore password and proxy -- proxy might have pw
    for item, value in InsightsClient.config.items(APP_NAME):
        if item != 'password' and item != 'proxy':
            logger.debug("%s:%s", item, value)

    if InsightsClient.config.getboolean(APP_NAME, 'auto_update') and not InsightsClient.options.offline:
        # TODO: config updates option, but in GPG option, the option updates
        # the config.  make this consistent
        InsightsClient.options.update = True

    # disable automatic scheduling if it was set in the config, and if the job exists
    if InsightsClient.config.getboolean(APP_NAME, 'no_schedule'):
        cron = InsightsSchedule(set_cron=False)
        if cron.already_linked():
            cron.remove_scheduling()
            logger.debug('Automatic scheduling for Insights has been disabled.')

    # ----modifier options----
    if InsightsClient.options.no_gpg:
        logger.warn("WARNING: GPG VERIFICATION DISABLED")
        InsightsClient.config.set(APP_NAME, 'gpg', 'False')

    if InsightsClient.options.just_upload:
        # override these for great justice
        InsightsClient.options.no_tar_file = False
        InsightsClient.options.keep_archive = True

    if InsightsClient.options.container_mode and InsightsClient.options.no_tar_file:
        logger.error('Invalid combination: --container and --no-tar-file')
        sys.exit(1)

    # can't use bofa
    if InsightsClient.options.from_stdin and InsightsClient.options.from_file:
        logger.error('Can\'t use both --from-stdin and --from-file.')
        sys.exit(1)

    # ----register options----
    # put this first to avoid conflicts with register
    if InsightsClient.options.unregister:
        pconn = InsightsConnection()
        pconn.unregister()
        sys.exit()

    # force-reregister -- remove machine-id files and registration files before trying to register again
    new = False
    if InsightsClient.options.reregister:
        new = True
        InsightsClient.options.register = True
        delete_registered_file()
        delete_unregistered_file()
        delete_machine_id()
    logger.debug('Machine-id: %s', generate_machine_id(new))

    if InsightsClient.options.register:
        try_register()
        if not InsightsClient.config.getboolean(APP_NAME, 'no_schedule'):
            InsightsSchedule(container_mode=InsightsClient.options.container_mode)

    # check registration before doing any uploads
    # Ignore if in offline mode
    if not InsightsClient.options.register and not InsightsClient.options.offline:
        msg, is_registered = _is_client_registered()
        if not is_registered:
            logger.error(msg)
            sys.exit(1)
Code example #32
File: __init__.py Project: jhjaggars/insights-client
def handle_startup(options, config):
    """
    Handle startup options
    """

    if options.version:
        print constants.version
        sys.exit()

    if options.validate:
        validate_remove_file()
        sys.exit()

    # Generate /etc/machine-id if it does not exist
    new = False
    if options.reregister:
        new = True
        options.register = True
    logger.debug("Machine-ID: " + generate_machine_id(new))

    # Disable GPG verification
    if options.no_gpg:
        logger.warn("WARNING: GPG VERIFICATION DISABLED")
        config.set(APP_NAME, 'gpg', 'False')

    # Log config except the password
    # and proxy as it might have a pw as well
    for item, value in config.items(APP_NAME):
        if item != 'password' and item != 'proxy':
            logger.debug("%s:%s", item, value)

    if config.getboolean(APP_NAME, 'auto_update'):
        options.update = True

    if config.getboolean(APP_NAME, 'auto_config'):
        # Try to discover if we are connected to a satellite or not
        try_auto_configuration(config)

    # Set the schedule
    InsightsSchedule()

    # Test connection, useful for proxy debug
    if options.test_connection:
        pconn = InsightsConnection(config)
        pconn.test_connection()

    if options.unregister:
        pconn = InsightsConnection(config)
        pconn.unregister()
        sys.exit()

    # Handle registration, grouping, and display name
    if options.register:
        opt_group = options.group
        message, hostname, opt_group, display_name = register(config, options)
        if options.display_name is None and options.group is None:
            logger.info('Successfully registered %s', hostname)
        elif options.display_name is None:
            logger.info('Successfully registered %s in group %s', hostname, opt_group)
        else:
            logger.info('Successfully registered %s as %s in group %s', hostname, display_name, opt_group)

        logger.info(message)

    # Check for .unregistered file
    if os.path.isfile(constants.unregistered_file):
        logger.error("This machine has been unregistered")
        logger.error("Use --register if you would like to re-register this machine")
        logger.error("Exiting")
        sys.exit(1)
Code example #33
def handle_startup():
    """
    Handle startup options
    """
    # ----do X and exit options----
    # show version and exit
    if InsightsClient.options.version:
        print constants.version
        sys.exit()

    if (InsightsClient.options.container_mode
            and not InsightsClient.options.run_here
            and insights_client_container_is_available()):
        sys.exit(run_in_container())

    if (InsightsClient.options.container_mode
            and not InsightsClient.options.only):
        logger.error(
            "Client running in container mode but no image/container specified via --only."
        )
        sys.exit(1)

    if InsightsClient.options.only is not None and len(
            InsightsClient.options.only) < 12:
        logger.error(
            "Image/Container ID must be at least twelve characters long.")
        sys.exit(1)

    if InsightsClient.options.validate:
        validate_remove_file()
        sys.exit()

    if InsightsClient.options.enable_schedule and InsightsClient.options.disable_schedule:
        logger.error(
            'Conflicting options: --enable-schedule and --disable-schedule')
        sys.exit(1)

    if InsightsClient.options.enable_schedule:
        # enable automatic scheduling
        InsightsSchedule()
        InsightsClient.config.set(APP_NAME, 'no_schedule', False)
        logger.info('Automatic scheduling for Insights has been enabled.')
        logger.debug('Updating config...')
        modify_config_file({'no_schedule': 'False'})
        sys.exit()

    if InsightsClient.options.disable_schedule:
        # disable automatic scheduling
        InsightsSchedule(set_cron=False).remove_scheduling()
        InsightsClient.config.set(APP_NAME, 'no_schedule', True)
        logger.info('Automatic scheduling for Insights has been disabled.')
        logger.debug('Updating config...')
        modify_config_file({'no_schedule': 'True'})
        sys.exit()

    # do auto_config here, for connection-related 'do X and exit' options
    if InsightsClient.config.getboolean(
            APP_NAME, 'auto_config') and not InsightsClient.options.offline:
        # Try to discover if we are connected to a satellite or not
        try_auto_configuration()

    if InsightsClient.options.test_connection:
        pconn = InsightsConnection()
        rc = pconn.test_connection()
        sys.exit(rc)

    if InsightsClient.options.status:
        reg_check = registration_check()
        logger.info('\n'.join(reg_check['messages']))
        # exit with !status, 0 for True, 1 for False
        sys.exit(not reg_check['status'])

    if InsightsClient.options.support:
        support = InsightsSupport()
        support.collect_support_info()
        sys.exit()

    # ----config options----
    # log the config
    # ignore password and proxy -- proxy might have pw
    for item, value in InsightsClient.config.items(APP_NAME):
        if item != 'password' and item != 'proxy':
            logger.debug("%s:%s", item, value)

    if InsightsClient.config.getboolean(
            APP_NAME, 'auto_update') and not InsightsClient.options.offline:
        # TODO: config updates option, but in GPG option, the option updates
        # the config.  make this consistent
        InsightsClient.options.update = True

    # disable automatic scheduling if it was set in the config, and if the job exists
    if InsightsClient.config.getboolean(APP_NAME, 'no_schedule'):
        cron = InsightsSchedule(set_cron=False)
        if cron.already_linked():
            cron.remove_scheduling()
            logger.debug(
                'Automatic scheduling for Insights has been disabled.')

    # ----modifier options----
    if InsightsClient.options.no_gpg:
        logger.warn("WARNING: GPG VERIFICATION DISABLED")
        InsightsClient.config.set(APP_NAME, 'gpg', 'False')

    if InsightsClient.options.just_upload:
        if InsightsClient.options.offline or InsightsClient.options.no_upload:
            logger.error(
                'Cannot use --just-upload in combination with --offline or --no-upload.'
            )
            sys.exit(1)
        # override these for great justice
        InsightsClient.options.no_tar_file = False
        InsightsClient.options.keep_archive = True

    # if InsightsClient.options.container_mode and InsightsClient.options.no_tar_file:
    #    logger.error('Invalid combination: --container and --no-tar-file')
    #    sys.exit(1)

    # can't use bofa
    if InsightsClient.options.from_stdin and InsightsClient.options.from_file:
        logger.error('Can\'t use both --from-stdin and --from-file.')
        sys.exit(1)

    # handle some docker/atomic flags
    if InsightsClient.options.use_docker and InsightsClient.options.use_atomic:
        logger.error('Can\'t use both --use-docker and --use-atomic.')
        sys.exit(1)

    if InsightsClient.options.to_stdout:
        InsightsClient.options.no_upload = True

    # ----register options----
    # put this first to avoid conflicts with register
    if InsightsClient.options.unregister:
        pconn = InsightsConnection()
        pconn.unregister()
        sys.exit()

    # force-reregister -- remove machine-id files and registration files
    # before trying to register again
    new = False
    if InsightsClient.options.reregister:
        new = True
        InsightsClient.options.register = True
        delete_registered_file()
        delete_unregistered_file()
        delete_machine_id()
    logger.debug('Machine-id: %s', generate_machine_id(new))

    if InsightsClient.options.register:
        try_register()
        if not InsightsClient.config.getboolean(APP_NAME, 'no_schedule'):
            InsightsSchedule()

    # check registration before doing any uploads
    # Ignore if in offline mode
    if not InsightsClient.options.register and not InsightsClient.options.offline:
        msg, is_registered = _is_client_registered()
        if not is_registered:
            logger.error(msg)
            sys.exit(1)