def register(self, group_id=None):
        """
        Register this machine
        """

        delete_unregistered_file()

        client_hostname = determine_hostname()
        # This will undo a blacklist
        logger.debug("API: Create system")
        system = self.create_system(new_machine_id=False)

        # If we get a 409, we know we need to generate a new machine-id
        if system.status_code == 409:
            system = self.create_system(new_machine_id=True)
        self.handle_fail_rcs(system)

        logger.debug("System: %s", system.json())

        # Do grouping
        if group_id is not None:
            self.do_group(group_id)

        if group_id is not None:
            return (client_hostname, group_id)
        else:
            return (client_hostname, "None")
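
A minimal usage sketch for the variant above; the connection object and its construction are assumptions for illustration, not part of the snippet.

# Hypothetical usage sketch: assumes `conn` is an instance of the
# connection class that defines the register() shown above.
def run_registration(conn, group_id=None):
    # register() returns (hostname, group); group is the literal
    # string "None" when no group was requested
    hostname, group = conn.register(group_id=group_id)
    if group != "None":
        print("Registered %s into group %s" % (hostname, group))
    else:
        print("Registered %s without a group" % hostname)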
Example no. 2
    def register(self):
        """
        Register this machine
        """

        delete_unregistered_file()

        client_hostname = determine_hostname()
        # This will undo a blacklist
        logger.debug("API: Create system")
        system = self.create_system(new_machine_id=False)

        # If we get a 409, we know we need to generate a new machine-id
        if system.status_code == 409:
            system = self.create_system(new_machine_id=True)
        self.handle_fail_rcs(system)

        logger.debug("System: %s", system.json())

        message = system.headers.get("x-rh-message", "")

        write_registered_file()

        # Do grouping
        if InsightsClient.options.group is not None:
            self.do_group()

        if InsightsClient.options.group is not None:
            return (message, client_hostname, InsightsClient.options.group,
                    InsightsClient.options.display_name)
        elif InsightsClient.options.display_name is not None:
            return (message, client_hostname, "None",
                    InsightsClient.options.display_name)
        else:
            return (message, client_hostname, "None", "")
Example no. 3
    def create_system(self, new_machine_id=False):
        """
        Create the machine via the API
        """
        client_hostname = determine_hostname()
        machine_id = generate_machine_id(new_machine_id)

        branch_info = self.branch_info()
        if not branch_info:
            return False

        remote_branch = branch_info['remote_branch']
        remote_leaf = branch_info['remote_leaf']

        data = {
            'machine_id': machine_id,
            'remote_branch': remote_branch,
            'remote_leaf': remote_leaf,
            'hostname': client_hostname
        }
        if config['display_name'] is not None:
            data['display_name'] = config['display_name']
        data = json.dumps(data)
        post_system_url = self.api_url + '/v1/systems'
        logger.debug("POST System: %s", post_system_url)
        logger.debug(data)
        net_logger.info("POST %s", post_system_url)
        return self.session.post(post_system_url,
                                 headers={'Content-Type': 'application/json'},
                                 data=data)
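
The 409 retry contract between register() and create_system() can be exercised with a small stub; the fake classes below are purely illustrative and stand in for the real connection and requests response objects.

class _FakeResponse(object):
    # stands in for a requests.Response; only status_code is needed here
    def __init__(self, status_code):
        self.status_code = status_code

class _FakeConnection(object):
    def __init__(self):
        self.calls = []

    def create_system(self, new_machine_id=False):
        # the first POST conflicts on the existing machine-id;
        # the retry with a fresh machine-id succeeds
        self.calls.append(new_machine_id)
        return _FakeResponse(409 if len(self.calls) == 1 else 201)

conn = _FakeConnection()
system = conn.create_system(new_machine_id=False)
if system.status_code == 409:
    system = conn.create_system(new_machine_id=True)
assert conn.calls == [False, True] and system.status_code == 201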
Example no. 4
    def register(self, options):
        """
        Register this machine
        """

        delete_unregistered_file()

        client_hostname = determine_hostname()
        # This will undo a blacklist
        logger.debug("API: Create system")
        system = self.create_system(options, new_machine_id=False)

        # If we get a 409, we know we need to generate a new machine-id
        if system.status_code == 409:
            system = self.create_system(options, new_machine_id=True)
        self.handle_fail_rcs(system)

        logger.debug("System: %s", system.json())

        # Do grouping
        if options.group is not None:
            self.do_group(options.group)

        if options.group is not None:
            return (client_hostname, options.group, options.display_name)
        elif options.display_name is not None:
            return (client_hostname, "None", options.display_name)
        else:
            return (client_hostname, "None", "")
Example no. 5
    def register(self):
        """
        Register this machine
        """

        delete_unregistered_file()

        client_hostname = determine_hostname()
        # This will undo a blacklist
        logger.debug("API: Create system")
        system = self.create_system(new_machine_id=False)

        # If we get a 409, we know we need to generate a new machine-id
        if system.status_code == 409:
            system = self.create_system(new_machine_id=True)
        self.handle_fail_rcs(system)

        logger.debug("System: %s", system.json())

        message = system.headers.get("x-rh-message", "")

        write_registered_file()

        # Do grouping
        if InsightsClient.options.group is not None:
            self.do_group()

        if InsightsClient.options.group is not None:
            return (message, client_hostname, InsightsClient.options.group, InsightsClient.options.display_name)
        elif InsightsClient.options.display_name is not None:
            return (message, client_hostname, "None", InsightsClient.options.display_name)
        else:
            return (message, client_hostname, "None", "")
Example no. 6
    def create_system(self, new_machine_id=False):
        """
        Create the machine via the API
        """
        client_hostname = determine_hostname()
        machine_id = generate_machine_id(new_machine_id)

        try:
            branch_info = self.branch_info()
            remote_branch = branch_info['remote_branch']
            remote_leaf = branch_info['remote_leaf']

        except LookupError:
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error("See %s for more information",
                         constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error("See %s for more information",
                         constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        data = {
            'machine_id': machine_id,
            'remote_branch': remote_branch,
            'remote_leaf': remote_leaf,
            'hostname': client_hostname
        }
        if InsightsClient.config.get(APP_NAME, 'display_name') is not None:
            data['display_name'] = InsightsClient.config.get(
                APP_NAME, 'display_name')
        if InsightsClient.options.display_name is not None:
            data['display_name'] = InsightsClient.options.display_name
        data = json.dumps(data)
        headers = {'Content-Type': 'application/json'}
        post_system_url = self.api_url + '/v1/systems'
        logger.debug("POST System: %s", post_system_url)
        logger.debug(data)
        system = None
        try:
            system = self.session.post(post_system_url,
                                       headers=headers,
                                       data=data)
            logger.debug("POST System status: %d", system.status_code)
        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)
        return system
Example no. 7
 def _handle_hostname(self, command):
     """
     Helper to attempt to get fqdn as hostname
     """
     self.archive.add_command_output({
         'cmd': self._mangle_command(command),
         'status': 0,
         'output': determine_hostname()
     })
    def create_system(self, new_machine_id=False):
        """
        Create the machine via the API
        """
        client_hostname = determine_hostname()
        machine_id = generate_machine_id(new_machine_id)

        try:
            branch_info = self.branch_info()
            remote_branch = branch_info['remote_branch']
            remote_leaf = branch_info['remote_leaf']

        except LookupError:
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error(
                "See %s for more information", constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "ERROR: Could not determine branch information, exiting!")
            logger.error(
                "See %s for more information", constants.default_log_file)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)

        data = {'machine_id': machine_id,
                'remote_branch': remote_branch,
                'remote_leaf': remote_leaf,
                'hostname': client_hostname}
        if InsightsClient.config.get(APP_NAME, 'display_name') is not None:
            data['display_name'] = InsightsClient.config.get(APP_NAME, 'display_name')
        if InsightsClient.options.display_name is not None:
            data['display_name'] = InsightsClient.options.display_name
        data = json.dumps(data)
        headers = {'Content-Type': 'application/json'}
        post_system_url = self.api_url + '/v1/systems'
        logger.debug("POST System: %s", post_system_url)
        logger.debug(data)
        system = None
        try:
            system = self.session.post(post_system_url,
                                       headers=headers,
                                       data=data)
            logger.debug("POST System status: %d", system.status_code)
        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error(
                "Could not register system, running configuration test")
            self.test_connection(1)
        return system
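
For reference, a sketch of the JSON body this method serializes; the machine-id and hostname are placeholders, and the branch values use the offline defaults of -1 mentioned in the collection examples further down.

import json

branch_info = {'remote_branch': -1, 'remote_leaf': -1}
payload = {
    'machine_id': '00000000-0000-0000-0000-000000000000',  # placeholder
    'remote_branch': branch_info['remote_branch'],
    'remote_leaf': branch_info['remote_leaf'],
    'hostname': 'host.example.com',                        # placeholder
}
print(json.dumps(payload))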
Example no. 10
 def __init__(self):
     """
     Initialize the Insights Archive
     Create temp dir, archive dir, and command dir
     """
     self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
     self.archive_name = ("insights-%s-%s" %
                          (determine_hostname(),
                           time.strftime("%Y%m%d%H%M%S")))
     self.archive_dir = self.create_archive_dir()
     self.cmd_dir = self.create_command_dir()
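
A sketch of the archive naming scheme used by this constructor, with a literal hostname standing in for determine_hostname():

import time

hostname = "host.example.com"  # stand-in for determine_hostname()
archive_name = "insights-%s-%s" % (hostname,
                                   time.strftime("%Y%m%d%H%M%S"))
print(archive_name)  # e.g. insights-host.example.com-20170101120000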
Example no. 11
    def register(self):
        """
        Register this machine
        """

        write_to_disk(constants.unregistered_file, delete=True)

        client_hostname = determine_hostname()
        # This will undo a blacklist
        logger.debug("API: Create system")
        system = self.create_system(new_machine_id=False)
        if not system:
            return ('Could not reach the Insights service to register.', '',
                    '', '')

        # If we get a 409, we know we need to generate a new machine-id
        if system.status_code == 409:
            system = self.create_system(new_machine_id=True)
        self.handle_fail_rcs(system)

        logger.debug("System: %s", system.json())

        message = system.headers.get("x-rh-message", "")

        write_to_disk(constants.registered_file)

        # Do grouping
        if config['group'] is not None:
            self.do_group()

        # Display registration success message to STDOUT and logs
        if system.status_code == 201:
            try:
                system_json = system.json()
                machine_id = system_json["machine_id"]
                account_number = system_json["account_number"]
                logger.info("You successfully registered %s to account %s." %
                            (machine_id, account_number))
            except:
                logger.debug('Received invalid JSON on system registration.')
                logger.debug(
                    'API still indicates valid registration with 201 status code.'
                )
                logger.debug(system)
                logger.debug(system.json())

        if config['group'] is not None:
            return (message, client_hostname, config['group'],
                    config['display_name'])
        elif config['display_name'] is not None:
            return (message, client_hostname, "None", config['display_name'])
        else:
            return (message, client_hostname, "None", "")
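
The 201 success path above expects a response body of roughly this shape; the values are placeholders, not real identifiers.

system_json = {
    "machine_id": "00000000-0000-0000-0000-000000000000",  # placeholder
    "account_number": "123456",                            # placeholder
}
print("You successfully registered %s to account %s." %
      (system_json["machine_id"], system_json["account_number"]))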
Example no. 12
 def __init__(self, compressor="gz", target_name=None):
     """
     Initialize the Insights Archive
     Create temp dir, archive dir, and command dir
     """
     self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
     name = determine_hostname(target_name)
     self.archive_name = ("insights-%s-%s" %
                          (name, time.strftime("%Y%m%d%H%M%S")))
     self.archive_dir = self.create_archive_dir()
     self.cmd_dir = self.create_command_dir()
     self.compressor = compressor
Example no. 13
 def __init__(self, compressor="gz", target_name=None):
     """
     Initialize the Insights Archive
     Create temp dir, archive dir, and command dir
     """
     self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
     name = determine_hostname(target_name)
     self.archive_name = ("insights-%s-%s" %
                          (name,
                           time.strftime("%Y%m%d%H%M%S")))
     self.archive_dir = self.create_archive_dir()
     self.cmd_dir = self.create_command_dir()
     self.compressor = compressor
Example no. 14
 def create_system(self, new_machine_id=False):
     """
     Create the machine via the API
     """
     client_hostname = determine_hostname()
     machine_id = generate_machine_id(new_machine_id)
     data = {'machine_id': machine_id, 'hostname': client_hostname}
     data = json.dumps(data)
     headers = {'Content-Type': 'application/json'}
     post_system_url = self.api_url + '/v1/systems'
     logger.debug("POST System: %s", post_system_url)
     logger.debug(data)
     system = self.session.post(post_system_url,
                                headers=headers,
                                data=data)
     logger.debug("POST System status: %d", system.status_code)
     return system
Example no. 15
    def register(self):
        """
        Register this machine
        """

        delete_unregistered_file()

        client_hostname = determine_hostname()
        # This will undo a blacklist
        logger.debug("API: Create system")
        system = self.create_system(new_machine_id=False)

        # If we get a 409, we know we need to generate a new machine-id
        if system.status_code == 409:
            system = self.create_system(new_machine_id=True)
        self.handle_fail_rcs(system)

        logger.debug("System: %s", system.json())

        message = system.headers.get("x-rh-message", "")

        write_registered_file()

        # Do grouping
        if InsightsClient.options.group is not None:
            self.do_group()

        # Display registration success message to STDOUT and logs
        if system.status_code == 201:
            try:
                system_json = system.json()
                machine_id = system_json["machine_id"]
                account_number = system_json["account_number"]
                logger.info("You successfully registered %s to account %s." % (machine_id, account_number))
            except:
                logger.debug('Received invalid JSON on system registration.')
                logger.debug('API still indicates valid registration with 201 status code.')
                logger.debug(system)
                logger.debug(system.json())

        if InsightsClient.options.group is not None:
            return (message, client_hostname, InsightsClient.options.group, InsightsClient.options.display_name)
        elif InsightsClient.options.display_name is not None:
            return (message, client_hostname, "None", InsightsClient.options.display_name)
        else:
            return (message, client_hostname, "None", "")
Example no. 16
def collect_data_and_upload(rc=0):
    """
    All the heavy lifting done here
    Run through "targets" - could be just one (host, default) or many (containers+host)
    """
    # initialize collection targets
    # for now we do either containers OR host -- not both at same time
    if InsightsClient.options.container_mode:
        targets = get_targets()
        targets = targets + constants.default_target
    else:
        targets = constants.default_target

    if InsightsClient.options.offline:
        logger.warning("Assuming remote branch and leaf value of -1")
        pconn = None
        branch_info = constants.default_branch_info
    else:
        pconn = InsightsConnection()

    # TODO: change these err msgs to be more meaningful, e.g.
    # "could not determine login information"
    if pconn:
        try:
            branch_info = pconn.branch_info()
        except requests.ConnectionError:
            branch_info = handle_branch_info_error(
                "Could not connect to determine branch information")
        except LookupError:
            branch_info = handle_branch_info_error(
                "Could not determine branch information")
    pc = InsightsConfig(pconn)

    if InsightsClient.options.just_upload:
        if not os.path.exists(InsightsClient.options.just_upload):
            logger.error('No file %s', InsightsClient.options.just_upload)
            return 1
        tar_file = InsightsClient.options.just_upload
        rc = _do_upload(pconn, tar_file, 'dummy', 0)
        return rc

    # load config from stdin/file if specified
    try:
        stdin_config = {}
        if InsightsClient.options.from_file:
            with open(InsightsClient.options.from_file, 'r') as f:
                stdin_config = json.load(f)
        elif InsightsClient.options.from_stdin:
            stdin_config = json.load(sys.stdin)
        if ((InsightsClient.options.from_file or InsightsClient.options.from_stdin) and
            ('uploader.json' not in stdin_config or
             'sig' not in stdin_config)):
            raise ValueError
    except:
        logger.error('ERROR: Invalid config for %s! Exiting...',
                     ('--from-file' if InsightsClient.options.from_file else '--from-stdin'))
        sys.exit(1)

    start = time.clock()
    collection_rules, rm_conf = pc.get_conf(InsightsClient.options.update, stdin_config)
    collection_elapsed = (time.clock() - start)
    logger.debug("Rules configuration loaded. Elapsed time: %s", collection_elapsed)

    individual_archives = []

    for t in targets:
        # defaults
        archive = None
        container_connection = None
        mp = None
        obfuscate = None
        # archive metadata
        archive_meta = {}

        try:
            if t['type'] == 'docker_image':
                container_connection = open_image(t['name'])
                logging_name = 'Docker image ' + t['name']
                archive_meta['display_name'] = get_repotag(t['name'])
                archive_meta['docker_id'] = t['name']
                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis', logging_name)
                    continue
            elif t['type'] == 'docker_container':
                container_connection = open_container(t['name'])
                logging_name = 'Docker container ' + t['name']
                archive_meta['display_name'] = t['name']
                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis', logging_name)
                    continue
            elif t['type'] == 'host':
                logging_name = determine_hostname()
                archive_meta['display_name'] = determine_hostname(InsightsClient.options.display_name)
            else:
                logger.error('Unexpected analysis target: %s', t['type'])
                continue

            archive_meta['type'] = t['type'].replace('docker_', '')
            archive_meta['product'] = 'Docker'
            archive_meta['system_id'] = generate_analysis_target_id(t['type'], t['name'])

            collection_start = time.clock()
            archive = InsightsArchive(
                compressor=(InsightsClient.options.compressor
                            if not InsightsClient.options.container_mode
                            else "none"),
                target_name=t['name'])
            atexit.register(_delete_archive, archive)
            dc = DataCollector(archive,
                               mountpoint=mp,
                               target_name=t['name'],
                               target_type=t['type'])

            logger.info('Starting to collect Insights data for %s', logging_name)
            dc.run_collection(collection_rules, rm_conf, branch_info)
            elapsed = (time.clock() - start)
            logger.debug("Data collection complete. Elapsed time: %s", elapsed)

            obfuscate = InsightsClient.config.getboolean(APP_NAME, "obfuscate")

            # include rule refresh time in the duration
            collection_duration = (time.clock() - collection_start) + collection_elapsed

            if InsightsClient.options.no_tar_file:
                logger.info('See Insights data in %s', dc.archive.archive_dir)
                return rc

            tar_file = dc.done(collection_rules, rm_conf)

            # add archives to list of individual uploads
            archive_meta['tar_file'] = tar_file
            individual_archives.append(archive_meta)

        finally:
            # called on loop iter end or unexpected exit
            if container_connection:
                container_connection.close()

    # if multiple targets (container mode), add all archives to single archive
    if InsightsClient.options.container_mode:
        full_archive = InsightsArchive(compressor=InsightsClient.options.compressor)
        for a in individual_archives:
            shutil.copy(a['tar_file'], full_archive.archive_dir)
        # don't want insights_commands in meta archive
        shutil.rmtree(full_archive.cmd_dir)
        metadata = _create_metadata_json(individual_archives)
        full_archive.add_metadata_to_archive(json.dumps(metadata), 'metadata.json')
        full_tar_file = full_archive.create_tar_file(full_archive=True)
    # if only one target (regular mode), just upload one
    else:
        full_archive = archive
        full_tar_file = tar_file

    if InsightsClient.options.offline or InsightsClient.options.no_upload:
        handle_file_output(full_tar_file, full_archive)
        return rc

    # do the upload
    rc = _do_upload(pconn, full_tar_file, logging_name, collection_duration)

    if InsightsClient.options.keep_archive:
        logger.info('Insights data retained in %s', full_tar_file)
        return rc
    if obfuscate:
        logger.info('Obfuscated Insights data retained in %s',
                    os.path.dirname(full_tar_file))
    full_archive.delete_archive_dir()
    return rc
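
The loop above consumes target dicts of this shape; the docker names below are hypothetical examples.

targets = [
    {'type': 'docker_image', 'name': 'rhel7:latest'},      # hypothetical
    {'type': 'docker_container', 'name': 'my-container'},  # hypothetical
    {'type': 'host', 'name': 'host.example.com'},          # hypothetical
]
for t in targets:
    print(t['type'], t['name'])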
Example no. 17
def collect_data_and_upload(rc=0):
    """
    All the heavy lifting done here
    Run through "targets" - could be just ONE (host, default) or ONE (container/image)
    """
    # initialize collection targets
    # for now we do either containers OR host -- not both at same time
    if InsightsClient.options.container_mode:
        logger.debug("Client running in container/image mode.")
        logger.debug("Scanning for matching container/image.")
        targets = get_targets()
    else:
        logger.debug("Host selected as scanning target.")
        targets = constants.default_target

    # if there are no targets to scan then bail
    if not len(targets):
        logger.debug("No targets were found. Exiting.")
        sys.exit(1)

    if InsightsClient.options.offline:
        logger.warning("Assuming remote branch and leaf value of -1")
        pconn = None
        branch_info = constants.default_branch_info
    else:
        pconn = InsightsConnection()

    # TODO: change these err msgs to be more meaningful, e.g.
    # "could not determine login information"
    if pconn:
        try:
            branch_info = pconn.branch_info()
        except requests.ConnectionError:
            branch_info = handle_branch_info_error(
                "Could not connect to determine branch information")
        except LookupError:
            branch_info = handle_branch_info_error(
                "Could not determine branch information")
    pc = InsightsConfig(pconn)
    tar_file = None

    if InsightsClient.options.just_upload:
        if not os.path.exists(InsightsClient.options.just_upload):
            logger.error('No file %s', InsightsClient.options.just_upload)
            return 1
        tar_file = InsightsClient.options.just_upload
        rc = _do_upload(pconn, tar_file, 'dummy', 0)
        return rc

    # load config from stdin/file if specified
    try:
        stdin_config = {}
        if InsightsClient.options.from_file:
            with open(InsightsClient.options.from_file, 'r') as f:
                stdin_config = json.load(f)
        elif InsightsClient.options.from_stdin:
            stdin_config = json.load(sys.stdin)
        if ((InsightsClient.options.from_file
             or InsightsClient.options.from_stdin)
                and ('uploader.json' not in stdin_config
                     or 'sig' not in stdin_config)):
            raise ValueError
        if ((InsightsClient.options.from_file
             or InsightsClient.options.from_stdin)
                and 'branch_info' in stdin_config
                and stdin_config['branch_info'] is not None):
            branch_info = stdin_config['branch_info']
    except:
        logger.error('ERROR: Invalid config for %s! Exiting...',
                     ('--from-file'
                      if InsightsClient.options.from_file else '--from-stdin'))
        sys.exit(1)

    start = time.clock()
    collection_rules, rm_conf = pc.get_conf(InsightsClient.options.update,
                                            stdin_config)
    collection_elapsed = (time.clock() - start)
    logger.debug("Rules configuration loaded. Elapsed time: %s",
                 collection_elapsed)

    individual_archives = []

    for t in targets:
        # defaults
        archive = None
        container_connection = None
        mp = None
        obfuscate = None
        # archive metadata
        archive_meta = {}

        try:
            if t['type'] == 'docker_image':
                container_connection = open_image(t['name'])
                logging_name = 'Docker image ' + t['name']
                archive_meta['docker_id'] = t['name']
                archive_meta['display_name'] = docker_display_name(
                    t['name'], t['type'].replace('docker_', ''))
                logger.debug('Docker display_name: %s',
                             archive_meta['display_name'])
                logger.debug('Docker docker_id: %s', archive_meta['docker_id'])
                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis',
                                 logging_name)
                    sys.exit(1)
            elif t['type'] == 'docker_container':
                container_connection = open_container(t['name'])
                logging_name = 'Docker container ' + t['name']
                archive_meta['docker_id'] = t['name']
                archive_meta['display_name'] = docker_display_name(
                    t['name'], t['type'].replace('docker_', ''))
                logger.debug('Docker display_name: %s',
                             archive_meta['display_name'])
                logger.debug('Docker docker_id: %s', archive_meta['docker_id'])
                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis',
                                 logging_name)
                    sys.exit(1)
            elif t['type'] == 'host':
                logging_name = determine_hostname()
                archive_meta['display_name'] = determine_hostname(
                    InsightsClient.options.display_name)
            else:
                logger.error('Unexpected analysis target: %s', t['type'])
                sys.exit(1)

            archive_meta['type'] = t['type'].replace('docker_', '')
            archive_meta['product'] = 'Docker'
            archive_meta['system_id'] = generate_analysis_target_id(
                t['type'], t['name'])

            collection_start = time.clock()
            archive = InsightsArchive(
                compressor=InsightsClient.options.compressor
                if not InsightsClient.options.container_mode else "none",
                target_name=t['name'])
            atexit.register(_delete_archive, archive)
            dc = DataCollector(archive,
                               InsightsClient.config,
                               mountpoint=mp,
                               target_name=t['name'],
                               target_type=t['type'])

            logger.info('Starting to collect Insights data for %s',
                        logging_name)
            dc.run_collection(collection_rules, rm_conf, branch_info)
            elapsed = (time.clock() - start)
            logger.debug("Data collection complete. Elapsed time: %s", elapsed)

            obfuscate = InsightsClient.config.getboolean(APP_NAME, "obfuscate")

            # include rule refresh time in the duration
            collection_duration = (time.clock() -
                                   collection_start) + collection_elapsed

            # add custom metadata about a host if provided by from_file
            # use in the OSE case
            if InsightsClient.options.from_file:
                with open(InsightsClient.options.from_file, 'r') as f:
                    stdin_config = json.load(f)
                    if 'metadata' in stdin_config:
                        archive.add_metadata_to_archive(
                            json.dumps(stdin_config['metadata']),
                            'metadata.json')

            if InsightsClient.options.no_tar_file:
                logger.info('See Insights data in %s', dc.archive.archive_dir)
                return rc

            tar_file = dc.done(collection_rules, rm_conf)

            # add archives to list of individual uploads
            archive_meta['tar_file'] = tar_file
            individual_archives.append(archive_meta)

        finally:
            # called on loop iter end or unexpected exit
            if container_connection:
                container_connection.close()

    # if multiple targets (container mode), add all archives to single archive
    # if InsightsClient.options.container_mode:
    if False:  # we only run single collections now (not the uber archives), bypass this
        full_archive = InsightsArchive(
            compressor=InsightsClient.options.compressor)
        for a in individual_archives:
            shutil.copy(a['tar_file'], full_archive.archive_dir)
        # don't want insights_commands in meta archive
        shutil.rmtree(full_archive.cmd_dir)
        metadata = _create_metadata_json(individual_archives)
        full_archive.add_metadata_to_archive(json.dumps(metadata),
                                             'metadata.json')
        full_tar_file = full_archive.create_tar_file(full_archive=True)
    # if only one target (regular mode), just upload one
    else:
        full_archive = archive
        full_tar_file = tar_file

    if InsightsClient.options.offline or InsightsClient.options.no_upload:
        handle_file_output(full_tar_file, full_archive)
        return rc

    # do the upload
    rc = _do_upload(pconn, full_tar_file, logging_name, collection_duration)

    if InsightsClient.options.keep_archive:
        logger.info('Insights data retained in %s', full_tar_file)
        return rc
    if obfuscate:
        logger.info('Obfuscated Insights data retained in %s',
                    os.path.dirname(full_tar_file))
    full_archive.delete_archive_dir()
    return rc
Example no. 18
def collect(rc=0):
    """
    All the heavy lifting done here
    Run through "targets" - could be just ONE (host, default) or ONE (container/image)
    """
    # initialize collection targets
    # tar files
    if config['analyze_file'] is not None:
        logger.debug("Client analyzing a compress filesystem.")
        targets = [{
            'type': 'compressed_file',
            'name': os.path.splitext(
                os.path.basename(config['analyze_file']))[0],
            'location': config['analyze_file']
        }]

    # mountpoints
    elif config['analyze_mountpoint'] is not None:
        logger.debug("Client analyzing a filesystem already mounted.")
        targets = [{
            'type': 'mountpoint',
            'name': os.path.splitext(
                os.path.basename(config['analyze_mountpoint']))[0],
            'location': config['analyze_mountpoint']
        }]

    # container mode
    elif config['container_mode']:
        logger.debug("Client running in container/image mode.")
        logger.debug("Scanning for matching container/image.")

        from containers import get_targets
        targets = get_targets()

    # the host
    else:
        logger.debug("Host selected as scanning target.")
        targets = constants.default_target

    # if there are no targets to scan then bail
    if not len(targets):
        logger.debug("No targets were found. Exiting.")
        return False
    logger.debug("Found targets: ")
    logger.debug(targets)

    branch_info = get_branch_info()
    pc = InsightsConfig()
    tar_file = None

    # load config from stdin/file if specified
    try:
        stdin_config = {}
        if config['from_file']:
            with open(config['from_file'], 'r') as f:
                stdin_config = json.load(f)
        elif config['from_stdin']:
            stdin_config = json.load(sys.stdin)
        if ((config['from_file'] or config['from_stdin'])
                and ('uploader.json' not in stdin_config
                     or 'sig' not in stdin_config)):
            raise ValueError
        if ((config['from_file'] or config['from_stdin'])
                and 'branch_info' in stdin_config
                and stdin_config['branch_info'] is not None):
            branch_info = stdin_config['branch_info']
    except:
        logger.error(
            'ERROR: Invalid config for %s! Exiting...',
            ('--from-file' if config['from_file'] else '--from-stdin'))
        return False

    collection_rules, rm_conf = pc.get_conf(False, stdin_config)
    individual_archives = []

    for t in targets:
        # defaults
        archive = None
        container_connection = None
        mp = None
        compressed_filesystem = None
        # archive metadata
        archive_meta = {}

        try:

            # analyze docker images
            if t['type'] == 'docker_image':

                from containers import open_image
                container_connection = open_image(t['name'])
                logging_name = 'Docker image ' + t['name']
                archive_meta['docker_id'] = t['name']

                from containers import docker_display_name
                archive_meta['display_name'] = docker_display_name(
                    t['name'], t['type'].replace('docker_', ''))

                logger.debug('Docker display_name: %s',
                             archive_meta['display_name'])
                logger.debug('Docker docker_id: %s', archive_meta['docker_id'])

                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis',
                                 logging_name)
                    return False

            # analyze docker containers
            elif t['type'] == 'docker_container':
                from containers import open_container
                container_connection = open_container(t['name'])

                logging_name = 'Docker container ' + t['name']
                archive_meta['docker_id'] = t['name']

                from containers import docker_display_name
                archive_meta['display_name'] = docker_display_name(
                    t['name'], t['type'].replace('docker_', ''))
                logger.debug('Docker display_name: %s',
                             archive_meta['display_name'])
                logger.debug('Docker docker_id: %s', archive_meta['docker_id'])

                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis',
                                 logging_name)
                    return False

            # analyze compressed files
            elif t['type'] == 'compressed_file':

                logging_name = ('Compressed file ' + t['name'] +
                                ' at location ' + t['location'])

                from compressed_file import InsightsCompressedFile
                compressed_filesystem = InsightsCompressedFile(t['location'])

                if compressed_filesystem.is_tarfile is False:
                    logger.debug("Could not access compressed tar filesystem.")
                    return False

                mp = compressed_filesystem.get_filesystem_path()

            # analyze mountpoints
            elif t['type'] == 'mountpoint':

                logging_name = ('Filesystem ' + t['name'] +
                                ' at location ' + t['location'])
                mp = config['analyze_mountpoint']

            # analyze the host
            elif t['type'] == 'host':
                logging_name = determine_hostname()
                archive_meta['display_name'] = determine_hostname(
                    config['display_name'])

            # nothing found to analyze
            else:
                logger.error('Unexpected analysis target: %s', t['type'])
                return False

            archive_meta['type'] = t['type'].replace('docker_', '')
            archive_meta['product'] = 'Docker'

            machine_id = generate_analysis_target_id(t['type'], t['name'])
            archive_meta['system_id'] = machine_id
            archive_meta['machine_id'] = machine_id

            archive = InsightsArchive(compressor=config['compressor'] if
                                      not config['container_mode'] else "none",
                                      target_name=t['name'])

            # determine the target type and begin collection
            # we infer "docker_image" SPEC analysis for certain types
            if t['type'] in ["mountpoint", "compressed_file"]:
                target_type = "docker_image"
            else:
                target_type = t['type']
            logger.debug("Inferring target_type '%s' for SPEC collection",
                         target_type)
            logger.debug("Inferred from '%s'", t['type'])
            dc = DataCollector(archive,
                               config,
                               mountpoint=mp,
                               target_name=t['name'],
                               target_type=target_type)

            logger.info('Starting to collect Insights data for %s',
                        logging_name)
            dc.run_collection(collection_rules, rm_conf, branch_info)

            # add custom metadata about a host if provided by from_file
            # use in the OSE case
            if config['from_file']:
                with open(config['from_file'], 'r') as f:
                    stdin_config = json.load(f)
                    if 'metadata' in stdin_config:
                        archive.add_metadata_to_archive(
                            json.dumps(stdin_config['metadata']),
                            'metadata.json')

            tar_file = dc.done(collection_rules, rm_conf)

            # add archives to list of individual uploads
            archive_meta['tar_file'] = tar_file
            individual_archives.append(archive_meta)

        finally:
            # called on loop iter end or unexpected exit
            if container_connection:
                container_connection.close()

    # cleanup the temporary stuff for analyzing tar files
    if config['analyze_file'] is not None and compressed_filesystem is not None:
        compressed_filesystem.cleanup_temp_filesystem()

    return tar_file
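
A hedged usage sketch: as written, collect() returns the tar file path on success and False when a target cannot be opened or the stdin/file config is invalid.

tar_file = collect()
if tar_file is False:
    print("Collection failed; see the client log for details.")
else:
    print("Archive written to %s" % tar_file)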