def run_with_result_store(self, manager_interrupt, result_store):
    """Run the connect scan, recording results in result_store.

    Each of the source's credentials is tried in turn against the
    hosts that have not yet connected; any hosts still remaining at
    the end are recorded as failed connections.

    :param manager_interrupt: shared value checked for termination
    :param result_store: ConnectResultStore tracking per-host results
    :returns: tuple of (message or None, ScanTask status)
    """
    source = SourceSerializer(self.scan_task.source).data

    job_options = self.scan_job.options
    forks = (job_options.max_concurrency
             if job_options is not None
             else ScanOptions.get_default_forks())

    source_options = self.scan_task.source.options
    use_paramiko = (source_options.use_paramiko
                    if source_options is not None
                    else False)

    connection_port = source['port']
    remaining_hosts = result_store.remaining_hosts()

    for cred_id in source['credentials']:
        check_manager_interrupt(manager_interrupt.value)
        credential = Credential.objects.get(pk=cred_id)

        if not remaining_hosts:
            self.scan_task.log_message(
                'Skipping credential %s. No remaining hosts.' %
                credential.name)
            break

        self.scan_task.log_message(
            'Attempting credential %s.' % credential.name)

        try:
            scan_message, scan_result = _connect(
                manager_interrupt, self.scan_task, remaining_hosts,
                result_store, credential, connection_port, forks,
                use_paramiko)
            if scan_result != ScanTask.COMPLETED:
                return scan_message, scan_result
        except AnsibleRunnerException as ansible_error:
            failed_hosts = ', '.join(result_store.remaining_hosts())
            error_message = 'Connect scan task failed with credential %s.'\
                ' Error: %s Hosts: %s' %\
                (credential.name, ansible_error, failed_hosts)
            return error_message, ScanTask.FAILED

        # Refresh the work list; hosts that connected are removed.
        remaining_hosts = result_store.remaining_hosts()

    logger.debug('Failed systems: %s', remaining_hosts)
    for host in remaining_hosts:
        # No credential could reach these hosts, so they have failed.
        result_store.record_result(host, self.scan_task.source,
                                   None, SystemConnectionResult.FAILED)
    return None, ScanTask.COMPLETED
def run_with_result_store(self, result_store):
    """Run the connect scan against a given ConnectResultStore.

    Iterates the source's credentials, attempting each one against
    the hosts still unconnected; leftover hosts are recorded as
    failed connections.

    :param result_store: ConnectResultStore tracking per-host results
    :returns: tuple of (message or None, ScanTask status)
    """
    source = SourceSerializer(self.scan_task.source).data

    job_options = self.scan_job.options
    forks = (ScanOptions.get_default_forks()
             if job_options is None
             else job_options.max_concurrency)

    connection_port = source['port']
    remaining_hosts = result_store.remaining_hosts()

    for cred_id in source['credentials']:
        credential = Credential.objects.get(pk=cred_id)

        if not remaining_hosts:
            self.scan_task.log_message(
                'Skipping credential %s. No remaining hosts.' %
                credential.name)
            break

        self.scan_task.log_message(
            'Attempting credential %s.' % credential.name)

        cred_data = CredentialSerializer(credential).data
        callback = ConnectResultCallback(result_store, credential,
                                         self.scan_task.source)
        try:
            connect(remaining_hosts, callback, cred_data,
                    connection_port, forks=forks)
        except AnsibleError as ansible_error:
            failed_hosts = ', '.join(result_store.remaining_hosts())
            error_message = 'Connect scan task failed with credential %s.'\
                ' Error: %s Hosts: %s' %\
                (credential.name, ansible_error, failed_hosts)
            return error_message, ScanTask.FAILED

        # Refresh the work list; hosts that connected are removed.
        remaining_hosts = result_store.remaining_hosts()

    logger.debug('Failed systems: %s', remaining_hosts)
    for host in remaining_hosts:
        # No credential could reach these hosts, so they have failed.
        result_store.record_result(host, self.scan_task.source,
                                   None, SystemConnectionResult.FAILED)
    return None, ScanTask.COMPLETED
def __init__(self, scan_job, scan_task):
    """Set context for interface."""
    self.scan_job = scan_job
    self.source = scan_task.source

    options = scan_job.options
    self.max_concurrency = (ScanOptions.get_default_forks()
                            if options is None
                            else options.max_concurrency)

    if scan_task.scan_type == ScanTask.SCAN_TYPE_CONNECT:
        # The given task is itself the connect task.
        self.connect_scan_task = scan_task
        self.inspect_scan_task = None
    else:
        # Inspect task: its connect task ran first as a prerequisite.
        self.connect_scan_task = scan_task.prerequisites.first()
        self.inspect_scan_task = scan_task
def setUp(self):
    """Create test setup."""
    # Start each test from a clean database.
    management.call_command('flush', '--no-input')

    self.cred = Credential.objects.create(
        name='cred1', username='******', password='******',
        become_password=None, ssh_keyfile=None)
    self.cred_for_upload = self.cred.id

    network_kwargs = {'source_type': 'network', 'port': 22}
    self.source = Source(name='source1', **network_kwargs)
    self.source.save()

    self.source2 = Source(name='source2', **network_kwargs)
    self.source2.save()
    self.source2.credentials.add(self.cred)

    self.concurrency = ScanOptions.get_default_forks()
class ScanOptionsSerializer(NotEmptySerializer):
    """Serializer for the ScanOptions model."""

    # Pass the method itself (not its call result) so DRF evaluates
    # the default lazily each time it is needed, instead of freezing
    # whatever get_default_forks() returned at import time.
    max_concurrency = IntegerField(required=False,
                                   min_value=1,
                                   max_value=200,
                                   default=ScanOptions.get_default_forks)
    disabled_optional_products = DisableOptionalProductsOptionsSerializer(
        required=False)
    enabled_extended_product_search = ExtendedProductSearchOptionsSerializer(
        required=False)

    class Meta:
        """Metadata for serializer."""

        model = ScanOptions
        fields = [
            'max_concurrency',
            'disabled_optional_products',
            'enabled_extended_product_search'
        ]
def _inspect_scan(self, manager_interrupt, connected,
                  roles=DEFAULT_ROLES,
                  base_ssh_executable=None,
                  ssh_timeout=None):
    """Execute the host scan with the initialized source.

    Builds an inventory from the connected hosts, then runs the
    inspection playbook once per inventory group, honoring
    cancel/pause requests between groups.

    :param manager_interrupt: Signal used to communicate termination
        of scan
    :param connected: list of (host, credential) pairs to inspect
    :param roles: list of roles to execute
    :param base_ssh_executable: ssh executable, or None for 'ssh'.
        Will be wrapped with a timeout before being passed to Ansible.
    :param ssh_timeout: string in the format of the 'timeout' command.
        Timeout for individual tasks.
    :returns: tuple of (scan_message, scan_result status)
    :raises AnsibleError: if a group run fails for any reason other
        than unreachable/failed hosts
    :raises NetworkCancelException: when a cancel has been requested
    :raises NetworkPauseException: when a pause has been requested
    """
    connection_port = self.scan_task.source.port
    # NOTE: use_paramiko stays None (not False) when the source has
    # no options; it is forwarded positionally to run_playbook below.
    if self.scan_task.source.options is not None:
        use_paramiko = self.scan_task.source.options.use_paramiko
    else:
        use_paramiko = None
    # Scan-job options override the defaults for fork count and the
    # Ansible extra_vars.
    if self.scan_job.options is not None:
        forks = self.scan_job.options.max_concurrency
        extra_vars = self.scan_job.options.get_extra_vars()
    else:
        forks = ScanOptions.get_default_forks()
        extra_vars = ScanOptions.get_default_extra_vars()
    if extra_vars.get(ScanOptions.EXT_PRODUCT_SEARCH_DIRS) is None:
        extra_vars[ScanOptions.EXT_PRODUCT_SEARCH_DIRS] = \
            ' '.join(DEFAULT_SCAN_DIRS)
    # Wrap ssh with the bundled timeout_ssh helper so individual
    # tasks cannot hang indefinitely.
    ssh_executable = os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     '../../../bin/timeout_ssh'))
    base_ssh_executable = base_ssh_executable or 'ssh'
    ssh_timeout = ssh_timeout or settings.QPC_SSH_INSPECT_TIMEOUT
    # pylint: disable=line-too-long
    # the ssh arg is required for become-pass because
    # ansible checks for an exact string match of ssh
    # anywhere in the command array
    # See https://github.com/ansible/ansible/blob/stable-2.3/lib/ansible/plugins/connection/ssh.py#L490-L500  # noqa
    # timeout_ssh will remove the ssh argument before running the command
    ssh_args = [
        '--executable=' + base_ssh_executable,
        '--timeout=' + ssh_timeout,
        'ssh'
    ]
    group_names, inventory = _construct_scan_inventory(
        connected, connection_port, forks,
        ssh_executable=ssh_executable,
        ssh_args=ssh_args)
    inventory_file = write_inventory(inventory)
    error_msg = ''
    log_message = 'START INSPECT PROCESSING GROUPS'\
        ' with use_paramiko: %s, '\
        '%d forks and extra_vars=%s' % (use_paramiko,
                                        forks,
                                        extra_vars)
    self.scan_task.log_message(log_message)
    scan_result = ScanTask.COMPLETED
    scan_message = 'success'
    for idx, group_name in enumerate(group_names):
        # Check for cancel/pause between groups so a long scan can
        # be interrupted at a group boundary.
        if manager_interrupt.value == ScanJob.JOB_TERMINATE_CANCEL:
            raise NetworkCancelException()
        if manager_interrupt.value == ScanJob.JOB_TERMINATE_PAUSE:
            raise NetworkPauseException()
        log_message = 'START INSPECT PROCESSING GROUP %d of %d' % (
            (idx + 1), len(group_names))
        self.scan_task.log_message(log_message)
        callback =\
            InspectResultCallback(
                scan_task=self.scan_task)
        playbook = {
            'name': 'scan systems for product fingerprint facts',
            'hosts': group_name,
            'gather_facts': False,
            'roles': roles
        }
        result = run_playbook(inventory_file, callback, playbook,
                              extra_vars, use_paramiko, forks=forks)
        if result != TaskQueueManager.RUN_OK:
            new_error_msg = _construct_error_msg(result)
            callback.finalize_failed_hosts()
            # Unreachable/failed hosts are handled per-host by the
            # callback; only other failure modes abort the whole scan.
            if result not in [
                    TaskQueueManager.RUN_UNREACHABLE_HOSTS,
                    TaskQueueManager.RUN_FAILED_HOSTS
            ]:
                error_msg += '{}\n'.format(new_error_msg)
    if error_msg != '':
        raise AnsibleError(error_msg)
    return scan_message, scan_result
def _inspect_scan(self, connected, roles=DEFAULT_ROLES,
                  base_ssh_executable=None,
                  ssh_timeout=None):
    """Execute the host scan with the initialized source.

    Builds an inventory from the connected hosts, then runs the
    inspection playbook once per inventory group.

    :param connected: list of (host, credential) pairs to inspect
    :param roles: list of roles to execute
    :param base_ssh_executable: ssh executable, or None for 'ssh'.
        Will be wrapped with a timeout before being passed to Ansible.
    :param ssh_timeout: string in the format of the 'timeout' command.
        Timeout for individual tasks.
    :returns: tuple of (scan_message, scan_result status)
    :raises AnsibleError: if a group run fails for any reason other
        than unreachable/failed hosts
    """
    # NOTE: the original version built an initial playbook dict here
    # ('hosts': 'all', 'strategy': 'free'); it was dead code, always
    # overwritten per group inside the loop below, so it was removed.
    connection_port = self.scan_task.source.port
    # Scan-job options override the defaults for fork count and the
    # Ansible extra_vars.
    if self.scan_job.options is not None:
        forks = self.scan_job.options.max_concurrency
        extra_vars = self.scan_job.options.get_extra_vars()
    else:
        forks = ScanOptions.get_default_forks()
        extra_vars = ScanOptions.get_default_extra_vars()
    if extra_vars.get(ScanOptions.EXT_PRODUCT_SEARCH_DIRS) is None:
        extra_vars[ScanOptions.EXT_PRODUCT_SEARCH_DIRS] = \
            ' '.join(DEFAULT_SCAN_DIRS)
    # Wrap ssh with the bundled timeout_ssh helper so individual
    # tasks cannot hang indefinitely.
    ssh_executable = os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     '../../../bin/timeout_ssh'))
    base_ssh_executable = base_ssh_executable or 'ssh'
    ssh_timeout = ssh_timeout or DEFAULT_TIMEOUT
    ssh_args = ['--executable=' + base_ssh_executable,
                '--timeout=' + ssh_timeout]
    group_names, inventory = _construct_scan_inventory(
        connected, connection_port, forks,
        ssh_executable=ssh_executable,
        ssh_args=ssh_args)
    inventory_file = write_inventory(inventory)
    error_msg = ''
    log_message = 'START PROCESSING GROUPS with %d forks' \
        ' and extra_vars=%s' % (forks, extra_vars)
    self.scan_task.log_message(log_message)
    scan_result = ScanTask.COMPLETED
    scan_message = 'success'
    for idx, group_name in enumerate(group_names):
        log_message = 'START PROCESSING GROUP %d of %d' % (
            (idx + 1), len(group_names))
        self.scan_task.log_message(log_message)
        callback = InspectResultCallback(scan_task=self.scan_task)
        playbook = {'name': 'scan systems for product fingerprint facts',
                    'hosts': group_name,
                    'gather_facts': False,
                    'roles': roles}
        result = run_playbook(
            inventory_file, callback, playbook, extra_vars, forks=forks)
        if result != TaskQueueManager.RUN_OK:
            new_error_msg = _construct_error_msg(result)
            callback.finalize_failed_hosts()
            # Unreachable/failed hosts are handled per-host by the
            # callback; only other failure modes abort the whole scan.
            if result not in (TaskQueueManager.RUN_UNREACHABLE_HOSTS,
                              TaskQueueManager.RUN_FAILED_HOSTS):
                error_msg += '{}\n'.format(new_error_msg)
    if error_msg != '':
        raise AnsibleError(error_msg)
    return scan_message, scan_result
def _inspect_scan(self, manager_interrupt, connected,
                  base_ssh_executable=None,
                  ssh_timeout=None):
    """Execute the host scan with the initialized source.

    Builds an inventory from the connected hosts, then invokes the
    Ansible Runner inspect playbook once per inventory group,
    checking for cancel/pause requests between groups.

    :param manager_interrupt: Signal used to communicate termination
        of scan
    :param connected: list of (host, credential) pairs to inspect
    :param base_ssh_executable: ssh executable, or None for 'ssh'.
        Will be wrapped with a timeout before being passed to Ansible.
    :param ssh_timeout: string in the format of the 'timeout' command.
        Timeout for individual tasks.
    :returns: tuple of (error message or None, scan_result status)
    :raises AnsibleRunnerException: if ansible_runner.run itself
        raises an error

    Note: base_ssh_executable & ssh_timeout are parameters
    that are only used for testing.
    """
    # pylint: disable=too-many-locals,too-many-arguments
    # pylint: disable=too-many-branches,too-many-statements
    connection_port = self.scan_task.source.port
    if self.scan_task.source.options is not None:
        use_paramiko = self.scan_task.source.options.use_paramiko
    else:
        use_paramiko = False
    # Scan-job options override the defaults for fork count and the
    # Ansible extra_vars.
    if self.scan_job.options is not None:
        forks = self.scan_job.options.max_concurrency
        extra_vars = self.scan_job.options.get_extra_vars()
    else:
        forks = ScanOptions.get_default_forks()
        extra_vars = ScanOptions.get_default_extra_vars()
    if extra_vars.get(ScanOptions.EXT_PRODUCT_SEARCH_DIRS) is None:
        extra_vars[ScanOptions.EXT_PRODUCT_SEARCH_DIRS] = \
            ' '.join(DEFAULT_SCAN_DIRS)
    # Wrap ssh with the bundled timeout_ssh helper so individual
    # tasks cannot hang indefinitely.
    ssh_executable = os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     '../../../bin/timeout_ssh'))
    base_ssh_executable = base_ssh_executable or 'ssh'
    ssh_timeout = ssh_timeout or settings.QPC_SSH_INSPECT_TIMEOUT
    # pylint: disable=line-too-long
    # the ssh arg is required for become-pass because
    # ansible checks for an exact string match of ssh
    # anywhere in the command array
    # See https://github.com/ansible/ansible/blob/stable-2.3/lib/ansible/plugins/connection/ssh.py#L490-L500  # noqa
    # timeout_ssh will remove the ssh argument before running the command
    ssh_args = [
        '--executable=' + base_ssh_executable,
        '--timeout=' + ssh_timeout,
        'ssh'
    ]
    group_names, inventory = _construct_scan_inventory(
        connected, connection_port, forks,
        ssh_executable=ssh_executable,
        ssh_args=ssh_args)
    inventory_file = write_to_yaml(inventory)
    # error_msg stays None on success; on success the return value is
    # (None, ScanTask.COMPLETED).
    error_msg = None
    log_message = 'START INSPECT PROCESSING GROUPS'\
        ' with use_paramiko: %s, '\
        '%d forks and extra_vars=%s' % (use_paramiko,
                                        forks,
                                        extra_vars)
    self.scan_task.log_message(log_message)
    scan_result = ScanTask.COMPLETED
    # Build Ansible Runner Dependencies
    for idx, group_name in enumerate(group_names):
        # Raises the appropriate exception if a cancel/pause has been
        # requested, so long scans stop at a group boundary.
        check_manager_interrupt(manager_interrupt.value)
        log_message = 'START INSPECT PROCESSING GROUP %d of %d' % (
            (idx + 1), len(group_names))
        self.scan_task.log_message(log_message)
        call = InspectResultCallback(self.scan_task, manager_interrupt)
        # Build Ansible Runner Parameters
        runner_settings = {
            'idle_timeout': int(settings.NETWORK_INSPECT_JOB_TIMEOUT),
            'job_timeout': int(settings.NETWORK_INSPECT_JOB_TIMEOUT),
            'pexpect_timeout': 5
        }
        playbook_path = os.path.join(settings.BASE_DIR,
                                     'scanner/network/runner/inspect.yml')
        extra_vars['variable_host'] = group_name
        # Assemble extra ansible-playbook command-line arguments.
        cmdline_list = []
        vault_file_path = '--vault-password-file=%s' % (
            settings.DJANGO_SECRET_PATH)
        cmdline_list.append(vault_file_path)
        forks_cmd = '--forks=%s' % (forks)
        cmdline_list.append(forks_cmd)
        if use_paramiko:
            cmdline_list.append('--connection=paramiko')
        all_commands = ' '.join(cmdline_list)
        # ANSIBLE_LOG_LEVEL == 0 means run quietly with no verbosity.
        if int(settings.ANSIBLE_LOG_LEVEL) == 0:
            quiet_bool = True
            verbosity_lvl = 0
        else:
            quiet_bool = False
            verbosity_lvl = int(settings.ANSIBLE_LOG_LEVEL)
        try:
            runner_obj = ansible_runner.run(
                quiet=quiet_bool,
                settings=runner_settings,
                inventory=inventory_file,
                extravars=extra_vars,
                event_handler=call.event_callback,
                cancel_callback=call.cancel_callback,
                playbook=playbook_path,
                cmdline=all_commands,
                verbosity=verbosity_lvl)
        except Exception as error:
            error_msg = error
            raise AnsibleRunnerException(error_msg)
        final_status = runner_obj.status
        if final_status != 'successful':
            if final_status == 'canceled':
                # The cancel_callback stopped the run: distinguish a
                # user cancel from a pause, log it, then re-raise via
                # check_manager_interrupt.
                interrupt = manager_interrupt.value
                if interrupt == ScanJob.JOB_TERMINATE_CANCEL:
                    msg = log_messages.NETWORK_PLAYBOOK_STOPPED % (
                        'INSPECT', 'canceled')
                else:
                    msg = log_messages.NETWORK_PLAYBOOK_STOPPED % (
                        'INSPECT', 'paused')
                self.scan_task.log_message(msg)
                check_manager_interrupt(interrupt)
            # 'unreachable'/'failed' statuses carry no extra message;
            # presumably their per-host results were already recorded
            # by the callback — verify against InspectResultCallback.
            if final_status not in ['unreachable', 'failed']:
                if final_status == 'timeout':
                    error_msg = log_messages.NETWORK_TIMEOUT_ERR
                else:
                    error_msg = log_messages.NETWORK_UNKNOWN_ERR
            scan_result = ScanTask.FAILED
            call.finalize_failed_hosts()
    return error_msg, scan_result
def setUp(self):
    """Create test case setup."""
    self.cred = Credential(
        name='cred1',
        username='******',
        password='******',
        ssh_keyfile='keyfile',
        become_method='sudo',
        become_user='******',
        become_password='******')
    self.cred.save()

    def make_source(**kwargs):
        # Build a saved network source with the shared credential.
        src = Source(source_type='network', port=22, **kwargs)
        src.save()
        src.credentials.add(self.cred)
        src.save()
        return src

    # Source with excluded hosts
    self.source = make_source(
        name='source1',
        hosts='["1.2.3.4", "1.2.3.5"]',
        exclude_hosts='["1.2.3.5", "1.2.3.6"]')
    self.scan_job, self.scan_task = create_scan_job(
        self.source, ScanTask.SCAN_TYPE_CONNECT)
    self.scan_task.update_stats('TEST NETWORK CONNECT.', sys_failed=0)

    # Source without excluded hosts
    self.source2 = make_source(name='source2', hosts='["1.2.3.4"]')
    self.scan_job2, self.scan_task2 = create_scan_job(
        self.source2, ScanTask.SCAN_TYPE_CONNECT, 'source2')
    self.scan_task2.update_stats('TEST NETWORK CONNECT.', sys_failed=0)

    # Scan with options & no excluded hosts
    paramiko_options = SourceOptions(use_paramiko=True)
    paramiko_options.save()
    self.source3 = make_source(
        name='source3',
        hosts='["1.2.3.4","1.2.3.5","1.2.3.6"]',
        options=paramiko_options)
    concurrency_options = ScanOptions(max_concurrency=2)
    concurrency_options.save()
    self.scan_job3, self.scan_task3 = create_scan_job(
        self.source3, ScanTask.SCAN_TYPE_CONNECT, 'source3',
        concurrency_options)
    self.scan_task3.update_stats('TEST NETWORK CONNECT.', sys_failed=0)

    self.concurrency = ScanOptions.get_default_forks()