def test_inspect_scan_error(self, mock_scan):
    """Test scan flow with mocked manager and failure."""
    mock_scan.side_effect = AnsibleRunnerException()
    scanner = InspectTaskRunner(self.scan_job, self.scan_task)
    scan_task_status = scanner.run(Value('i', ScanJob.JOB_RUN))
    mock_scan.assert_called_with(ANY, self.host_list)
    self.assertEqual(scan_task_status[1], ScanTask.FAILED)

def test_connect_exception(self, mock_run):
    """Test connect flow when the runner raises an exception."""
    # Test failure surfaced at the run() level
    mock_run.side_effect = AnsibleRunnerException('fail')
    scanner = ConnectTaskRunner(self.scan_job3, self.scan_task3)
    _, scan_result = scanner.run(Value('i', ScanJob.JOB_RUN))
    self.assertEqual(scan_result, ScanTask.FAILED)

def test_inspect_scan_failure(self, mock_run):
    """Test that a mocked runner failure propagates from _inspect_scan."""
    mock_run.side_effect = AnsibleRunnerException()
    scanner = InspectTaskRunner(self.scan_job, self.scan_task)
    scanner.connect_scan_task = self.connect_scan_task
    with self.assertRaises(AnsibleRunnerException):
        scanner._inspect_scan(Value('i', ScanJob.JOB_RUN), self.host_list)
    # The mock assertion must sit outside the assertRaises block,
    # otherwise it never runs once the exception is raised.
    mock_run.assert_called()

def test_secret_file_fail(self, mock_run):
    """Test connect failure when the runner raises an exception."""
    mock_run.side_effect = AnsibleRunnerException()
    serializer = SourceSerializer(self.source2)
    source = serializer.data
    hosts = source['hosts']
    connection_port = source['port']
    with self.assertRaises(AnsibleRunnerException):
        _connect(Value('i', ScanJob.JOB_RUN),
                 self.scan_task, hosts, Mock(), self.cred,
                 connection_port, self.concurrency)
    mock_run.assert_called()

def test_connect_runner_error(self, mock_run):
    """Test connect flow with mocked manager and runner error."""
    mock_run.side_effect = AnsibleRunnerException('Fail')
    serializer = SourceSerializer(self.source2)
    source = serializer.data
    hosts = source['hosts']
    connection_port = source['port']
    with self.assertRaises(AnsibleRunnerException):
        _connect(Value('i', ScanJob.JOB_RUN), self.scan_task,
                 hosts, Mock(), self.cred, connection_port,
                 self.concurrency)
    mock_run.assert_called()
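
The methods above are shown without the mock patch decorators their original test classes apply. A minimal, self-contained sketch of that wiring, assuming ansible_runner.run as an illustrative patch target (call_runner and RunnerFailureTest are hypothetical names, not part of the tests above):

import unittest
from unittest.mock import patch

import ansible_runner
from ansible_runner.exceptions import AnsibleRunnerException


def call_runner():
    """Hypothetical helper that simply delegates to ansible_runner.run."""
    return ansible_runner.run(playbook='site.yml')


class RunnerFailureTest(unittest.TestCase):
    """Sketch of the decorator-based mock wiring used by tests like those above."""

    @patch('ansible_runner.run')
    def test_runner_failure(self, mock_run):
        """A runner exception should propagate to the caller."""
        mock_run.side_effect = AnsibleRunnerException('fail')
        with self.assertRaises(AnsibleRunnerException):
            call_runner()
        # Mock assertions belong outside the assertRaises block; inside it
        # they would never execute once the exception is raised.
        mock_run.assert_called_once()


if __name__ == '__main__':
    unittest.main()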
Example #6
    def event_callback(self, event_dict=None):
        """Control the event callback for Ansible Runner."""
        try:
            # Nothing to do if the runner handed us an empty payload
            if not event_dict:
                return

            okay = ['runner_on_ok', 'runner_item_on_ok']
            failed = ['runner_on_failed', 'runner_item_on_failed']
            unreachable = ['runner_on_unreachable']
            runner_ignore = ['runner_on_skipped',
                             'runner_item_on_skipped']
            event = event_dict.get('event')
            event_data = event_dict.get('event_data')

            ignore_states = ['runner_on_start']
            if event in ignore_states:
                return

            # Check if it is a task event
            if 'runner' in event:
                if event in okay:
                    self.task_on_ok(event_dict)
                elif event in failed:
                    self.task_on_failed(event_dict)
                elif event in unreachable:
                    self.task_on_unreachable(event_dict)
                elif event not in runner_ignore:
                    self.scan_task.log_message(
                        log_messages.TASK_UNEXPECTED_FAILURE % (
                            'event_callback', event, event_dict),
                        log_level=logging.ERROR)

            # Save last role for task logging later
            if event == 'playbook_on_task_start' and event_data:
                event_role = event_data.get('role')
                if event_role != self.last_role:
                    self.last_role = event_role
        except Exception as err_msg:
            raise AnsibleRunnerException(err_msg)
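
Ansible Runner invokes such a handler with plain dictionaries that carry at least an 'event' name and an 'event_data' payload. A small standalone sketch of that routing (the event values below are illustrative, and route() is a stand-in written for this sketch, not the project's callback):

sample_events = [
    {'event': 'runner_on_ok',
     'event_data': {'host': '192.0.2.10', 'task': 'gather facts', 'role': 'demo_role'}},
    {'event': 'runner_on_unreachable',
     'event_data': {'host': '192.0.2.11', 'task': 'gather facts'}},
    {'event': 'runner_on_skipped',
     'event_data': {'host': '192.0.2.12', 'task': 'optional step'}},
]


def route(event_dict):
    """Tiny stand-in mirroring the routing logic above."""
    event = event_dict.get('event')
    if event in ('runner_on_ok', 'runner_item_on_ok'):
        return 'ok'
    if event in ('runner_on_failed', 'runner_item_on_failed'):
        return 'failed'
    if event == 'runner_on_unreachable':
        return 'unreachable'
    return 'ignored'


for evt in sample_events:
    print(evt['event'], '->', route(evt))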
Example #7
def role_manager(args):
    if args.role:
        role = {'name': args.role}
        if args.role_vars:
            role_vars = {}
            for item in shlex.split(args.role_vars):
                key, value = item.split('=')
                role_vars[key] = value
            role['vars'] = role_vars

        kwargs = Bunch(**args.__dict__)
        kwargs.update(private_data_dir=args.private_data_dir,
                      json_mode=args.json,
                      ignore_logging=False,
                      rotate_artifacts=args.rotate_artifacts)
        if args.artifact_dir:
            kwargs.artifact_dir = args.artifact_dir

        project_path = os.path.join(args.private_data_dir, 'project')
        project_exists = os.path.exists(project_path)

        env_path = os.path.join(args.private_data_dir, 'env')
        env_exists = os.path.exists(env_path)

        envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
        envvars_exists = os.path.exists(envvars_path)

        if args.cmdline:
            kwargs.cmdline = args.cmdline

        playbook = None
        tmpvars = None

        play = [{'hosts': args.hosts if args.hosts is not None else "all",
                 'gather_facts': not args.role_skip_facts,
                 'roles': [role]}]

        filename = str(uuid4().hex)

        playbook = dump_artifact(json.dumps(play), project_path, filename)
        kwargs.playbook = playbook
        output.debug('using playbook file %s' % playbook)

        if args.inventory:
            inventory_file = os.path.join(args.private_data_dir, 'inventory', args.inventory)
            if not os.path.exists(inventory_file):
                raise AnsibleRunnerException('location specified by --inventory does not exist')
            kwargs.inventory = inventory_file
            output.debug('using inventory file %s' % inventory_file)

        roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
        roles_path = os.path.abspath(roles_path)
        output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

        envvars = {}
        if envvars_exists:
            with open(envvars_path, 'rb') as f:
                tmpvars = f.read()
                envvars = safe_load(tmpvars)

        envvars['ANSIBLE_ROLES_PATH'] = roles_path
        kwargs.envvars = envvars
    else:
        kwargs = args

    yield kwargs

    if args.role:
        if not project_exists and os.path.exists(project_path):
            logger.debug('removing dynamically generated project folder')
            shutil.rmtree(project_path)
        elif playbook and os.path.isfile(playbook):
            logger.debug('removing dynamically generated playbook')
            os.remove(playbook)

        # if a previous envvars existed in the private_data_dir,
        # restore the original file contents
        if tmpvars:
            with open(envvars_path, 'wb') as f:
                f.write(tmpvars)
        elif not envvars_exists and os.path.exists(envvars_path):
            logger.debug('removing dynamically generated envvars folder')
            os.remove(envvars_path)

        # since ansible-runner created the env folder, remove it
        if not env_exists and os.path.exists(env_path):
            logger.debug('removing dynamically generated env folder')
            shutil.rmtree(env_path)
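
role_manager yields once and then undoes the artifacts it created, so it is presumably meant to run under contextlib.contextmanager (the decorator is not shown in this listing). A sketch of driving it that way, assuming the generated kwargs exposes its fields as attributes as the code above suggests (show_role_run_plan is a hypothetical helper):

from contextlib import contextmanager

# Assumption: wrap the generator above so it can be used in a with-statement.
role_manager_cm = contextmanager(role_manager)


def show_role_run_plan(args):
    """Peek at what role_manager prepares for a direct role invocation."""
    with role_manager_cm(args) as kwargs:
        # Inside the block, a throwaway playbook referencing the role exists
        # under <private_data_dir>/project and ANSIBLE_ROLES_PATH has been
        # injected into the env vars handed to the runner.
        print('playbook:', kwargs.playbook)
        print('roles path:', kwargs.envvars['ANSIBLE_ROLES_PATH'])
    # Leaving the block removes the generated playbook and env artifacts again.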
Example #8
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc.)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument("-i", "--ident",
                        default=uuid4(),
                        help="An identifier that will be used when generating the"
                             "artifacts directory and can be used to uniquely identify a playbook run")

    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")
    parser.add_argument("--artifact-dir",
                        help="Optional Path for the artifact root directory, by default it is located inside the private data dir")

    parser.add_argument("--inventory",
                        help="Override the default inventory location in private_data_dir")

    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")

    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")

    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Disable all output")

    parser.add_argument("--cmdline",
                        help="Command line options to pass to ansible-playbook at execution time")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug output logging")

    parser.add_argument("--logfile",
                        help="Log output messages to a file")

    args = parser.parse_args()

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):
        if args.role:
            role = {'name': args.role}
            if args.role_vars:
                role_vars = {}
                for item in shlex.split(args.role_vars):
                    key, value = item.split('=')
                    role_vars[key] = value
                role['vars'] = role_vars

            kwargs = dict(private_data_dir=args.private_data_dir,
                          json_mode=args.json)
            if args.artifact_dir:
                kwargs['artifact_dir'] = args.artifact_dir

            project_path = os.path.abspath(os.path.join(args.private_data_dir, 'project'))
            project_exists = os.path.exists(project_path)

            env_path = os.path.join(args.private_data_dir, 'env')
            env_exists = os.path.exists(env_path)

            envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
            envvars_exists = os.path.exists(envvars_path)

            if args.cmdline:
                kwargs['cmdline'] = args.cmdline

            playbook = None
            tmpvars = None

            rc = 255
            errmsg = None

            try:
                play = [{'hosts': args.hosts if args.hosts is not None else "all",
                         'gather_facts': not args.role_skip_facts,
                         'roles': [role]}]

                filename = str(uuid4().hex)

                playbook = dump_artifact(json.dumps(play), project_path, filename)
                kwargs['playbook'] = playbook
                output.debug('using playbook file %s' % playbook)

                if args.inventory:
                    inventory_file = os.path.abspath(os.path.join(args.private_data_dir, 'inventory', args.inventory))
                    if not os.path.exists(inventory_file):
                        raise AnsibleRunnerException('location specified by --inventory does not exist')
                    kwargs['inventory'] = inventory_file
                    output.debug('using inventory file %s' % inventory_file)

                roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
                roles_path = os.path.abspath(roles_path)
                output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

                envvars = {}
                if envvars_exists:
                    with open(envvars_path, 'rb') as f:
                        tmpvars = f.read()
                        envvars = safe_load(tmpvars)

                envvars['ANSIBLE_ROLES_PATH'] = roles_path
                kwargs['envvars'] = envvars

                res = run(**kwargs)
                rc = res.rc

            except AnsibleRunnerException as exc:
                errmsg = str(exc)

            finally:
                if not project_exists and os.path.exists(project_path):
                    logger.debug('removing dynamically generated project folder')
                    shutil.rmtree(project_path)
                elif playbook and os.path.isfile(playbook):
                    logger.debug('removing dynamically generated playbook')
                    os.remove(playbook)

                # if a previous envvars existed in the private_data_dir,
                # restore the original file contents
                if tmpvars:
                    with open(envvars_path, 'wb') as f:
                        f.write(tmpvars)
                elif not envvars_exists and os.path.exists(envvars_path):
                    logger.debug('removing dynamically generated envvars folder')
                    os.remove(envvars_path)

                # since ansible-runner created the env folder, remove it
                if not env_exists and os.path.exists(env_path):
                    logger.debug('removing dynamically generated env folder')
                    shutil.rmtree(env_path)

            if errmsg:
                print('ansible-runner: ERROR: %s' % errmsg)

            sys.exit(rc)

        elif args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            run_options = dict(private_data_dir=args.private_data_dir,
                               ident=args.ident,
                               playbook=args.playbook,
                               verbosity=args.v,
                               quiet=args.quiet,
                               json_mode=args.json)

            if args.hosts is not None:
                run_options.update(inventory=args.hosts)

            if args.cmdline:
                run_options['cmdline'] = args.cmdline

            run(**run_options)
            sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as args:
                Runner.handle_termination(pid, json.load(args), 'bwrap')
        except IOError:
            Runner.handle_termination(pid, [], 'bwrap')

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
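
The run command ultimately funnels into the run(**run_options) call near the end of main(). A minimal programmatic equivalent of `ansible-runner run <private_data_dir> -p <playbook>` (the directory and playbook name below are placeholders):

import ansible_runner

res = ansible_runner.run(private_data_dir='/tmp/demo-runner',
                         playbook='site.yml',
                         quiet=False,
                         json_mode=False)
# res.status is e.g. 'successful', 'failed', 'timeout'; res.rc mirrors ansible-playbook's exit code
print(res.status, res.rc)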
Example #9
    def _inspect_scan(self,
                      manager_interrupt,
                      connected,
                      base_ssh_executable=None,
                      ssh_timeout=None):
        """Execute the host scan with the initialized source.

        :param manager_interrupt: Signal used to communicate termination
            of scan
        :param connected: list of (host, credential) pairs to inspect
        :param base_ssh_executable: ssh executable, or None for
            'ssh'. Will be wrapped with a timeout before being passed
            to Ansible.
        :param ssh_timeout: string in the format of the 'timeout'
            command. Timeout for individual tasks.
        :returns: A tuple of an error message (or None) and the
            scan task status

        Note: base_ssh_executable & ssh_timeout are parameters that
        are only used for testing.
        """
        # pylint: disable=too-many-locals,too-many-arguments
        # pylint: disable=too-many-branches,too-many-statements
        connection_port = self.scan_task.source.port

        if self.scan_task.source.options is not None:
            use_paramiko = self.scan_task.source.options.use_paramiko
        else:
            use_paramiko = False

        if self.scan_job.options is not None:
            forks = self.scan_job.options.max_concurrency
            extra_vars = self.scan_job.options.get_extra_vars()
        else:
            forks = ScanOptions.get_default_forks()
            extra_vars = ScanOptions.get_default_extra_vars()

        if extra_vars.get(ScanOptions.EXT_PRODUCT_SEARCH_DIRS) is None:
            extra_vars[ScanOptions.EXT_PRODUCT_SEARCH_DIRS] = \
                ' '.join(DEFAULT_SCAN_DIRS)

        ssh_executable = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         '../../../bin/timeout_ssh'))

        base_ssh_executable = base_ssh_executable or 'ssh'
        ssh_timeout = ssh_timeout or settings.QPC_SSH_INSPECT_TIMEOUT
        # pylint: disable=line-too-long
        # the ssh arg is required for become-pass because
        # ansible checks for an exact string match of ssh
        # anywhere in the command array
        # See https://github.com/ansible/ansible/blob/stable-2.3/lib/ansible/plugins/connection/ssh.py#L490-L500 # noqa
        # timeout_ssh will remove the ssh argument before running the command
        ssh_args = [
            '--executable=' + base_ssh_executable, '--timeout=' + ssh_timeout,
            'ssh'
        ]

        group_names, inventory = _construct_scan_inventory(
            connected,
            connection_port,
            forks,
            ssh_executable=ssh_executable,
            ssh_args=ssh_args)
        inventory_file = write_to_yaml(inventory)

        error_msg = None
        log_message = 'START INSPECT PROCESSING GROUPS'\
            ' with use_paramiko: %s, '\
            '%d forks and extra_vars=%s' % (use_paramiko,
                                            forks,
                                            extra_vars)
        self.scan_task.log_message(log_message)
        scan_result = ScanTask.COMPLETED

        # Build Ansible Runner Dependencies
        for idx, group_name in enumerate(group_names):
            check_manager_interrupt(manager_interrupt.value)
            log_message = 'START INSPECT PROCESSING GROUP %d of %d' % (
                (idx + 1), len(group_names))
            self.scan_task.log_message(log_message)
            call = InspectResultCallback(self.scan_task, manager_interrupt)

            # Build Ansible Runner Parameters
            runner_settings = {
                'idle_timeout': int(settings.NETWORK_INSPECT_JOB_TIMEOUT),
                'job_timeout': int(settings.NETWORK_INSPECT_JOB_TIMEOUT),
                'pexpect_timeout': 5
            }
            playbook_path = os.path.join(settings.BASE_DIR,
                                         'scanner/network/runner/inspect.yml')
            extra_vars['variable_host'] = group_name
            cmdline_list = []
            vault_file_path = '--vault-password-file=%s' % (
                settings.DJANGO_SECRET_PATH)
            cmdline_list.append(vault_file_path)
            forks_cmd = '--forks=%s' % (forks)
            cmdline_list.append(forks_cmd)
            if use_paramiko:
                cmdline_list.append('--connection=paramiko')
            all_commands = ' '.join(cmdline_list)

            if int(settings.ANSIBLE_LOG_LEVEL) == 0:
                quiet_bool = True
                verbosity_lvl = 0
            else:
                quiet_bool = False
                verbosity_lvl = int(settings.ANSIBLE_LOG_LEVEL)

            try:
                runner_obj = ansible_runner.run(
                    quiet=quiet_bool,
                    settings=runner_settings,
                    inventory=inventory_file,
                    extravars=extra_vars,
                    event_handler=call.event_callback,
                    cancel_callback=call.cancel_callback,
                    playbook=playbook_path,
                    cmdline=all_commands,
                    verbosity=verbosity_lvl)
            except Exception as error:
                error_msg = error
                raise AnsibleRunnerException(error_msg)

            final_status = runner_obj.status
            if final_status != 'successful':
                if final_status == 'canceled':
                    interrupt = manager_interrupt.value
                    if interrupt == ScanJob.JOB_TERMINATE_CANCEL:
                        msg = log_messages.NETWORK_PLAYBOOK_STOPPED % (
                            'INSPECT', 'canceled')
                    else:
                        msg = log_messages.NETWORK_PLAYBOOK_STOPPED % (
                            'INSPECT', 'paused')
                    self.scan_task.log_message(msg)
                    check_manager_interrupt(interrupt)
                if final_status not in ['unreachable', 'failed']:
                    if final_status == 'timeout':
                        error_msg = log_messages.NETWORK_TIMEOUT_ERR
                    else:
                        error_msg = log_messages.NETWORK_UNKNOWN_ERR
                    scan_result = ScanTask.FAILED
                call.finalize_failed_hosts()
        return error_msg, scan_result
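
The failure handling above keys off runner_obj.status. A sketch of exercising those branches in isolation by stubbing ansible_runner.run with a Mock whose status is 'timeout' (the playbook name and rc value are placeholders chosen for the sketch):

from unittest.mock import Mock, patch

import ansible_runner

fake_result = Mock(status='timeout', rc=254)

with patch('ansible_runner.run', return_value=fake_result) as mock_run:
    result = ansible_runner.run(playbook='inspect.yml')
    # A 'timeout' status would drive the NETWORK_TIMEOUT_ERR path above
    assert result.status == 'timeout'
    mock_run.assert_called_once()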
Example #10
def _connect(manager_interrupt,
             scan_task,
             hosts,
             result_store,
             credential,
             connection_port,
             forks,
             use_paramiko=False,
             exclude_hosts=None,
             base_ssh_executable=None,
             ssh_timeout=None):
    """Attempt to connect to hosts using the given credential.

    :param manager_interrupt: Signal used to communicate termination of scan
    :param scan_task: The scan task for this connection job
    :param hosts: The collection of hosts to test connections
    :param result_store: The result store to accept the results.
    :param credential: The credential used for connections
    :param connection_port: The connection port
    :param forks: number of forks to run with
    :param use_paramiko: use paramiko instead of ssh for connection
    :param exclude_hosts: Optional. Hosts to exclude from test connections
    :param base_ssh_executable: ssh executable, or None for
        'ssh'. Will be wrapped with a timeout before being passed
        to Ansible.
    :param ssh_timeout: string in the format of the 'timeout'
        command. Timeout for individual tasks.
    :returns: A tuple of an error message (or None) and the
        scan task status
    """
    cred_data = CredentialSerializer(credential).data

    ssh_executable = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '../../../bin/timeout_ssh'))

    base_ssh_executable = base_ssh_executable or 'ssh'
    ssh_timeout = ssh_timeout or settings.QPC_SSH_CONNECT_TIMEOUT

    # pylint: disable=line-too-long
    # the ssh arg is required for become-pass because
    # ansible checks for an exact string match of ssh
    # anywhere in the command array
    # See https://github.com/ansible/ansible/blob/stable-2.3/lib/ansible/plugins/connection/ssh.py#L490-L500 # noqa
    # timeout_ssh will remove the ssh argument before running the command
    ssh_args = [
        '--executable=' + base_ssh_executable, '--timeout=' + ssh_timeout,
        'ssh'
    ]
    group_names, inventory = _construct_connect_inventory(
        hosts, cred_data, connection_port, forks, exclude_hosts,
        ssh_executable, ssh_args)
    inventory_file = write_to_yaml(inventory)
    _handle_ssh_passphrase(cred_data)

    log_message = 'START CONNECT PROCESSING GROUPS'\
        ' with use_paramiko: %s and %d forks' % (use_paramiko, forks)
    scan_task.log_message(log_message)
    for idx, group_name in enumerate(group_names):
        check_manager_interrupt(manager_interrupt.value)
        group_ips = inventory.get('all').get('children').get(group_name).get(
            'hosts').keys()
        group_ips = ["'%s'" % ip for ip in group_ips]
        group_ip_string = ', '.join(group_ips)
        log_message = 'START CONNECT PROCESSING GROUP %d of %d. '\
            'About to connect to hosts [%s]' % (
                (idx + 1), len(group_names), group_ip_string)
        scan_task.log_message(log_message)
        call = ConnectResultCallback(result_store, credential,
                                     scan_task.source, manager_interrupt)

        # Create parameters for ansible runner
        runner_settings = {
            'job_timeout': int(settings.NETWORK_CONNECT_JOB_TIMEOUT)
        }
        extra_vars_dict = {'variable_host': group_name}
        playbook_path = os.path.join(settings.BASE_DIR,
                                     'scanner/network/runner/connect.yml')
        cmdline_list = []
        vault_file_path = '--vault-password-file=%s' % (
            settings.DJANGO_SECRET_PATH)
        cmdline_list.append(vault_file_path)
        forks_cmd = '--forks=%s' % (forks)
        cmdline_list.append(forks_cmd)
        if use_paramiko:
            cmdline_list.append('--connection=paramiko')  # paramiko conn
        all_commands = ' '.join(cmdline_list)
        if int(settings.ANSIBLE_LOG_LEVEL) == 0:
            quiet_bool = True
            verbosity_lvl = 0
        else:
            quiet_bool = False
            verbosity_lvl = int(settings.ANSIBLE_LOG_LEVEL)
        try:
            runner_obj = ansible_runner.run(
                quiet=quiet_bool,
                settings=runner_settings,
                inventory=inventory_file,
                extravars=extra_vars_dict,
                event_handler=call.event_callback,
                cancel_callback=call.cancel_callback,
                playbook=playbook_path,
                cmdline=all_commands,
                verbosity=verbosity_lvl)
        except Exception as err_msg:
            raise AnsibleRunnerException(err_msg)

        final_status = runner_obj.status
        if final_status != 'successful':
            if final_status == 'canceled':
                if manager_interrupt.value == ScanJob.JOB_TERMINATE_CANCEL:
                    msg = log_messages.NETWORK_PLAYBOOK_STOPPED % ('CONNECT',
                                                                   'canceled')
                    return msg, scan_task.CANCELED
                msg = log_messages.NETWORK_PLAYBOOK_STOPPED % ('CONNECT',
                                                               'paused')
                return msg, scan_task.PAUSED
            if final_status not in ['unreachable', 'failed', 'canceled']:
                if final_status == 'timeout':
                    error = log_messages.NETWORK_TIMEOUT_ERR
                else:
                    error = log_messages.NETWORK_UNKNOWN_ERR
                if scan_task.systems_scanned:
                    msg = log_messages.NETWORK_CONNECT_CONTINUE % (
                        final_status, str(scan_task.systems_scanned), error)
                    scan_task.log_message(msg, log_level=logging.ERROR)
                else:
                    msg = log_messages.NETWORK_CONNECT_FAIL % (final_status,
                                                               error)
                    return msg, scan_task.FAILED
    return None, scan_task.COMPLETED
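
The group lookup at the top of the loop implies that the inventory built by _construct_connect_inventory nests hosts under all -> children -> <group> -> hosts. A sketch of that shape with made-up group name and addresses (the real per-host values would carry connection variables):

inventory = {
    'all': {
        'children': {
            'group_0': {
                'hosts': {
                    '192.0.2.10': None,
                    '192.0.2.11': None,
                },
            },
        },
    },
}

# Mirrors the lookup performed inside the loop above
group_ips = inventory['all']['children']['group_0']['hosts'].keys()
print(', '.join("'%s'" % ip for ip in group_ips))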