Example #1
def setup_logger():
    """Setup our logging instance.
    Returns:
        A logger for writing to two seperate files depending on error or debug messages
    """
    logger = logging.getLogger()
    logger.setLevel(logging.NOTSET)
    formatter = logging.Formatter(
            '%(asctime)s {%(pathname)s:%(lineno)d}: \t%(message)s')

    handler = logging.FileHandler(
        filename=join_abs(
            ROOT_DIR,
            "logs",
            "drrobot.err"))
    handler.setLevel(logging.ERROR)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    handler = logging.FileHandler(
        filename=join_abs(
            ROOT_DIR,
            "logs",
            "drrobot.dbg"))
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # Silence noisy urllib3 connection-pool logging
    logging.getLogger("urllib3.connectionpool").disabled = True

    return logger
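
A minimal usage sketch; the ROOT_DIR constant and join_abs helper below are stand-ins (assumptions) for the project's own:

import logging
from os import makedirs, path

# Assumed stand-ins for the project's ROOT_DIR constant and join_abs helper
ROOT_DIR = path.expanduser("~/.drrobot")

def join_abs(*paths):
    return path.abspath(path.join(*paths))

makedirs(join_abs(ROOT_DIR, "logs"), exist_ok=True)
logger = setup_logger()
logger.debug("recorded in drrobot.dbg only")
logger.error("recorded in both drrobot.err and drrobot.dbg")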
Example #2
def create_dirs(parser):
    """Create directories for domain under output folder
    """
    args = parser.parse_args()
    if getattr(args, 'domain', None):
        if not path.exists(
                join_abs(ROOT_DIR, "output", getattr(args, 'domain'))):
            makedirs(
                join_abs(ROOT_DIR, "output", getattr(args, 'domain')))

        if not path.exists(
                join_abs(ROOT_DIR, "output", getattr(args, 'domain'),
                         "aggregated")):
            makedirs(
                join_abs(ROOT_DIR, "output", getattr(args, 'domain'),
                         "aggregated"))
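
On Python 3.2+ the exists() checks can be collapsed with exist_ok=True; a hedged equivalent sketch (ROOT_DIR is the same module-level constant used above):

from os import makedirs
from os.path import join

def create_dirs(parser):
    """Equivalent sketch: exist_ok=True makes makedirs idempotent."""
    args = parser.parse_args()
    domain = getattr(args, 'domain', None)
    if domain:
        # Creating .../<domain>/aggregated implicitly creates .../<domain>
        makedirs(join(ROOT_DIR, "output", domain, "aggregated"),
                 exist_ok=True)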
Example #3
    def generate_output(self, _format, output_file):
        """Dumps contents of sqlite3 file into an alternative text format

        Args:
            _format:        format of output file [xml, json]
            output_file:    (Optional) filename to dump contents to

        Returns:
            (None)
        """
        if not output_file:
            output_file = join_abs(self.OUTPUT_DIR, f"output.{_format}")
        file_index = self.aggregation.gen_output()
        if 'json' in _format:
            print("Generating JSON")
            try:
                # Dump the file index as JSON; failures are reported below
                with open(output_file, 'w') as _file:
                    json.dump(file_index, _file, indent="\t")
            except (TypeError, ValueError) as error:
                # json.dump raises TypeError/ValueError for unserializable
                # data, not json.JSONDecodeError (which is decode-only)
                self._print(str(error))
        elif "xml" in _format:
            try:
                with open(output_file, 'w') as _file:
                    xmlout = dicttoxml.dicttoxml(file_index)
                    dom = parseString(xmlout)
                    _file.write(dom.toprettyxml())
            except (TypeError, AttributeError):
                self._print("Error in generate_output check logs")
                LOG.exception("Error in generate output")
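
The xml branch is a dicttoxml plus minidom round trip; this standalone sketch exercises the same conversion on made-up sample data:

import dicttoxml
from xml.dom.minidom import parseString

sample = {"host": {"hostnames": ["www.example.com"], "images": []}}
xml_bytes = dicttoxml.dicttoxml(sample)      # flat XML document as bytes
print(parseString(xml_bytes).toprettyxml())  # re-parsed and indented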
Example #4
def start_dumpdb(drrobot, parser):
    """Dump database to output folder for given domain

    Generates all header text files and aggregated files under
    $HOME/.drrobot/output/<domain>/aggregated
    """
    args = parser.parse_args()
    dbpath = getattr(args, "dbfile")

    if path.exists(dbpath):
        if not path.exists(
                join_abs(ROOT_DIR, "output", getattr(args, 'domain'),
                         "headers")):
            makedirs(join_abs(ROOT_DIR, "output", getattr(args, 'domain'),
                              "headers"))
        drrobot.dumpdb()
    else:
        print("[!] DB file does not exists, try running gather first")
Example #5
    def __init__(self, **kwargs):
        """Initialize Robot object.

        Args:
            dns (str): Added DNS for host configuration
            proxy (str): Proxy url of format "http://proxy.foo.bar:port"
            domain (str): Target domain
            root_dir (str): Base directory containing config.json and template folders
            verbose (bool): verbose output on/off
            dbfile (str): Alternative database file to use

        Returns:
            None
        """
        self.domain = kwargs.get("domain", None)
        self.ROOT_DIR = kwargs.get("root_dir")
        if self.domain:
            self.OUTPUT_DIR = join_abs(self.ROOT_DIR, "output", self.domain)
            self.aggregation = Aggregation(kwargs.get("dbfile"), self.domain,
                                           self.OUTPUT_DIR)
        self.dns = kwargs.get("dns", None)
        self.proxy = kwargs.get("proxy", None)
        self.verbose = kwargs.get("verbose", False)
        self.dbfile = kwargs.get("dbfile")

        # Disable warnings for insecure requests
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
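
A hedged construction sketch; every value is a placeholder for what the CLI and config normally supply (ROOT_DIR and join_abs come from the project's utility module):

# Hypothetical values for illustration only
robot = Robot(root_dir=ROOT_DIR,
              domain="example.com",
              dns=None,
              proxy="http://proxy.example.com:8080",
              verbose=True,
              dbfile=join_abs(ROOT_DIR, "dbs", "drrobot.db"))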
Example #6
    def upload(self, **kwargs):
        """File upload

        Args:
            filepath (str): optional filepath to check for files to upload
        Returns:

        """
        print("[*] doing post message")
        try:
            self.inst.login()

            team_id = self.inst.teams.get_team_by_name(self.team_name)['id']
            channel_id = self.inst.channels.get_channel_by_name(
                channel_name=self.channel_name, team_id=team_id)['id']
        except exceptions.NoAccessTokenProvided as er:
            print(f"[!] NoAccessTokenProvided {er}")
            LOG.exception("NoAccessTokenProvided in upload")
            # Without a channel_id there is nothing to upload to
            return
        except exceptions.InvalidOrMissingParameters as er:
            print(f"[!] InvalidOrMissingParameters {er}")
            LOG.exception("InvalidOrMissingParameters in upload")
            return

        try:
            if isfile(self.filepath):
                with open(join_abs(self.filepath), 'rb') as file_handle:
                    file_ids = [
                        self.inst.files.upload_file(
                            channel_id=channel_id,
                            files={
                                'files': (basename(self.filepath),
                                          file_handle)
                            })['file_infos'][0]['id']
                    ]

                self.inst.posts.create_post(
                    options={
                        'channel_id': channel_id,
                        'message': f"Recon Data {datetime.datetime.now()}",
                        'file_ids': file_ids
                    })

            elif isdir(self.filepath):
                file_location = abspath(self.filepath)

                self._upload_files(file_location, channel_id)

        except exceptions.ContentTooLarge:
            print("[!] ContentTooLarge in upload")
            LOG.exception("ContentTooLarge in upload")
        except exceptions.ResourceNotFound:
            print("[!] ResourceNotFound in upload")
            LOG.exception("ResourceNotFound in upload")
        except OSError:
            print("[!] File not found in upload")
            LOG.exception("File not found in upload")
Example #7
    def _upload_files(self, file_location, channel_id):
        file_ids = []
        for root, dirs, files in walk(file_location):
            for filename in files:
                # TODO add optional parameters for adjusting size. Implement
                # file splitting
                print(f"[...] Uploading {filename}")
                if stat(join_abs(root, filename)).st_size / 1024**2 > 49:
                    print(f"[!]\tFile {filename} is too big, ignoring for now")
                    continue
                file_ids += [
                    self.inst.files.upload_file(
                        channel_id=channel_id,
                        files={
                            'files': (filename,
                                      open(join_abs(root, filename), 'rb'))
                        })['file_infos'][0]['id']
                ]
                # Post in batches of five attachments
                if len(file_ids) >= 5:
                    self.inst.posts.create_post(
                        options={
                            'channel_id': channel_id,
                            'message':
                            f"Recon Data {datetime.datetime.now()}",
                            'file_ids': file_ids
                        })
                    file_ids = []
        if len(file_ids) > 0:
            self.inst.posts.create_post(
                options={
                    'channel_id': channel_id,
                    'message': f"Recon Data {datetime.datetime.now()}",
                    'file_ids': file_ids
                })
Example #8
    def _get_files(self, file_location, max_filesize=50):
        """Generator to grab individual files for upload

        Args:
            file_location (str): Location of file(s) to upload
            max_filesize (int): Max allowed file size, in megabytes

        Yields:
            (Tuple) (filename, path to file)
        """
        if not exists(file_location):
            return None

        for root, _, files in walk(file_location):
            for filename in files:
                # TODO add optional parameters for adjusting size. Implement
                # file splitting
                print(f"[...] Uploading {filename}")
                if stat(join_abs(root, filename)).st_size / \
                        1024 ** 2 > max_filesize:
                    print(f"[!]\tFile {filename} is too big, ignoring for now")
                    continue
                yield (filename, join_abs(root, filename))
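
Because _get_files is a generator, callers can stream candidates one at a time instead of listing everything up front; a minimal consumption sketch, where uploader stands for an instance of this class:

for filename, filepath in uploader._get_files("/tmp/output", max_filesize=50):
    # Each yielded tuple is (bare filename, absolute path)
    with open(filepath, "rb") as handle:
        print(f"would upload {filename}: {len(handle.read())} bytes")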
Example #9
    def __init__(self, **kwargs):
        """
        Build Ansible object

        Args:

            **kwargs: {
                "ansible_arguments" : {
                    "config" : "$config/httpscreenshot_play.yml",
                    "flags": "-e '$extra' -i configs/ansible_inventory",
                    "extra_flags":{
                        "1" : "variable_host=localhost",
                        "2" : "variable_user=user", 
                        "3" : "infile=$infile",
                        "4" : "outfile=$outfile/httpscreenshots.tar",
                        "5" : "outfolder=$outfile/httpscreenshots"
                    }
                },
                "ansible_file_location" : "location",
                "verbose" : True,
                "domain" : "target.domain"
            }

        Returns:

        """
        self.ansible_base = "ansible-playbook $flags $config"
        self.ansible_arguments = kwargs.get('ansible_arguments')
        self.ansible_file = kwargs.get('ansible_file_location', None)
        if not self.ansible_file:
            raise TypeError(
                "argument ansible_file must be of type string, not 'NoneType'")

        self.domain = kwargs.get('domain', None)
        if not self.domain:
            raise TypeError(
                "argument domain must be of type string, not 'NoneType'")
        self.output_dir = kwargs.get('output_dir')
        self.infile = kwargs.get('infile', None)
        if not self.infile:
            self.infile = join_abs(self.output_dir, "aggregated",
                                   "aggregated_protocol_hostnames.txt")

        self.verbose = kwargs.get('verbose', False)
        self.final_command = None
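
The $-placeholders in ansible_base suggest string.Template substitution; a hedged sketch of how the final command line could be assembled (the real build() may differ):

from string import Template

base = Template("ansible-playbook $flags $config")
command = base.substitute(
    flags="-e 'variable_host=localhost' -i configs/ansible_inventory",
    config="ansible_plays/httpscreenshot_play.yml")
print(command)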
Example #10
    def rebuild(self, **kwargs):
        """Rebuilds sqlite3 database by parsing output files found under HOME directory

        Args:
            files (List): list of files to include in this rebuild.

        Returns:

        """
        print("[*] Rebuilding DB")
        filenames = kwargs.get("files") or []
        output_files = []
        output_files += [f for f in filenames if isfile(f)]
        for root, dirs, files in walk(self.OUTPUT_DIR, topdown=True):
            # Prune in place; rebinding ``dirs`` has no effect on walk()
            dirs[:] = [d for d in dirs if isdir(join_abs(root, d))]
            for _file in files:
                output_files += [join_abs(root, _file)]
        self.aggregation.aggregate(output_files=output_files)
        if kwargs.get("headers", False):
            self.aggregation.headers()
        print("[*] Rebuilding complete")
Example #11
    def gather(self, **kwargs):
        """Starts domain reconnaisance of target domain using the supplied tools

        Args:
            webtools (Dict): webtool dict
            scanners_dockers (Dict): scanners that use docker as their base
            scanners_ansible (Dict): scanners that use ansible as their base.
            headers (Boolean): if headers should be gathered

        Returns:

        """
        _threads = []
        # Initialized up front so the KeyboardInterrupt handler below can
        # always reference it
        scanners = []

        output_folders = []
        output_files = []

        webtools = kwargs.get('webtools', {})

        output_files += [
            v.get('output_file') for _, v in webtools.items()
            if v.get('output_file')
        ]

        if webtools:
            _threads += self._run_webtools(webtools)

        scanners_dockers = kwargs.get('scanners_dockers', {})

        output_folders += [
            v.get('output_folder') for _, v in scanners_dockers.items()
            if v.get("output_folder")
        ]
        output_files += [
            v.get('output_file') for _, v in scanners_dockers.items()
            if v.get('output_file')
        ]

        scanners_ansible = kwargs.get('scanners_ansible', {})

        output_folders += [
            v.get('output_folder', None) for _, v in scanners_ansible.items()
            if v.get("output_folder")
        ]
        output_files += [
            v.get('output_file', None) for _, v in scanners_ansible.items()
            if v.get("output_file")
        ]

        for folder in output_folders:
            if not exists(join_abs(self.OUTPUT_DIR, folder)):
                makedirs(join_abs(self.OUTPUT_DIR, folder))

        if scanners_dockers:
            scanner_threads, scanners = self._run_dockers(scanners_dockers)
            _threads += scanner_threads

        if scanners_ansible:
            _threads += self._run_ansible(scanners_ansible, None)

        if _threads:
            try:
                [thread.join() for thread in _threads if thread]
            except KeyboardInterrupt:
                self._print("Keyboard Interrupt sending kill signal to docker")
                try:
                    _ = [scanner.kill() for scanner in scanners]
                except Exception:
                    # Containers may already be stopped; ignore kill failures
                    pass
                raise KeyboardInterrupt

        verify = kwargs.get('verify', None)

        if verify and webtools:
            for k in webtools:
                if verify.lower() in k.lower():
                    verify = webtools[k].get('output_folder', None)
                    if not verify:
                        verify = webtools[k].get('output_file', None)
                    break
        if verify:
            print(f"[*] Omit addresses gathered from web tool: {verify}")

        self.aggregation.aggregate(output_folders=output_folders,
                                   output_files=output_files)

        self.aggregation.dump_to_file()

        if kwargs.get("headers", False):
            self.aggregation.headers()
        print("[*] Gather complete")
Example #12
def run():
    """Main method for running Dr.ROBOT.

    Returns:
        Nothing.
    """
    try:
        if not path.exists(join_abs(ROOT_DIR, "logs")):
            makedirs(join_abs(ROOT_DIR, "logs"))

        log = setup_logger()

        tools = load_config(get_config())

        parser = parse_args(**tools, root_dir=ROOT_DIR)

        if len(sys.argv) <= 1:
            parser.print_help()
            sys.exit(1)

        args = parser.parse_args()

        log.debug(args)

        tool_check()

        drrobot = Robot(root_dir=ROOT_DIR,
                        user_config=get_config(),
                        **tools,
                        dns=getattr(args, 'dns', None),
                        proxy=getattr(args, 'proxy', None),
                        domain=getattr(args, 'domain', None),
                        verbose=getattr(args, 'verbose'),
                        dbfile=getattr(args, 'dbfile'),
                        verify=getattr(args, 'verify', None))

        if not path.exists(join_abs(ROOT_DIR, "dbs")):
            makedirs(join_abs(ROOT_DIR, "dbs"))

        create_dirs(parser)
        log.debug(f"Dumping tools for run :{tools}")

        # Compare with == rather than ``in``: substring matching would also
        # accept partial action names
        if args.actions == "gather":
            start_gather(drrobot, tools, parser)

        if args.actions == "inspect":
            start_inspect(drrobot, tools, parser)

        if args.actions == "upload":
            start_upload(drrobot, tools, parser)

        if args.actions == "rebuild":
            start_rebuild(drrobot, tools, parser)

        if args.actions == "output":
            start_output(drrobot, parser)

        if args.actions == "dumpdb":
            start_dumpdb(drrobot, parser)

    except json.JSONDecodeError as error:
        print(f"[!] JSON load error, configuration file is bad.\n {error}")
        log.exception(error)
    except DatabaseError as error:
        print(f"[!] Something went wrong with SQLite\n {error}")
        log.exception(error)
    except KeyboardInterrupt:
        print("[!] KeyboardInterrup, exiting...")
    except OSError as error:
        log.exception(error)
        print(f"[!] OSError {error}")
    except TypeError as error:
        log.exception(error)
        print(f"[!] {error}")
Example #13
    def dump_to_file(self,
                     dump_ips=True,
                     dump_hostnames=True,
                     dump_headers=False):
        """Dump database to file

        Dumps contents of database to three separate files:
            1. File with protocol (http/https)
            2. File with hostname only
            3. File with ip only

        Args:
            dump_ips (bool): dump ips
            dump_hostnames (bool): dump hostnames to file
            dump_headers (bool): dump headers to output directory under headers


        Returns:
        """
        dbconn = sqlite3.connect(self.dbfile)
        try:
            dbcurs = dbconn.cursor()

            ips = dbcurs.execute(f"""SELECT DISTINCT ip
                FROM data
                WHERE domain='{self.domain.replace('.', '_')}'
                AND ip IS NOT NULL""").fetchall()
            hostnames = dbcurs.execute(f"""SELECT DISTINCT hostname
                    FROM data
                    WHERE domain='{self.domain.replace('.', '_')}'
                    AND hostname IS NOT NULL""").fetchall()
            """
                Header options require there to have been a scan otherwise
                there will be no output but that should be expected.
                Might change db to a dataframe later... possible
            """
            headers = dbcurs.execute(
                f"""SELECT DISTINCT ip, hostname, http_headers, https_headers
                        FROM data
                        WHERE domain='{self.domain.replace('.', '_')}'
                        AND (http_headers IS NOT NULL
                        AND https_headers IS NOT NULL)""").fetchall()

            if dump_ips:
                with open(
                        join_abs(self.output_dir, 'aggregated',
                                 'aggregated_ips.txt'), 'w') as _file:
                    _file.writelines("\n".join(list(ip[0] for ip in ips)))

            if dump_hostnames:
                with open(
                        join_abs(self.output_dir, 'aggregated',
                                 'aggregated_hostnames.txt'), 'w') as _file:
                    _file.writelines("\n".join(
                        list(f"{host[0]}" for host in hostnames)))

                with open(
                        join_abs(self.output_dir, 'aggregated',
                                 'aggregated_protocol_hostnames.txt'),
                        'w') as _file:
                    _file.writelines("\n".join(
                        list(f"https://{host[0]}\nhttp://{host[0]}"
                             for host in hostnames)))

            if dump_headers:
                keys = ["Ip", "Hostname", "Http", "Https"]
                for row in headers:
                    _rows = dict(zip(keys, row))
                    with open(
                            join_abs(self.output_dir, "headers",
                                     f"{_rows['Hostname']}_headers.txt"),
                            'w') as _file:
                        _file.write(json.dumps(_rows, indent=2))
        except (sqlite3.Error, OSError):
            print("Failed to write to files in aggregated directory, exiting")
            self.logger.exception("Error in dump to file")
            return
        finally:
            dbconn.close()
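
The queries above interpolate the domain straight into the SQL text; sqlite3's parameter binding sidesteps quoting and injection issues. A hedged sketch of the same DISTINCT-ip query with a bound parameter:

import sqlite3

def select_ips(dbfile, domain):
    """Parameterized variant of the DISTINCT-ip query above (a sketch)."""
    dbconn = sqlite3.connect(dbfile)
    try:
        return dbconn.execute(
            "SELECT DISTINCT ip FROM data WHERE domain=? AND ip IS NOT NULL",
            (domain.replace('.', '_'),)).fetchall()
    finally:
        dbconn.close()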
Example #14
    def _run_webtools(self, webtools):
        """Create custom WebTool object from dictionary containing WebTools

        Args:
            webtools (Dict): WebTool modules to build and run
                Example:
                {
                    "Shodan" :
                    {
                      "short_name": "shodan",
                      "class_name": "Shodan",
                      "output_file" : "shodan.txt",
                      "api_key" : null,
                      "endpoint" : null,
                      "username" : null,
                      "password" : null
                    },
                }

        Returns:
            (List) Threads of WebTool objects that are running

        """
        threads = []
        for tool, tool_dict in webtools.items():
            try:
                output_file_loc = join_abs(self.ROOT_DIR, "output",
                                           self.domain,
                                           tool_dict.get('output_file', tool))
                attr = {
                    "proxies": {
                        'http': self.proxy,
                        'https': self.proxy
                    },
                    "api_key": tool_dict.get('api_key', None),
                    "domain": self.domain,
                    "output_file": output_file_loc,
                    "username": tool_dict.get('username', None),
                    "password": tool_dict.get('password', None),
                    "endpoint": tool_dict.get('endpoint', None),
                    "verbose": self.verbose,
                }
                self._print(f"Building webtool {tool} with options \n\t{attr}")
                """
                module contains the modules loaded in from web_resources
                    relative to __main__
                tool_class contains the class object with the name
                    specified in the default/user config file.
                tool_class_obj contains the instantiated object
                    using the tool_class init method.
                """
                module = importlib.import_module('robot_api.api.web_resources',
                                                 __name__)
                tool_class = getattr(module, tool_dict.get('class_name'))
                tool_class_obj = tool_class(**attr)
                threads += [
                    multiprocessing.Process(target=tool_class_obj.do_query,
                                            daemon=True)
                ]

            except KeyError:
                print("[!] Error locating key for tool. " +
                      "Check error log for details")
                LOG.exception("Key Error in run_webtools method")
            except json.JSONDecodeError:
                print("[!] Failure authenticating to service. " +
                      "Check error log for details")
                LOG.exception("Failure authenticating to service.")
            except ValueError:
                print("[!] Value Error thrown. Check error log for details")
                LOG.exception("Value error on init")

        for thread in threads:
            thread.start()

        return threads
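
The dynamic loading boils down to importlib.import_module plus getattr; a distilled sketch (the class name and keyword arguments are illustrative, mirroring the attr dict above):

import importlib

# Resolve a class by name at runtime, then instantiate it with kwargs
module = importlib.import_module('robot_api.api.web_resources')
tool_class = getattr(module, 'Shodan')
tool = tool_class(domain="example.com", api_key=None, verbose=False)

The second argument the original passes to import_module (__name__) only matters for relative module paths.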
Example #15
    def _run_ansible(self, ansible_mods, infile):
        """Create ansible objects from dictionary containing the configurations.

        Args:
            ansible_mods (Dict): Dictionary of ansible modules to build and run
            infile (str): Path to file for upload to ansible server

            Example:
                {
                    "Eyewitness": {
                        "name" : "Eyewitness",
                        "short_name" : "eye",
                        "default" : 2,
                        "ansible_arguments" : {
                            "config" : "$config/ansible_plays/eyewitness_play.yml",
                            "flags": "-e '$extra'",
                            "extra_flags":{
                                "1" : "variable_host=localhost",
                                "2" : "infile=$infile/aggregated_ips.txt",
                                "3" : "outfile=$outfile/Eyewitness.tar",
                                "4" : "outfolder=$outfile/Eyewitness"
                            }
                        },
                        "output" : "/tmp/output",
                        "enabled" : false
                    }
                }

        Returns:

        """
        for ansible, ansible_json in ansible_mods.items():
            try:
                attr = {}
                print(f"[*] Running {ansible} as ansible Module")
                if infile is None:
                    ansible_args = ansible_json.get('ansible_arguments') or {}
                    if ansible_args.get('infile') is not None:
                        infile = join_abs(self.OUTPUT_DIR,
                                          ansible_args.get('infile'))
                    else:
                        print("[*] No file provided, dumping db for input")
                        if getsize(self.dbfile) > 0:
                            self.aggregation.dump_to_file()
                            # The dump writes the aggregated hostname list,
                            # which doubles as the default input file (see
                            # the Ansible __init__ default above)
                            infile = join_abs(
                                self.OUTPUT_DIR, "aggregated",
                                "aggregated_protocol_hostnames.txt")
                        else:
                            print("[!] \tDatabase file is empty. "
                                  "Have you run gather?")

                if infile is None or not isfile(infile):
                    print("[!] file provided does not exist, terminating")
                    return

                attr['infile'] = infile
                attr['domain'] = self.domain
                attr['ansible_file_location'] = join_abs(
                    self.ROOT_DIR, "ansible_plays")
                attr['output_dir'] = self.OUTPUT_DIR
                attr['ansible_arguments'] = ansible_json.get(
                    "ansible_arguments")
                attr['verbose'] = self.verbose

                self._print(
                    f"Creating ansible {ansible} with attributes\n\t {attr}")
                ansible_mod = Ansible(**attr)
                ansible_mod.build()

                ansible_mod.run()

            except (OSError, TypeError):
                print("[!] Something went wrong. Check error log for details")
                LOG.exception("Error in ansible method")
Example #16
    def gen_tarfile(self):
        """Bundle the active Dockerfile and certs into a gzipped tar archive.

        Returns:
            Path to the generated archive.
        """
        tarname = join_abs(self._docker_options['tarfiles'],
                           basename(self._active_config_path) + ".tar.gz")
        with tarfile.open(name=tarname, mode="w:gz") as tar:
            tar.add(self._active_config_path, "Dockerfile")
            tar.add(self._docker_options['certs'], 'certs')
        return tarname
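
The archive is presumably fed to docker-py as a custom build context; a hedged sketch (the tag is an assumption, and tarname is the value returned by gen_tarfile()):

import docker

client = docker.from_env()
with open(tarname, "rb") as context:
    image, logs = client.images.build(
        fileobj=context, custom_context=True, encoding="gzip",
        tag="drrobot/scanner")  # hypothetical tag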
Example #17
    def gen_output(self):
        """Generate dictionary containing all data from the database

        Returns:
            A dictionary containing all data from database

        """
        if not path.exists(self.dbfile):
            print("No database file found. Exiting")
            return None

        dbconn = sqlite3.connect(self.dbfile)
        dbcurs = dbconn.cursor()

        # Select exactly the four columns the loop below unpacks
        db_headers = dbcurs.execute(f"""SELECT ip, hostname, http_headers, https_headers
                            FROM data
                            WHERE domain='{self.domain.replace('.','_')}'
                            AND (http_headers IS NOT NULL OR https_headers IS NOT NULL)"""
                                    ).fetchall()
        db_ips = dbcurs.execute(f"""SELECT DISTINCT ip, hostname
                            FROM data
                            WHERE domain='{self.domain.replace('.', '_')}'"""
                                ).fetchall()
        file_index = {}
        """
            Need to be smarter about this:

            Multiple different sql queries
                1. Grabs all those with headers:
                    most likely that if they have headers 
                    they have a screenshot
                    glob can run and take it's time.
                2. Grab all unique ips
                2a. Grab all unique hostnames

                3. Update json with all documents
        """
        for ipv4, hostname, http, https in db_headers:
            ip_screenshots = glob.glob("**/*{}*".format(ipv4), recursive=True)
            hostname_screenshots = glob.glob("**/*{}*".format(hostname),
                                             recursive=True)

            image_files = []
            for _file in ip_screenshots:
                image_files += [join_abs(getcwd(), _file)]
            for _file in hostname_screenshots:
                image_files += [join_abs(getcwd(), _file)]
            file_index[ipv4] = {
                "hostnames": [hostname],
                "http_header": http,
                "https_header": https,
                "images": image_files
            }

        for ipv4, hostname in db_ips:
            if ipv4 not in file_index:
                file_index[ipv4] = {
                    "hostnames": [hostname],
                    "http_header": "",
                    "https_header": "",
                    "images": []
                }
            elif hostname not in file_index[ipv4]['hostnames']:
                file_index[ipv4]['hostnames'] += [hostname]
        return file_index
Example #18
    def _run_dockers(self, dockers):
        """Build Docker containers provided dictionary of arguments for building

        Dockerize is a wrapper around the docker module. 
        This module allows Dr.ROBOT to specify
        required arguments for building its containers

        Args:
            dockers (Dict): dictionary with all docker objects to build

                Example:
                {
                "Aquatone" : {
                    "name": "Aquatone",
                    "docker_name": "aqua",
                    "default_conf": "docker_buildfiles/Dockerfile.Aquatone.tmp",
                    "active_conf": "docker_buildfiles/Dockerfile.Aquatone",
                    "description": "AQUATONE is a set of tools for performing
                                    reconnaissance on domain names",
                    "src": "https://github.com/michenriksen/aquatone",
                    "output": "/aqua",
                    "output_dir" : "aquatone"
                  }
                }

        Returns:
            A tuple containing the threads and the scanners being ran

        """
        scanners = []
        self._print(f"Creating scanners{dockers.keys()}")
        for scan, scan_dict in dockers.items():
            options = scan_dict
            options.update({"proxy": self.proxy or None})
            options.update({"dns": self.dns or None})
            options.update({"target": self.domain})
            options.update({"verbose": self.verbose})
            options.update({"tarfiles": join_abs(self.ROOT_DIR, "tarfiles")})
            options.update({"certs": join_abs(self.ROOT_DIR, "certs")})
            output_dir = self.OUTPUT_DIR
            if options.get("output_folder", None):
                output_dir = join_abs(self.OUTPUT_DIR,
                                      options.get("output_folder"))

            self._print(f"Creating scanner for {scan} with options: " +
                        "{json.dumps(options, indent=4)}")

            scanners += [
                Docker(active_config_path=join_abs(self.ROOT_DIR,
                                                   scan_dict['active_conf']),
                       default_config_path=join_abs(self.ROOT_DIR,
                                                    scan_dict['default_conf']),
                       docker_options=options,
                       output_dir=output_dir)
            ]

        self._print("Threading builds")
        build_threads = [
            threading.Thread(target=scanner.build, daemon=True)
            for scanner in scanners
        ]
        for build in build_threads:
            build.start()

        build_monitor_threads = [
            threading.Thread(target=scanner.monitor_build, daemon=True)
            for scanner in scanners
        ]
        for thread in build_monitor_threads:
            thread.start()

        for build in build_monitor_threads:
            build.join()

        for scanner in scanners:
            if scanner.error or scanner.image is None:
                print(f"[!] Error building {scanner.name}. Check logs")

        self._print("Images built, running containers")
        for scanner in scanners:
            scanner.run()

        status_threads = [
            threading.Thread(target=scanner.update_status, daemon=True)
            for scanner in scanners
        ]
        for stat in status_threads:
            stat.start()

        return (status_threads, scanners)
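
The build/monitor fan-out is the standard start-then-join thread pattern; a distilled runnable sketch with stand-in work:

import threading

def build(name):
    # Stand-in for scanner.build / scanner.monitor_build
    print(f"building {name}")

threads = [threading.Thread(target=build, args=(name,), daemon=True)
           for name in ("Aquatone", "Eyewitness")]
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()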
Example #19
    def aggregate(self, output_files=None, output_folders=None):
        """Aggregates all output from scanners into the database

        Args:
            output_files: list of output files referenced in config.json
            output_folders: list of folders to aggregate

        Returns:
        """
        # Avoid the mutable-default-argument pitfall
        output_files = output_files or []
        output_folders = output_folders or []
        # Connect before the try block so ``finally`` never closes an
        # unbound connection
        dbconn = sqlite3.connect(self.dbfile)
        try:
            dbcurs = dbconn.cursor()
            # Enable foreign key support
            dbcurs.execute("PRAGMA foreign_keys=1")
            # Simple database that contains list of domains to run against
            dbcurs.execute("""
                            CREATE TABLE IF NOT EXISTS domains (
                                domain VARCHAR PRIMARY KEY,
                                UNIQUE(domain)
                            )
                            """)
            # Setup database to keep all data from all targets. This allows us
            # to use a single model for hosting with Django
            dbcurs.execute("""
                            CREATE TABLE IF NOT EXISTS data (
                                domainid INTEGER PRIMARY KEY,
                                ip VARCHAR,
                                hostname VARCHAR,
                                http_headers TEXT,
                                https_headers TEXT,
                                domain VARCHAR,
                                found TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                                FOREIGN KEY(domain) REFERENCES domains(domain),
                                UNIQUE(hostname)
                            )
                            """)
            # Quickly create entry in domains table.
            dbcurs.execute(
                "INSERT OR IGNORE INTO domains(domain) VALUES (?)",
                (self.domain.replace('.', '_'),))
            dbconn.commit()

            all_files = []
            for name in output_files:
                if path.isfile(join_abs(self.output_dir, name)):
                    all_files += [join_abs(self.output_dir, name)]
                elif path.isfile(name):
                    all_files += [name]
                else:
                    print(
                        f"[!] File {name} does not exist, verify scan results")

            for folder in output_folders:
                for root, _, files in walk(join_abs(self.output_dir, folder)):
                    for _file in files:
                        if path.isfile(join_abs(root, _file)):
                            all_files += [join_abs(root, _file)]
            # A Manager queue is used because a plain multiprocessing.Queue
            # cannot be passed through Pool.map
            qu_manager = multiprocessing.Manager()
            queue = qu_manager.Queue()
            pool = multiprocessing.Pool(5)
            reverse_partial = partial(self._reverse_ip_lookup, queue)
            pool.map(reverse_partial, all_files)
            pool.close()
            pool.join()
            self._build_db(queue, dbcurs)
            dbconn.commit()
        except sqlite3.Error:
            self.logger.exception("Error in aggregation")
        finally:
            dbconn.close()
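
The Manager-queue, partial, and Pool.map combination distills to the following runnable sketch; _lookup stands in for _reverse_ip_lookup:

import multiprocessing
from functools import partial

def _lookup(queue, filename):
    # Stand-in worker: process one file, push results onto the shared queue
    queue.put(f"parsed {filename}")

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    queue = manager.Queue()
    with multiprocessing.Pool(5) as pool:
        pool.map(partial(_lookup, queue), ["a.txt", "b.txt"])
    while not queue.empty():
        print(queue.get())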