Example #1
    def __init__(self, task):
        """@param task: task dictionary of the analysis to process."""
        self.task = task
        self.analysis_path = os.path.join(DETECTOR_ROOT, "storage", "analyses",
                                          str(task["id"]))
        self.baseline_path = os.path.join(DETECTOR_ROOT, "storage", "baseline")
        self.cfg = Config("processing")
Example #2
    def __init__(self, task, results):
        """@param analysis_path: analysis folder path."""
        self.task = task
        self.results = results
        self.analysis_path = os.path.join(DETECTOR_ROOT, "storage", "analyses",
                                          str(task["id"]))
        self.cfg = Config("reporting")

        self.task["options"] = parse_options(self.task["options"])
Example #3
    def __init__(self, *args, **kwargs):
        self.cfg = Config()
        self.analysistasks = {}
        self.analysishandlers = {}

        ip = self.cfg.resultserver.ip
        self.port = int(self.cfg.resultserver.port)
        while True:
            try:
                server_addr = ip, self.port
                SocketServer.ThreadingTCPServer.__init__(
                    self, server_addr, ResultHandler, *args, **kwargs)
            except Exception as e:
                # In Linux /usr/include/asm-generic/errno-base.h.
                # EADDRINUSE  98 (Address already in use)
                # In Mac OS X or FreeBSD:
                # EADDRINUSE 48 (Address already in use)
                if e.errno == 98 or e.errno == 48:
                    log.warning(
                        "Cannot bind ResultServer on port %s, "
                        "trying another port.", self.port)
                    self.port += 1
                else:
                    raise DetectorCriticalError(
                        "Unable to bind ResultServer on "
                        "{0}:{1}: {2}".format(ip, self.port, str(e)))
            else:
                log.debug("ResultServer running on %s:%s.", ip, self.port)
                self.servethread = Thread(target=self.serve_forever)
                self.servethread.setDaemon(True)
                self.servethread.start()
                break
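The retry loop above compares e.errno against the raw platform values documented in its comments (98 on Linux, 48 on Mac OS X/FreeBSD). The errno module resolves the correct constant per platform; below is a minimal sketch of the same bind-and-bump-port pattern, where bind_with_retry is a hypothetical helper rather than code from this project.

import errno
import socket

def bind_with_retry(ip, port, max_tries=100):
    """Bind a TCP socket, bumping the port while it is already in use."""
    for candidate in range(port, port + max_tries):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind((ip, candidate))
        except socket.error as e:
            sock.close()
            # errno.EADDRINUSE is 98 on Linux and 48 on Mac OS X/FreeBSD.
            if e.errno != errno.EADDRINUSE:
                raise
        else:
            return sock, candidate
    raise RuntimeError("no free port in [%d, %d)" % (port, port + max_tries))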
Example #4
def store_temp_file(filedata, filename, path=None):
    """Store a temporary file.
    @param filedata: content of the original file.
    @param filename: name of the original file.
    @param path: optional path for temp directory.
    @return: path to the temporary file.
    """
    filename = get_filename_from_path(filename)

    # Reduce length (100 is arbitrary).
    filename = filename[:100]

    options = Config()
    # Create temporary directory path.
    if path:
        target_path = path
    else:
        tmp_path = options.detector.get("tmppath", "/tmp")
        target_path = os.path.join(tmp_path, "detector-tmp")
    if not os.path.exists(target_path):
        os.mkdir(target_path)

    tmp_dir = tempfile.mkdtemp(prefix="upload_", dir=target_path)
    tmp_file_path = os.path.join(tmp_dir, filename)
    with open(tmp_file_path, "wb") as tmp_file:
        # If filedata is file object, do chunked copy.
        if hasattr(filedata, "read"):
            chunk = filedata.read(1024)
            while chunk:
                tmp_file.write(chunk)
                chunk = filedata.read(1024)
        else:
            tmp_file.write(filedata)

    return tmp_file_path
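As the chunked-copy branch shows, store_temp_file accepts either raw bytes or a file-like object. A hypothetical call site (file names and contents are placeholders):

# Raw bytes are written in one shot.
path1 = store_temp_file(b"MZ\x90\x00", "sample.exe")

# File-like objects are copied in 1024-byte chunks.
with open("sample.bin", "rb") as fh:
    path2 = store_temp_file(fh, "sample.bin")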
Example #5
class RunAuxiliary(object):
    """Auxiliary modules manager."""
    def __init__(self, task, machine):
        self.task = task
        self.machine = machine
        self.cfg = Config("auxiliary")
        self.enabled = []

    def start(self):
        for module in list_plugins(group="auxiliary"):
            try:
                current = module()
            except:
                log.exception("Failed to load the auxiliary module "
                              "\"{0}\":".format(module))
                return

            module_name = inspect.getmodule(current).__name__
            if "." in module_name:
                module_name = module_name.rsplit(".", 1)[1]

            try:
                options = self.cfg.get(module_name)
            except DetectorOperationalError:
                log.debug(
                    "Auxiliary module %s not found in "
                    "configuration file", module_name)
                continue

            if not options.enabled:
                continue

            current.set_task(self.task)
            current.set_machine(self.machine)
            current.set_options(options)

            try:
                current.start()
            except NotImplementedError:
                pass
            except Exception as e:
                log.warning("Unable to start auxiliary module %s: %s",
                            module_name, e)
            else:
                log.debug("Started auxiliary module: %s",
                          current.__class__.__name__)
                self.enabled.append(current)

    def stop(self):
        for module in self.enabled:
            try:
                module.stop()
            except NotImplementedError:
                pass
            except Exception as e:
                log.warning("Unable to stop auxiliary module: %s", e)
            else:
                log.debug("Stopped auxiliary module: %s",
                          module.__class__.__name__)
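RunAuxiliary drives each plugin through an implicit interface: set_task, set_machine, and set_options, followed by start and stop, with NotImplementedError treated as "nothing to do". A minimal sketch of a module satisfying that contract; the class is illustrative, and it would also need an enabled section named after its module file in conf/auxiliary.conf.

class DummySniffer(object):
    """Hypothetical auxiliary module matching the calls made above."""

    def set_task(self, task):
        self.task = task

    def set_machine(self, machine):
        self.machine = machine

    def set_options(self, options):
        self.options = options

    def start(self):
        log.debug("DummySniffer started")

    def stop(self):
        log.debug("DummySniffer stopped")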
Example #6
    def __init__(self):
        self.module_name = ""
        self.options = None
        self.options_globals = Config()
        # Database pointer.
        self.db = Database()

        # The machine table is cleaned at each start so that it can be
        # re-populated from the configuration file.
        self.db.clean_machines()
Example #7
    def process(self, module):
        """Run a single reporting module.
        @param module: reporting module.
        """
        # Initialize current reporting module.
        try:
            current = module()
        except:
            log.exception(
                "Failed to load the reporting module \"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except DetectorOperationalError:
            log.debug("Reporting module %s not found in configuration file",
                      module_name)
            return

        # If the reporting module is disabled in the config, skip it.
        if not options.enabled:
            return

        # Give it the path to the analysis results folder.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the relevant reporting.conf section.
        current.set_options(options)
        # Load the content of the analysis.conf file.
        current.cfg = Config(cfg=current.conf_path)

        try:
            current.run(self.results)
            log.debug("Executed reporting module \"%s\"",
                      current.__class__.__name__)
        except DetectorDependencyError as e:
            log.warning(
                "The reporting module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except DetectorReportError as e:
            log.warning(
                "The reporting module \"%s\" returned the following error: %s",
                current.__class__.__name__, e)
        except:
            log.exception("Failed to run the reporting module \"%s\":",
                          current.__class__.__name__)
Example #8
    def __init__(self, task, error_queue):
        """@param task: task object containing the details for the analysis."""
        threading.Thread.__init__(self)

        self.task = task
        self.errors = error_queue
        self.cfg = Config()
        self.storage = ""
        self.binary = ""
        self.machine = None
        self.db = Database()

        self.task.options = parse_options(self.task.options)
Example #9
    def __init__(self, vm_id, ip, platform, task_id):
        """@param ip: guest's IP address.
        @param platform: guest's operating system type.
        """
        self.id = vm_id
        self.ip = ip
        self.platform = platform
        self.task_id = task_id

        self.cfg = Config()
        self.timeout = self.cfg.timeouts.critical

        url = "http://{0}:{1}".format(ip, DETECTOR_GUEST_PORT)
        self.server = TimeoutServer(url, allow_none=True,
                                    timeout=self.timeout)
Example #10
    def __init__(self, memfile, osprofile=None):
        self.mask_pid = []
        self.taint_pid = set()
        self.memfile = memfile

        conf_path = os.path.join(DETECTOR_ROOT, "conf", "memory.conf")
        if not os.path.exists(conf_path):
            log.error("Configuration file not found at path \"%s\".",
                      conf_path)
            self.voptions = False
            return

        self.voptions = Config("memory")

        for pid in self.voptions.mask.pid_generic.split(","):
            pid = pid.strip()
            if pid:
                self.mask_pid.append(int(pid))

        self.no_filter = not self.voptions.mask.enabled
        if self.voptions.basic.guest_profile:
            self.osprofile = self.voptions.basic.guest_profile
        else:
            self.osprofile = osprofile or self.get_osprofile()
Example #11
    def __init__(self, vmid, ipaddr, platform, task_id):
        self.vmid = vmid
        self.ipaddr = ipaddr
        self.port = DETECTOR_GUEST_PORT
        self.platform = platform
        self.task_id = task_id

        self.timeout = Config().timeouts.critical

        # Just in case we have an old agent inside the Virtual Machine. This
        # allows us to remain backwards compatible (for now).
        self.old = OldGuestManager(vmid, ipaddr, platform, task_id)
        self.is_old = False

        # We maintain the path of the Detector Analyzer on the host.
        self.analyzer_path = None
        self.environ = {}
Example #12
class VolatilityManager(object):
    """Handle several volatility results."""
    PLUGINS = [
        "pslist",
        "psxview",
        "callbacks",
        ["idt", "x86"],
        "ssdt",
        ["gdt", "x86"],
        "timers",
        "messagehooks",
        "getsids",
        "privs",
        "malfind",
        "apihooks",
        "dlllist",
        "handles",
        "ldrmodules",
        "mutantscan",
        "devicetree",
        "svcscan",
        "modscan",
        "yarascan",
        ["sockscan", "winxp"],
        ["netscan", "vista", "win7"],
    ]

    def __init__(self, memfile, osprofile=None):
        self.mask_pid = []
        self.taint_pid = set()
        self.memfile = memfile

        conf_path = os.path.join(DETECTOR_ROOT, "conf", "memory.conf")
        if not os.path.exists(conf_path):
            log.error("Configuration file not found at path \"%s\".",
                      conf_path)
            self.voptions = False
            return

        self.voptions = Config("memory")

        for pid in self.voptions.mask.pid_generic.split(","):
            pid = pid.strip()
            if pid:
                self.mask_pid.append(int(pid))

        self.no_filter = not self.voptions.mask.enabled
        if self.voptions.basic.guest_profile:
            self.osprofile = self.voptions.basic.guest_profile
        else:
            self.osprofile = osprofile or self.get_osprofile()

    def get_osprofile(self):
        """Get the OS profile"""
        return VolatilityAPI(self.memfile).imageinfo()["data"][0]["osprofile"]

    def run(self):
        results = {}

        # Exit if options were not loaded.
        if not self.voptions:
            return

        vol = VolatilityAPI(self.memfile, self.osprofile)

        for plugin_name in self.PLUGINS:
            if isinstance(plugin_name, list):
                plugin_name, profiles = plugin_name[0], plugin_name[1:]
            else:
                profiles = []

            # Some plugins can only run under certain profiles (e.g., only on
            # Windows XP/Vista/7, or only on x86 or x64).
            osp = self.osprofile.lower()
            for profile in profiles:
                if osp.startswith(profile) or osp.endswith(profile):
                    break
            else:
                if profiles:
                    continue

            plugin = self.voptions.get(plugin_name)
            if not plugin or not plugin.enabled:
                log.debug("Skipping '%s' volatility module", plugin_name)
                continue

            if plugin_name in vol.plugins:
                log.debug("Executing volatility '%s' module.", plugin_name)
                results[plugin_name] = getattr(vol, plugin_name)()

        self.find_taint(results)
        self.cleanup()

        return self.mask_filter(results)

    def mask_filter(self, old):
        """Filter out masked stuff. Keep tainted stuff."""
        new = {}

        for akey in old.keys():
            new[akey] = {"config": old[akey]["config"], "data": []}
            conf = getattr(self.voptions, akey, None)
            # Guard against plugins that have no configuration section.
            filter_enabled = conf.filter if conf else False
            new[akey]["config"]["filter"] = filter_enabled
            for item in old[akey]["data"]:
                # TODO: need to improve this logic.
                if not filter_enabled:
                    new[akey]["data"].append(item)
                elif "process_id" in item and \
                        item["process_id"] in self.mask_pid and \
                        item["process_id"] not in self.taint_pid:
                    pass
                else:
                    new[akey]["data"].append(item)
        return new

    def find_taint(self, res):
        """Find tainted items."""
        if "malfind" in res:
            for item in res["malfind"]["data"]:
                self.taint_pid.add(item["process_id"])

    def cleanup(self):
        """Delete the memory dump (if configured to do so)."""

        if self.voptions.basic.delete_memdump:
            try:
                os.remove(self.memfile)
            except OSError:
                log.error("Unable to delete memory dump file at path \"%s\" ",
                          self.memfile)
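The profile gating in run() deserves a worked example. An entry such as ["netscan", "vista", "win7"] only executes when the lowercased OS profile starts or ends with one of its tags; plain string entries like "pslist" carry no tags and run under every profile. The profile value below is illustrative.

osp = "Win7SP1x64".lower()      # "win7sp1x64"
profiles = ["vista", "win7"]
matches = any(osp.startswith(p) or osp.endswith(p) for p in profiles)
assert matches                  # "win7sp1x64".startswith("win7")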
Example #13
    def run(self):
        """Run analysis.
        @return: results dict.
        """
        self.cfg = Config()
        self.state = {}

        # these handlers will be present for any analysis, regardless of platform/format
        handlers = [
            GenericBehavior(self),
            ProcessTree(self),
            Summary(self),
            Anomaly(self),
            ApiStats(self),

            # platform specific stuff
            WindowsMonitor(self),
            LinuxSystemTap(self),
        ]

        # doesn't really work if there's no task, let's rely on the file name for now
        # # certain handlers only makes sense for a specific platform
        # # this allows us to use the same filenames/formats without confusion
        # if self.task.machine.platform == "windows":
        #     handlers += [
        #         WindowsMonitor(self),
        #     ]
        # elif self.task.machine.platform == "linux":
        #     handlers += [
        #         LinuxSystemTap(self),
        #     ]

        # create a lookup map
        interest_map = {}
        for h in handlers:
            for event_type in h.event_types:
                if event_type not in interest_map:
                    interest_map[event_type] = []

                # If available go for the specific event type handler rather
                # than the generic handle_event.
                if hasattr(h, "handle_%s_event" % event_type):
                    fn = getattr(h, "handle_%s_event" % event_type)
                    interest_map[event_type].append(fn)
                elif h.handle_event not in interest_map[event_type]:
                    interest_map[event_type].append(h.handle_event)

        # Each log file should be parsed by exactly one of the handlers. That
        # handler then yields every event in it, and each event is forwarded
        # to the interested behavior/analysis/etc. handlers.
        for path in self._enum_logs():
            for handler in handlers:
                # ... whether it is responsible
                if not handler.handles_path(path):
                    continue

                # ... and then let it parse the file
                for event in handler.parse(path):
                    # pass down the parsed message to interested handlers
                    for hhandler in interest_map.get(event["type"], []):
                        res = hhandler(event)
                        # We support one layer of "generating" new events,
                        # which we'll pass on again (in case the handler
                        # returns some).
                        if not res:
                            continue

                        for subevent in res:
                            for hhandler2 in interest_map.get(subevent["type"], []):
                                hhandler2(subevent)

        behavior = {}

        for handler in handlers:
            try:
                r = handler.run()
                if not r:
                    continue

                behavior[handler.key] = r
            except:
                log.exception("Failed to run partial behavior class \"%s\"", handler.key)

        return behavior
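The dispatch above keys on event["type"] and prefers a handle_<type>_event method over the generic handle_event. A minimal sketch of a partial-behavior handler that would plug into this loop; the class, its key, and the event type are illustrative.

class ProcessCounter(object):
    """Hypothetical handler for the dispatch loop above."""
    key = "process_count"       # its result lands in behavior["process_count"]
    event_types = ["process"]

    def __init__(self, analysis):
        self.analysis = analysis
        self.count = 0

    def handles_path(self, path):
        # Consumes events from other parsers rather than log files directly.
        return False

    # Preferred over the generic handle_event for "process" events.
    def handle_process_event(self, event):
        self.count += 1

    def run(self):
        return self.count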
Example #14
class RunReporting(object):
    """Reporting Engine.

    This class handles the loading and execution of the enabled reporting
    modules. It receives the analysis results dictionary from the Processing
    Engine and passes it over to the reporting modules before executing them.
    """
    def __init__(self, task, results):
        """@param analysis_path: analysis folder path."""
        self.task = task
        self.results = results
        self.analysis_path = os.path.join(DETECTOR_ROOT, "storage", "analyses",
                                          str(task["id"]))
        self.cfg = Config("reporting")

        self.task["options"] = parse_options(self.task["options"])

    def process(self, module):
        """Run a single reporting module.
        @param module: reporting module.
        """
        # Initialize current reporting module.
        try:
            current = module()
        except:
            log.exception(
                "Failed to load the reporting module \"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except DetectorOperationalError:
            log.debug("Reporting module %s not found in configuration file",
                      module_name)
            return

        # If the reporting module is disabled in the config, skip it.
        if not options.enabled:
            return

        # Give it the path to the analysis results folder.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the relevant reporting.conf section.
        current.set_options(options)
        # Load the content of the analysis.conf file.
        current.cfg = Config(cfg=current.conf_path)

        try:
            current.run(self.results)
            log.debug("Executed reporting module \"%s\"",
                      current.__class__.__name__)
        except DetectorDependencyError as e:
            log.warning(
                "The reporting module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except DetectorReportError as e:
            log.warning(
                "The reporting module \"%s\" returned the following error: %s",
                current.__class__.__name__, e)
        except:
            log.exception("Failed to run the reporting module \"%s\":",
                          current.__class__.__name__)

    def run(self):
        """Generates all reports.
        @raise DetectorReportError: if a report module fails.
        """
        # In every reporting module you can specify a numeric value that
        # represents at which position that module should be executed among
        # all the available ones. It can be used in the case where a
        # module requires another one to be already executed beforehand.
        reporting_list = list_plugins(group="reporting")

        # Return if no reporting modules are loaded.
        if reporting_list:
            reporting_list.sort(key=lambda module: module.order)

            # Run every loaded reporting module.
            for module in reporting_list:
                self.process(module)
        else:
            log.info("No reporting modules loaded")
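RunReporting.process() drives every module through set_path, set_task, and set_options, assigns its cfg from conf_path, and finally calls run(results). A minimal sketch of a reporting module fitting that flow; the class is illustrative and conf_path is assumed to point at the task's analysis.conf.

import json
import os

class JsonDump(object):
    """Hypothetical reporting module for the engine above."""
    order = 1                   # position in the reporting sequence
    conf_path = None            # assumed: path to the task's analysis.conf

    def set_path(self, analysis_path):
        self.analysis_path = analysis_path

    def set_task(self, task):
        self.task = task

    def set_options(self, options):
        self.options = options

    def run(self, results):
        report = os.path.join(self.analysis_path, "report.json")
        with open(report, "w") as fh:
            json.dump(results, fh, default=str)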
Example #15
    def __init__(self, maxcount=None):
        self.running = True
        self.cfg = Config()
        self.db = Database()
        self.maxcount = maxcount
        self.total_analysis_count = 0
Example #16
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over to the reporting engine.
    """
    def __init__(self, task):
        """@param task: task dictionary of the analysis to process."""
        self.task = task
        self.analysis_path = os.path.join(DETECTOR_ROOT, "storage", "analyses",
                                          str(task["id"]))
        self.baseline_path = os.path.join(DETECTOR_ROOT, "storage", "baseline")
        self.cfg = Config("processing")

    def process(self, module, results):
        """Run a processing module.
        @param module: processing module to run.
        @param results: results dict.
        @return: results generated by module.
        """
        # Initialize the specified processing module.
        try:
            current = module()
        except:
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return None, None

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except DetectorOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None, None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None, None

        # Give it the path to the baseline directory.
        current.set_baseline(self.baseline_path)
        # Give it the path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)
        # Give the results that we have obtained so far.
        current.set_results(results)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            data = current.run()

            log.debug(
                "Executed processing module \"%s\" on analysis at "
                "\"%s\"", current.__class__.__name__, self.analysis_path)

            # On success, return the module's key name and the data.
            return current.key, data
        except DetectorDependencyError as e:
            log.warning(
                "The processing module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except DetectorProcessingError as e:
            log.warning(
                "The processing module \"%s\" returned the following "
                "error: %s", current.__class__.__name__, e)
        except:
            log.exception(
                "Failed to run the processing module \"%s\" for task #%d:",
                current.__class__.__name__, self.task["id"])

        return None, None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """
        # This is the results container. It's what will be used by all the
        # reporting modules to make it consumable by humans and machines.
        # It will contain all the results generated by every processing
        # module available. Its structure can be observed through the JSON
        # dump in the analysis' reports folder. (If jsondump is enabled.)
        # We affectionately call this the "fat dict".
        results = {
            "_temp": {},
        }

        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                key, result = self.process(module, results)

                # If the module provided results, append it to the fat dict.
                if key and result:
                    results[key] = result
        else:
            log.info("No processing modules loaded")

        results.pop("_temp", None)

        # Return the fat dict.
        return results
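A processing module is shaped the same way, plus set_baseline and set_results, an order, and a key under which its return value lands in the fat dict. An illustrative sketch (not a module from this codebase); it would also need an enabled section in conf/processing.conf.

class MachineInfo(object):
    """Hypothetical processing module for the engine above."""
    order = 1               # user-defined execution position
    key = "machineinfo"     # its result lands in results["machineinfo"]

    def set_baseline(self, path): self.baseline_path = path
    def set_path(self, path): self.analysis_path = path
    def set_task(self, task): self.task = task
    def set_options(self, options): self.options = options
    def set_results(self, results): self.results = results

    def run(self):
        return {"task_id": self.task["id"]}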
Example #17
    def __init__(self, task, machine):
        self.task = task
        self.machine = machine
        self.cfg = Config("auxiliary")
        self.enabled = []
Example #18
    def initialize(self):
        """Initialize the machine manager."""
        global machinery, machine_lock

        machinery_name = self.cfg.detector.machinery

        max_vmstartup_count = self.cfg.detector.max_vmstartup_count
        if max_vmstartup_count:
            machine_lock = threading.Semaphore(max_vmstartup_count)
        else:
            machine_lock = threading.Lock()

        log.info("Using \"%s\" as machine manager", machinery_name)

        # Get registered class name. Only one machine manager is imported,
        # therefore there should be only one class in the list.
        plugin = list_plugins("machinery")[0]
        # Initialize the machine manager.
        machinery = plugin()

        # Find its configuration file.
        conf = os.path.join(DETECTOR_ROOT, "conf", "%s.conf" % machinery_name)

        if not os.path.exists(conf):
            raise DetectorCriticalError(
                "The configuration file for machine "
                "manager \"{0}\" does not exist at path:"
                " {1}".format(machinery_name, conf))

        # Provide a dictionary with the configuration options to the
        # machine manager instance.
        machinery.set_options(Config(machinery_name))

        # Initialize the machine manager.
        try:
            machinery.initialize(machinery_name)
        except DetectorMachineError as e:
            raise DetectorCriticalError("Error initializing machines: %s" % e)

        # At this point all the available machines should have been identified
        # and added to the list. If none were found, Detector needs to abort the
        # execution.
        if not len(machinery.machines()):
            raise DetectorCriticalError("No machines available.")
        else:
            log.info("Loaded %s machine/s", len(machinery.machines()))

        if len(machinery.machines()) > 1 and self.db.engine.name == "sqlite":
            log.warning("As you've configured Detector to execute parallel "
                        "analyses, we recommend that you switch to a MySQL "
                        "or a PostgreSQL database, as SQLite might cause "
                        "some issues.")

        if len(machinery.machines()) > 4 and self.cfg.detector.process_results:
            log.warning("When running many virtual machines it is recommended "
                        "to process the results in a separate process.py to "
                        "increase throughput and stability. Please read the "
                        "documentation about the `Processing Utility`.")

        # Drop all existing packet forwarding rules for each VM. Just in case
        # Detector was terminated for some reason and various forwarding rules
        # have thus not been dropped yet.
        for machine in machinery.machines():
            if not machine.interface:
                log.info(
                    "Unable to determine the network interface for VM "
                    "with name %s, Detector will not be able to give it "
                    "full internet access or route it through a VPN! "
                    "Please define a default network interface for the "
                    "machinery or define a network interface for each "
                    "VM.", machine.name)
                continue

            # Drop forwarding rule to each VPN.
            for vpn in vpns.values():
                rooter("forward_disable", machine.interface, vpn["interface"],
                       machine.ip)

            # Drop forwarding rule to the internet / dirty line.
            if self.cfg.routing.internet != "none":
                rooter("forward_disable", machine.interface,
                       self.cfg.routing.internet, machine.ip)
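initialize() loads conf/<machinery>.conf and later relies on per-machine attributes such as name, ip, and interface. A hypothetical file for a machinery named virtualbox; all option names are illustrative of the pattern, not taken from this codebase.

[virtualbox]
machines = analyzer1

[analyzer1]
label = analyzer1
platform = windows
ip = 192.168.56.101
interface = vboxnet0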
Example #19
try:
    import httpreplay
    HAVE_HTTPREPLAY = True
except ImportError:
    HAVE_HTTPREPLAY = False

# Imports for the batch sort.
# http://stackoverflow.com/questions/10665925/how-to-sort-huge-files-with-python
# http://code.activestate.com/recipes/576755/
import heapq
from itertools import islice
from collections import namedtuple

Keyed = namedtuple("Keyed", ["key", "obj"])
Packet = namedtuple("Packet", ["raw", "ts"])

log = logging.getLogger(__name__)
cfg = Config()

class Pcap(object):
    """Reads network data from PCAP file."""
    ssl_ports = (443,)

    notified_dpkt = False

    def __init__(self, filepath):
        """Creates a new instance.
        @param filepath: path to PCAP file
        """
        self.filepath = filepath

        # List of all hosts.
        self.hosts = []
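Keyed exists for the batch sort referenced above: namedtuples compare field by field, so wrapping each packet with its sort key lets heapq merge pre-sorted runs by timestamp. A minimal sketch, where merge_packets is a hypothetical helper.

def merge_packets(*sorted_runs):
    """Merge pre-sorted iterables of Packet objects by timestamp."""
    keyed_runs = [(Keyed(p.ts, p) for p in run) for run in sorted_runs]
    for keyed in heapq.merge(*keyed_runs):
        yield keyed.obj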
Example #20
    def __init__(self, dsn=None, schema_check=True, echo=False):
        """
        @param dsn: database connection string.
        @param schema_check: disable or enable the db schema version check.
        @param echo: echo sql queries.
        """
        self._lock = SuperLock()
        cfg = Config()

        if dsn:
            self._connect_database(dsn)
        elif hasattr(cfg, "database") and cfg.database.connection:
            self._connect_database(cfg.database.connection)
        else:
            db_file = os.path.join(DETECTOR_ROOT, "db", "detector.db")
            if not os.path.exists(db_file):
                db_dir = os.path.dirname(db_file)
                if not os.path.exists(db_dir):
                    try:
                        create_folder(folder=db_dir)
                    except DetectorOperationalError as e:
                        raise DetectorDatabaseError(
                            "Unable to create database directory: {0}".format(
                                e))

            self._connect_database("sqlite:///%s" % db_file)

        # Disable SQL logging. Turn it on for debugging.
        self.engine.echo = echo

        # Connection timeout.
        if hasattr(cfg, "database") and cfg.database.timeout:
            self.engine.pool_timeout = cfg.database.timeout
        else:
            self.engine.pool_timeout = 60

        # Let's emit a warning just in case.
        if not hasattr(cfg, "database"):
            log.warning(
                "It appears you don't have a valid `database` "
                "section in conf/detector.conf, using sqlite3 instead.")

        # Create schema.
        try:
            Base.metadata.create_all(self.engine)
        except SQLAlchemyError as e:
            raise DetectorDatabaseError(
                "Unable to create or connect to database: {0}".format(e))

        # Get db session.
        self.Session = sessionmaker(bind=self.engine)

        # Deal with schema versioning.
        # TODO: it's a little bit dirty, needs refactoring.
        tmp_session = self.Session()
        if not tmp_session.query(AlembicVersion).count():
            # Set database schema version.
            tmp_session.add(AlembicVersion(version_num=SCHEMA_VERSION))
            try:
                tmp_session.commit()
            except SQLAlchemyError as e:
                tmp_session.rollback()
                raise DetectorDatabaseError(
                    "Unable to set schema version: {0}".format(e))
            finally:
                tmp_session.close()
        else:
            # Check if db version is the expected one.
            last = tmp_session.query(AlembicVersion).first()
            tmp_session.close()
            if last.version_num != SCHEMA_VERSION and schema_check:
                raise DetectorDatabaseError(
                    "DB schema version mismatch: found {0}, expected {1}. "
                    "Try to apply all migrations (cd utils/db_migration/ && "
                    "alembic upgrade head).".format(last.version_num,
                                                    SCHEMA_VERSION))
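The dsn parameter accepts any SQLAlchemy connection string, so the fallback chain above can be bypassed explicitly. Illustrative invocations (credentials and paths are placeholders):

db = Database(dsn="sqlite:///db/detector.db")
db = Database(dsn="postgresql://user:password@localhost/detector")
db = Database(dsn="mysql://user:password@localhost/detector")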