コード例 #1
0
def vm_conf(request):
    """Render the machinery configuration page.

    Reads the active machinery name from cuckoo.conf, loads the matching
    <machinery>.conf and builds a list of machine entries for the template.
    """
    main_cfg = Config()
    machinery_name = main_cfg.cuckoo.machinery
    machinery_cfg = Config(
        os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name))
    options = machinery_cfg.get(machinery_name)

    machine_ids = options.get("machines")
    if machine_ids:
        machines = []
        for raw_id in machine_ids.strip().split(","):
            entry_id = raw_id.strip()
            entry_opts = machinery_cfg.get(entry_id)
            entry = Dictionary()
            entry.id = entry_id
            entry.label = entry_opts["label"]
            entry.platform = entry_opts["platform"]
            entry.tags = entry_opts.get("tags", None)
            entry.ip = entry_opts["ip"]
            entry.snapshot = entry_opts.get("snapshot", None)
            machines.append(entry)
    else:
        # No machines configured: pass None (not []) to the template.
        machines = None

    context = {
        "machines": machines,
        "options": options,
        "machinery": machinery_name
    }
    return render_to_response("analysis/vm_conf.html", context,
                              context_instance=RequestContext(request))
コード例 #2
0
ファイル: views.py プロジェクト: AnyMaster/el-jefe
def vm_conf(request):
    """Display the configuration of the currently selected machinery."""
    machinery_name = Config().cuckoo.machinery
    conf_path = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name)
    machinery_cfg = Config(conf_path)
    options = machinery_cfg.get(machinery_name)

    # Default to None so the template can tell "nothing configured" apart
    # from an empty list.
    machines = None
    if options.get("machines"):
        machines = []
        for raw in options.get("machines").strip().split(","):
            vm_id = raw.strip()
            opts = machinery_cfg.get(vm_id)
            vm = Dictionary()
            vm.id = vm_id
            vm.label = opts["label"]
            vm.platform = opts["platform"]
            vm.tags = opts.get("tags", None)
            vm.ip = opts["ip"]
            vm.snapshot = opts.get("snapshot", None)
            machines.append(vm)

    return render_to_response(
        "analysis/vm_conf.html",
        {"machines": machines,
         "options": options,
         "machinery": machinery_name},
        context_instance=RequestContext(request))
コード例 #3
0
def startBaseline(machinery, vm):
    result = False

    cuckoo_conf = Config("cuckoo")
    vmarch = cuckoo_conf.get("cuckoo").get("machinery")

    try:
        # Starting VM
        machinery.start(vm.label)
        print "[INFO]: Starting VM " + vm.label + "."

        # Wait for connection
        time.sleep(10)

        # Create memory dump
        machinery.dump_memory(vm.label, MEMDUMP_ROOT + vm.label + ".dmp")
        print "[INFO]: Dumping VM " + vm.label + "."

        # Stopping VM
        machinery.stop(vm.label)
        print "[INFO]: Stopping VM " + vm.label + "."

        # Start analysis
        vm_conf = Config(vmarch)
        profile = vm_conf.get(vm.label).get("mem_profile")
        if not profile:
            profile = Config("memory").basic.guest_profile

        dumppath = os.path.join(MEMDUMP_ROOT, vm.label + ".dmp")
        vol = VolatilityManager(memfile=dumppath, osprofile=profile)

        print "[INFO]: Volatility analysis started..."
        data = vol.run(manager=vmarch, vm=vm.label)

        # Write to JSON
        with open(MEMDUMP_ROOT + vm.label + '.json', 'w') as outfile:
            json.dump(data,
                      outfile,
                      sort_keys=False,
                      indent=4,
                      encoding="utf-8")
        print "[INFO]: JSON dump of baseline VM " + vm.label + " succesfully created."

        # Delete memory dump
        os.remove(MEMDUMP_ROOT + vm.label + ".dmp")
        print "[INFO]: Memory dump of VM " + vm.label + " succesfully deleted."

        result = True
    except:
        pass

    return result
コード例 #4
0
    def setExternalParameters(self):
        '''
        Configures the plugin ratings and any other parameters which were passed.
        Uses two sources - the ratings.conf file and the tasks custom field (dictionary dumped as a json).
        '''
        # First source: rating weights from ratings.conf.
        try:
            self.analysisConfig = CuckooConfig(self.conf_path)
            ratings_path = os.path.join(self.rulePath, "ratings.conf")
            for key, value in CuckooConfig(ratings_path).get("ratings").iteritems():
                self.params[key] = value
        except Exception as e:
            log.warning("Preconfigure - %s" % str(e))
            return False

        # Second source: the task's "custom" field, a JSON-encoded dict.
        try:
            decoded = json.loads(
                self.analysisConfig.get("analysis").get("custom"))
            for key, value in decoded.iteritems():
                self.params[key] = value
        except ValueError:
            # The string "None" in the custom field is not an error;
            # any other unparseable value is reported and aborts.
            if self.analysisConfig.get("analysis").get("custom") != "None":
                log.warning("Couldn't load json object from custom, skip")
                return False
        return True
コード例 #5
0
class RunAuxiliary(object):
    """Auxiliary modules manager.

    Loads every registered auxiliary plugin, starts the ones enabled in
    conf/auxiliary.conf for the current task/machine, and stops them
    again once the analysis is over.
    """

    def __init__(self, task, machine):
        # Context handed to each auxiliary module before it starts.
        self.task = task
        self.machine = machine
        self.cfg = Config(
            cfg=os.path.join(CUCKOO_ROOT, "conf", "auxiliary.conf"))
        # Modules that started successfully; stop() only touches these.
        self.enabled = []

    def start(self):
        """Instantiate, configure and start all enabled auxiliary modules."""
        auxiliary_list = list_plugins(group="auxiliary")
        if auxiliary_list:
            for module in auxiliary_list:
                try:
                    current = module()
                except:
                    log.exception("Failed to load the auxiliary module "
                                  "\"{0}\":".format(module))
                    return

                module_name = inspect.getmodule(current).__name__
                if "." in module_name:
                    module_name = module_name.rsplit(".", 1)[1]

                try:
                    options = self.cfg.get(module_name)
                except CuckooOperationalError:
                    log.debug(
                        "Auxiliary module %s not found in "
                        "configuration file", module_name)
                    continue

                if not options.enabled:
                    continue

                current.set_task(self.task)
                current.set_machine(self.machine)
                current.set_options(options)

                try:
                    current.start()
                except NotImplementedError:
                    pass
                except Exception as e:
                    # Restored handler (was commented out): a failing
                    # module must not abort the whole analysis.
                    log.warning("Unable to start auxiliary module %s: %s",
                                module_name, e)
                else:
                    # Was logged as "Stopped" by mistake; the module has
                    # just been started here.
                    log.debug("Started auxiliary module: %s", module_name)
                    self.enabled.append(current)

    def stop(self):
        """Stop every module that start() managed to launch."""
        for module in self.enabled:
            try:
                module.stop()
            except NotImplementedError:
                pass
            except Exception as e:
                log.warning("Unable to stop auxiliary module: %s", e)
            else:
                log.debug("Stopped auxiliary module: %s", module)
コード例 #6
0
ファイル: plugins.py プロジェクト: 453483289/cuckoo
class RunAuxiliary(object):
    """Auxiliary modules manager."""

    def __init__(self, task, machine):
        # Context handed to each auxiliary module before it runs.
        self.task = task
        self.machine = machine
        self.cfg = Config("auxiliary")
        # Successfully started modules, stopped again in stop().
        self.enabled = []

    def start(self):
        """Instantiate, configure and start every enabled auxiliary module."""
        for plugin in list_plugins(group="auxiliary"):
            try:
                instance = plugin()
            except:
                log.exception("Failed to load the auxiliary module "
                              "\"{0}\":".format(plugin))
                return

            # The unqualified module name doubles as the config section key.
            name = inspect.getmodule(instance).__name__
            if "." in name:
                name = name.rsplit(".", 1)[1]

            try:
                options = self.cfg.get(name)
            except CuckooOperationalError:
                log.debug("Auxiliary module %s not found in "
                          "configuration file", name)
                continue

            if not options.enabled:
                continue

            instance.set_task(self.task)
            instance.set_machine(self.machine)
            instance.set_options(options)

            try:
                instance.start()
            except NotImplementedError:
                pass
            except Exception as e:
                # A broken module should not take the analysis down.
                log.warning("Unable to start auxiliary module %s: %s",
                            name, e)
            else:
                log.debug("Started auxiliary module: %s",
                          instance.__class__.__name__)
                self.enabled.append(instance)

    def stop(self):
        """Stop every module that start() managed to launch."""
        for instance in self.enabled:
            try:
                instance.stop()
            except NotImplementedError:
                pass
            except Exception as e:
                log.warning("Unable to stop auxiliary module: %s", e)
            else:
                log.debug("Stopped auxiliary module: %s",
                          instance.__class__.__name__)
コード例 #7
0
def init_routing():
    """Initialize and check whether the routing information is correct."""
    cuckoo = Config()
    vpn = Config("vpn")

    if vpn.vpn.enabled:
        # Validate every configured VPN, expose it through the global
        # `vpns` mapping, and (re)enable NAT on its network interface.
        for raw_name in vpn.vpn.vpns.split(","):
            name = raw_name.strip()
            if not name:
                continue

            if not hasattr(vpn, name):
                raise CuckooStartupError(
                    "Could not find VPN configuration for %s" % name
                )

            entry = vpn.get(name)

            if not rooter("nic_available", entry.interface):
                raise CuckooStartupError(
                    "The network interface that has been configured for "
                    "VPN %s is not available." % entry.name
                )

            vpns[entry.name] = entry

            # Disable & enable NAT on this network interface. Disable it just
            # in case we still had the same rule from a previous run.
            rooter("disable_nat", entry.interface)
            rooter("enable_nat", entry.interface)

    route = cuckoo.routing.route
    if route not in ("none", "internet"):
        # The default route points at a VPN; it must exist and be enabled.
        if not vpn.vpn.enabled:
            raise CuckooStartupError(
                "A VPN has been configured as default routing interface for "
                "VMs, but VPNs have not been enabled in vpn.conf"
            )

        if route not in vpns:
            raise CuckooStartupError(
                "The VPN defined as default routing target has not been "
                "configured in vpn.conf."
            )

    if cuckoo.routing.internet != "none":
        # A dirty line is defined: check it exists, then NAT through it.
        if not rooter("nic_available", cuckoo.routing.internet):
            raise CuckooStartupError(
                "The network interface that has been configured as dirty "
                "line is not available."
            )

        rooter("enable_nat", cuckoo.routing.internet)
コード例 #8
0
ファイル: cuckooinbox.py プロジェクト: primmus/cuckooinbox
def main():
        
    def checkConfigs():
        '''check for config file and define variables'''
        config = os.path.join(CUCKOO_ROOT,"cuckooinbox","cuckooinbox.conf")
        if not os.path.exists(config):
            raise CuckooStartupError("Config file does not exist at path: %s" % config)
    
    checkConfigs()
    config = Config(cfg=os.path.join(CUCKOO_ROOT,"cuckooinbox","cuckooinbox.conf"))
    config = config.get('cuckooinbox')
    username = config['username']
    passwd = config['passwd']
    imap = config['imap']
    imap_ssl = config['imap_ssl']
    email_whitelist = config['email_whitelist']
    interval = config['interval']
    
    '''welcome screen'''
    print '\n\n'
    print '\t\t@\tsend your malware to %s !\n' % (username)
    welcome_message = '           _,\n         ((\')\n        /\'--)\n        | _.\'\n       / |`=\n      \'^\''
    print welcome_message

    '''thread main function'''        
    def analyze(message):
        request = CuckooRequest(message)
        request.fetch(message)
        request.sendReport()
    
    '''define imap connection'''
    server = IMAPClient(imap, use_uid=True, ssl=imap_ssl)
    
    '''connect, login'''
    server.login(username, passwd)
   
    while True:
        try:
            '''set retrieve folder'''
            select_info = server.select_folder('INBOX')
            '''search for new message from email whitelist'''
            for account in email_whitelist.split(','):
                messages = server.search('UNSEEN FROM "%s"' % account)
                '''analyze emails from one account at a time'''
                if messages:
                    for message in messages:
                        thread = threading.Thread( target = analyze, args = (message,))
			thread.daemon = True
                        thread.start()
            time.sleep(interval)
        except:
            '''reconnect to mail account'''
            server = IMAPClient(imap, use_uid=True, ssl=imap_ssl)
            server.login(username, passwd)
            pass
コード例 #9
0
def render_index(request, kwargs=None):
    """Render the submission index page.

    @param request: Django request object.
    @param kwargs: optional extra template context entries (merged last).
    """
    # Mutable default argument ({}) replaced by None to avoid a dict
    # shared across calls.
    if kwargs is None:
        kwargs = {}

    files = os.listdir(
        os.path.join(settings.CUCKOO_PATH, "analyzer", "windows", "modules",
                     "packages"))

    # Load the docker machinery configuration. The original built a
    # default Config() and then manually re-ran __init__("docker-mach");
    # constructing it once with the right name is equivalent for the
    # single section read below.
    cfg_docker = Config("docker-mach")
    docker_section = cfg_docker.get("docker-mach").get("images")
    docker_images = re.split(r'\s*,\s*', docker_section)

    packages = []
    for name in files:
        name = os.path.splitext(name)[0]
        if name == "__init__":
            continue

        packages.append(name)

    # Prepare a list of VM names, description label based on tags.
    machines = []
    for machine in Database().list_machines():
        tags = [tag.name for tag in machine.tags]

        if tags:
            label = machine.label + ": " + ", ".join(tags)
        else:
            label = machine.label

        machines.append((machine.label, label))

    # Prepend ALL/ANY options.
    machines.insert(0, ("", "First available"))
    machines.insert(1, ("all", "All"))

    values = {
        "packages": sorted(packages),
        "machines": machines,
        "vpns": vpns.values(),
        "route": cfg.routing.route,
        "internet": cfg.routing.internet,
        "docker_images": docker_images,
    }

    values.update(kwargs)
    return render_to_response("submission/index.html",
                              values,
                              context_instance=RequestContext(request))
コード例 #10
0
def startDefault():
    """Load and initialize the machinery module named in cuckoo.conf.

    @return: an initialized machinery manager instance.
    """
    cuckoo_conf = Config("cuckoo")
    vmarch = cuckoo_conf.get("cuckoo").get("machinery")

    # Import the configured machinery plugin and grab its class.
    import_plugin("modules.machinery." + vmarch)
    machinery_plugin = list_plugins("machinery")[0]

    # (Removed an unused local that built the conf path but never used it.)
    machinery = machinery_plugin()
    machinery.set_options(Config(vmarch))
    machinery.initialize(vmarch)

    return machinery
コード例 #11
0
ファイル: views.py プロジェクト: open-nsm/dockoo-cuckoo
def render_index(request, kwargs=None):
    """Render the submission index page.

    @param request: Django request object.
    @param kwargs: optional extra context values merged into the template.
    """
    # A mutable default ({}) is shared between calls; use None instead.
    if kwargs is None:
        kwargs = {}

    files = os.listdir(os.path.join(settings.CUCKOO_PATH, "analyzer", "windows", "modules", "packages"))

    # One-step construction replaces Config() followed by a manual
    # __init__("docker-mach") re-call on the same instance; only the
    # "docker-mach" section is read below.
    cfg_docker = Config("docker-mach")
    docker_section = cfg_docker.get("docker-mach").get("images")
    docker_images = re.split(r'\s*,\s*', docker_section)

    packages = []
    for name in files:
        name = os.path.splitext(name)[0]
        if name == "__init__":
            continue

        packages.append(name)

    # Prepare a list of VM names, description label based on tags.
    machines = []
    for machine in Database().list_machines():
        tags = [tag.name for tag in machine.tags]

        if tags:
            label = machine.label + ": " + ", ".join(tags)
        else:
            label = machine.label

        machines.append((machine.label, label))

    # Prepend ALL/ANY options.
    machines.insert(0, ("", "First available"))
    machines.insert(1, ("all", "All"))

    values = {
        "packages": sorted(packages),
        "machines": machines,
        "vpns": vpns.values(),
        "route": cfg.routing.route,
        "internet": cfg.routing.internet,
        "docker_images": docker_images,
    }

    values.update(kwargs)
    return render_to_response("submission/index.html", values,
                              context_instance=RequestContext(request))
コード例 #12
0
ファイル: cuckooinbox.py プロジェクト: primmus/cuckooinbox
    def __init__(self, message):
        """Read cuckooinbox.conf, open the IMAP session and reset all
        per-request state for this message."""
        self.message = message

        # Cuckooinbox config variables.
        cfg = Config(cfg=os.path.join(CUCKOO_ROOT,"cuckooinbox","cuckooinbox.conf")).get('cuckooinbox')
        self.username = cfg['username']
        self.passwd = cfg['passwd']
        self.imap = cfg['imap']
        self.imap_ssl = cfg['imap_ssl']
        self.smtp_server = cfg['smtp']
        self.interval = cfg['interval']
        self.email_whitelist = cfg['email_whitelist']
        self.url_limit = cfg['url_limit']
        self.attachment_limit = cfg['attachment_limit']
        self.zip_reports = cfg['zip_reports']
        self.zip_password = cfg['zip_password']
        self.url_blacklist = cfg['url_blacklist']
        self.url_file_backlist = cfg['url_file_backlist']
        self.machine = cfg['machine']

        # IMAP connection, logged in immediately.
        self.server = IMAPClient(self.imap, use_uid=True, ssl=self.imap_ssl)
        self.server.login(self.username, self.passwd)
        self.attachment_counter = 0

        # Outgoing/response message state.
        self.msg = MIMEMultipart()
        self.response_msg = MIMEMultipart()
        self.response_urls = []
        self.response_attachments = []
        self.sender = ''
        self.subject = ''
        self.cc_list = []

        # Logging object.
        self.log_entry = Logger('cuckooinbox.log')

        # Cuckoo variables.
        self.taskids = []
        self.db = Database()
        self.url_counter = 0  # tracks url count to not exceed url_limit
コード例 #13
0
ファイル: cuckooinbox.py プロジェクト: 0day1day/cuckooinbox
    def __init__(self, message):
        """Load cuckooinbox.conf, log into the IMAP account and reset
        all per-request state for this message."""
        self.message = message

        # Cuckooinbox config variables.
        cfg = Config(cfg=os.path.join(CUCKOO_ROOT,"cuckooinbox","cuckooinbox.conf")).get('cuckooinbox')
        self.username = cfg['username']
        self.passwd = cfg['passwd']
        self.imap = cfg['imap']
        self.imap_ssl = cfg['imap_ssl']
        self.smtp_server = cfg['smtp']
        self.interval = cfg['interval']
        self.archive_folder = cfg['archive_folder']
        self.email_whitelist = cfg['email_whitelist']
        self.url_limit = cfg['url_limit']
        self.attachment_limit = cfg['attachment_limit']
        self.zip_reports = cfg['zip_reports']
        self.zip_password = cfg['zip_password']
        self.url_blacklist = cfg['url_blacklist']
        self.url_file_backlist = cfg['url_file_backlist']
        self.machine = cfg['machine']

        # IMAP connection, logged in right away.
        self.server = IMAPClient(self.imap, use_uid=True, ssl=self.imap_ssl)
        self.server.login(self.username, self.passwd)
        self.attachment_counter = 0

        # Outgoing/response message state.
        self.msg = MIMEMultipart()
        self.response_msg = MIMEMultipart()
        self.response_urls = []
        self.response_attachments = []
        self.sender = ''
        self.subject = ''
        self.cc_list = []

        # Logging object.
        self.log_entry = Logger('cuckooinbox.log')

        # Cuckoo variables.
        self.taskids = []
        self.db = Database()
        self.url_counter = 0  # tracks url count to not exceed url_limit
コード例 #14
0
ファイル: startup.py プロジェクト: Missuniverse110/cuckoo
def init_modules():
    """Initializes plugins: processing, signatures, enabled reporting
    modules and the configured machine manager."""
    log.debug("Importing modules...")

    # Import all processing modules.
    import_package(modules.processing)
    # Import all signatures.
    import_package(modules.signatures)

    # Import only enabled reporting modules.
    report_cfg = Config(cfg=os.path.join(CUCKOO_ROOT,
                                         "conf",
                                         "reporting.conf"))

    for loader, name, ispkg in pkgutil.iter_modules(modules.reporting.__path__):
        if ispkg:
            continue

        try:
            options = report_cfg.get(name)
        except AttributeError:
            # Bug fixed: the message referenced an undefined "module_name"
            # (NameError) and execution fell through with "options" unbound.
            log.debug("Reporting module %s not found in "
                      "configuration file" % name)
            continue

        if not options.enabled:
            continue

        import_plugin("%s.%s" % (modules.reporting.__name__, name))

    # Import machine manager.
    import_plugin("modules.machinemanagers.%s"
                  % Config().cuckoo.machine_manager)

    # Log a tree of everything that was imported, per category.
    for category, mods in list_plugins().items():
        log.debug("Imported \"%s\" modules:" % category)

        for mod in mods:
            if mod == mods[-1]:
                log.debug("\t `-- %s" % mod.__name__)
            else:
                log.debug("\t |-- %s" % mod.__name__)
コード例 #15
0
ファイル: startup.py プロジェクト: xarly/cuckoo
def init_modules():
    """Initializes plugins: processing, signatures, enabled reporting
    modules and the configured machine manager."""
    log.debug("Importing modules...")

    # Import all processing modules.
    import_package(modules.processing)
    # Import all signatures.
    import_package(modules.signatures)

    # Import only enabled reporting modules.
    report_cfg = Config(
        cfg=os.path.join(CUCKOO_ROOT, "conf", "reporting.conf"))

    for loader, name, ispkg in pkgutil.iter_modules(
            modules.reporting.__path__):
        if ispkg:
            continue

        try:
            options = report_cfg.get(name)
        except (AttributeError, CuckooOperationalError):
            log.debug("Reporting module %s not found in "
                      "configuration file" % name)
            # Bug fixed: without this continue, "options" was unbound (or
            # stale from the previous iteration) on the next line.
            continue

        if not options.enabled:
            continue

        import_plugin("%s.%s" % (modules.reporting.__name__, name))

    # Import machine manager.
    import_plugin("modules.machinemanagers.%s" %
                  Config().cuckoo.machine_manager)

    # Log a tree of everything that was imported, per category.
    for category, mods in list_plugins().items():
        log.debug("Imported \"%s\" modules:" % category)

        for mod in mods:
            if mod == mods[-1]:
                log.debug("\t `-- %s" % mod.__name__)
            else:
                log.debug("\t |-- %s" % mod.__name__)
コード例 #16
0
class TestConfig:
    """Tests for the Config class (nose-style fixture methods)."""

    CONF_EXAMPLE = """
[cuckoo]
debug = off
analysis_timeout = 120
critical_timeout = 600
delete_original = off
machine_manager = kvm
use_sniffer = no
tcpdump = /usr/sbin/tcpdump
interface = vboxnet0
"""

    def setUp(self):
        # mkstemp() also returns an open OS-level file descriptor; close
        # it instead of leaking one per test run.
        fd, self.path = tempfile.mkstemp()
        os.close(fd)
        self._load_conf(self.CONF_EXAMPLE)
        self.c = Config(cfg=self.path)

    def tearDown(self):
        # Remove the temporary config file created by setUp().
        if os.path.exists(self.path):
            os.remove(self.path)

    def _load_conf(self, conf):
        """Loads a configuration from a string.
        @param conf: configuration string.
        """
        with open(self.path, "w") as f:
            f.write(conf)

    def test_get_option_exist(self):
        """Fetch an option of each type from default config file."""
        assert_equals(self.c.get("cuckoo")["debug"], False)
        assert_equals(self.c.get("cuckoo")["tcpdump"], "/usr/sbin/tcpdump")
        assert_equals(self.c.get("cuckoo")["critical_timeout"], 600)

    def test_config_file_not_found(self):
        assert Config("foo")

    @raises(CuckooOperationalError)
    def test_get_option_not_found(self):
        self.c.get("foo")

    @raises(CuckooOperationalError)
    def test_get_option_not_found_in_file_not_found(self):
        self.c = Config("bar")
        self.c.get("foo")
コード例 #17
0
ファイル: config_tests.py プロジェクト: BwRy/test-av
class TestConfig:
    """Tests for the Config class (nose-style fixture methods)."""

    CONF_EXAMPLE = """
[cuckoo]
debug = off
analysis_timeout = 120
critical_timeout = 600
delete_original = off
machine_manager = kvm
use_sniffer = no
tcpdump = /usr/sbin/tcpdump
interface = vboxnet0
"""

    def setUp(self):
        # mkstemp() hands back an open file descriptor as well as the
        # path; close the fd so it is not leaked once per test.
        fd, self.file = tempfile.mkstemp()
        os.close(fd)
        self._load_conf(self.CONF_EXAMPLE)
        self.c = Config(self.file)

    def tearDown(self):
        # Clean up the temporary configuration file.
        if os.path.exists(self.file):
            os.remove(self.file)

    def _load_conf(self, conf):
        """Loads a configuration from a string.
        @param conf: configuration string.
        """
        with open(self.file, "w") as f:
            f.write(conf)

    def test_get_option_exist(self):
        """Fetch an option of each type from default config file."""
        assert_equals(self.c.get("cuckoo")["debug"], False)
        assert_equals(self.c.get("cuckoo")["tcpdump"], "/usr/sbin/tcpdump")
        assert_equals(self.c.get("cuckoo")["critical_timeout"], 600)

    def test_config_file_not_found(self):
        assert Config("foo")

    @raises(CuckooOperationalError)
    def test_get_option_not_found(self):
        self.c.get("foo")

    @raises(CuckooOperationalError)
    def test_get_option_not_found_in_file_not_found(self):
        self.c = Config("bar")
        self.c.get("foo")
コード例 #18
0
    def setExternalParameters(self):
        '''
        Configures the plugin ratings and any other parameters which were passed.
        Uses two sources - the ratings.conf file and the tasks custom field (dictionary dumped as a json).
        '''
        # Source one: rating weights from ratings.conf.
        try:
            self.analysisConfig = CuckooConfig(self.conf_path)
            ratings = CuckooConfig(os.path.join(self.rulePath, "ratings.conf")).get("ratings")
            for name, weight in ratings.iteritems():
                self.params[name] = weight
        except Exception as e:
            log.warning("Preconfigure - %s" % str(e))
            return False

        # Source two: the task's custom field, a JSON-encoded dict.
        try:
            decoded = json.loads(self.analysisConfig.get("analysis").get("custom"))
            for name, value in decoded.iteritems():
                self.params[name] = value
        except ValueError:
            # The literal string "None" is a legitimate "no custom data"
            # marker; anything else that fails to parse aborts.
            if self.analysisConfig.get("analysis").get("custom") != "None":
                log.warning("Couldn't load json object from custom, skip")
                return False
        return True
コード例 #19
0
class VolatilityAnalysis(Processing):
    """Volatility memory dump analysis.

    Runs a configurable set of heuristics (connected processes, running
    services, hidden processes, orphan threads, API hooks, malfind) against
    the post-analysis memory dump and combines the per-PID ratings into a
    single dictionary for the reporting stage.
    """
    volatilityConfig = None  # shared volatility conf.ConfObject for all modules
    analysisConfig = None    # per-analysis Cuckoo config (analysis.conf)
    rulePath = os.path.join(CUCKOO_ROOT, 'conf', 'volatility')
    params = None            # heuristic ratings / options (see __init__)
    tagToRating = None       # maps result tags to rating parameter names
    rules = None             # whitelist rules loaded from the rule files

    def __init__(self):
        # Default heuristic ratings; may be overridden by ratings.conf or
        # by the task's "custom" field (see setExternalParameters). A rating
        # of 0 disables the corresponding heuristic in run().
        self.params = {
            "rating_whitelist": 0.5,
            "rating_services": 0.5,
            "rating_hidden": 1.5,
            "rating_orphan": 1.5,
            "rating_api_unknown": 1.5,
            "rating_api_known": 0.5,
            "rating_malfind_pe": 1.5,
            "rating_malfind": 0.5,
            "none": 0
        }
        self.tagToRating = {
            "connected_processes": "rating_whitelist",
            "running_services": "rating_services",
            "hidden_processes": "rating_hidden",
            "orphan_threads": "rating_orphan",
            "api_hooks_unknown": "rating_api_unknown",
            "api_hooks_known": "rating_api_known",
            "malfind_executable": "rating_malfind_pe",
            "malfind_no_executable": "rating_malfind",
            "none": "none"
        }
        self.rules = {}
        self.volatilityConfig = conf.ConfObject()
        self.volatilityConfig.final = True
        self.volatilityConfig.verbose = False
        cache.disable_caching(None, None, None, None)
        MemoryRegistry.Init()
        self.key = "volatility"

    def preConfigure(self):
        '''
        Checks if the plugin dependencies are satisfied.
        Loads the rating configuration and the analysis configuration.
        @return: True when the analysis can proceed, False otherwise.
        '''
        if len(MISSING_DEPENDENCIES) > 0:
            log.warning("Dependencies missing: %s, skip" % ','.join(MISSING_DEPENDENCIES))
            return False

        memdumpPath = os.path.abspath(os.path.join(self.analysis_path, 'post.memdump'))
        if not os.path.isfile(memdumpPath):
            log.warning("Memory dump '%s' not found for Volatility, skip" % memdumpPath)
            return False
        else:
            self.volatilityConfig.LOCATION = "file://%s" % memdumpPath

        if not self.setExternalParameters():
            return False

        if not self.setVolatilityProfile():
            log.warning("Couldn't determine which volatility profile to use, skip")
            return False

        return True

    def setExternalParameters(self):
        '''
        Configures the plugin ratings and any other parameters which were passed.
        Uses two sources - the ratings.conf file and the tasks custom field (dictionary dumped as a json).
        @return: True on success, False when either source could not be applied.
        '''
        try:
            self.analysisConfig = CuckooConfig(self.conf_path)
            ratingsConf = CuckooConfig(os.path.join(self.rulePath, "ratings.conf"))
            for rating in ratingsConf.get("ratings").iteritems():
                self.params[rating[0]] = rating[1]
        except Exception as e:
            log.warning("Preconfigure - %s" % str(e))
            return False

        try:
            params = json.loads(self.analysisConfig.get("analysis").get("custom"))
            for param in params.iteritems():
                self.params[param[0]] = param[1]
        except ValueError as e:
            # "None" means no custom data was supplied, which is acceptable.
            if self.analysisConfig.get("analysis").get("custom") != "None":
                log.warning("Couldn't load json object from custom, skip")
                return False
        return True

    def loadRuleFiles(self):
        """Load whitelist rule files for the heuristics that are enabled.

        @return: True on success, False when any rule file fails to load.
        """
        try:
            if self.params.get("rating_whitelist", 0) > 0:
                self.rules["connected_processes"] = vprl.loadConnectedProcessesConf(os.path.join(self.rulePath, 'connected_processes.conf'))
            if self.params.get("rating_services", 0) > 0:
                self.rules["running_services"] = vprl.loadRunningServicesConf(os.path.join(self.rulePath, 'running_services.conf'))
            if self.params.get("rating_api_unknown", 0) > 0 or self.params.get("rating_api_known", 0) > 0:
                self.rules["api_hooks"] = vprl.loadApiHooksConf(os.path.join(self.rulePath, 'api_hooks.conf'))
        except Exception as e:
            log.warning("Volatility processor - %s, skip" % str(e))
            return False
        return True

    def run(self):
        """Run volatility processing.
        @return: dict of per-PID combined ratings, empty on configuration failure.
        """
        matches = {'connected_processes': None,
                   'running_services': None,
                   'hidden_processes': None,
                   'orphan_threads': None,
                   'api_hooks': None,
                   'malfind': None
                   }

        if not self.preConfigure():
            return {}
        if not self.loadRuleFiles():
            return {}

        log.info("Volatile Systems Volatility Framework {0} - cuckoo processor\n".format(constants.VERSION))
        log.debug(self.params)

        # A heuristic only runs when its rating is positive.
        if self.params.get("rating_whitelist", 0) > 0:
            matches['connected_processes'] = self.heuristicConnectedProcesses()
        if self.params.get("rating_services", 0) > 0:
            matches['running_services'] = self.heuristicRunningServices()
        if self.params.get("rating_hidden", 0) > 0:
            matches['hidden_processes'] = self.heuristicHiddenProcesses()
        if self.params.get("rating_orphan", 0) > 0:
            matches['orphan_threads'] = self.heuristicOrphanThreads()
        if self.params.get("rating_api_unknown", 0) > 0 or self.params.get("rating_api_known", 0) > 0:
            matches['api_hooks'] = self.heuristicApiHooks()
        if self.params.get("rating_malfind", 0) > 0 or self.params.get("rating_malfind_pe", 0) > 0:
            matches['malfind'] = self.heuristicMalfind()
        matches = self.combineResultsForPids(matches)
        log.debug(matches)
        return matches

    def setVolatilityProfile(self):
        '''
        Gets called in order to set the profile which will be used when processing the dump file.
        If a profile was supplied and it exists then the profile is set and the function returns.
        If a profile wasn't supplied or it doesn't exist then detection if performed by calling self.detectSystem.
        @return: True if successfully determined and set the profile and otherwise False.
        '''
        if self.params:
            profile = self.params.get('operating_system')
        else:
            profile = None

        # The custom field serializes missing values as the string "None".
        if profile == 'None':
            profile = None

        if profile is not None and MemoryRegistry.PROFILES.objects.get(profile) is None:
            log.warning("Specified profile '%s' not found. Attempting to detect profile." % profile)
            profile = None

        if profile is None:
            profile = self.detectSystem()

        self.volatilityConfig.PROFILE = profile

        if profile is None:
            return False
        return True

    def detectSystem(self):
        '''
        Attempts to identify the profile to use for the supplied dump file.
        Uses the imageinfo command in order to determine the profile.
        @return: the profile name if successfully determined, otherwise None.
        '''
        profile = None
        # FIX: "match" was previously unbound (NameError) when no
        # "Suggested Profile(s)" line was present in the imageinfo output.
        match = None
        result = self.runModule("imageinfo")
        profileSearch = re.compile(r"(\w+)")
        for line in result:
            if line[0] == "Suggested Profile(s)":
                match = profileSearch.match(line[1])
                break
        if match is not None:
            profile = match.group(1)
            if MemoryRegistry.PROFILES.objects.get(profile) is None:
                profile = None
        return profile

    def splitPath(self, path_str):
        """Split a path into (head, tail), trying Windows separators first.

        @param path_str: path string taken from the memory dump.
        @return: (head, tail) tuple as produced by ntpath/posixpath split.
        """
        # Dump data usually holds Windows paths; fall back to the host's
        # separator when ntpath did not split anything.
        result = ntpath.split(path_str)
        if result[0] == path_str:
            result = path.split(path_str)
        return result

    def runModule(self, module, method="calculate"):
        """Instantiate and run a volatility plugin command.

        @param module: plugin command name registered in MemoryRegistry.
        @param method: method to invoke on the command; pass a falsy value
            to get the command instance back without running it.
        @return: the method's result (or the command instance), None when the
            module is unknown or a VolatilityException occurred.
        """
        log.debug("Attempting to run %s" % module)
        try:
            if module in MemoryRegistry.PLUGIN_COMMANDS.commands:
                command = MemoryRegistry.PLUGIN_COMMANDS.commands[module](self.volatilityConfig)
                self.volatilityConfig.parse_options()
                if method:
                    return getattr(command, method)()
                else:
                    return command
        except exceptions.VolatilityException as e:
            log.error(e)

    def runHeuristic(self, gather, filter=None):
        """Run a heuristic as a gather step plus an optional filter step.

        @param gather: callable producing the raw candidate objects.
        @param filter: optional callable that whitelists/reduces them.
        @return: the (possibly filtered) objects.
        """
        objects = gather()
        log.debug(str(objects))
        if filter:
            objects = filter(objects)
            log.debug(str(objects))
        return objects

    def heuristicConnectedProcesses(self):
        return self.runHeuristic(self.heuristicConnectedProcessesGather, self.heuristicConnectedProcessesFilter)

    def heuristicConnectedProcessesGather(self):
        """Collect per-PID socket/connection info via sockscan + connscan.

        @return: dict keyed by pid; each value maps source ports to socket
            objects plus 'filename'/'path' entries filled from dlllist.
        """
        sockScan = self.runModule('sockscan')
        connScan = self.runModule('connscan')
        objects = {}

        for sock_obj in sockScan:
            obj = {'pid': str(sock_obj.Pid),
                   'source port': str(sock_obj.LocalPort),
                   'source port int': int(sock_obj.LocalPort),
                   'destination port': None,  # will be overridden by connScan result if found
                   'protocol': protos.protos.get(sock_obj.Protocol.v(), "-"),
                   'ip': str(sock_obj.LocalIpAddress)}
            log.debug("SOCK OBJ - %s" % str(obj))
            if objects.get(obj['pid']) is None:
                objects[obj['pid']] = {'filename': None, 'path': None}
            objects[obj['pid']][obj['source port']] = obj

        for tcp_obj in connScan:
            obj = {'source port': str(tcp_obj.LocalPort),
                   'source port int': int(tcp_obj.LocalPort),
                   'destination port': int(tcp_obj.RemotePort),
                   'remote ip': str(tcp_obj.RemoteIpAddress),
                   'protocol': 'TCP',
                   'ip': str(tcp_obj.LocalIpAddress),
                   'pid': str(tcp_obj.Pid)}
            log.debug("TCP OBJ - %s" % str(obj))
            if objects.get(obj['pid']) is None:
                objects[obj['pid']] = {'filename': None, 'path': None}

            if objects[obj['pid']].get(obj['source port']) is None:
                objects[obj['pid']][obj['source port']] = obj
            else:
                # sockscan already saw this socket; enrich it with the
                # remote endpoint discovered by connscan.
                objects[obj['pid']][obj['source port']]['destination port'] = obj['destination port']
                objects[obj['pid']][obj['source port']]['remote ip'] = obj['remote ip']

        # Restrict dlllist to the PIDs of interest, then restore the filter.
        self.volatilityConfig.PID = ','.join(objects.iterkeys())
        dllList = self.runModule('dlllist')
        for task in dllList:
            pid = str(task.UniqueProcessId)
            filename = task.ImageFileName
            if task.Peb:
                objects[pid]['path'] = str(task.Peb.ProcessParameters.ImagePathName)
            objects[pid]['filename'] = filename
        self.volatilityConfig.PID = None
        return objects

    def heuristicConnectedProcessesFilter(self, objects):
        """Whitelist sockets matching the connected_processes rules.

        An entry is set to None when whitelisted; PIDs whose remaining
        entries are only 'filename'/'path' are dropped.
        @return: list of PIDs with at least one non-whitelisted socket.
        """
        for rule in self.rules["connected_processes"]:
            for pidObjs in objects:
                if rule['path'] != "*":
                    if objects[pidObjs]['path']:
                        if objects[pidObjs]['path'] != rule['path']:
                            log.debug("%s skipped - diff path %s" % (pidObjs, rule['path']))
                            continue
                    elif rule['filename'] != objects[pidObjs]['filename']:
                        log.debug("%s skipped - diff filename %s" % (pidObjs, rule['filename']))
                        continue
                for obj in objects[pidObjs]:
                    if obj == "path" or obj == "filename":
                        continue
                    if objects[pidObjs][obj] is None:
                        log.debug('%s - %s - object already whitelisted' % (pidObjs, obj))
                        continue
                    if objects[pidObjs][obj]['protocol'] not in ['TCP', 'UDP']:
                        objects[pidObjs][obj] = None
                        continue
                    if objects[pidObjs][obj]['source port int'] < rule['source port'][0] or objects[pidObjs][obj]['source port int'] > rule['source port'][1]:
                        log.debug("%s skipped - diff src port %s" % (pidObjs, rule['source port']))
                        continue
                    if rule['protocol'] != "*" and objects[pidObjs][obj]['protocol'] != rule['protocol']:
                        log.debug("%s skipped - diff protocol %s" % (pidObjs, rule['protocol']))
                        continue
                    if rule['ip'] != "*" and objects[pidObjs][obj]['ip'] != rule['ip']:
                        log.debug("%s skipped - diff ip %s %s" % (pidObjs, objects[pidObjs][obj]['ip'], rule['ip']))
                        continue
                    if objects[pidObjs][obj]['protocol'] != "UDP":
                        if objects[pidObjs][obj]['destination port'] is None:
                            if rule['destination port'] != [0, 65535]:
                                # FIX: the format string had two %s but only one
                                # argument, raising TypeError whenever this
                                # branch was logged.
                                log.debug("%s skipped - no dest port and rule without whole range port" % pidObjs)
                                continue
                        elif objects[pidObjs][obj]['destination port'] < rule['destination port'][0] or objects[pidObjs][obj]['destination port'] > rule['destination port'][1]:
                            log.debug("%s skipped - diff dest port %s - %s" % (pidObjs, objects[pidObjs][obj]['destination port'], rule['destination port']))
                            continue
                    # reached here so deserves to be whitelisted.
                    log.debug("%s %s whitelisted" % (pidObjs, obj))
                    objects[pidObjs][obj] = None
        for pidObjs in objects:
            objects[pidObjs] = {key: val for (key, val) in objects[pidObjs].iteritems() if bool(val)}
        # > 2 means something besides the 'filename' and 'path' entries survived.
        objects = {key: val for (key, val) in objects.iteritems() if len(val) > 2}
        return objects.keys()

    def heuristicRunningServices(self):
        return self.runHeuristic(self.heuristicRunningServicesGather, self.heuristicRunningServicesFilter)

    def heuristicRunningServicesGather(self):
        """Collect services via svcscan, grouped by owning PID.

        @return: dict mapping pid -> list of service record dicts.
        """
        svcScan = self.runModule('svcscan')
        objects = {}
        for rec in svcScan:
            Type = '|'.join(get_flags(SERVICE_TYPES, rec.Type))
            State = '|'.join(get_flags(SERVICE_STATES, rec.State))
            # Drivers store their binary path differently from user services.
            if 'SERVICE_KERNEL_DRIVER' in Type or 'SERVICE_FILE_SYSTEM_DRIVER' in Type:
                Binary = wctomb(rec.Binary1, rec.obj_vm)
                ProcId = '-'
            else:
                Binary = wctomb(rec.Binary2.ServicePath, rec.obj_vm)
                ProcId = rec.Binary2.ProcessId
            if Binary is None:
                Binary = '-'
            if ProcId is None or isinstance(ProcId, volobj.NoneObject):
                ProcId = '-'
            obj = {
                'pid': str(ProcId),
                'name': wctomb(rec.ServiceName, rec.obj_vm),
                'state': str(State),
                'path': str(Binary),
            }
            log.debug("SVC obj: %s" % str(obj))
            if objects.get(obj['pid']) is None:
                objects[obj['pid']] = []
            objects[obj['pid']].append(obj)
        return objects

    def heuristicRunningServicesFilter(self, objects):
        """Whitelist services matching the running_services rules.

        @return: list of PIDs that still own at least one non-whitelisted
            service after filtering.
        """
        for rule in self.rules["running_services"]:
            for pidObjs in objects:
                for obj in range(len(objects[pidObjs])):
                    if objects[pidObjs][obj] is None:
                        continue
                    if rule['pid'] != '*' and objects[pidObjs][obj]['pid'] != rule['pid']:
                        log.debug("%s skipped - diff pid %s - %s" % (pidObjs, objects[pidObjs][obj]['pid'], rule['pid']))
                        continue
                    if rule['name'] != '*' and objects[pidObjs][obj]['name'] != rule['name']:
                        log.debug("%s skipped - diff name %s - %s" % (pidObjs, objects[pidObjs][obj]['name'], rule['name']))
                        continue
                    if rule['state'] != '*' and objects[pidObjs][obj]['state'] != rule['state']:
                        log.debug("%s skipped - diff state %s - %s" % (pidObjs, objects[pidObjs][obj]['state'], rule['state']))
                        continue
                    if rule['path'] != '*' and objects[pidObjs][obj]['path'] != rule['path']:
                        log.debug("%s skipped - diff path %s - %s" % (pidObjs, objects[pidObjs][obj]['path'], rule['path']))
                        continue
                    objects[pidObjs][obj] = None
                    log.debug("%s %s whitelisted" % (pidObjs, obj))
        for pidObjs in objects:
            objects[pidObjs] = filter(bool, objects[pidObjs])
        objects = {key: val for (key, val) in objects.iteritems() if bool(val)}
        return objects.keys()

    def heuristicHiddenProcesses(self):
        return self.runHeuristic(self.heuristicHiddenProcessesGather)

    def heuristicHiddenProcessesGather(self):
        """Detect hidden processes: present in psscan but absent from pslist.

        @return: dict mapping pid -> {'pid', 'name'} for hidden processes.
        """
        psxView = self.runModule('psxview')
        objects = {}
        for offset, process, ps_sources in psxView:
            if offset not in ps_sources['pslist'] and offset in ps_sources['psscan']:
                obj = {
                    'pid': str(process.UniqueProcessId),
                    'name': str(process.ImageFileName)
                }
                log.debug("hidden proc: %s" % str(obj))
                objects[obj['pid']] = obj
        return objects

    def heuristicOrphanThreads(self):
        return self.runHeuristic(self.heuristicOrphanThreadsGather)

    def heuristicOrphanThreadsGather(self):
        """Collect threads flagged as orphans by the 'threads' plugin.

        @return: list of orphan thread objects.
        """
        threads = self.runModule('threads')
        objects = []
        for thread, proc_offset, checks in threads:
            if checks['OrphanThread']:
                objects.append(thread)
        return objects

    def heuristicApiHooks(self):
        return self.runHeuristic(self.heuristicApiHooksGather, self.heuristicApiHooksFilter)

    def heuristicApiHooksGather(self):
        """Map API hooks back to the DLLs whose address ranges contain them.

        @return: dict mapping a hooking DLL's full path -> list of hooked
            PIDs; the special key 'UNKNOWN' holds PIDs with unresolvable
            hookers.
        """
        apiHooks = self.runModule('apihooks')
        objects = {}
        unknown = []
        for (proc, type, current_mod, mod, func, src, dst, hooker, instruction) in apiHooks:
            pid = str(proc.UniqueProcessId)
            dest = str(dst)
            if hooker == "UNKNOWN":
                unknown.append(pid)
                continue
            if objects.get(pid) is None:
                objects[pid] = {}
            if objects[pid].get(hooker) is None:
                objects[pid][hooker] = {}
            objects[pid][hooker][dest] = None
        # Restrict dlllist to the hooked PIDs, then resolve hook targets to
        # the full paths of the modules whose ranges contain them.
        self.volatilityConfig.PID = ','.join(objects.iterkeys())
        dllListModule = self.runModule('dlllist', None)
        dllListData = dllListModule.calculate()
        dlls = {}
        for task in dllListData:
            pid = str(task.UniqueProcessId)
            for m in dllListModule.list_modules(task):
                long = str(m.FullDllName)
                short = self.splitPath(long)[-1]
                start = int(m.DllBase)
                end = int(m.DllBase + m.SizeOfImage)
                if short in objects[pid]:
                    for dest in objects[pid][short]:
                        destInt = int(dest)
                        if start <= destInt <= end:
                            if dlls.get(long) is None:
                                dlls[long] = []
                            dlls[long].append(pid)
                            objects[pid][short][dest] = long
        self.volatilityConfig.PID = None
        dlls['UNKNOWN'] = unknown
        del objects
        return dlls

    def heuristicApiHooksFilter(self, objects):
        """Drop hooking DLLs whitelisted by the api_hooks rules.

        Rules ending in a backslash act as directory prefixes; otherwise an
        exact path match is required.
        @return: dict mapping surviving DLL path -> set of hooked PIDs.
        """
        for rule in self.rules["api_hooks"]:
            for dll in objects:
                if rule['path'][-1] in ["\\"]:
                    if rule['path'] == dll[:len(rule['path'])]:
                        objects[dll] = None
                else:
                    if rule['path'] == dll:
                        objects[dll] = None
        objects = {key: val for (key, val) in objects.iteritems() if bool(val)}
        for key in objects:
            objects[key] = set(objects[key])
        return objects

    def heuristicMalfind(self):
        return self.runHeuristic(self.heuristicMalfindGather)

    def heuristicMalfindGather(self):
        """Run malfind and classify suspicious regions by PE validity.

        @return: dict with 'executable' and 'no_executable' PID sets.
        """
        self.volatilityConfig.DUMP_DIR = tempfile.gettempdir()
        malFind = self.runModule('malfind')
        objects = {'no_executable': set(), 'executable': set()}
        for (name, pid, start, end, tag, prx, fname, hits, chunk) in malFind:
            # A dumped region that parses as a PE is rated more suspicious.
            try:
                pefile.PE(fname)
            except Exception:
                # FIX: was a bare except, which also swallowed
                # SystemExit/KeyboardInterrupt.
                objects['no_executable'].add(str(pid))
            else:
                objects['executable'].add(str(pid))
            os.remove(fname)
        self.volatilityConfig.DUMP_DIR = None
        return objects

    def addTagToResults(self, results, pidIterable, tag):
        """Add a tag's rating to each PID's entry in the results dict.

        @param results: dict being accumulated (mutated in place).
        @param pidIterable: iterable of PIDs flagged by the heuristic.
        @param tag: result tag; resolved to a rating via tagToRating/params.
        """
        if pidIterable:
            rating = float(self.params.get(self.tagToRating.get(tag, "none")))
            for pid in pidIterable:
                try:
                    results[pid][tag] = rating
                    results[pid]["summed_rating"] += rating
                except KeyError:
                    results[pid] = {tag: rating, "summed_rating": rating}

    def combineResultsForPids(self, matches):
        """Merge all heuristic matches into one per-PID rating dictionary.

        @param matches: dict of raw heuristic results keyed by heuristic name.
        @return: dict mapping pid -> {tag: rating, ..., "summed_rating": total}.
        """
        results = {}
        if matches['orphan_threads']:
            results['-'] = ['orphan_threads']
        self.addTagToResults(results, matches['connected_processes'], 'connected_processes')
        self.addTagToResults(results, matches['running_services'], 'running_services')
        self.addTagToResults(results, matches['hidden_processes'], 'hidden_processes')
        if matches['api_hooks']:
            if 'UNKNOWN' in matches['api_hooks']:
                self.addTagToResults(results, matches['api_hooks']['UNKNOWN'], 'api_hooks_unknown')
            for dll in matches['api_hooks']:
                self.addTagToResults(results, matches['api_hooks'][dll], 'api_hooks_known')
        if matches['malfind']:
            for type in matches['malfind']:
                self.addTagToResults(results, matches['malfind'][type], 'malfind_' + type)
        return results
コード例 #20
0
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """

    def __init__(self, task, results):
        """@param task: task dictionary of the analysis to process.
        @param results: shared results container mutated by process()/run().
        """
        self.task = task
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task["id"]))
        self.cfg = Config("processing")
        self.results = results

    def process(self, module):
        """Run a processing module.
        @param module: processing module to run.
        @return: results generated by module, or None when the module is
            disabled, unconfigured or failed.
        """
        # Initialize the specified processing module.
        try:
            current = module(self.results)
        except Exception:
            # FIX: was a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt.
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            log.debug("Executing processing module \"%s\" on analysis at "
                      "\"%s\"", current.__class__.__name__, self.analysis_path)
            pretime = datetime.now()
            data = current.run()
            posttime = datetime.now()
            timediff = posttime - pretime
            self.results["statistics"]["processing"].append({
                "name": current.__class__.__name__,
                "time": float("%d.%03d" % (timediff.seconds,
                                         timediff.microseconds / 1000)),
                })

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key: data}
        except CuckooDependencyError as e:
            log.warning("The processing module \"%s\" has missing dependencies: %s", current.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning("The processing module \"%s\" returned the following "
                        "error: %s", current.__class__.__name__, e)
        except Exception:
            # FIX: was a bare except; log the traceback but keep processing
            # the remaining modules.
            log.exception("Failed to run the processing module \"%s\":",
                          current.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """

        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                result = self.process(module)
                # If it provided some results, append it to the big results
                # container.
                if result:
                    self.results.update(result)
        else:
            log.info("No processing modules loaded")

        return self.results
コード例 #21
0
WEB_AUTHENTICATION = False

# Get connection options from reporting.conf.
# NOTE(review): "cfg" is assumed to be the reporting config loaded earlier
# in this module — confirm against the file header.
MONGO_HOST = cfg.mongodb.get("host", "127.0.0.1")
MONGO_PORT = cfg.mongodb.get("port", 27017)
MONGO_DB = cfg.mongodb.get("db", "cuckoo")

ELASTIC_HOST = cfg.elasticsearchdb.get("host", "127.0.0.1")
ELASTIC_PORT = cfg.elasticsearchdb.get("port", 9200)
ELASTIC_INDEX = cfg.elasticsearchdb.get("index", "cuckoo")

moloch_cfg = Config("reporting").moloch
aux_cfg = Config("auxiliary")
# FIX: reuse the already-parsed auxiliary config instead of re-reading
# auxiliary.conf a second time.
vtdl_cfg = aux_cfg.virustotaldl

MOLOCH_BASE = moloch_cfg.get("base", None)
MOLOCH_NODE = moloch_cfg.get("node", None)
MOLOCH_ENABLED = moloch_cfg.get("enabled", False)

GATEWAYS = aux_cfg.get("gateways")
VTDL_ENABLED = vtdl_cfg.get("enabled", False)
VTDL_PRIV_KEY = vtdl_cfg.get("dlprivkey", None)
VTDL_INTEL_KEY = vtdl_cfg.get("dlintelkey", None)
VTDL_PATH = vtdl_cfg.get("dlpath", None)

TEMP_PATH = Config().cuckoo.get("tmppath", "/tmp")

# Dotted-quad IPv4 matcher; each octet limited to 0-255.
ipaddy_re = re.compile(
    r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
)
コード例 #22
0
ファイル: settings.py プロジェクト: Tal14/cuckoo-modified
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

import sys
import os

# Cuckoo path.
CUCKOO_PATH = os.path.join(os.getcwd(), "..")
sys.path.append(CUCKOO_PATH)

from lib.cuckoo.common.config import Config

cfg = Config("reporting").mongodb

# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled"):
    raise Exception("Mongo reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
MONGO_HOST = cfg.get("host", "127.0.0.1")
MONGO_PORT = cfg.get("port", 27017)
MONGO_DB = cfg.get("db", "cuckoo")

moloch_cfg = Config("reporting").moloch
aux_cfg = Config("auxiliary")
# FIX: reuse the already-parsed auxiliary config instead of re-reading
# auxiliary.conf a second time.
vtdl_cfg = aux_cfg.virustotaldl

MOLOCH_BASE = moloch_cfg.get("base", None)
MOLOCH_NODE = moloch_cfg.get("node", None)
MOLOCH_ENABLED = moloch_cfg.get("enabled", False)
コード例 #23
0
class RunReporting:
    """Reporting Engine.

    This class handles the loading and execution of the enabled reporting
    modules. It receives the analysis results dictionary from the Processing
    Engine and pass it over to the reporting modules before executing them.
    """
    def __init__(self, task_id, results):
        """@param task_id: ID of the analyzed task.
        @param results: analysis results dictionary from the processing engine.
        """
        self.task = Database().view_task(task_id).to_dict()
        self.results = results
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                          str(task_id))
        self.cfg = Config(
            cfg=os.path.join(CUCKOO_ROOT, "conf", "reporting.conf"))

    def process(self, module):
        """Run a single reporting module.
        @param module: reporting module.
        """
        # Initialize current reporting module.
        try:
            current = module()
        except Exception:
            # FIX: was a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt.
            log.exception(
                "Failed to load the reporting module \"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Reporting module %s not found in configuration file",
                      module_name)
            return

        # If the reporting module is disabled in the config, skip it.
        if not options.enabled:
            return

        # Give it the path to the analysis results folder.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the relevant reporting.conf section.
        current.set_options(options)
        # Load the content of the analysis.conf file.
        current.cfg = Config(current.conf_path)

        try:
            current.run(self.results)
            log.debug("Executed reporting module \"%s\"",
                      current.__class__.__name__)
        except CuckooDependencyError as e:
            log.warning(
                "The reporting module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except CuckooReportError as e:
            log.warning(
                "The reporting module \"%s\" returned the following error: %s",
                current.__class__.__name__, e)
        except Exception:
            # FIX: was a bare except; log the traceback but keep running
            # the remaining reporting modules.
            log.exception("Failed to run the reporting module \"%s\":",
                          current.__class__.__name__)

    def run(self):
        """Generates all reports.
        @raise CuckooReportError: if a report module fails.
        """
        # In every reporting module you can specify a numeric value that
        # represents at which position that module should be executed among
        # all the available ones. It can be used in the case where a
        # module requires another one to be already executed beforehand.
        reporting_list = list_plugins(group="reporting")

        # Return if no reporting modules are loaded.
        if reporting_list:
            reporting_list.sort(key=lambda module: module.order)

            # Run every loaded reporting module.
            for module in reporting_list:
                self.process(module)
        else:
            log.info("No reporting modules loaded")
コード例 #24
0
ファイル: reporter.py プロジェクト: rajrakeshdr/test_AV
class Reporter:
    """Report generator.

    Discovers the reporting plugin modules enabled in reporting.conf,
    imports them, and executes every Report subclass against the
    analysis results.
    """
    def __init__(self, analysis_path, custom=""):
        """@param analysis_path: analysis folder path.
        @param custom: custom options.
        """
        self.analysis_path = analysis_path
        self.custom = custom
        # Parsed reporting.conf; each plugin module name maps to a section.
        self.cfg = Config(
            cfg=os.path.join(CUCKOO_ROOT, "conf", "reporting.conf"))
        self.__populate(plugins)

    def __populate(self, modules):
        """Import every enabled reporting plugin module.
        @param modules: package containing the reporting plugin modules.
        """
        prefix = modules.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(modules.__path__):
            # Only plain modules are reporting plugins; skip sub-packages.
            if ispkg:
                continue

            # A module without a matching section in reporting.conf is
            # not configurable and therefore ignored.
            try:
                section = getattr(self.cfg, name)
            except AttributeError:
                continue

            if not section.enabled:
                continue

            path = "%s.%s" % (plugins.__name__, name)

            # Importing the module makes its Report subclass visible to
            # Report.__subclasses__() in run(). The -1 level argument is
            # Python 2's relative-then-absolute import behavior.
            try:
                __import__(path, globals(), locals(), ["dummy"], -1)
            except CuckooDependencyError as e:
                log.warning("Unable to import reporting module \"%s\": %s" %
                            (name, e))

    def run(self, data):
        """Generates all reports.
        @param data: analysis results.
        @raise CuckooReportError: if a report module fails.
        """
        # NOTE(review): this bare Report() instantiation appears unused —
        # confirm whether it has a side effect before removing it.
        Report()

        # Every imported plugin is a subclass of Report; run each one.
        for plugin in Report.__subclasses__():
            self._run_report(plugin, data)

    def _run_report(self, plugin, data):
        """Run a single report plugin.
        @param plugin: report plugin class.
        @param data: results data from analysis.
        @raise CuckooReportError: if the plugin has no configuration section.
        """
        current = plugin()
        # Point the plugin at the analysis folder and hand it the parsed
        # analysis.conf of this analysis.
        current.set_path(self.analysis_path)
        current.cfg = Config(current.conf_path)
        module = inspect.getmodule(current)

        # Reduce the dotted module path to the bare module name, which is
        # also the plugin's section name in reporting.conf.
        if "." in module.__name__:
            module_name = module.__name__.rsplit(".", 1)[1]
        else:
            module_name = module.__name__

        try:
            current.set_options(self.cfg.get(module_name))
        except CuckooOperationalError:
            raise CuckooReportError(
                "Reporting module %s not found in configuration file" %
                module_name)

        try:
            # Run report, for each report a brand new copy of results is
            # created, to prevent a reporting module to edit global
            # result set and affect other reporting modules.
            current.run(copy.deepcopy(data))
            log.debug("Executed reporting module \"%s\"" %
                      current.__class__.__name__)
        except NotImplementedError:
            # The plugin chose not to implement run(); skip it silently.
            return
        except CuckooReportError as e:
            log.warning("Failed to execute reporting module \"%s\": %s" %
                        (current.__class__.__name__, e))
コード例 #25
0
# See the file 'docs/LICENSE' for copying permission.

import sys
import os

# Cuckoo path.
CUCKOO_PATH = os.path.join(os.getcwd(), "..")
sys.path.append(CUCKOO_PATH)

from lib.cuckoo.common.config import Config

# Parse each configuration file once and reuse the parsed object: every
# Config(...) call re-reads and re-parses the file from disk, and the
# original code parsed reporting.conf three times and auxiliary.conf twice.
_reporting_cfg = Config("reporting")

# [mongodb] and [elastic] sections of reporting.conf.
cfg = _reporting_cfg.mongodb
elcfg = _reporting_cfg.elastic

# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled") and not elcfg.get("enabled"):
    raise Exception("Mongo/Elastic reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
MONGO_HOST = cfg.get("host", "127.0.0.1")
MONGO_PORT = cfg.get("port", 27017)
MONGO_DB = cfg.get("db", "cuckoo")

ELASTIC_HOST = elcfg.get("host", "127.0.0.1")
# NOTE(review): the Elastic port defaults to the string "9200" while the
# Mongo port defaults to the int 27017 — confirm which type consumers expect.
ELASTIC_PORT = elcfg.get("port", "9200")

moloch_cfg = _reporting_cfg.moloch
aux_cfg = Config("auxiliary")
vtdl_cfg = aux_cfg.virustotaldl

MOLOCH_BASE = moloch_cfg.get("base", None)
コード例 #26
0
ファイル: reporter.py プロジェクト: Missuniverse110/cuckoo
class Reporter:
    """Reporting Engine.

    This class handles the loading and execution of the enabled reporting
    modules. It receives the analysis results dictionary from the Processing
    Engine and pass it over to the reporting modules before executing them.
    """

    def __init__(self, analysis_path, custom=""):
        """@param analysis_path: analysis folder path.
        @param custom: custom options.
        """
        self.analysis_path = analysis_path
        self.custom = custom
        # Parsed reporting.conf; one section per reporting module.
        self.cfg = Config(cfg=os.path.join(CUCKOO_ROOT,
                                           "conf",
                                           "reporting.conf"))

    def _run_report(self, module, results):
        """Run a single reporting module.
        @param module: reporting module class.
        @param results: results dictionary from analysis.
        """
        # Initialize current reporting module.
        current = module()
        # Give it the path to the analysis results folder.
        current.set_path(self.analysis_path)
        # Load the content of the analysis.conf file.
        current.cfg = Config(current.conf_path)

        # Extract the module name; the bare name doubles as the module's
        # section name in reporting.conf.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        options = self.cfg.get(module_name)

        # If the reporting module is disabled in the config, skip it.
        if not options.enabled:
            return

        # Give it the content of the relevant section from the reporting.conf
        # configuration file.
        current.set_options(options)

        try:
            # Run report, for each report a brand new copy of results is
            # created, to prevent a reporting module to edit global
            # result set and affect other reporting modules.
            current.run(LocalDict(results))
            log.debug("Executed reporting module \"%s\""
                      % current.__class__.__name__)
        except CuckooReportError as e:
            log.warning("The reporting module \"%s\" returned the following "
                        "error: %s" % (current.__class__.__name__, e))
        except Exception as e:
            # Unexpected failures are logged with traceback but do not stop
            # the remaining reporting modules.
            log.exception("Failed to run the reporting module \"%s\":"
                          % (current.__class__.__name__))

    def run(self, results):
        """Generates all reports.
        @param results: analysis results.
        @raise CuckooReportError: if a report module fails.
        """
        # In every reporting module you can specify a numeric value that
        # represents at which position that module should be executed among
        # all the available ones. It can be used in the case where a
        # module requires another one to be already executed beforehand.
        modules_list = list_plugins(group="reporting")
        modules_list.sort(key=lambda module: module.order)

        # Run every loaded reporting module.
        for module in modules_list:
            self._run_report(module, results)
コード例 #27
0
class ProcMonAnalysis(Processing):
    """ProcMon CSV log analysis.

    Rates the events recorded in procmon.csv against the blacklist rules
    found under RULE_PATH and returns a per-process rating summary.
    """
    # Parsed analysis.conf of the current analysis (set in preConfigure).
    analysisConfig = None
    # csv.reader over procmon.csv (set in preConfigure).
    csvLog = None
    # Directory holding ratings.conf and procmon.conf.
    rulePath = RULE_PATH
    params = None
    rules = None

    def __init__(self):
        # Default rating weights; overridden first by ratings.conf and
        # then by the task's custom JSON parameters.
        self.params = {
            "behavioral_importance": 1,
            "none": 0
        }
        self.rules = {}
        self.key = "procmon"

    def preConfigure(self):
        '''
        Checks if the plugin dependencies are satisfied.
        Loads the rating configuration and the analysis configuration.
        @return: True when the plugin can run, False otherwise.
        '''
        csvPath = os.path.abspath(os.path.join(self.analysis_path, 'procmon.csv'))
        if not os.path.isfile(csvPath):
            log.warning("Procmon csv log files '%s' not found, skip" % csvPath)
            return False
        try:
            self.csvLog = csv.reader(open(csvPath, 'rb'))
        except (IOError, OSError):
            log.warning("Couldn't open csv log file '%s', skip" % csvPath)
            # Bug fix: the original fell through here with self.csvLog
            # still None and crashed later in blacklistProcMon; without
            # the log file there is nothing to analyze.
            return False

        if not self.setExternalParameters():
            return False
        return True

    def setExternalParameters(self):
        '''
        Configures the plugin ratings and any other parameters which were passed.
        Uses two sources - the ratings.conf file and the tasks custom field (dictionary dumped as a json).
        @return: True on success, False on a configuration error.
        '''
        try:
            self.analysisConfig = CuckooConfig(self.conf_path)
            ratingsConf = CuckooConfig(os.path.join(self.rulePath, "ratings.conf"))
            # Merge every entry of the [ratings] section over the defaults.
            for name, value in ratingsConf.get("ratings").iteritems():
                self.params[name] = value
        except Exception as e:
            log.warning("Preconfigure - %s" % str(e))
            return False

        try:
            # The task's "custom" field may carry a JSON dictionary with
            # per-task parameter overrides.
            params = json.loads(self.analysisConfig.get("analysis").get("custom"))
            for name, value in params.iteritems():
                self.params[name] = value
        except ValueError:
            # "None" is the placeholder used when no custom data was given;
            # anything else that fails to parse is a real error.
            if self.analysisConfig.get("analysis").get("custom") != "None":
                log.warning("Couldn't load json object from custom, skip")
                return False
        return True

    def loadRuleFiles(self):
        '''Load the procmon blacklist rules needed by the enabled ratings.
        @return: True on success, False when a rule file cannot be loaded.
        '''
        try:
            if self.params.get("behavioral_importance", 0) > 0:
                self.rules["procmon"] = pprl.loadProcMonConf(os.path.join(self.rulePath, 'procmon.conf'))
        except Exception as e:
            log.warning("ProcMon processor - %s, skip" % str(e))
            return False
        return True

    def run(self):
        """Run procmon csv processing.
        @return: dict with matches (empty when preconfiguration fails).
        """
        matches = {}

        if not self.preConfigure():
            return matches
        if not self.loadRuleFiles():
            return matches

        log.info("Procmon CSV analysis")
        log.debug(self.params)

        if self.params.get("behavioral_importance", 0) > 0:
            matches = self.blacklistProcMon()
        log.debug(matches)
        return matches

    def splitPath(self, rawPath):
        '''Split a path into (head, tail), trying Windows separators first.
        @param rawPath: path string (renamed from "str", which shadowed the builtin).
        '''
        result = ntpath.split(rawPath)
        if result[0] == rawPath:
            result = path.split(rawPath)
        return result

    def blacklistProcMon(self):
        '''Match every csv row against the procmon rules and accumulate
        per-process ratings.
        @return: dict keyed by process with summed and per-cause ratings.
        '''
        objects = {}
        # Skip the csv header row.
        title = self.csvLog.next()
        multiplier = self.params.get("behavioral_importance", 0)
        for line in self.csvLog:
            for rule in self.rules["procmon"]:
                # Columns: presumably 2 = process, 3 = operation, 4 = path
                # (standard procmon export layout) — TODO confirm.
                if line[3] == rule["operation"] and line[4] == rule["path"]:
                    cause = "%s-%s" % (line[3], line[4])
                    try:
                        objects[line[2]]["summed_rating"] += rule["rating"] * multiplier
                        objects[line[2]][cause] += rule["rating"] * multiplier
                    except KeyError:
                        objects[line[2]] = {"summed_rating": rule["rating"] * multiplier, cause: rule["rating"] * multiplier}
        return objects
コード例 #28
0
ファイル: api.py プロジェクト: trogdorsey/cuckoo
def tasks_report(task_id, report_format="json"):
    """Return the report of an analysis task in the requested format.

    Supported formats: files served from the reports folder ("json",
    "html"), tar archives of the analysis folder ("all", "dropped",
    with the compression chosen via the "tar" query argument), and
    "jsondb", which rebuilds the report from MongoDB. Any other value
    yields a 400 error.
    """
    # Formats served directly as a file from the reports folder.
    formats = {
        "json": "report.json",
        "html": "report.html",
    }

    # Archive formats: type "-" means "everything except the listed
    # files", type "+" means "only the listed files".
    bz_formats = {
        "all": {"type": "-", "files": ["memory.dmp"]},
        "dropped": {"type": "+", "files": ["files"]},
    }

    # Mapping of the "tar" query argument to tarfile open modes.
    tar_formats = {
        "bz2": "w:bz2",
        "gz": "w:gz",
        "tar": "w",
    }

    if report_format.lower() in formats:
        # File-based formats fall through to the existence check at the
        # bottom of this function.
        report_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                   "%d" % task_id, "reports",
                                   formats[report_format.lower()])
    elif report_format.lower() in bz_formats:
        bzf = bz_formats[report_format.lower()]
        srcdir = os.path.join(CUCKOO_ROOT, "storage",
                              "analyses", "%d" % task_id)
        # The archive is assembled in memory and returned as the response.
        s = StringIO()

        # By default go for bz2 encoded tar files (for legacy reasons).
        tarmode = tar_formats.get(request.args.get("tar"), "w:bz2")

        tar = tarfile.open(fileobj=s, mode=tarmode, dereference=True)
        for filedir in os.listdir(srcdir):
            filepath = os.path.join(srcdir, filedir)
            if bzf["type"] == "-" and filedir not in bzf["files"]:
                tar.add(filepath, arcname=filedir)
            if bzf["type"] == "+" and filedir in bzf["files"]:
                tar.add(filepath, arcname=filedir)
        tar.close()

        response = make_response(s.getvalue())
        response.headers["Content-Type"] = \
            "application/x-tar; charset=UTF-8"
        return response
    elif report_format.lower() == 'jsondb':
        if not HAS_MONGO:
            return json_error(400, 'pymongo not installed')

        # Read the MongoDB connection settings from reporting.conf.
        try:
            options = Config('reporting')
            mongo_options = options.get('mongodb')
            if not mongo_options['enabled']:
                return json_error(400, 'Report format not supported')

            host = mongo_options['host']
            port = mongo_options['port']
            db = mongo_options['db']
        except Exception as e:
            return json_error(400, 'Error reading config file')

        try:
            mc = pymongo.MongoClient(host, port)
            cuckoo_db = mc[db]
            report = cuckoo_db.analysis.find_one({'info.id': task_id})
            # NOTE(review): MongoClient.disconnect() is deprecated in
            # modern pymongo in favor of close() — confirm the pinned
            # pymongo version before upgrading.
            mc.disconnect()
            # Strip Mongo-internal ids and bulky binary references that do
            # not belong in the JSON payload.
            del report['_id']
            del report['shots']
            del report['dropped']
            if 'network' in report:
                if 'pcap_id' in report['network']:
                    del report['network']['pcap_id']
                if 'sorted_pcap_id' in report['network']:
                    del report['network']['sorted_pcap_id']

            if 'target' in report:
                if 'file_id' in report['target']:
                    del report['target']['file_id']

            # Calls are stored in a separate collection; re-inline them and
            # render all datetimes as ctime strings for JSON output.
            if "behavior" in report and "processes" in report["behavior"]:
                for process in report["behavior"]["processes"]:
                    first_seen = process['first_seen']
                    del process['first_seen']
                    process['first_seen'] = first_seen.ctime()
                    #process['first_seen'] = first_seen
                    process_calls = []
                    for call_id in process['calls']:
                        call_info = cuckoo_db.calls.find_one({'_id': call_id})
                        process_calls.extend(call_info['calls'])

                    del process['calls']
                    for pc in process_calls:
                        time = pc['time']
                        del pc['time']
                        pc['time'] = time.ctime()
                    process['calls'] = process_calls

            if "behavior" in report and "generic" in report["behavior"]:
                for process in report["behavior"]["generic"]:
                    first_seen = process['first_seen']
                    del process['first_seen']
                    process['first_seen'] = first_seen.ctime()

            if "behavior" in report and "processtree" in report["behavior"]:
                for process in report["behavior"]["processtree"]:
                    #first_seen = process['first_seen']['$data']
                    first_seen = process['first_seen']
                    del process['first_seen']
                    process['first_seen'] = first_seen.ctime()
                    if 'children' in process:
                        for child in process['children']:
                            first_seen = child['first_seen']
                            del child['first_seen']
                            child['first_seen'] = first_seen.ctime()

            data = bson.json_util.dumps(report)
            return data
        except Exception as e:
            # NOTE(review): this broad except also catches the KeyErrors
            # from the del statements above, so the "connecting to mongo"
            # message can be misleading.
            return json_error(400, "Error connecting to mongo: %s" % str(e))
    else:
        return json_error(400, "Invalid report format")

    # Only reached for the file-based formats ("json"/"html").
    if os.path.exists(report_path):
        return open(report_path, "rb").read()
    else:
        return json_error(404, "Report not found")
コード例 #29
0
class ProcMonAnalysis(Processing):
    """ProcMon CSV log analysis.

    Rates the events recorded in procmon.csv against the blacklist rules
    found under RULE_PATH and returns a per-process rating summary.
    """
    # Parsed analysis.conf of the current analysis (set in preConfigure).
    analysisConfig = None
    # csv.reader over procmon.csv (set in preConfigure).
    csvLog = None
    # Directory holding ratings.conf and procmon.conf.
    rulePath = RULE_PATH
    params = None
    rules = None

    def __init__(self):
        # Default rating weights; overridden first by ratings.conf and
        # then by the task's custom JSON parameters.
        self.params = {"behavioral_importance": 1, "none": 0}
        self.rules = {}
        self.key = "procmon"

    def preConfigure(self):
        '''
        Checks if the plugin dependencies are satisfied.
        Loads the rating configuration and the analysis configuration.
        @return: True when the plugin can run, False otherwise.
        '''
        csvPath = os.path.abspath(
            os.path.join(self.analysis_path, 'procmon.csv'))
        if not os.path.isfile(csvPath):
            log.warning("Procmon csv log files '%s' not found, skip" % csvPath)
            return False
        else:
            try:
                self.csvLog = csv.reader(open(csvPath, 'rb'))
            except:
                # NOTE(review): this branch logs "skip" but does not return
                # False, so self.csvLog stays None and blacklistProcMon
                # would crash — confirm and add a return False here.
                log.warning("Couldn't open csv log file '%s', skip" % csvPath)

        if not self.setExternalParameters():
            return False
        return True

    def setExternalParameters(self):
        '''
        Configures the plugin ratings and any other parameters which were passed.
        Uses two sources - the ratings.conf file and the tasks custom field (dictionary dumped as a json).
        @return: True on success, False on a configuration error.
        '''
        try:
            self.analysisConfig = CuckooConfig(self.conf_path)
            ratingsConf = CuckooConfig(
                os.path.join(self.rulePath, "ratings.conf"))
            # Merge every entry of the [ratings] section over the defaults.
            for rating in ratingsConf.get("ratings").iteritems():
                self.params[rating[0]] = rating[1]
        except Exception as e:
            log.warning("Preconfigure - %s" % str(e))
            return False

        try:
            # The task's "custom" field may carry a JSON dictionary with
            # per-task parameter overrides.
            params = json.loads(
                self.analysisConfig.get("analysis").get("custom"))
            for param in params.iteritems():
                self.params[param[0]] = param[1]
        except ValueError as e:
            # "None" is the placeholder used when no custom data was given;
            # anything else that fails to parse is a real error.
            if self.analysisConfig.get("analysis").get("custom") != "None":
                log.warning("Couldn't load json object from custom, skip")
                return False
        return True

    def loadRuleFiles(self):
        '''Load the procmon blacklist rules needed by the enabled ratings.
        @return: True on success, False when a rule file cannot be loaded.
        '''
        try:
            if self.params.get("behavioral_importance", 0) > 0:
                self.rules["procmon"] = pprl.loadProcMonConf(
                    os.path.join(self.rulePath, 'procmon.conf'))
        except Exception as e:
            log.warning("ProcMon processor - %s, skip" % str(e))
            return False
        return True

    def run(self):
        """Run procmon csv processing.
        @return: dict with matches (empty when preconfiguration fails).
        """
        matches = {}

        if not self.preConfigure():
            return matches
        if not self.loadRuleFiles():
            return matches

        log.info("Procmon CSV analysis")
        log.debug(self.params)

        if self.params.get("behavioral_importance", 0) > 0:
            matches = self.blacklistProcMon()
        log.debug(matches)
        return matches

    def splitPath(self, str):
        '''Split a path into (head, tail), trying Windows separators first.
        @param str: path string (shadows the builtin "str" — rename candidate).
        '''
        result = ntpath.split(str)
        if result[0] == str:
            result = path.split(str)
        return result

    def blacklistProcMon(self):
        '''Match every csv row against the procmon rules and accumulate
        per-process ratings.
        @return: dict keyed by process with summed and per-cause ratings.
        '''
        objects = {}
        # Skip the csv header row.
        title = self.csvLog.next()
        multiplier = self.params.get("behavioral_importance", 0)
        for line in self.csvLog:
            for rule in self.rules["procmon"]:
                # Columns: presumably 2 = process, 3 = operation, 4 = path
                # (standard procmon export layout) — TODO confirm.
                if line[3] == rule["operation"] and line[4] == rule["path"]:
                    cause = "%s-%s" % (line[3], line[4])
                    try:
                        objects[line[2]][
                            "summed_rating"] += rule["rating"] * multiplier
                        objects[line[2]][cause] += rule["rating"] * multiplier
                    except KeyError:
                        objects[line[2]] = {
                            "summed_rating": rule["rating"] * multiplier,
                            cause: rule["rating"] * multiplier
                        }
        return objects
コード例 #30
0
ファイル: plugins.py プロジェクト: kevross33/CAPEv2
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """
    def __init__(self, task, results):
        """@param task: task dictionary of the analysis to process.
        @param results: shared results dictionary the modules fill in.
        """
        self.task = task
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                          str(task["id"]))
        # processing.conf (per-module sections) and cuckoo.conf (globals).
        self.cfg = Config("processing")
        self.cuckoo_cfg = Config()
        self.results = results

    def process(self, module):
        """Run a processing module.
        @param module: processing module to run.
        @return: results generated by module, or None on failure/disabled.
        """
        # Initialize the specified processing module.
        try:
            current = module(self.results)
        except:
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return

        # Extract the module name; the bare name doubles as the module's
        # section name in processing.conf.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container. Wall-clock duration
            # of every module is recorded under results["statistics"].
            log.debug(
                "Executing processing module \"%s\" on analysis at "
                "\"%s\"", current.__class__.__name__, self.analysis_path)
            pretime = datetime.now()
            data = current.run()
            posttime = datetime.now()
            timediff = posttime - pretime
            self.results["statistics"]["processing"].append({
                "name":
                current.__class__.__name__,
                "time":
                float("%d.%03d" %
                      (timediff.seconds, timediff.microseconds / 1000)),
            })

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key: data}
        except CuckooDependencyError as e:
            log.warning(
                "The processing module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning(
                "The processing module \"%s\" returned the following "
                "error: %s", current.__class__.__name__, e)
        except:
            log.exception("Failed to run the processing module \"%s\":",
                          current.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """

        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                result = self.process(module)
                # If it provided some results, append it to the big results
                # container.
                if result:
                    self.results.update(result)
        else:
            log.info("No processing modules loaded")

        # For correct error log on webgui: flag behavioral logs that exceed
        # the configured size limit so the UI can explain missing data.
        logs = os.path.join(self.analysis_path, "logs")
        if os.path.exists(logs):
            for file_name in os.listdir(logs):
                file_path = os.path.join(logs, file_name)

                if os.path.isdir(file_path):
                    continue
                # Skipping the current log file if it's too big.
                if os.stat(
                        file_path
                ).st_size > self.cuckoo_cfg.processing.analysis_size_limit:
                    # NOTE(review): hasattr() on a dict never reports the
                    # "debug" key, so this guard is always True; setdefault
                    # below already guarantees the keys exist — confirm and
                    # simplify.
                    if not hasattr(self.results, "debug"):
                        self.results.setdefault("debug", dict()).setdefault(
                            "errors", list())
                    self.results["debug"]["errors"].append(
                        "Behavioral log {0} too big to be processed, skipped. Increase analysis_size_limit in cuckoo.conf"
                        .format(file_name))
                    continue
        else:
            log.info(
                "Logs folder doesn't exist, maybe something with with analyzer folder, any change?"
            )

        # Malware family detection: try Suricata alerts first, then
        # VirusTotal consensus, then ClamAV, and let a CAPE detection
        # override everything.
        family = ""
        self.results["malfamily_tag"] = ""
        # add detection based on suricata here
        if not family and "suricata" in self.results and "alerts" in self.results[
                "suricata"] and self.results["suricata"]["alerts"]:
            for alert in self.results["suricata"]["alerts"]:
                if "signature" in alert and alert["signature"]:
                    if alert["signature"].startswith("ET TROJAN") or alert[
                            "signature"].startswith("ETPRO TROJAN"):
                        # The third word of the signature is usually the
                        # family name; skip platform prefixes like "Win32".
                        words = re.findall(r"[A-Za-z0-9]+", alert["signature"])
                        famcheck = words[2]
                        famchecklower = famcheck.lower()
                        if famchecklower == "win32" or famchecklower == "w32" or famchecklower == "ransomware":
                            famcheck = words[3]
                            famchecklower = famcheck.lower()

                        # Generic words that are not real family names.
                        # NOTE(review): "supicious" looks like a typo for
                        # "suspicious" — fixing it changes matching
                        # behavior, confirm before editing.
                        blacklist = [
                            "executable",
                            "potential",
                            "likely",
                            "rogue",
                            "supicious",
                            "generic",
                            "possible",
                            "known",
                            "common",
                            "troj",
                            "trojan",
                            "team",
                            "probably",
                            "w2km",
                            "http",
                            "abuse",
                            "win32",
                            "unknown",
                            "single",
                            "filename",
                            "worm",
                            "fake",
                            "malicious",
                        ]
                        isgood = True
                        for black in blacklist:
                            if black == famchecklower:
                                isgood = False
                                break
                        # Very short tokens are too ambiguous to trust.
                        if len(famcheck) < 4:
                            isgood = False
                        if isgood:
                            family = famcheck.title()
                            self.results["malfamily_tag"] = "Suricata"

        if not family and self.results["info"][
                "category"] == "file" and "virustotal" in self.results and "results" in self.results[
                    "virustotal"] and self.results["virustotal"]["results"]:
            detectnames = []
            for res in self.results["virustotal"]["results"]:
                if res["sig"] and "Trojan.Heur." not in res["sig"]:
                    # weight Microsoft's detection, they seem to be more accurate than the rest
                    if res["vendor"] == "Microsoft":
                        detectnames.append(res["sig"])
                    detectnames.append(res["sig"])
            family = get_vt_consensus(detectnames)
            self.results["malfamily_tag"] = "VirusTotal"

        # fall back to ClamAV detection
        if not family and self.results["info"][
                "category"] == "file" and "clamav" in self.results["target"][
                    "file"] and self.results["target"]["file"][
                        "clamav"] and self.results["target"]["file"][
                            "clamav"].startswith("Win.Trojan."):
            words = re.findall(r"[A-Za-z0-9]+",
                               self.results["target"]["file"]["clamav"])
            family = words[2]
            self.results["malfamily_tag"] = "ClamAV"

        # A CAPE extraction result is the most reliable source and wins.
        if self.results.get("cape", False):
            self.results["malfamily"] = self.results["cape"]
            self.results["malfamily_tag"] = "CAPE"
        else:
            self.results["malfamily"] = family

        return self.results
コード例 #31
0
ファイル: plugins.py プロジェクト: zhzcsp/cuckoo
class RunAuxiliary(object):
    """Auxiliary modules manager.

    Starts the auxiliary modules enabled in auxiliary.conf alongside an
    analysis, dispatches callbacks to them while it runs, and stops them
    when it ends.
    """

    def __init__(self, task, machine, guest_manager):
        """@param task: task object of the current analysis.
        @param machine: machine the analysis runs on.
        @param guest_manager: guest manager handling that machine.
        """
        self.task = task
        self.machine = machine
        self.guest_manager = guest_manager

        # Parsed auxiliary.conf; one section per auxiliary module.
        self.cfg = Config("auxiliary")
        # Modules that started successfully; used by callback() and stop().
        self.enabled = []

    def start(self):
        """Instantiate, configure, and start every enabled auxiliary module."""
        for module in list_plugins(group="auxiliary"):
            try:
                current = module()
            except:
                log.exception("Failed to load the auxiliary module "
                              "\"{0}\":".format(module))
                return

            # The bare module name doubles as the module's section name
            # in auxiliary.conf.
            module_name = inspect.getmodule(current).__name__
            if "." in module_name:
                module_name = module_name.rsplit(".", 1)[1]

            try:
                options = self.cfg.get(module_name)
            except CuckooOperationalError:
                log.debug("Auxiliary module %s not found in "
                          "configuration file", module_name)
                continue

            if not options.enabled:
                continue

            # Hand the module its execution context and config section.
            current.set_task(self.task)
            current.set_machine(self.machine)
            current.set_guest_manager(self.guest_manager)
            current.set_options(options)

            try:
                current.start()
            except NotImplementedError:
                # Module chose not to implement start(); nothing to track.
                pass
            except CuckooDisableModule:
                # Module disabled itself at runtime; skip it entirely.
                continue
            except Exception as e:
                log.warning("Unable to start auxiliary module %s: %s",
                            module_name, e)
            else:
                # Only successfully started modules receive callbacks and
                # a stop() call later.
                log.debug("Started auxiliary module: %s",
                          current.__class__.__name__)
                self.enabled.append(current)

    def callback(self, name, *args, **kwargs):
        """Invoke the "cb_<name>" hook on every running auxiliary module.
        @param name: callback name (without the "cb_" prefix).
        """
        def default(*args, **kwargs):
            pass

        enabled = []
        for module in self.enabled:
            try:
                getattr(module, "cb_%s" % name, default)(*args, **kwargs)
            except NotImplementedError:
                pass
            except CuckooDisableModule:
                # Module asked to be disabled: drop it from the enabled
                # list so it gets no further callbacks or stop() call.
                continue
            except Exception as e:
                log.warning(
                    "Error performing callback %r on auxiliary module %r: %s",
                    name, module.__class__.__name__, e
                )

            enabled.append(module)
        self.enabled = enabled

    def stop(self):
        """Stop every auxiliary module that was started."""
        for module in self.enabled:
            try:
                module.stop()
            except NotImplementedError:
                pass
            except Exception as e:
                log.warning("Unable to stop auxiliary module: %s", e)
            else:
                log.debug("Stopped auxiliary module: %s",
                          module.__class__.__name__)
コード例 #32
0
# Copyright (C) 2010-2014 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

import sys
import os
from django.conf import settings

sys.path.append(settings.CUCKOO_PATH)

from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.config import Config

# Load only the [mongodb] section of Cuckoo's reporting.conf.
cfg = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "reporting.conf")).mongodb

# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled"):
    raise Exception("Mongo reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
# Defaults mirror a local MongoDB instance on the standard port.
settings.MONGO_HOST = cfg.get("host", "127.0.0.1")
settings.MONGO_PORT = cfg.get("port", 27017)
コード例 #33
0
ファイル: plugins.py プロジェクト: scottydo/cuckoo
class RunReporting:
    """Reporting Engine.

    This class handles the loading and execution of the enabled reporting
    modules. It receives the analysis results dictionary from the Processing
    Engine and passes it over to the reporting modules before executing them.
    """

    def __init__(self, task_id, results):
        """@param task_id: ID of the analyzed task.
        @param results: analysis results dictionary produced by processing."""
        self.task = Database().view_task(task_id).to_dict()
        self.results = results
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task_id))
        self.cfg = Config("reporting")

    def process(self, module):
        """Run a single reporting module.
        @param module: reporting module class to instantiate and execute.
        """
        # Initialize current reporting module.
        try:
            current = module()
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are no longer swallowed here.
            log.exception("Failed to load the reporting module \"{0}\":".format(module))
            return

        # Extract the module name (last dotted component).
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Reporting module %s not found in configuration file", module_name)
            return

        # If the reporting module is disabled in the config, skip it.
        if not options.enabled:
            return

        # Give it the path to the analysis results folder.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the relevant reporting.conf section.
        current.set_options(options)
        # Load the content of the analysis.conf file.
        current.cfg = Config(cfg=current.conf_path)

        try:
            current.run(self.results)
            log.debug("Executed reporting module \"%s\"", current.__class__.__name__)
        except CuckooDependencyError as e:
            log.warning("The reporting module \"%s\" has missing dependencies: %s", current.__class__.__name__, e)
        except CuckooReportError as e:
            log.warning("The reporting module \"%s\" returned the following error: %s", current.__class__.__name__, e)
        except Exception:
            # Narrowed from a bare "except:" (see above).
            log.exception("Failed to run the reporting module \"%s\":", current.__class__.__name__)

    def run(self):
        """Generates all reports.
        @raise CuckooReportError: if a report module fails.
        """
        # In every reporting module you can specify a numeric value that
        # represents at which position that module should be executed among
        # all the available ones. It can be used in the case where a
        # module requires another one to be already executed beforehand.
        reporting_list = list_plugins(group="reporting")

        # Return if no reporting modules are loaded.
        if reporting_list:
            reporting_list.sort(key=lambda module: module.order)

            # Run every loaded reporting module.
            for module in reporting_list:
                self.process(module)
        else:
            log.info("No reporting modules loaded")
コード例 #34
0
ファイル: startup.py プロジェクト: spresec/CAPE
def init_routing():
    """Initialize and check whether the routing information is correct."""
    cuckoo = Config()
    vpn = Config("vpn")

    # Check whether all VPNs exist if configured and make their configuration
    # available through the vpns variable. Also enable NAT on each interface.
    if vpn.vpn.enabled:
        for name in vpn.vpn.vpns.split(","):
            name = name.strip()
            if not name:
                continue

            if not hasattr(vpn, name):
                raise CuckooStartupError(
                    "Could not find VPN configuration for %s" % name
                )

            entry = vpn.get(name)
            # NOTE(review): the interface availability check below was left
            # commented out, so VPN interfaces are not verified here.
            #add = 1
            #if not rooter("nic_available", entry.interface):
            #raise CuckooStartupError(
            #   "The network interface that has been configured for "
            #    "VPN %s is not available." % entry.name
            #)
            #    add = 0
            if not rooter("rt_available", entry.rt_table):
                raise CuckooStartupError(
                    "The routing table that has been configured for "
                    "VPN %s is not available." % entry.name
                )
            # Register this VPN in the module-level vpns mapping.
            vpns[entry.name] = entry

            # Disable & enable NAT on this network interface. Disable it just
            # in case we still had the same rule from a previous run.
            rooter("disable_nat", entry.interface)
            rooter("enable_nat", entry.interface)

            # Populate routing table with entries from main routing table.
            if cuckoo.routing.auto_rt:
                rooter("flush_rttable", entry.rt_table)
                rooter("init_rttable", entry.rt_table, entry.interface)

    # Check whether the default VPN exists if specified.
    if cuckoo.routing.route not in ("none", "internet", "tor", "inetsim"):
        if not vpn.vpn.enabled:
            raise CuckooStartupError(
                "A VPN has been configured as default routing interface for "
                "VMs, but VPNs have not been enabled in vpn.conf"
            )

        if cuckoo.routing.route not in vpns:
            raise CuckooStartupError(
                "The VPN defined as default routing target has not been "
                "configured in vpn.conf."
            )

    # Check whether the dirty line exists if it has been defined.
    if cuckoo.routing.internet != "none":
        if not rooter("nic_available", cuckoo.routing.internet):
            raise CuckooStartupError(
                "The network interface that has been configured as dirty "
                "line is not available."
            )

        if not rooter("rt_available", cuckoo.routing.rt_table):
            raise CuckooStartupError(
                "The routing table that has been configured for dirty "
                "line interface is not available."
            )

        # Disable & enable NAT on this network interface. Disable it just
        # in case we still had the same rule from a previous run.
        rooter("disable_nat", cuckoo.routing.internet)
        rooter("enable_nat", cuckoo.routing.internet)

        # Populate routing table with entries from main routing table.
        if cuckoo.routing.auto_rt:
            rooter("flush_rttable", cuckoo.routing.rt_table)
            rooter("init_rttable", cuckoo.routing.rt_table,
                   cuckoo.routing.internet)

    # Check if tor interface exists, if yes then enable nat
    if cuckoo.routing.tor and cuckoo.routing.tor_interface:
        if not rooter("nic_available", cuckoo.routing.tor_interface):
            raise CuckooStartupError(
                "The network interface that has been configured as tor "
                "line is not available."
            )

        # Disable & enable NAT on this network interface. Disable it just
        # in case we still had the same rule from a previous run.
        rooter("disable_nat", cuckoo.routing.tor_interface)
        rooter("enable_nat", cuckoo.routing.tor_interface)

        # Populate routing table with entries from main routing table.
        # NOTE(review): this passes cuckoo.routing.internet rather than
        # tor_interface — looks copy/pasted from the dirty-line branch
        # above; confirm this is intended.
        if cuckoo.routing.auto_rt:
            rooter("flush_rttable", cuckoo.routing.rt_table)
            rooter("init_rttable", cuckoo.routing.rt_table,
                   cuckoo.routing.internet)

    # Check if inetsim interface exists, if yes then enable nat, if interface is not the same as tor
    #if cuckoo.routing.inetsim_interface and cuckoo.routing.inetsim_interface !=  cuckoo.routing.tor_interface:
    # Check if inetsim interface exists, if yes then enable nat
    if cuckoo.routing.inetsim and cuckoo.routing.inetsim_interface:
        if not rooter("nic_available", cuckoo.routing.inetsim_interface):
            raise CuckooStartupError(
                "The network interface that has been configured as inetsim "
                "line is not available."
            )

        # Disable & enable NAT on this network interface. Disable it just
        # in case we still had the same rule from a previous run.
        rooter("disable_nat", cuckoo.routing.inetsim_interface)
        rooter("enable_nat", cuckoo.routing.inetsim_interface)

        # Populate routing table with entries from main routing table.
        # NOTE(review): also uses cuckoo.routing.internet rather than
        # inetsim_interface — confirm intended (same pattern as the tor
        # branch above).
        if cuckoo.routing.auto_rt:
            rooter("flush_rttable", cuckoo.routing.rt_table)
            rooter("init_rttable", cuckoo.routing.rt_table,
                   cuckoo.routing.internet)
コード例 #35
0
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

import sys
import os

# Cuckoo path.
# NOTE(review): the path is relative to the current working directory, so
# this settings module presumably must be loaded from one level below the
# Cuckoo root — confirm against the deployment layout.
CUCKOO_PATH = os.path.join(os.getcwd(), "..")
sys.path.append(CUCKOO_PATH)

from lib.cuckoo.common.config import Config

# Only the [mongodb] section of reporting.conf is needed here.
cfg = Config("reporting").mongodb

# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled"):
    raise Exception("Mongo reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
MONGO_HOST = cfg.get("host", "127.0.0.1")
MONGO_PORT = cfg.get("port", 27017)

DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Database settings. We don't need it.
DATABASES = {}

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
コード例 #36
0
ファイル: settings.py プロジェクト: CIRCL/cuckoo-modified
# Toggle for the web UI's login requirement.
WEB_AUTHENTICATION = False

# Get connection options from reporting.conf.
# NOTE(review): `cfg` is defined earlier in this settings module (not shown
# here); presumably the full reporting.conf Config object.
MONGO_HOST = cfg.mongodb.get("host", "127.0.0.1")
MONGO_PORT = cfg.mongodb.get("port", 27017)
MONGO_DB = cfg.mongodb.get("db", "cuckoo")

# Elasticsearch reporting backend connection options.
ELASTIC_HOST = cfg.elasticsearchdb.get("host", "127.0.0.1")
ELASTIC_PORT = cfg.elasticsearchdb.get("port", 9200)
ELASTIC_INDEX = cfg.elasticsearchdb.get("index", "cuckoo")

# Sections of reporting.conf/auxiliary.conf used by the web UI.
moloch_cfg = Config("reporting").moloch
aux_cfg =  Config("auxiliary")
vtdl_cfg = Config("auxiliary").virustotaldl

# Moloch (packet capture indexing) integration.
MOLOCH_BASE = moloch_cfg.get("base", None)
MOLOCH_NODE = moloch_cfg.get("node", None)
MOLOCH_ENABLED = moloch_cfg.get("enabled", False)

GATEWAYS = aux_cfg.get("gateways")
# VirusTotal download (vtdl) options: sample fetching via the VT API.
VTDL_ENABLED = vtdl_cfg.get("enabled",False)
VTDL_PRIV_KEY = vtdl_cfg.get("dlprivkey",None)
VTDL_INTEL_KEY = vtdl_cfg.get("dlintelkey",None)
VTDL_PATH = vtdl_cfg.get("dlpath",None)

# Scratch directory for uploaded/downloaded samples.
TEMP_PATH = Config().cuckoo.get("tmppath", "/tmp")

# Matches dotted-quad IPv4 addresses with each octet restricted to 0-255.
ipaddy_re = re.compile(r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")

if GATEWAYS:
    GATEWAYS_IP_MAP = {}
コード例 #37
0
import sys
import os
import json
from django.conf import settings

sys.path.append(settings.CUCKOO_PATH)

from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.config import Config

# Configuration sections consumed by the web interface.
cfg = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "reporting.conf")).mongodb
moloch_cfg = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "reporting.conf")).moloch
aux_cfg = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "auxiliary.conf"))
vtdl_cfg = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "auxiliary.conf")).virustotaldl

# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled"):
    raise Exception("Mongo reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
# (A duplicated MONGO_PORT assignment was removed here.)
settings.MONGO_HOST = cfg.get("host", "127.0.0.1")
settings.MONGO_PORT = cfg.get("port", 27017)

# Moloch (packet capture indexing) integration options.
settings.MOLOCH_BASE = moloch_cfg.get("base", None)
settings.MOLOCH_NODE = moloch_cfg.get("node", None)
settings.MOLOCH_ENABLED = moloch_cfg.get("enabled", False)

settings.GATEWAYS = aux_cfg.get("gateways")
# VirusTotal download (vtdl) options.
settings.VTDL_ENABLED = vtdl_cfg.get("enabled", False)
settings.VTDL_KEY = vtdl_cfg.get("dlkey", None)
settings.VTDL_PATH = vtdl_cfg.get("dlpath", None)
コード例 #38
0
ファイル: memory.py プロジェクト: sunacha3/cuckoo
class VolatilityManager(object):
    """Handle several volatility results."""
    # Volatility plugins executed by run(), in this order; each can be
    # enabled/disabled individually in conf/memory.conf.
    PLUGINS = [
        "pslist",
        "psxview",
        "callbacks",
        "idt",
        "ssdt",
        "gdt",
        "timers",
        "messagehooks",
        "getsids",
        "privs",
        "malfind",
        "apihooks",
        "dlllist",
        "handles",
        "ldrmodules",
        "mutantscan",
        "devicetree",
        "svcscan",
        "modscan",
        "yarascan",
    ]

    def __init__(self, memfile, osprofile=None):
        """@param memfile: path of the memory dump to analyze.
        @param osprofile: optional Volatility OS profile; when absent it is
        taken from memory.conf or auto-detected via imageinfo."""
        self.mask_pid = []
        self.taint_pid = set()
        self.memfile = memfile

        conf_path = os.path.join(CUCKOO_ROOT, "conf", "memory.conf")
        if not os.path.exists(conf_path):
            # Fixed: the original message had no format placeholder (so the
            # path was silently dropped) and named the wrong file.
            log.error("Configuration file %s not found", conf_path)
            self.voptions = False
            return

        self.voptions = Config("memory")

        # PIDs listed in [mask] pid_generic are hidden from the results
        # unless they turn out to be tainted (see find_taint).
        for pid in self.voptions.mask.pid_generic.split(","):
            pid = pid.strip()
            if pid:
                self.mask_pid.append(int(pid))

        self.no_filter = not self.voptions.mask.enabled
        if self.voptions.basic.guest_profile:
            self.osprofile = self.voptions.basic.guest_profile
        else:
            self.osprofile = osprofile or self.get_osprofile()

    def get_osprofile(self):
        """Get the OS profile by running Volatility's imageinfo."""
        return VolatilityAPI(self.memfile).imageinfo()["data"][0]["osprofile"]

    def run(self):
        """Execute all enabled plugins against the dump and return the
        mask-filtered results (None when no options were loaded)."""
        results = {}

        # Exit if options were not loaded.
        if not self.voptions:
            return

        vol = VolatilityAPI(self.memfile, self.osprofile)

        for plugin_name in self.PLUGINS:
            plugin = self.voptions.get(plugin_name)
            if not plugin or not plugin.enabled:
                log.debug("Skipping '%s' volatility module", plugin_name)
                continue

            if plugin_name in vol.plugins:
                log.debug("Executing volatility '%s' module.", plugin_name)
                results[plugin_name] = getattr(vol, plugin_name)()

        self.find_taint(results)
        self.cleanup()

        return self.mask_filter(results)

    def mask_filter(self, old):
        """Filter out masked stuff. Keep tainted stuff.
        @param old: raw per-plugin results dictionary.
        @return: same structure with masked, non-tainted entries removed."""
        new = {}

        for akey in old.keys():
            new[akey] = {"config": old[akey]["config"], "data": []}
            conf = getattr(self.voptions, akey, None)
            new[akey]["config"]["filter"] = conf.filter
            for item in old[akey]["data"]:
                # TODO: need to improve this logic.
                if not conf.filter:
                    new[akey]["data"].append(item)
                elif "process_id" in item and \
                        item["process_id"] in self.mask_pid and \
                        item["process_id"] not in self.taint_pid:
                    # Masked and not tainted: drop it.
                    pass
                else:
                    new[akey]["data"].append(item)
        return new

    def find_taint(self, res):
        """Find tainted items: any PID flagged by malfind stays visible."""
        if "malfind" in res:
            for item in res["malfind"]["data"]:
                self.taint_pid.add(item["process_id"])

    def cleanup(self):
        """Delete the memory dump (if configured to do so)."""

        if self.voptions.basic.delete_memdump:
            try:
                os.remove(self.memfile)
            except OSError:
                log.error("Unable to delete memory dump file at path \"%s\" ",
                          self.memfile)
コード例 #39
0
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """
    def __init__(self, task, results):
        """@param task: task dictionary of the analysis to process.
        @param results: shared results container filled in by the modules."""
        self.task = task
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                          str(task["id"]))
        self.cfg = Config("processing")
        self.cuckoo_cfg = Config()
        self.results = results

    def process(self, module):
        """Run a processing module.
        @param module: processing module to run.
        @return: results generated by module.
        """
        # Initialize the specified processing module.
        try:
            current = module(self.results)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt still propagate.
            log.exception("Failed to load the processing module "
                          '"{0}":'.format(module))
            return

        # Extract the module name (last dotted component).
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            log.debug(
                'Executing processing module "%s" on analysis at '
                '"%s"', current.__class__.__name__, self.analysis_path)
            pretime = datetime.now()
            data = current.run()
            posttime = datetime.now()
            timediff = posttime - pretime
            # Record the wall-clock runtime of this module (seconds.millis).
            self.results["statistics"]["processing"].append({
                "name":
                current.__class__.__name__,
                "time":
                float("%d.%03d" %
                      (timediff.seconds, timediff.microseconds / 1000))
            })

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key: data}
        except CuckooDependencyError as e:
            log.warning(
                'The processing module "%s" has missing dependencies: %s',
                current.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning(
                'The processing module "%s" returned the following '
                "error: %s", current.__class__.__name__, e)
        except Exception:
            # Narrowed from a bare "except:" (see above).
            log.exception('Failed to run the processing module "%s":',
                          current.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """

        # Used for cases where we need to add time of execution between modules
        self.results["temp_processing_stats"] = {}
        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                result = self.process(module)
                # If it provided some results, append it to the big results
                # container.
                if result:
                    self.results.update(result)
        else:
            log.info("No processing modules loaded")

        # Add temp_processing stats to global processing stats
        if self.results["temp_processing_stats"]:
            for plugin_name in self.results["temp_processing_stats"]:
                self.results["statistics"]["processing"].append({
                    "name":
                    plugin_name,
                    "time":
                    self.results["temp_processing_stats"][plugin_name].get(
                        "time", 0)
                })

        del self.results["temp_processing_stats"]

        # For correct error log on webgui
        logs = os.path.join(self.analysis_path, "logs")
        if os.path.exists(logs):
            for file_name in os.listdir(logs):
                file_path = os.path.join(logs, file_name)

                if os.path.isdir(file_path):
                    continue
                # Skipping the current log file if it's too big.
                if os.stat(
                        file_path
                ).st_size > self.cuckoo_cfg.processing.analysis_size_limit:
                    # Fixed: the original guarded this with
                    # hasattr(self.results, "debug"), but hasattr() checks
                    # attributes, not dict keys, so it was always False;
                    # setdefault alone is sufficient and idempotent.
                    self.results.setdefault("debug", dict()).setdefault(
                        "errors", list())
                    self.results["debug"]["errors"].append(
                        "Behavioral log {0} too big to be processed, skipped. Increase analysis_size_limit in cuckoo.conf"
                        .format(file_name))
                    continue
        else:
            log.info(
                "Logs folder doesn't exist, maybe something wrong with the analyzer folder, any change?"
            )

        # Derive the malware family and its detection source, preferring
        # Yara, then Suricata, then VirusTotal, then ClamAV.
        family = ""
        self.results["malfamily_tag"] = ""
        if self.cfg.detections.enabled:
            if self.results.get("detections",
                                False) and self.cfg.detections.yara:
                family = self.results["detections"]
                self.results["malfamily_tag"] = "Yara"
            elif self.cfg.detections.suricata and not family and self.results.get(
                    "suricata", {}).get("alerts", []):
                for alert in self.results["suricata"]["alerts"]:
                    if alert.get("signature",
                                 "") and alert["signature"].startswith(
                                     (et_categories)):
                        family = get_suricata_family(alert["signature"])
                        if family:
                            self.results["malfamily_tag"] = "Suricata"
                            self.results["detections"] = family

            elif self.cfg.detections.virustotal and not family and self.results[
                    "info"]["category"] == "file" and self.results.get(
                        "virustotal", {}).get("detection"):
                family = self.results["virustotal"]["detection"]
                self.results["malfamily_tag"] = "VirusTotal"

            # fall back to ClamAV detection
            elif self.cfg.detections.clamav and not family and self.results[
                    "info"]["category"] == "file" and self.results.get(
                        "target", {}).get("file", {}).get("clamav"):
                for detection in self.results["target"]["file"]["clamav"]:
                    if detection.startswith("Win.Trojan."):
                        words = re.findall(r"[A-Za-z0-9]+", detection)
                        family = words[2]
                        self.results["malfamily_tag"] = "ClamAV"

            if family:
                self.results["detections"] = family

        return self.results
コード例 #40
0
ファイル: settings.py プロジェクト: tdzmont/cuckoo-modified
# NOTE(review): path is relative to the current working directory; this
# settings module presumably must be loaded from one level below the Cuckoo
# root — confirm against the deployment layout.
CUCKOO_PATH = os.path.join(os.getcwd(), "..")
sys.path.append(CUCKOO_PATH)

from lib.cuckoo.common.config import Config

# Configuration sections consumed by the web interface.
cfg = Config("reporting").mongodb
moloch_cfg = Config("reporting").moloch
aux_cfg =  Config("auxiliary")
vtdl_cfg = Config("auxiliary").virustotaldl
tor_cfg = Config("auxiliary").tor
ie_martians_cfg = Config("auxiliary").iemartiansinwebui
display_zmon_cfg = Config("auxiliary").displayzmoninwebui
display_shrike_cfg = Config("auxiliary").displayshrikeinwebui
display_et_portal_cfg = Config("auxiliary").displayetportalinwebui
# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled"):
    raise Exception("Mongo reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
MONGO_HOST = cfg.get("host", "127.0.0.1")
MONGO_PORT = cfg.get("port", 27017)
MONGO_DB = cfg.get("db", "cuckoo")

# Moloch (packet capture indexing) integration options.
MOLOCH_BASE = moloch_cfg.get("base", None)
MOLOCH_NODE = moloch_cfg.get("node", None)
MOLOCH_ENABLED = moloch_cfg.get("enabled", False)

GATEWAYS = aux_cfg.get("gateways")
DISPLAY_IE_MARTIANS = ie_martians_cfg.get("enabled", False)
# VirusTotal download (vtdl) options.
VTDL_ENABLED = vtdl_cfg.get("enabled",False)
VTDL_PRIV_KEY = vtdl_cfg.get("dlprivkey",None)
コード例 #41
0
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """
    def __init__(self, task_id):
        """@param task_id: ID of the analyses to process."""
        self.task = Database().view_task(task_id).to_dict()
        self.task_id = task_id
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                          str(task_id))
        self.cfg = Config("processing")

    def process(self, module):
        """Run a processing module.
        @param module: processing module to run.
        @return: results generated by module, or None.
        """
        # Initialize the specified processing module.
        try:
            current = module()
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt still propagate.
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return

        # Extract the module name (last dotted component).
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            data = current.run()

            log.debug(
                "Executed processing module \"%s\" on analysis at "
                "\"%s\"", current.__class__.__name__, self.analysis_path)

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key: data}
        except CuckooDependencyError as e:
            log.warning(
                "The processing module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning(
                "The processing module \"%s\" returned the following "
                "error: %s", current.__class__.__name__, e)
        except Exception:
            # Narrowed from a bare "except:" (see above).
            log.exception("Failed to run the processing module \"%s\":",
                          current.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """
        # This is the results container. It's what will be used by all the
        # reporting modules to make it consumable by humans and machines.
        # It will contain all the results generated by every processing
        # module available. Its structure can be observed through the JSON
        # dump in the analysis' reports folder. (If jsondump is enabled.)
        # We friendly call this "fat dict".
        results = {}

        Database().set_statistics_time(self.task_id, PROCESSING_STARTED)
        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                result = self.process(module)
                # If it provided some results, append it to the big results
                # container.
                if result:
                    results.update(result)
        else:
            log.info("No processing modules loaded")

        Database().set_statistics_time(self.task_id, PROCESSING_FINISHED)

        # Derive per-task statistics counters from the behavioral results.
        if "behavior" in results and "enhanced" in results["behavior"]:
            regwrite = 0
            filewrite = 0
            for entry in results["behavior"]["enhanced"]:
                if entry["object"] == "registry" and entry["event"] == "write":
                    regwrite += 1
                if entry["object"] == "file" and entry["event"] == "write":
                    filewrite += 1
            Database().set_statistics_counter(self.task_id, FILES_WRITTEN,
                                              filewrite)
            Database().set_statistics_counter(self.task_id,
                                              REGISTRY_KEYS_MODIFIED, regwrite)

        if "behavior" in results and "summary" in results[
                "behavior"] and "files" in results["behavior"]["summary"]:
            Database().set_statistics_counter(
                self.task_id, DROPPED_FILES,
                len(results["behavior"]["summary"]["files"]))

        if "behavior" in results and "processes" in results["behavior"]:
            Database().set_statistics_counter(
                self.task_id, RUNNING_PROCESSES,
                len(results["behavior"]["processes"]))
            # Total number of API calls across all monitored processes.
            api_calls = sum(
                len(process["calls"])
                for process in results["behavior"]["processes"])
            Database().set_statistics_counter(self.task_id, API_CALLS,
                                              api_calls)

        if "network" in results and "domains" in results["network"]:
            Database().set_statistics_counter(
                self.task_id, ACCESSED_DOMAINS,
                len(results["network"]["domains"]))

        # Return the fat dict.
        return results
コード例 #42
0
ファイル: web.py プロジェクト: EmergingThreats/cuckoo
        return 1
    return 0


# Templating engine.
env = Environment()
env.loader = FileSystemLoader(os.path.join(CUCKOO_ROOT, "data", "html"))
# Global db pointer.
db = Database()
redirectors = []
rddict = {}
pools = []

# Load the configured redirectors; the UI degrades gracefully (redirectors
# set to None) when the config file is missing or malformed.
try:
    pcfg = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "redirectors.conf"))
    rddict = pcfg.get('redirectors')
    for entry in rddict:
        redirectors.append(entry)
except Exception:
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
    # still propagate.
    redirectors = None
    print("failed to get redirectors")

# Optional Suricata / Moloch sections from processing.conf.
try:
    pcfg = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "processing.conf"))
    suricfg = pcfg.get('suricata')
    molochcfg = pcfg.get('network-moloch')
except Exception:
    # Narrowed from a bare "except:" (see above).
    suricfg = None
    molochcfg = None
    print("failed to get suri/moloch config blocks")
コード例 #43
0
class VolatilityAnalysis(Processing):
    """Volatility memory dump analysis."""
    volatilityConfig = None
    analysisConfig = None
    rulePath = os.path.join(CUCKOO_ROOT, 'conf', 'volatility')
    params = {
        "rating_whitelist" : 0.5,
        "rating_services" : 0.5,
        "rating_hidden" : 1.5,
        "rating_orphan" : 1.5,
        "rating_api_unknown" : 1.5,
        "rating_api_known" : 0.5,
        "rating_malfind_pe" : 1.5,
        "rating_malfind" : 0.5,
        "none" : 0
    }

    tagToRating = {
        "connected_processes" : "rating_whitelist",
        "running_services" : "rating_services",
        "hidden_processes" : "rating_hidden",
        "orphan_threads" : "rating_orphan",
        "api_hooks_unknown" : "rating_api_unknown",
        "api_hooks_known" : "rating_api_known",
        "malfind_executable" : "rating_malfind_pe",
        "malfind_no_executable" : "rating_malfind",
        "none" : "none"
    }

    rules = {}

    def __init__(self):
        self.volatilityConfig = conf.ConfObject()
        self.volatilityConfig.final = True
        self.volatilityConfig.verbose = False
        cache.disable_caching(None, None, None, None)
        MemoryRegistry.Init()
        self.key = "volatility"

    def preConfigure(self):
        '''
        Checks if the plugin dependencies are satisfied.
        Loads the rating configuration and the analysis configuration.
        '''
        if len(MISSING_DEPENDENCIES) > 0:
            log.warning("Dependencies missing: %s, skip" % ','.join(MISSING_DEPENDENCIES))
            return False

        memdumpPath = os.path.abspath(os.path.join(self.analysis_path, 'post.memdump'))
        if not os.path.isfile(memdumpPath):
            log.warning("Memory dump '%s' not found for Volatility, skip" % memdumpPath)
            return False
        else:
            self.volatilityConfig.LOCATION = "file://%s" % memdumpPath

        if not self.setExternalParameters():
            return False

        if not self.setVolatilityProfile():
            log.warning("Couldn't determine which volatility profile to use, skip")
            return False

        return True

    def setExternalParameters(self):
        '''
        Configures the plugin ratings and any other parameters which were passed.
        Uses two sources - the ratings.conf file and the tasks custom field (dictionary dumped as a json).
        '''
        try:
            self.analysisConfig = CuckooConfig(self.conf_path)
            ratingsConf = CuckooConfig(os.path.join(self.rulePath, "ratings.conf"))
            for rating in ratingsConf.get("ratings").iteritems():
                self.params[rating[0]] = rating[1]
        except Exception as e:
            log.warning("Preconfigure - %s" % str(e))
            return False

        try:
            params = json.loads(self.analysisConfig.get("analysis").get("custom"))
            for param in params.iteritems():
                self.params[param[0]] = param[1]
        except ValueError as e:
            if self.analysisConfig.get("analysis").get("custom") != "None":
                log.warning("Couldn't load json object from custom, skip")
                return False
        return True

    def loadRuleFiles(self):
        try:
            if self.params.get("rating_whitelist", 0) > 0:
                self.rules["connected_processes"] = vprl.loadConnectedProcessesConf(os.path.join(self.rulePath, 'connected_processes.conf'))
            if self.params.get("rating_services", 0) > 0:
                self.rules["running_services"] = vprl.loadRunningServicesConf(os.path.join(self.rulePath, 'running_services.conf'))
            if self.params.get("rating_api_unknown", 0) > 0 or self.params.get("rating_api_known", 0) > 0:
                self.rules["api_hooks"] = vprl.loadApiHooksConf(os.path.join(self.rulePath, 'api_hooks.conf'))
        except Exception as e:
            log.warning("Volatility processor - %s, skip" % str(e))
            return False
        return True

    def run(self):
        """Run volatility processing.
        @return: list with matches.
        """
        matches = { 'connected_processes':None,
                'running_services': None,
                'hidden_processes': None,
                'orphan_threads': None,
                'api_hooks':None,
                'malfind': None
            }

        if not self.preConfigure():
            return {}
        if not self.loadRuleFiles():
            return {}

        log.info("Volatile Systems Volatility Framework {0} - cuckoo processor\n".format(constants.VERSION))
        log.debug(self.params)

        if self.params.get("rating_whitelist", 0) > 0:
            matches['connected_processes'] = self.heuristicConnectedProcesses()
        if self.params.get("rating_services", 0) > 0:
            matches['running_services'] = self.heuristicRunningServices()
        if self.params.get("rating_hidden", 0) > 0:
            matches['hidden_processes'] = self.heuristicHiddenProcesses()
        if self.params.get("rating_orphan", 0) > 0:
            matches['orphan_threads'] = self.heuristicOrphanThreads()
        if self.params.get("rating_api_unknown", 0) > 0 or self.params.get("rating_api_known", 0) > 0:
            matches['api_hooks'] = self.heuristicApiHooks()
        if self.params.get("rating_malfind", 0) > 0 or self.params.get("rating_malfind_pe", 0) > 0:
            matches['malfind'] = self.heuristicMalfind()
        matches = self.combineResultsForPids(matches)
        log.debug(matches)
        return matches

    def setVolatilityProfile(self):
        '''
        Gets called in order to set the profile which will be used when processing the dump file.
        If a profile was supplied and it exists then the profile is set and the function returns.
        If a profile wasn't supplied or it doesn't exist then detection if performed by calling self.detectSystem. 
        @return: True if successfully determined and set the profile and otherwise False.
        '''
        if self.params:
            profile = self.params.get('operating_system')
        else:
            profile = None

        if profile == 'None':
            profile = None

        if profile is not None and MemoryRegistry.PROFILES.objects.get(profile) is None:
            log.warning("Specified profile '%s' not found. Attempting to detect profile." % profile)
            profile = None

        if profile is None:
            profile = self.detectSystem()

        self.volatilityConfig.PROFILE = profile

        if profile is None:
            return False
        return True

    def detectSystem(self):
        '''
        Attempts to identify the profile to use for the supplied dump file.
        Uses the imageinfo command in order to determine the profile.
        @return: True if successfully determined and set the profile and otherwise False.
        '''
        profile = None
        result = self.runModule("imageinfo")
        profileSearch = re.compile(r"(\w+)")
        for line in result:
            if line[0] == "Suggested Profile(s)":
                match = profileSearch.match(line[1])
                break
        if match is not None:
            profile = match.group(1)
            if MemoryRegistry.PROFILES.objects.get(profile) is None:
                profile = None
        return profile

    def splitPath(self, str):
        result = ntpath.split(str)
        if result[0] == str:
            result = path.split(str)
        return result

    def runModule(self, module, method = "calculate"):
        log.debug("Attempting to run %s" % module)
        try:
            if module in MemoryRegistry.PLUGIN_COMMANDS.commands:
                command = MemoryRegistry.PLUGIN_COMMANDS.commands[module](self.volatilityConfig)
                self.volatilityConfig.parse_options()
                if method:
                    return getattr(command, method)()
                else:
                    return command
        except exceptions.VolatilityException, e:
            log.error(e)
コード例 #44
0
class Pcap:
    """Reads network data from PCAP file."""

    def __init__(self, filepath):
        """Creates a new instance.
        @param filepath: path to PCAP file
        """
        self.filepath = filepath

        # List of all hosts.
        self.hosts = []
        # List containing all non-private IP addresses.
        self.unique_hosts = []
        # List of unique domains.
        self.unique_domains = []
        # List containing all TCP packets.
        self.tcp_connections = []
        self.tcp_connections_seen = set()
        # List containing all UDP packets.
        self.udp_connections = []
        self.udp_connections_seen = set()
        # List containing all ICMP requests.
        self.icmp_requests = []
        # List containing all HTTP requests.
        self.http_requests = OrderedDict()
        # List containing all DNS requests.
        self.dns_requests = OrderedDict()
        self.dns_answers = set()
        # List containing all SMTP requests.
        self.smtp_requests = []
        # Reconstruncted SMTP flow.
        self.smtp_flow = {}
        # List containing all IRC requests.
        self.irc_requests = []
        # Dictionary containing all the results of this processing.
        self.results = {}
        # Config
        self.config = Config()
        self.cfg = Config("dumi")
        db_cfg = self.cfg.get("dbAnalyzer")
        self.cmd_path = os.path.join(UTILS_ROOT, db_cfg.cmd_path)

        # CCDM (domain classifier) model files and native library.
        ccdm_cfg = self.cfg.get("ccdmAnalyzer")
        markov_path = os.path.join(CONTENT_ROOT, ccdm_cfg.markov_path)
        tld_path = os.path.join(CONTENT_ROOT, ccdm_cfg.tld_path)
        ngram_path = os.path.join(CONTENT_ROOT, ccdm_cfg.ngram_path)
        hmm_path = os.path.join(CONTENT_ROOT, ccdm_cfg.hmm_path)
        svm_path = os.path.join(CONTENT_ROOT, ccdm_cfg.svm_path)
        self.libccdm = cdll.LoadLibrary(os.path.join(UTILS_ROOT, "ccdm/libccdm.so"))
        self.libccdm.pecker_ccdm_init(markov_path, tld_path, ngram_path, hmm_path, svm_path)

    def _ip_domain_check(self, target, category):
        """Query the external DB analyzer (and CCDM fallback) for a target.
        @param target: IP address or domain name to check.
        @param category: "ip" or "domain".
        @return: dict keyed by the analyzer that produced the result.
        """
        cmd = self.cmd_path + " --query-" + category + " " + target
        ret = os.popen(cmd)
        result = ret.read().strip('\n').split(" ")
        # NOTE(review): `in` here is a substring test, not equality —
        # presumably intentional shorthand for category == "ip" and
        # result[0] == "1"; verify against the dbAnalyzer output format.
        if category in "ip" or result[0] in "1":
            return {"dbAnalyzer": result}

        result = str(self.libccdm.pecker_ccdm_match(target)).split()
        return {"ccdmAnalyzer": result}

    def _dns_gethostbyname(self, name):
        """Get host by name wrapper.
        @param name: hostname.
        @return: IP address or blank
        """
        if self.config.processing.resolve_dns:
            ip = resolve(name)
        else:
            ip = ""
        return ip

    def _is_private_ip(self, ip):
        """Check if the IP belongs to private network blocks.
        @param ip: IP address to verify.
        @return: boolean representing whether the IP belongs or not to
                 a private network block.
        """
        networks = [
            ("0.0.0.0", 8),
            ("10.0.0.0", 8),
            ("100.64.0.0", 10),
            ("127.0.0.0", 8),
            ("169.254.0.0", 16),
            ("172.16.0.0", 12),
            ("192.0.0.0", 24),
            ("192.0.2.0", 24),
            ("192.88.99.0", 24),
            ("192.168.0.0", 16),
            ("198.18.0.0", 15),
            ("198.51.100.0", 24),
            ("203.0.113.0", 24),
            ("240.0.0.0", 4),
            ("255.255.255.255", 32),
            ("224.0.0.0", 4),
            ("10.14.24.1", 24)
        ]

        try:
            ipaddr = struct.unpack(">I", socket.inet_aton(ip))[0]
            for netaddr, bits in networks:
                network_low = struct.unpack(">I", socket.inet_aton(netaddr))[0]
                network_high = network_low | (1 << (32 - bits)) - 1
                if ipaddr <= network_high and ipaddr >= network_low:
                    return True
        except:
            pass

        return False

    def _get_cn(self, ip):
        """Resolve the country name of an IP via GeoIP, if available.
        @param ip: IP address.
        @return: country name, or "unknown" when lookup is unavailable/fails.
        """
        cn = "unknown"
        log = logging.getLogger("Processing.Pcap")
        if IS_GEOIP:
            try:
                temp_cn = gi.country_name_by_addr(ip)
                if temp_cn:
                    cn = temp_cn
            except:
                log.error("Unable to GEOIP resolve %s" % ip)
        return cn

    def _add_hosts(self, connection):
        """Add IPs to unique list.
        @param connection: connection data
        """
        try:
            if connection["dst"] not in self.hosts:
                ip = convert_to_printable(connection["dst"])

                if ip not in self.hosts:
                    self.hosts.append(ip)

                    # We add external IPs to the list, only the first time
                    # we see them and if they're the destination of the
                    # first packet they appear in.
                    if not self._is_private_ip(ip):
                        self.unique_hosts.append(ip)
        except:
            pass

    def _enrich_hosts(self, unique_hosts):
        """Attach country, reverse-DNS and hostname info to each unique host.
        @param unique_hosts: list of IPs (consumed destructively).
        @return: list of enriched host dicts.
        """
        enriched_hosts = []

        if self.config.processing.reverse_dns:
            d = dns.resolver.Resolver()
            d.timeout = 5.0
            d.lifetime = 5.0

        while unique_hosts:
            ip = unique_hosts.pop()
            inaddrarpa = ""
            hostname = ""
            if self.config.processing.reverse_dns:
                try:
                    inaddrarpa = d.query(from_address(ip), "PTR").rrset[0].to_text()
                except:
                    pass
            # Map the IP back to the first DNS request that answered with it.
            for request in self.dns_requests.values():
                for answer in request['answers']:
                    if answer["data"] == ip:
                        hostname = request["request"]
                        break
                if hostname:
                    break

            enriched_hosts.append({"ip": ip, "country_name": self._get_cn(ip),
                                   "hostname": hostname, "inaddrarpa": inaddrarpa})
        return enriched_hosts

    def _tcp_dissect(self, conn, data):
        """Runs all TCP dissectors.
        @param conn: connection.
        @param data: payload data.
        """
        if self._check_http(data):
            self._add_http(conn, data)
        # SMTP.
        if conn["dport"] == 25 or conn["dport"] == 587:
            self._reassemble_smtp(conn, data)
        # IRC.
        if conn["dport"] != 21 and self._check_irc(data):
            self._add_irc(conn, data)

    def _udp_dissect(self, conn, data):
        """Runs all UDP dissectors.
        @param conn: connection.
        @param data: payload data.
        """
        # Select DNS and MDNS traffic.
        if conn["dport"] == 53 or conn["sport"] == 53 or conn["dport"] == 5353 or conn["sport"] == 5353:
            if self._check_dns(data):
                self._add_dns(data)

    def _check_icmp(self, icmp_data):
        """Checks for ICMP traffic.
        @param icmp_data: ICMP data flow.
        """
        try:
            return isinstance(icmp_data, dpkt.icmp.ICMP) and \
                   len(icmp_data.data) > 0
        except:
            return False

    def _icmp_dissect(self, conn, data):
        """Runs all ICMP dissectors.
        @param conn: connection.
        @param data: payload data.
        """

        if self._check_icmp(data):
            # If ICMP packets are coming from the host, it probably isn't
            # relevant traffic, hence we can skip from reporting it.
            if conn["src"] == self.config.resultserver.ip:
                return

            entry = {}
            entry["src"] = conn["src"]
            entry["dst"] = conn["dst"]
            entry["type"] = data.type

            # Extract data from dpkg.icmp.ICMP.
            try:
                entry["data"] = convert_to_printable(data.data.data)
            except:
                entry["data"] = ""

            self.icmp_requests.append(entry)

    def _check_dns(self, udpdata):
        """Checks for DNS traffic.
        @param udpdata: UDP data flow.
        """
        try:
            dpkt.dns.DNS(udpdata)
        except:
            return False

        return True

    def _add_dns(self, udpdata):
        """Adds a DNS data flow.
        @param udpdata: UDP data flow.
        """
        dns = dpkt.dns.DNS(udpdata)

        # DNS query parsing.
        query = {}

        # NOTE(review): the trailing `or True` makes this condition always
        # pass, so every parseable DNS packet is recorded; kept as-is to
        # preserve behavior, but the rcode/qr/opcode checks are dead.
        if dns.rcode == dpkt.dns.DNS_RCODE_NOERR or \
                        dns.qr == dpkt.dns.DNS_R or \
                        dns.opcode == dpkt.dns.DNS_QUERY or True:
            # DNS question.
            try:
                q_name = dns.qd[0].name
                q_type = dns.qd[0].type
            except IndexError:
                return False

            query["request"] = q_name
            if q_type == dpkt.dns.DNS_A:
                query["type"] = "A"
            if q_type == dpkt.dns.DNS_AAAA:
                query["type"] = "AAAA"
            elif q_type == dpkt.dns.DNS_CNAME:
                query["type"] = "CNAME"
            elif q_type == dpkt.dns.DNS_MX:
                query["type"] = "MX"
            elif q_type == dpkt.dns.DNS_PTR:
                query["type"] = "PTR"
            elif q_type == dpkt.dns.DNS_NS:
                query["type"] = "NS"
            elif q_type == dpkt.dns.DNS_SOA:
                query["type"] = "SOA"
            elif q_type == dpkt.dns.DNS_HINFO:
                query["type"] = "HINFO"
            elif q_type == dpkt.dns.DNS_TXT:
                query["type"] = "TXT"
            elif q_type == dpkt.dns.DNS_SRV:
                query["type"] = "SRV"

            # DNS answer.
            query["answers"] = []
            for answer in dns.an:
                ans = {}
                if answer.type == dpkt.dns.DNS_A:
                    ans["type"] = "A"
                    try:
                        ans["data"] = socket.inet_ntoa(answer.rdata)
                    except socket.error:
                        continue
                elif answer.type == dpkt.dns.DNS_AAAA:
                    ans["type"] = "AAAA"
                    try:
                        ans["data"] = socket.inet_ntop(socket.AF_INET6,
                                                       answer.rdata)
                    except (socket.error, ValueError):
                        continue
                elif answer.type == dpkt.dns.DNS_CNAME:
                    ans["type"] = "CNAME"
                    ans["data"] = answer.cname
                elif answer.type == dpkt.dns.DNS_MX:
                    ans["type"] = "MX"
                    ans["data"] = answer.mxname
                elif answer.type == dpkt.dns.DNS_PTR:
                    ans["type"] = "PTR"
                    ans["data"] = answer.ptrname
                elif answer.type == dpkt.dns.DNS_NS:
                    ans["type"] = "NS"
                    ans["data"] = answer.nsname
                elif answer.type == dpkt.dns.DNS_SOA:
                    ans["type"] = "SOA"
                    ans["data"] = ",".join([answer.mname,
                                            answer.rname,
                                            str(answer.serial),
                                            str(answer.refresh),
                                            str(answer.retry),
                                            str(answer.expire),
                                            str(answer.minimum)])
                elif answer.type == dpkt.dns.DNS_HINFO:
                    ans["type"] = "HINFO"
                    ans["data"] = " ".join(answer.text)
                elif answer.type == dpkt.dns.DNS_TXT:
                    ans["type"] = "TXT"
                    ans["data"] = " ".join(answer.text)

                # TODO: add srv handling
                query["answers"].append(ans)

            if dns.rcode == dpkt.dns.DNS_RCODE_NXDOMAIN:
                ans = {}
                ans["type"] = "NXDOMAIN"
                ans["data"] = ""
                query["answers"].append(ans)

            self._add_domain(query["request"])

            # Deduplicate requests by (type, name) and merge new answers in.
            reqtuple = query["type"], query["request"]
            if reqtuple not in self.dns_requests:
                self.dns_requests[reqtuple] = query
            new_answers = set((i["type"], i["data"]) for i in query["answers"]) - self.dns_answers
            self.dns_answers.update(new_answers)
            self.dns_requests[reqtuple]["answers"] += [dict(type=i[0], data=i[1]) for i in new_answers]

        return True

    def _add_domain(self, domain):
        """Add a domain to unique list.
        @param domain: domain name.
        """
        # Skip known-noise domains (Windows telemetry, reverse lookups).
        filters = [
            ".*\\.windows\\.com$",
            ".*\\.in\\-addr\\.arpa$"
        ]

        regexps = [re.compile(filter) for filter in filters]
        for regexp in regexps:
            if regexp.match(domain):
                return

        for entry in self.unique_domains:
            if entry["domain"] == domain:
                return
        ip = self._dns_gethostbyname(domain)
        self.unique_domains.append({"domain": domain, "ip": ip})

    def _check_http(self, tcpdata):
        """Checks for HTTP traffic.
        @param tcpdata: TCP data flow.
        """
        try:
            r = dpkt.http.Request()
            r.method, r.version, r.uri = None, None, None
            r.unpack(tcpdata)
        except dpkt.dpkt.UnpackError:
            # A partial unpack that filled in any field still counts as HTTP.
            if r.method is not None or r.version is not None or \
                            r.uri is not None:
                return True
            return False

        return True

    def _add_http(self, conn, tcpdata):
        """Adds an HTTP flow.
        @param conn: TCP connection info.
        @param tcpdata: TCP data flow.
        """
        if tcpdata in self.http_requests:
            self.http_requests[tcpdata]["count"] += 1
            return True

        try:
            http = dpkt.http.Request()
            http.unpack(tcpdata)
        except dpkt.dpkt.UnpackError:
            pass

        try:
            entry = {"count": 1}

            # Only trust the Host header when it looks like a valid
            # hostname (optionally with a port); otherwise fall back to
            # the destination IP.
            if "host" in http.headers and re.match(
                    r'^([A-Z0-9]|[A-Z0-9][A-Z0-9\-]{0,61}[A-Z0-9])(\.([A-Z0-9]|[A-Z0-9][A-Z0-9\-]{0,61}[A-Z0-9]))+(:[0-9]{1,5})?$',
                    http.headers["host"], re.IGNORECASE):
                entry["host"] = convert_to_printable(http.headers["host"])
            else:
                entry["host"] = conn["dst"]

            entry["port"] = conn["dport"]

            # Manually deal with cases when destination port is not the default one,
            # and it is  not included in host header.
            netloc = entry["host"]
            if entry["port"] != 80 and ":" not in netloc:
                netloc += ":" + str(entry["port"])

            entry["data"] = convert_to_printable(tcpdata)
            entry["uri"] = convert_to_printable(urlunparse(("http",
                                                            netloc,
                                                            http.uri, None,
                                                            None, None)))
            entry["body"] = convert_to_printable(http.body)
            entry["path"] = convert_to_printable(http.uri)

            if "user-agent" in http.headers:
                entry["user-agent"] = \
                    convert_to_printable(http.headers["user-agent"])
            else:
                entry["user-agent"] = ""

            entry["version"] = convert_to_printable(http.version)
            entry["method"] = convert_to_printable(http.method)

            self.http_requests[tcpdata] = entry
        except Exception:
            return False

        return True

    def _reassemble_smtp(self, conn, data):
        """Reassemble a SMTP flow.
        @param conn: connection dict.
        @param data: raw data.
        """
        if conn["dst"] in self.smtp_flow:
            self.smtp_flow[conn["dst"]] += data
        else:
            self.smtp_flow[conn["dst"]] = data

    def _process_smtp(self):
        """Process SMTP flow."""
        for conn, data in self.smtp_flow.iteritems():
            # Detect new SMTP flow.
            if data.startswith(("EHLO", "HELO")):
                self.smtp_requests.append({"dst": conn, 
                                           "raw": convert_to_printable(data)})

    def _check_irc(self, tcpdata):
        """
        Checks for IRC traffic.
        @param tcpdata: tcp data flow
        """
        try:
            req = ircMessage()
        except Exception:
            return False

        return req.isthereIRC(tcpdata)

    def _add_irc(self, conn, tcpdata):
        """
        Adds an IRC communication.
        @param conn: TCP connection info.
        @param tcpdata: TCP data in flow
        """

        try:
            reqc = ircMessage()
            reqs = ircMessage()
            filters_sc = ["266"]
            client = reqc.getClientMessages(tcpdata)
            for message in client:
                message.update(conn)
            server = reqs.getServerMessagesFilter(tcpdata, filters_sc)
            for message in server:
                message.update(conn)
            self.irc_requests = self.irc_requests + \
                                client + \
                                server
        except Exception:
            return False

        return True

    def run(self):
        """Process PCAP.
        @return: dict with network analysis data.
        """
        log = logging.getLogger("Processing.Pcap")

        if not IS_DPKT:
            log.error("Python DPKT is not installed, aborting PCAP analysis.")
            return self.results

        if not os.path.exists(self.filepath):
            log.warning("The PCAP file does not exist at path \"%s\".",
                        self.filepath)
            return self.results

        if os.path.getsize(self.filepath) == 0:
            log.error("The PCAP file at path \"%s\" is empty." % self.filepath)
            return self.results

        try:
            file = open(self.filepath, "rb")
        except (IOError, OSError):
            log.error("Unable to open %s" % self.filepath)
            return self.results

        try:
            pcap = dpkt.pcap.Reader(file)
        except dpkt.dpkt.NeedData:
            log.error("Unable to read PCAP file at path \"%s\".",
                      self.filepath)
            return self.results
        except ValueError:
            log.error("Unable to read PCAP file at path \"%s\". File is "
                      "corrupted or wrong format." % self.filepath)
            return self.results

        offset = file.tell()
        first_ts = None
        for ts, buf in pcap:
            if not first_ts:
                first_ts = ts

            try:
                ip = iplayer_from_raw(buf, pcap.datalink())

                connection = {}
                if isinstance(ip, dpkt.ip.IP):
                    connection["src"] = socket.inet_ntoa(ip.src)
                    connection["dst"] = socket.inet_ntoa(ip.dst)
                elif isinstance(ip, dpkt.ip6.IP6):
                    connection["src"] = socket.inet_ntop(socket.AF_INET6,
                                                         ip.src)
                    connection["dst"] = socket.inet_ntop(socket.AF_INET6,
                                                         ip.dst)
                else:
                    offset = file.tell()
                    continue

                self._add_hosts(connection)

                if ip.p == dpkt.ip.IP_PROTO_TCP:
                    tcp = ip.data
                    if not isinstance(tcp, dpkt.tcp.TCP):
                        tcp = dpkt.tcp.TCP(tcp)

                    if len(tcp.data) > 0:
                        connection["sport"] = tcp.sport
                        connection["dport"] = tcp.dport
                        self._tcp_dissect(connection, tcp.data)

                        # Record each flow once, regardless of direction.
                        src, sport, dst, dport = (
                            connection["src"], connection["sport"], connection["dst"], connection["dport"])
                        if not ((dst, dport, src, sport) in self.tcp_connections_seen or (
                                src, sport, dst, dport) in self.tcp_connections_seen):
                            self.tcp_connections.append((src, sport, dst, dport, offset, ts - first_ts))
                            self.tcp_connections_seen.add((src, sport, dst, dport))

                elif ip.p == dpkt.ip.IP_PROTO_UDP:
                    udp = ip.data
                    if not isinstance(udp, dpkt.udp.UDP):
                        udp = dpkt.udp.UDP(udp)

                    if len(udp.data) > 0:
                        connection["sport"] = udp.sport
                        connection["dport"] = udp.dport
                        self._udp_dissect(connection, udp.data)

                        src, sport, dst, dport = (
                            connection["src"], connection["sport"], connection["dst"], connection["dport"])
                        if not ((dst, dport, src, sport) in self.udp_connections_seen or (
                                src, sport, dst, dport) in self.udp_connections_seen):
                            self.udp_connections.append((src, sport, dst, dport, offset, ts - first_ts))
                            self.udp_connections_seen.add((src, sport, dst, dport))

                elif ip.p == dpkt.ip.IP_PROTO_ICMP:
                    icmp = ip.data
                    if not isinstance(icmp, dpkt.icmp.ICMP):
                        icmp = dpkt.icmp.ICMP(icmp)

                    self._icmp_dissect(connection, icmp)

                offset = file.tell()
            except AttributeError:
                continue
            except dpkt.dpkt.NeedData:
                continue
            except Exception as e:
                log.exception("Failed to process packet: %s", e)

        file.close()

        # Post processors for reconstructed flows.
        self._process_smtp()

        # Build results dict.
        self.results["hosts"] = self._enrich_hosts(self.unique_hosts)
        self.results["domains"] = self.unique_domains
        self.results["tcp"] = [conn_from_flowtuple(i) for i in self.tcp_connections]
        self.results["udp"] = [conn_from_flowtuple(i) for i in self.udp_connections]
        self.results["icmp"] = self.icmp_requests
        self.results["http"] = self.http_requests.values()
        self.results["dns"] = self.dns_requests.values()
        self.results["smtp"] = self.smtp_requests
        self.results["irc"] = self.irc_requests

        # Filter hosts/domains/flows through the external reputation check,
        # skipping gateway addresses.
        tmp_hosts = []
        for host in self.results["hosts"]:
            if host["ip"] not in NATIVE_GATEWAY:
                result = self._ip_domain_check(host["ip"], "ip")
                host["result"] = result
                if result.values()[0][0] in "1":
                    tmp_hosts.append(host)
                    continue
            if host["hostname"] not in NATIVE_GATEWAY:
                host["result"] = self._ip_domain_check(host["hostname"], "domain")
            if "result" in host:
                tmp_hosts.append(host)
        self.results["hosts"] = tmp_hosts

        tmp_domains = []
        for domain in self.results["domains"]:
            if domain["ip"] not in NATIVE_GATEWAY:
                result = self._ip_domain_check(domain["ip"], "ip")
                domain["result"] = result
                if result.values()[0][0] in "1":
                    tmp_domains.append(domain)
                    continue
            if domain["domain"] not in NATIVE_GATEWAY:
                domain["result"] = self._ip_domain_check(domain["domain"], "domain")
            if "result" in domain:
                # Fix: previously appended to tmp_hosts (copy-paste bug),
                # dropping every domain-checked entry from results["domains"].
                tmp_domains.append(domain)
        self.results["domains"] = tmp_domains

        tmp_tcp = []
        for tcp in self.results["tcp"]:
            if tcp["dst"] in NATIVE_GATEWAY:
                continue
            tcp["result"] = self._ip_domain_check(tcp["dst"], "ip")
            tmp_tcp.append(tcp)
        self.results["tcp"] = tmp_tcp

        tmp_udp = []
        for udp in self.results["udp"]:
            if udp["dst"] in NATIVE_GATEWAY:
                continue
            udp["result"] = self._ip_domain_check(udp["dst"], "ip")
            # Fix: previously appended to tmp_tcp (copy-paste bug), which
            # left results["udp"] always empty after filtering.
            tmp_udp.append(udp)
        self.results["udp"] = tmp_udp

        return self.results
コード例 #45
0
ファイル: settings.py プロジェクト: Draft2007/Scripts
    'django.contrib.staticfiles',
    'bootstrap3',
    'home',
    #'south',
    'taggit',
    'analysis',
    'bootstrap3',
    'alerts',
    #'debug_toolbar'
)


# File-backed sessions (no DB table required for session storage).
SESSION_ENGINE = 'django.contrib.sessions.backends.file'

if CUCKOO_FOUND:
    
    # Make the Cuckoo package importable before importing from it below;
    # CUCKOO_PATH is presumably set earlier in this settings module —
    # TODO confirm against the full file.
    sys.path.append(CUCKOO_PATH)
    
    from lib.cuckoo.common.constants import CUCKOO_ROOT
    from lib.cuckoo.common.config import Config
    
    # [mongodb] section of Cuckoo's reporting.conf.
    cfg = Config(cfg=os.path.join(CUCKOO_PATH,"conf", "reporting.conf")).mongodb
    
    # Checks if mongo reporting is enabled in Cuckoo.
    if not cfg.get("enabled"):
        raise Exception("Mongo reporting module is not enabled in cuckoo, aborting!")
    
    # Get connection options from reporting.conf.
    MONGO_HOST = cfg.get("host", "127.0.0.1")
    MONGO_PORT = cfg.get("port", 27017)
コード例 #46
0
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """

    def __init__(self, task_id, results):
        """@param task_id: ID of the analyses to process.
        @param results: shared results container to populate.
        """
        # Snapshot of the task row as a plain dictionary.
        self.task = Database().view_task(task_id).to_dict()
        self.task_id = task_id
        # On-disk folder holding every artifact produced by the analysis.
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task_id))
        # Parsed processing.conf; one section per processing module.
        self.cfg = Config("processing")
        self.results = results

    def process(self, module):
        """Run a processing module.
        @param module: processing module to run.
        @return: results generated by module.
        """
        # Initialize the specified processing module. A module that cannot
        # even be constructed is logged and skipped so it does not abort
        # the whole processing run.
        try:
            current = module(self.results)
        except:
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            # Keep only the last dotted component; it doubles as the
            # module's section name in processing.conf.
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            data = current.run()

            log.debug("Executed processing module \"%s\" on analysis at "
                      "\"%s\"", current.__class__.__name__, self.analysis_path)

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key: data}
        except CuckooDependencyError as e:
            log.warning("The processing module \"%s\" has missing dependencies: %s", current.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning("The processing module \"%s\" returned the following "
                        "error: %s", current.__class__.__name__, e)
        except:
            # Any other failure is logged with traceback; processing of the
            # remaining modules continues.
            log.exception("Failed to run the processing module \"%s\":",
                          current.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """
        # Record when processing started for the task statistics.
        Database().set_statistics_time(self.task_id, PROCESSING_STARTED)
        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                result = self.process(module)
                # If it provided some results, append it to the big results
                # container.
                if result:
                    self.results.update(result)
        else:
            log.info("No processing modules loaded")

        Database().set_statistics_time(self.task_id, PROCESSING_FINISHED)

        # Derive per-task statistics counters from the collected results.
        # Each counter is only recorded when the producing module actually
        # populated the corresponding results key.
        if "behavior" in self.results and "enhanced" in self.results["behavior"]:
            regwrite = 0
            filewrite = 0
            # Count registry and file write events from the enhanced
            # behavior log.
            for entry in self.results["behavior"]["enhanced"]:
                if entry["object"] == "registry" and entry["event"] == "write":
                    regwrite += 1
                if entry["object"] == "file" and entry["event"] == "write":
                    filewrite += 1
            Database().set_statistics_counter(self.task_id, FILES_WRITTEN, filewrite)
            Database().set_statistics_counter(self.task_id, REGISTRY_KEYS_MODIFIED, regwrite)

        if "behavior" in self.results and "summary" in self.results["behavior"] and "files" in self.results["behavior"]["summary"]:
            Database().set_statistics_counter(self.task_id, DROPPED_FILES, len(self.results["behavior"]["summary"]["files"]))

        if "behavior" in self.results and "processes" in self.results["behavior"]:
            Database().set_statistics_counter(self.task_id, RUNNING_PROCESSES, len(self.results["behavior"]["processes"]))
            # Total number of API calls across every monitored process.
            api_calls = 0
            for process in self.results["behavior"]["processes"]:
                for call in process["calls"]:
                    api_calls += 1
            Database().set_statistics_counter(self.task_id, API_CALLS, api_calls)

        if "network" in self.results and "domains" in self.results["network"]:
            Database().set_statistics_counter(self.task_id, ACCESSED_DOMAINS, len(self.results["network"]["domains"]))

        return self.results
コード例 #47
0
ファイル: plugins.py プロジェクト: kevross33/CAPEv2
class RunReporting:
    """Reporting Engine.

    This class handles the loading and execution of the enabled reporting
    modules. It receives the analysis results dictionary from the Processing
    Engine and passes it over to the reporting modules before executing them.
    """
    def __init__(self, task, results, reprocess=False):
        """@param task: task dictionary for the analysis being reported.
        @param results: results dictionary produced by the processing engine.
        @param reprocess: True when re-running reporting on an existing
            analysis (skips submitCAPE resubmission).
        """
        self.task = task
        # remove unwanted/duplicate information from reporting
        # NOTE(review): "calls" appears to be a lazy list-like object whose
        # begin_reporting() switches it into reporting mode -- confirm
        # against the behavior processing module.
        for process in results["behavior"]["processes"]:
            process["calls"].begin_reporting()

        self.results = results
        # On-disk folder holding every artifact produced by the analysis.
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                          str(task["id"]))
        # Parsed reporting.conf; one section per reporting module.
        self.cfg = Config("reporting")
        self.reprocess = reprocess

    def process(self, module):
        """Run a single reporting module.
        @param module: reporting module to instantiate and run against
            the analysis results.
        """
        # Initialize current reporting module. A module that cannot be
        # constructed is logged and skipped.
        try:
            current = module()
        except:
            log.exception(
                "Failed to load the reporting module \"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            # Keep only the last dotted component; it doubles as the
            # module's section name in reporting.conf.
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.info("Reporting module %s not found in configuration file",
                     module_name)
            return

        # If the reporting module is disabled in the config, skip it.
        if not options.enabled:
            return

        # Give it the path to the analysis results folder.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the relevant reporting.conf section.
        current.set_options(options)
        # Load the content of the analysis.conf file.
        current.cfg = Config(cfg=current.conf_path)

        try:
            log.debug("Executing reporting module \"%s\"",
                      current.__class__.__name__)
            # Time the module so per-module durations end up in the
            # "statistics" section of the results.
            pretime = datetime.now()

            if module_name == "submitCAPE" and self.reprocess:
                # When reprocessing, do not resubmit children; just record
                # the already-known child tasks.
                tasks = db.list_parents(self.task["id"])
                if tasks:
                    self.results["CAPE_children"] = tasks
                return
            else:
                current.run(self.results)
            posttime = datetime.now()
            timediff = posttime - pretime
            self.results["statistics"]["reporting"].append({
                "name":
                current.__class__.__name__,
                "time":
                float("%d.%03d" %
                      (timediff.seconds, timediff.microseconds / 1000)),
            })

        except CuckooDependencyError as e:
            log.warning(
                "The reporting module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except CuckooReportError as e:
            log.warning(
                "The reporting module \"%s\" returned the following error: %s",
                current.__class__.__name__, e)
        except:
            # Any other failure is logged with traceback; the remaining
            # reporting modules still run.
            log.exception("Failed to run the reporting module \"%s\":",
                          current.__class__.__name__)

    def run(self):
        """Generates all reports.
        @raise CuckooReportError: if a report module fails.
        """
        # In every reporting module you can specify a numeric value that
        # represents at which position that module should be executed among
        # all the available ones. It can be used in the case where a
        # module requires another one to be already executed beforehand.

        reporting_list = list_plugins(group="reporting")

        # Return if no reporting modules are loaded.
        if reporting_list:
            reporting_list.sort(key=lambda module: module.order)

            # Run every loaded reporting module.
            for module in reporting_list:
                self.process(module)
        else:
            log.info("No reporting modules loaded")
コード例 #48
0
ファイル: processor.py プロジェクト: 0day1day/cuckoo
class Processor:
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """

    def __init__(self, task_id):
        """@param task_id: ID of the analyses to process."""
        # Snapshot of the task row as a plain dictionary.
        self.task = Database().view_task(task_id).to_dict()
        # On-disk folder holding every artifact produced by the analysis.
        self.analysis_path = os.path.join(CUCKOO_ROOT,
                                          "storage",
                                          "analyses",
                                          str(task_id))
        # Parsed processing.conf; one section per processing module.
        self.cfg = Config(cfg=os.path.join(CUCKOO_ROOT,
                                           "conf",
                                           "processing.conf"))

    def _run_processing(self, module):
        """Run a processing module.
        @param module: processing module to run.
        @return: results generated by module, or None.
        """
        # Initialize the specified processing module.
        current = module()

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            # Keep only the last dotted component; it doubles as the
            # module's section name in processing.conf.
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in "
                      "configuration file", module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            data = current.run()

            log.debug("Executed processing module \"%s\" on analysis at \"%s\"",
                      current.__class__.__name__, self.analysis_path)

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key : data}
        except CuckooProcessingError as e:
            log.warning("The processing module \"%s\" returned the following "
                        "error: %s", current.__class__.__name__, e)
        except Exception as e:
            # Any other failure is logged with traceback; processing of the
            # remaining modules continues.
            log.exception("Failed to run the processing module \"%s\":",
                          current.__class__.__name__)

        return None

    def _run_signature(self, signature, results):
        """Run a signature.
        @param signature: signature to run.
        @param results: processing results to match the signature against.
        @return: matched signature information, or None.
        """
        # Initialize the current signature.
        current = signature(results)

        log.debug("Running signature \"%s\"", current.name)

        # If the signature is disabled, skip it.
        if not current.enabled:
            return None

        # Since signatures can hardcode some values or checks that might
        # become obsolete in future versions or that might already be obsolete,
        # I need to match its requirements with the running version of Cuckoo.
        version = CUCKOO_VERSION.split("-")[0]

        # If provided, check the minimum working Cuckoo version for this
        # signature.
        if current.minimum:
            try:
                # If the running Cuckoo is older than the required minimum
                # version, skip this signature.
                if StrictVersion(version) < StrictVersion(current.minimum.split("-")[0]):
                    log.debug("You are running an older incompatible version "
                              "of Cuckoo, the signature \"%s\" requires "
                              "minimum version %s", current.name, current.minimum)
                    return None
            except ValueError:
                # StrictVersion rejects non-standard version strings.
                log.debug("Wrong minor version number in signature %s", current.name)
                return None

        # If provided, check the maximum working Cuckoo version for this
        # signature.
        if current.maximum:
            try:
                # If the running Cuckoo is newer than the required maximum
                # version, skip this signature.
                if StrictVersion(version) > StrictVersion(current.maximum.split("-")[0]):
                    log.debug("You are running a newer incompatible version "
                              "of Cuckoo, the signature \"%s\" requires "
                              "maximum version %s", current.name, current.maximum)
                    return None
            except ValueError:
                log.debug("Wrong major version number in signature %s", current.name)
                return None

        try:
            # Run the signature and if it gets matched, extract key information
            # from it and append it to the results container.
            if current.run():
                matched = {"name" : current.name,
                           "description" : current.description,
                           "severity" : current.severity,
                           "references" : current.references,
                           "data" : current.data,
                           "alert" : current.alert,
                           "families": current.families}

                log.debug("Analysis at \"%s\" matched signature \"%s\"",
                          self.analysis_path, current.name)

                # Return information on the matched signature.
                return matched
        except Exception as e:
            # A crashing signature must not abort the whole run.
            log.exception("Failed to run signature \"%s\":", current.name)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """
        # This is the results container. It's what will be used by all the
        # reporting modules to make it consumable by humans and machines.
        # It will contain all the results generated by every processing
        # module available. Its structure can be observed through the JSON
        # dump in the analysis' reports folder.
        # We friendly call this "fat dict".
        results = {}

        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        modules_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if not modules_list:
            log.debug("No processing modules loaded")
            return results

        modules_list.sort(key=lambda module: module.order)

        # Run every loaded processing module.
        for module in modules_list:
            result = self._run_processing(module)
            # If it provided some results, append it to the big results
            # container.
            if result:
                results.update(result)

        # This will contain all the matched signatures.
        sigs = []

        # Run every loaded signature.
        for signature in list_plugins(group="signatures"):
            match = self._run_signature(signature, results)
            # If the signature is matched, add it to the list.
            if match:
                sigs.append(match)

        # Sort the matched signatures by their severity level.
        sigs.sort(key=lambda key: key["severity"])

        # Append the signatures to the fat dict.
        results["signatures"] = sigs

        # Return the fat dict.
        return results
コード例 #49
0
ファイル: memory.py プロジェクト: NickyCM/cuckoo
class VolatilityManager(object):
    """Handle several volatility results.

    Drives the Volatility plugins listed in PLUGINS against a memory dump,
    filters masked (whitelisted) processes out of the results and optionally
    deletes the dump afterwards, as configured in conf/memory.conf.
    """
    # Plugins to execute. A list entry restricts the plugin to OS profiles
    # whose lowercase name starts with one of the prefixes after the name.
    PLUGINS = [
        "pslist",
        "psxview",
        "callbacks",
        "idt",
        "ssdt",
        "gdt",
        "timers",
        "messagehooks",
        "getsids",
        "privs",
        "malfind",
        "apihooks",
        "dlllist",
        "handles",
        "ldrmodules",
        "mutantscan",
        "devicetree",
        "svcscan",
        "modscan",
        "yarascan",
        ["sockscan", "winxp"],
        ["netscan", "vista", "win7"],
    ]

    def __init__(self, memfile, osprofile=None):
        """@param memfile: path to the memory dump file.
        @param osprofile: optional Volatility OS profile; when omitted it is
            taken from memory.conf or auto-detected via imageinfo.
        """
        self.mask_pid = []
        self.taint_pid = set()
        self.memfile = memfile

        conf_path = os.path.join(CUCKOO_ROOT, "conf", "memory.conf")
        if not os.path.exists(conf_path):
            # BUGFIX: the original message hard-coded "volatility.conf" and
            # called str.format() on a string with no placeholder, so the
            # actual path never appeared in the log.
            log.error("Configuration file %s not found", conf_path)
            self.voptions = False
            return

        self.voptions = Config("memory")

        # Build the list of PIDs that should be masked out of the results.
        for pid in self.voptions.mask.pid_generic.split(","):
            pid = pid.strip()
            if pid:
                self.mask_pid.append(int(pid))

        self.no_filter = not self.voptions.mask.enabled
        # Profile precedence: memory.conf > caller-supplied > imageinfo.
        if self.voptions.basic.guest_profile:
            self.osprofile = self.voptions.basic.guest_profile
        else:
            self.osprofile = osprofile or self.get_osprofile()

    def get_osprofile(self):
        """Get the OS profile by running Volatility's imageinfo."""
        return VolatilityAPI(self.memfile).imageinfo()["data"][0]["osprofile"]

    def run(self):
        """Execute every enabled plugin and return the filtered results.
        @return: dict of per-plugin results, or None if options were
            never loaded.
        """
        results = {}

        # Exit if options were not loaded.
        if not self.voptions:
            return

        vol = VolatilityAPI(self.memfile, self.osprofile)

        for plugin_name in self.PLUGINS:
            if isinstance(plugin_name, list):
                # Profile-restricted plugin: [name, profile_prefix, ...].
                plugin_name, profiles = plugin_name[0], plugin_name[1:]
            else:
                profiles = []

            # Some plugins can only run in certain profiles; skip a
            # restricted plugin when no prefix matches the current profile.
            for profile in profiles:
                if self.osprofile.lower().startswith(profile):
                    break
            else:
                if profiles:
                    continue

            plugin = self.voptions.get(plugin_name)
            if not plugin or not plugin.enabled:
                log.debug("Skipping '%s' volatility module", plugin_name)
                continue

            if plugin_name in vol.plugins:
                log.debug("Executing volatility '%s' module.", plugin_name)
                results[plugin_name] = getattr(vol, plugin_name)()

        self.find_taint(results)
        self.cleanup()

        return self.mask_filter(results)

    def mask_filter(self, old):
        """Filter out masked stuff. Keep tainted stuff.
        @param old: raw per-plugin results.
        @return: results with masked, non-tainted processes removed.
        """
        new = {}

        for akey in old.keys():
            new[akey] = {"config": old[akey]["config"], "data": []}
            conf = getattr(self.voptions, akey, None)
            # BUGFIX: guard against a plugin without a configuration
            # section; previously conf.filter raised AttributeError when
            # getattr() fell back to None.
            filtered = conf.filter if conf else False
            new[akey]["config"]["filter"] = filtered
            for item in old[akey]["data"]:
                # TODO: need to improve this logic.
                if not filtered:
                    new[akey]["data"].append(item)
                elif "process_id" in item and \
                        item["process_id"] in self.mask_pid and \
                        item["process_id"] not in self.taint_pid:
                    # Masked and not tainted: drop it.
                    pass
                else:
                    new[akey]["data"].append(item)
        return new

    def find_taint(self, res):
        """Find tainted items.
        Any process flagged by malfind is considered tainted and is never
        masked out by mask_filter().
        """
        if "malfind" in res:
            for item in res["malfind"]["data"]:
                self.taint_pid.add(item["process_id"])

    def cleanup(self):
        """Delete the memory dump (if configured to do so)."""

        if self.voptions.basic.delete_memdump:
            try:
                os.remove(self.memfile)
            except OSError:
                log.error("Unable to delete memory dump file at path \"%s\" ", self.memfile)
コード例 #50
0
import json
from django.conf import settings

# Make Cuckoo's own packages importable from the web application.
sys.path.append(settings.CUCKOO_PATH)

from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.config import Config

# Parse each configuration file once and pick the relevant sections
# (the original code re-parsed reporting.conf and auxiliary.conf twice).
_reporting_conf = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "reporting.conf"))
_auxiliary_conf = Config(cfg=os.path.join(CUCKOO_ROOT, "conf", "auxiliary.conf"))

cfg = _reporting_conf.mongodb
moloch_cfg = _reporting_conf.moloch
aux_cfg = _auxiliary_conf
vtdl_cfg = _auxiliary_conf.virustotaldl

# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled"):
    raise Exception(
        "Mongo reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
settings.MONGO_HOST = cfg.get("host", "127.0.0.1")
# BUGFIX: this assignment was duplicated in the original code.
settings.MONGO_PORT = cfg.get("port", 27017)

settings.MOLOCH_BASE = moloch_cfg.get("base", None)
settings.MOLOCH_NODE = moloch_cfg.get("node", None)
settings.MOLOCH_ENABLED = moloch_cfg.get("enabled", False)

settings.GATEWAYS = aux_cfg.get("gateways")
settings.VTDL_ENABLED = vtdl_cfg.get("enabled", False)
settings.VTDL_KEY = vtdl_cfg.get("dlkey", None)
コード例 #51
0
ファイル: plugins.py プロジェクト: scottydo/cuckoo
class RunProcessing(object):
    """Analysis Results Processing Engine.

    Loads every processing module enabled in processing.conf, runs each one
    against the analysis identified by ``task_id`` and merges the per-module
    outputs into a single dictionary for the reporting engine.
    """

    def __init__(self, task_id):
        """@param task_id: ID of the analyses to process."""
        # Snapshot the task row as a plain dict and remember where the
        # analysis artifacts live on disk.
        self.task = Database().view_task(task_id).to_dict()
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task_id))
        # Parsed processing.conf; one section per processing module.
        self.cfg = Config("processing")

    def process(self, module):
        """Execute a single processing module.
        @param module: processing module class to instantiate and run.
        @return: dict mapping the module's key to its data, or None.
        """
        # A module that cannot even be constructed is logged and skipped
        # so it does not abort the whole processing run.
        try:
            instance = module()
        except:
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return

        # The configuration section name is the last dotted component of
        # the module's fully-qualified Python module path.
        section = inspect.getmodule(instance).__name__.rsplit(".", 1)[-1]

        try:
            options = self.cfg.get(section)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      section)
            return None

        # Honor the enabled/disabled switch from processing.conf.
        if not options.enabled:
            return None

        # Hand the module everything it needs: the results folder, the
        # task object and its own configuration section.
        instance.set_path(self.analysis_path)
        instance.set_task(self.task)
        instance.set_options(options)

        try:
            data = instance.run()
            log.debug("Executed processing module \"%s\" on analysis at "
                      "\"%s\"", instance.__class__.__name__, self.analysis_path)
            # Success: expose the generated data under the module's own key.
            return {instance.key: data}
        except CuckooDependencyError as e:
            log.warning("The processing module \"%s\" has missing dependencies: %s", instance.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning("The processing module \"%s\" returned the following "
                        "error: %s", instance.__class__.__name__, e)
        except:
            # Any other failure is logged with traceback; the remaining
            # modules still run.
            log.exception("Failed to run the processing module \"%s\":",
                          instance.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: the combined "fat dict" consumed by the reporting engine.
        """
        # Container merged by every module run; reporting modules turn it
        # into something consumable by humans and machines.
        results = {}

        modules = list_plugins(group="processing")
        if not modules:
            log.info("No processing modules loaded")
            return results

        # Honor the user-defined sequence numbers; when none are set the
        # plugin loader's alphabetical order is kept.
        modules.sort(key=lambda module: module.order)

        for module in modules:
            outcome = self.process(module)
            # Only merge modules that actually produced data.
            if outcome:
                results.update(outcome)

        return results
コード例 #52
0
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """
    def __init__(self, task, results):
        """@param task: task dictionary of the analysis to process.
        @param results: shared results container to populate.
        """
        self.task = task
        # On-disk folder holding every artifact produced by the analysis.
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                          str(task["id"]))
        # Parsed processing.conf; one section per processing module.
        self.cfg = Config("processing")
        self.results = results

    def process(self, module):
        """Run a processing module.
        @param module: processing module to run.
        @return: results generated by module.
        """
        # Initialize the specified processing module. A module that cannot
        # be constructed is logged and skipped.
        try:
            current = module(self.results)
        except:
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            # Keep only the last dotted component; it doubles as the
            # module's section name in processing.conf.
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container. Each module is
            # timed so per-module durations end up in results["statistics"].
            log.debug(
                "Executing processing module \"%s\" on analysis at "
                "\"%s\"", current.__class__.__name__, self.analysis_path)
            pretime = datetime.now()
            data = current.run()
            posttime = datetime.now()
            timediff = posttime - pretime
            self.results["statistics"]["processing"].append({
                "name":
                current.__class__.__name__,
                "time":
                float("%d.%03d" %
                      (timediff.seconds, timediff.microseconds / 1000)),
            })

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key: data}
        except CuckooDependencyError as e:
            log.warning(
                "The processing module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning(
                "The processing module \"%s\" returned the following "
                "error: %s", current.__class__.__name__, e)
        except:
            # Any other failure is logged with traceback; the remaining
            # modules still run.
            log.exception("Failed to run the processing module \"%s\":",
                          current.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """

        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                result = self.process(module)
                # If it provided some results, append it to the big results
                # container.
                if result:
                    self.results.update(result)
        else:
            log.info("No processing modules loaded")

        return self.results
コード例 #53
0
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary which
    is then passed over the reporting engine.
    """
    def __init__(self, task):
        """@param task: task database object of the analysis to process."""
        # CHANGED: We want to preserve changes in task
        self.task = task.to_dict()
        # Folder holding every artifact produced by this analysis run.
        self.analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                          str(task.id))
        # Per-module enable flags and options from processing.conf.
        self.cfg = Config(
            cfg=os.path.join(CUCKOO_ROOT, "conf", "processing.conf"))

    def process(self, module):
        """Run a processing module.
        @param module: processing module class to instantiate and run.
        @return: dict mapping the module's key to its generated data, or
            None when the module is disabled, unconfigured, or fails.
        """
        # Initialize the specified processing module.
        try:
            current = module()
        except Exception:
            # FIX: narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed here.
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return None

        # Extract the module name (last dotted component only), which is
        # also the module's section name in processing.conf.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except CuckooOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None

        # Give it path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            data = current.run()

            log.debug(
                "Executed processing module \"%s\" on analysis at "
                "\"%s\"", current.__class__.__name__, self.analysis_path)

            # If succeeded, return the module's key name and the data to be
            # appended to it.
            return {current.key: data}
        except CuckooDependencyError as e:
            log.warning(
                "The processing module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except CuckooProcessingError as e:
            log.warning(
                "The processing module \"%s\" returned the following "
                "error: %s", current.__class__.__name__, e)
        except Exception:
            # FIX: narrowed from a bare "except:".
            log.exception("Failed to run the processing module \"%s\":",
                          current.__class__.__name__)

        return None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results (the "fat dict").
        """
        # This is the results container. It's what will be used by all the
        # reporting modules to make it consumable by humans and machines.
        # It will contain all the results generated by every processing
        # module available. Its structure can be observed through the JSON
        # dump in the analysis' reports folder. (If jsondump is enabled.)
        # We friendly call this "fat dict".
        results = {}

        # Order modules using the user-defined sequence number.
        # If none is specified for the modules, they are selected in
        # alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                result = self.process(module)
                # If it provided some results, append it to the big results
                # container.
                if result:
                    results.update(result)
        else:
            log.info("No processing modules loaded")

        # Return the fat dict.
        return results
コード例 #54
0
# MongoDB reporting connection settings. NOTE(review): `cfg` is defined
# outside this chunk -- presumably Config("reporting"); verify upstream.
MONGO_PORT = cfg.mongodb.get("port", 27017)
MONGO_DB = cfg.mongodb.get("db", "cuckoo")
MONGO_USER = cfg.mongodb.get("username", None)
MONGO_PASS = cfg.mongodb.get("password", None)

# Elasticsearch reporting connection settings.
ELASTIC_HOST = cfg.elasticsearchdb.get("host", "127.0.0.1")
ELASTIC_PORT = cfg.elasticsearchdb.get("port", 9200)
ELASTIC_INDEX = cfg.elasticsearchdb.get("index", "cuckoo")

# Section handles for the remaining option groups. NOTE(review): `aux_cfg`
# is defined outside this chunk -- presumably Config("auxiliary"); confirm.
moloch_cfg = Config("reporting").moloch
vtdl_cfg = aux_cfg.virustotaldl
zip_cfg = aux_cfg.zipped_download

# Download-and-execute submissions enabled?
DLNEXEC = aux_cfg.dlnexec.get("enabled", False)
# Password assumed for submitted zipped samples.
ZIP_PWD = zip_cfg.get("zip_pwd", "infected")
# Moloch (network traffic indexing) integration.
MOLOCH_BASE = moloch_cfg.get("base", None)
MOLOCH_NODE = moloch_cfg.get("node", None)
MOLOCH_ENABLED = moloch_cfg.get("enabled", False)

# VirusTotal sample-download (fetch by hash) settings.
VTDL_ENABLED = vtdl_cfg.get("enabled", False)
VTDL_PRIV_KEY = vtdl_cfg.get("dlprivkey", None)
VTDL_INTEL_KEY = vtdl_cfg.get("dlintelkey", None)
VTDL_PATH = vtdl_cfg.get("dlpath", None)

# Scratch directory for uploaded/downloaded files.
TEMP_PATH = Config().cuckoo.get("tmppath", "/tmp")

# Enable/disable the zer0m0n tickbox on the submission page.
OPT_ZER0M0N = False

# To disable comment support, change the below to False.
COMMENTS = True
コード例 #55
0
ファイル: __init__.py プロジェクト: amohanta/elastic-cuckoo
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

import sys
import os
from django.conf import settings

# Make the Cuckoo codebase importable from this Django app.
sys.path.append(settings.CUCKOO_PATH)

from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.config import Config

# [elastic] section of reporting.conf.
cfg = Config("reporting").elastic

# Checks if Elasticsearch reporting is enabled in Cuckoo; this web module
# cannot function without it, so fail fast at import time.
if not cfg.get("enabled"):
    raise Exception("Elastic reporting module is not enabled in cuckoo, aborting!")

# Get connection options from reporting.conf.
settings.ELASTIC_HOST = cfg.get("host", "127.0.0.1")
settings.ELASTIC_PORT = cfg.get("port", 9200)
コード例 #56
0
ファイル: reporter.py プロジェクト: Fuitad/cuckoo-1
class Reporter:
    """Report generator."""

    def __init__(self, analysis_path, custom=""):
        """Initialize the report generator and load enabled plugins.
        @param analysis_path: analysis folder path.
        @param custom: custom options.
        """
        self.analysis_path = analysis_path
        self.custom = custom
        conf_path = os.path.join(CUCKOO_ROOT, "conf", "reporting.conf")
        self.cfg = Config(cfg=conf_path)
        self.__populate(plugins)

    def __populate(self, modules):
        """Load modules.
        @param modules: modules.
        """
        prefix = modules.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(modules.__path__):
            if ispkg:
                continue

            try:
                section = getattr(self.cfg, name)
            except AttributeError:
                continue

            if not section.enabled:
                continue

            path = "%s.%s" % (plugins.__name__, name)

            try:
                __import__(path, globals(), locals(), ["dummy"], -1)
            except CuckooDependencyError as e:
                log.warning("Unable to import reporting module \"%s\": %s" % (name, e.message))

    def run(self, data):
        """Generates all reports.
        @param data: analysis results.
        Failures of individual reporting modules (CuckooReportError) are
        logged and do not abort the remaining modules.
        """
        # The base-class instance is created and immediately discarded;
        # presumably a warm-up side effect -- TODO confirm it is needed.
        Report()

        # Every imported plugin registered itself as a Report subclass.
        for plugin in Report.__subclasses__():
            current = plugin()
            current.set_path(self.analysis_path)
            current.cfg = Config(current.conf_path)
            # Resolve the plugin's reporting.conf section from the last
            # dotted component of its module name.
            module = inspect.getmodule(current)
            module_name = module.__name__.rsplit(".", 1)[1]
            current.set_options(self.cfg.get(module_name))

            try:
                # Run report, for each report a brand new copy of results is
                # created, to prevent a reporting module to edit global 
                # result set and affect other reporting modules.
                current.run(copy.deepcopy(data))
                log.debug("Executed reporting module \"%s\"" % current.__class__.__name__)
            except NotImplementedError:
                # Plugin chose not to implement run(); skip it silently.
                continue
            except CuckooReportError as e:
                # NOTE: e.message is a Python 2 idiom; this file predates
                # Python 3.
                log.warning("Failed to execute reporting module \"%s\": %s" % (current.__class__.__name__, e.message))