Code example #1
    def log_call(self, context, apiname, modulename, arguments):
        if not self.rawlogfd:
            raise CuckooOperationalError(
                "Netlog failure, call before process.")

        apiindex, status, returnval, tid, timediff = context

        #log.debug("log_call> tid:{0} apiname:{1}".format(tid, apiname))

        current_time = self.connect_time + datetime.timedelta(
            0, 0, timediff * 1000)
        timestring = logtime(current_time)

        argumentstrings = [
            "{0}->{1}".format(argname, r) for argname, r in arguments
        ]

        if self.logfd:
            print >> self.logfd, ",".join("\"{0}\"".format(i) for i in [
                timestring,
                self.pid,
                self.procname,
                tid,
                self.ppid,
                modulename,
                apiname,
                status,
                returnval,
            ] + argumentstrings)
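
This example (and the identically structured #9 below) is the resultserver-side variant of log_call: it builds a quoted, comma-separated row per API call and writes it with Python 2's print >> fd statement. A minimal Python 3 sketch of that row-writing step, using the standard csv module; the function name and its parameters are illustrative, not part of the original class:

import csv
import sys

def write_call_row(logfd, timestring, pid, procname, tid, ppid,
                   modulename, apiname, status, returnval, argumentstrings):
    # csv.writer handles the quoting that the example does by hand with
    # "\"{0}\"".format(i) and replaces the Python 2 print >> logfd statement.
    writer = csv.writer(logfd, quoting=csv.QUOTE_ALL)
    writer.writerow([timestring, pid, procname, tid, ppid,
                     modulename, apiname, status, returnval] + argumentstrings)

# Example: write one row to stdout (all values are made up for illustration).
write_call_row(sys.stdout, "2024-01-01 00:00:00,000", 1234, "sample.exe", 5678,
               1000, "kernel32", "CreateFileW", 1, 0,
               ["lpFileName->C:\\test.txt"])
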
Code example #2
File: behavior.py Project: threathive/CAPEv2
    def run(self):
        """Run analysis.
        @return: processes information list.
        """
        results = []

        if not os.path.exists(self._logs_path):
            log.warning('Analysis results folder does not exist at path "%s".',
                        self._logs_path)
            return results

        # TODO: this should check the current analysis configuration and raise a warning
        # if injection is enabled and there is no logs folder.
        if len(os.listdir(self._logs_path)) == 0:
            log.info(
                "Analysis results folder does not contain any file or injection was disabled."
            )
            return results

        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            # Check if Loop Detection is enabled globally or locally (as an option)
            if cfg_process.loop_detection.enabled or self.options.get(
                    "loop_detection"):
                self.compress_log_file(file_path)

            if os.path.isdir(file_path):
                continue

            # Skipping the current log file if it's too big.
            if os.stat(file_path).st_size > cfg.processing.analysis_size_limit:
                log.warning(
                    "Behavioral log {0} too big to be processed, skipped.".format(file_name))
                continue

            # Invoke parsing of current log file (if ram_boost is enabled, otherwise parsing is done on-demand)
            current_log = ParseProcessLog(file_path)
            if current_log.process_id is None:
                continue

            # If the current log actually contains any data, add its data to
            # the results list.
            results.append({
                "process_id": current_log.process_id,
                "process_name": bytes2str(current_log.process_name),
                "parent_id": current_log.parent_id,
                "module_path": bytes2str(current_log.module_path),
                "first_seen": logtime(current_log.first_seen),
                "calls": current_log.calls,
                "threads": current_log.threads,
                "environ": current_log.environdict,
            })

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
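
For reference, run() returns one dict per monitored process and then sorts the list on "first_seen" so parents come before the processes they spawn. A minimal sketch of that result shape and of the final sort, with two illustrative entries; the timestamp strings stand in for whatever logtime() produces:

results = [
    {"process_id": 2044, "process_name": "child.exe", "parent_id": 1337,
     "module_path": "C:\\child.exe", "first_seen": "2024-01-01 12:00:05,000",
     "calls": [], "threads": [], "environ": {}},
    {"process_id": 1337, "process_name": "sample.exe", "parent_id": 4,
     "module_path": "C:\\sample.exe", "first_seen": "2024-01-01 12:00:00,000",
     "calls": [], "threads": [], "environ": {}},
]

# Sorting on "first_seen" puts the parent ahead of the process it spawned.
results.sort(key=lambda process: process["first_seen"])
assert [p["process_id"] for p in results] == [1337, 2044]
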
Code example #3
    def run(self):
        """Run analysis.
        @return: processes information list.
        """
        results = []

        if not os.path.exists(self._logs_path):
            log.warning(
                "Analysis results folder does not exist at path \"%s\".",
                self._logs_path)
            return results

        # TODO: this should check the current analysis configuration and raise a warning
        # if injection is enabled and there is no logs folder.
        if len(os.listdir(self._logs_path)) == 0:
            log.info(
                "Analysis results folder does not contain any file or injection was disabled."
            )
            return results

        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            if os.path.isdir(file_path):
                continue

            # Skipping the current log file if it's too big.
            if os.stat(file_path).st_size > self.cfg.processing.analysis_size_limit:
                log.warning(
                    "Behavioral log {0} too big to be processed, skipped.".format(file_name))
                continue

            # Invoke parsing of current log file.
            current_log = ParseProcessLog(file_path)
            if current_log.process_id is None:
                continue

            # If the current log actually contains any data, add its data to
            # the results list.
            results.append({
                "process_id": current_log.process_id,
                "process_name": current_log.process_name,
                "parent_id": current_log.parent_id,
                "module_path": current_log.module_path,
                "first_seen": logtime(current_log.first_seen),
                "calls": current_log.calls,
                "threads": current_log.threads
            })

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
Code example #4
File: behavior.py Project: HazemAlbezreh/cuckoo
    def log_call(self, context, apiname, category, arguments):
        apiindex, status, returnval, tid, timediff = context

        current_time = self.first_seen + datetime.timedelta(
            0, 0, timediff * 1000)
        timestring = logtime(current_time)

        self.lastcall = self._parse(
            [timestring, tid, category, apiname, status, returnval] +
            arguments)
Code example #5
    def log_call(self, context, apiname, category, arguments):
        apiindex, status, returnval, tid, timediff = context

        current_time = self.first_seen + datetime.timedelta(0, 0, timediff*1000)
        timestring = logtime(current_time)

        self.lastcall = self._parse([timestring,
                                     tid,
                                     category,
                                     apiname, 
                                     status,
                                     returnval] + arguments)
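
All of the log_call variants convert the relative timediff supplied by the monitor into an absolute timestamp with datetime.timedelta(0, 0, timediff * 1000). The positional arguments of timedelta are (days, seconds, microseconds), so the multiplication assumes timediff is given in milliseconds. A minimal sketch of that arithmetic, with illustrative sample values:

import datetime

first_seen = datetime.datetime(2024, 1, 1, 12, 0, 0)
timediff = 1500  # assumed to be an offset in milliseconds

# (days, seconds, microseconds): milliseconds * 1000 = microseconds.
current_time = first_seen + datetime.timedelta(0, 0, timediff * 1000)
assert current_time == datetime.datetime(2024, 1, 1, 12, 0, 1, 500000)

# Equivalent, more explicit spelling:
assert current_time == first_seen + datetime.timedelta(milliseconds=timediff)
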
Code example #6
File: behavior.py Project: blaquee/cuckoo-modified
    def run(self):
        """Run analysis.
        @return: processes information list.
        """
        results = []

        if not os.path.exists(self._logs_path):
            log.warning('Analysis results folder does not exist at path "%s".', self._logs_path)
            return results

        # TODO: this should check the current analysis configuration and raise a warning
        # if injection is enabled and there is no logs folder.
        if len(os.listdir(self._logs_path)) == 0:
            log.info("Analysis results folder does not contain any file or injection was disabled.")
            return results

        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            if os.path.isdir(file_path):
                continue

            # Skipping the current log file if it's too big.
            if os.stat(file_path).st_size > self.cfg.processing.analysis_size_limit:
                log.warning("Behavioral log {0} too big to be processed, skipped.".format(file_name))
                continue

            # Invoke parsing of current log file.
            current_log = ParseProcessLog(file_path)
            if current_log.process_id is None:
                continue

            # If the current log actually contains any data, add its data to
            # the results list.
            results.append(
                {
                    "process_id": current_log.process_id,
                    "process_name": current_log.process_name,
                    "parent_id": current_log.parent_id,
                    "module_path": current_log.module_path,
                    "first_seen": logtime(current_log.first_seen),
                    "calls": current_log.calls,
                    "threads": current_log.threads,
                    "environ": current_log.environdict,
                }
            )

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
Code example #7
File: behavior.py Project: kevoreilly/CAPEv2
    def log_call(self, context, apiname, category, arguments):
        """log an api call from data file
        @param context: containing additional api info
        @param apiname: name of the api
        @param category: win32 function category
        @param arguments: arguments to the api call
        """
        apiindex, repeated, status, returnval, tid, timediff, caller, parentcaller = context

        current_time = self.first_seen + datetime.timedelta(0, 0, timediff * 1000)
        timestring = logtime(current_time)

        self.lastcall = self._parse(
            [timestring, tid, caller, parentcaller, category, apiname, repeated, status, returnval] + arguments
        )
Code example #8
File: behavior.py Project: blaquee/cuckoo-modified
    def log_call(self, context, apiname, category, arguments):
        """ log an api call from data file
        @param context: containing additional api info
        @param apiname: name of the api
        @param category: win32 function category
        @param arguments: arguments to the api call
        """
        apiindex, repeated, status, returnval, tid, timediff, caller, parentcaller = context

        current_time = self.first_seen + datetime.timedelta(0, 0, timediff * 1000)
        timestring = logtime(current_time)

        self.lastcall = self._parse(
            [timestring, tid, caller, parentcaller, category, apiname, repeated, status, returnval] + arguments
        )
Code example #9
File: resultserver.py Project: 0day1day/cuckoo
    def log_call(self, context, apiname, modulename, arguments):
        if not self.rawlogfd:
            raise CuckooOperationalError("Netlog failure, call before process.")

        apiindex, status, returnval, tid, timediff = context

        #log.debug("log_call> tid:{0} apiname:{1}".format(tid, apiname))

        current_time = self.connect_time + datetime.timedelta(0,0, timediff*1000)
        timestring = logtime(current_time)

        argumentstrings = ["{0}->{1}".format(argname, r) for argname, r in arguments]

        if self.logfd:
            print >>self.logfd, ",".join("\"{0}\"".format(i) for i in [timestring, self.pid,
                self.procname, tid, self.ppid, modulename, apiname, status, returnval,
                ] + argumentstrings)
Code example #10
File: behavior.py Project: 1000rub/cuckoo
    def run(self):
        """Run analysis.
        @return: processes information list.
        """
        results = []

        if not os.path.exists(self._logs_path):
            log.error("Analysis results folder does not exist at path \"%s\".",
                      self._logs_path)
            return results

        if len(os.listdir(self._logs_path)) == 0:
            log.error("Analysis results folder does not contain any file.")
            return results

        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            if os.path.isdir(file_path):
                continue

            # Skipping the current log file if it's too big.
            if os.stat(file_path).st_size > self.cfg.processing.analysis_size_limit:
                log.warning("Behavioral log {0} too big to be processed, skipped.".format(file_name))
                continue

            # Invoke parsing of current log file.
            current_log = ParseProcessLog(file_path)
            if current_log.process_id is None:
                continue

            # If the current log actually contains any data, add its data to
            # the results list.
            results.append({
                "process_id": current_log.process_id,
                "process_name": current_log.process_name,
                "parent_id": current_log.parent_id,
                "first_seen": logtime(current_log.first_seen),
                "calls": current_log.calls,
            })

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
Code example #11
File: behavior.py Project: xarly/cuckoo
    def run(self):
        """Run analysis.
        @return: processes information list.
        """
        results = []

        if not os.path.exists(self._logs_path):
            log.error("Analysis results folder does not exist at path \"%s\".",
                      self._logs_path)
            return results

        if len(os.listdir(self._logs_path)) == 0:
            log.error("Analysis results folder does not contain any file.")
            return results

        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            if os.path.isdir(file_path):
                continue

            if not file_path.endswith(".raw"):
                continue

            # Invoke parsing of current log file.
            current_log = ParseProcessLog(file_path)
            if current_log.process_id is None:
                continue

            # If the current log actually contains any data, add its data to
            # the global results list.
            results.append({
                "process_id": current_log.process_id,
                "process_name": current_log.process_name,
                "parent_id": current_log.parent_id,
                "first_seen": logtime(current_log.first_seen),
                "calls": current_log
            })

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
Code example #12
File: behavior.py Project: 0day1day/cuckoo
    def run(self):
        """Run analysis.
        @return: processes information list.
        """
        results = []

        if not os.path.exists(self._logs_path):
            log.error("Analysis results folder does not exist at path \"%s\".",
                      self._logs_path)
            return results

        if len(os.listdir(self._logs_path)) == 0:
            log.error("Analysis results folder does not contain any file.")
            return results

        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            if os.path.isdir(file_path):
                continue
            
            if not file_path.endswith(".raw"):
                continue

            # Invoke parsing of current log file.
            current_log = ParseProcessLog(file_path)
            if current_log.process_id is None:
                continue

            # If the current log actually contains any data, add its data to
            # the global results list.
            results.append({
                "process_id": current_log.process_id,
                "process_name": current_log.process_name,
                "parent_id": current_log.parent_id,
                "first_seen": logtime(current_log.first_seen),
                "calls": current_log
            })

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
Code example #13
File: behavior.py Project: lehuff/cuckoo-modified
    def run(self):
        """Run analysis.
        @return: processes information list.
        """
        results = []

        if not os.path.exists(self._logs_path):
            log.warning("Analysis results folder does not exist at path \"%s\".", self._logs_path)
            return results

        # TODO: this should check the current analysis configuration and raise a warning
        # if injection is enabled and there is no logs folder.
        if len(os.listdir(self._logs_path)) == 0:
            log.info("Analysis results folder does not contain any file or injection was disabled.")
            return results

        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            if os.path.isdir(file_path):
                continue

            # Skipping the current log file if it's too big.
            if os.stat(file_path).st_size > self.cfg.processing.analysis_size_limit:
                log.warning("Behavioral log {0} too big to be processed, skipped.".format(file_name))
                continue

            # Invoke parsing of current log file.
            current_log = ParseProcessLog(file_path)
            if current_log.process_id is None:
                continue

            # If we have a spammy API, theres a chance that process did it multiple times
            # so we'll sum the total counts here as we now have all of the logs parsed for
            # the specific process
            if current_log.spam_apis:
                api_counts = dict()
                for apiInfo in current_log.spam_apis:
                    if apiInfo["api"] not in api_counts.keys():
                        api_counts[apiInfo["api"]] = dict()
                        api_counts[apiInfo["api"]]["count"] = apiInfo["count"]
                        api_counts[apiInfo["api"]]["name"] = apiInfo["name"]
                        api_counts[apiInfo["api"]]["pid"] = apiInfo["pid"]
                    else:
                        api_counts[apiInfo["api"]]["count"] += apiInfo["count"]

                new_spam_apis = list()
                for current_api in api_counts.keys():
                    tmp = {
                        "api": current_api,
                        "name": api_counts[current_api]["name"],
                        "pid": api_counts[current_api]["pid"],
                        "count": api_counts[current_api]["count"]
                    }
                    new_spam_apis.append(tmp)

                current_log.spam_apis = new_spam_apis

            # If the current log actually contains any data, add its data to
            # the results list.
            results.append({
                "process_id": current_log.process_id,
                "process_name": current_log.process_name,
                "parent_id": current_log.parent_id,
                "module_path": current_log.module_path,
                "first_seen": logtime(current_log.first_seen),
                "calls": current_log.calls,
                "threads" : current_log.threads,
                "environ" : current_log.environdict,
                "spam_apis": current_log.spam_apis,
            })

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
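
The spam-API block in Code example #13 sums per-API call counts across all of the log entries recorded for a process. A more compact sketch of the same aggregation using dict.setdefault; the input records below are illustrative only:

spam_apis = [
    {"api": "NtDelayExecution", "name": "sample.exe", "pid": 1337, "count": 900},
    {"api": "NtDelayExecution", "name": "sample.exe", "pid": 1337, "count": 250},
    {"api": "GetTickCount", "name": "sample.exe", "pid": 1337, "count": 120},
]

api_counts = {}
for info in spam_apis:
    # Create one accumulator per API name, then add this record's count to it.
    entry = api_counts.setdefault(info["api"], {"api": info["api"],
                                                "name": info["name"],
                                                "pid": info["pid"],
                                                "count": 0})
    entry["count"] += info["count"]

new_spam_apis = list(api_counts.values())
assert {e["api"]: e["count"] for e in new_spam_apis} == {
    "NtDelayExecution": 1150, "GetTickCount": 120}
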