Example #1
    def create_file(self, file_path):
        file_helper = File(file_path)

        if not file_helper.valid():
            log.error(
                "Cannot create target for file '%s', file does not exist or is"
                " 0 bytes", file_path)
            return None

        return self._create(file_path,
                            file_helper,
                            category="file",
                            file_type=file_helper.get_type(),
                            file_size=file_helper.get_size())
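For reference, the File helper's valid() check used throughout these examples essentially tests that the path points to an existing, non-empty file. A minimal stand-in sketch (the method body below is an assumption, not the project's actual implementation):

    import os

    class File(object):
        # Minimal stand-in for the File helper used in these snippets.
        def __init__(self, file_path):
            self.file_path = file_path

        def valid(self):
            # Usable only if the path names a regular file with data in it,
            # matching the "does not exist or is 0 bytes" log message above.
            return (
                self.file_path is not None and
                os.path.isfile(self.file_path) and
                os.path.getsize(self.file_path) > 0
            )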
Example #2
File: api.py Project: leixyou/cuckoo
    def store_screenshots(request, task_id, body):
        if not body or not isinstance(body, list):
            return json_error_response("screenshots missing")

        report = ControlApi.get_report(int(task_id))

        if not report:
            return json_error_response("report missing")

        for scr in body:
            sid = scr.get("id", None)
            data = scr.get("data", None)

            try:
                if sid is None or not data:
                    raise ValueError

                # Coerce inside the try so that a non-numeric id is
                # rejected as "invalid format" instead of raising later.
                sid = int(sid)

                ftype, b64 = data.split(",")
                if ftype != "data:image/png;base64":
                    raise ValueError

                f = base64.b64decode(b64)
                if f[:4] != b"\x89PNG":  # PNG magic bytes; compare as bytes
                    raise ValueError
            except ValueError:
                return json_error_response("invalid format")

            shot_path = cwd(
                "shots", "remotecontrol_%d.png" % int(sid),
                analysis=int(task_id)
            )
            with open(shot_path, "wb") as fh:
                fh.write(f)

            shot_blob = {}
            shot = File(shot_path)
            if shot.valid():
                shot_id = mdb.store_file(shot)
                shot_blob["original"] = shot_id

            if shot_blob:
                report["shots"].append(shot_blob)

        mdb.db.analysis.save(report)
        return JsonResponse({
            "status": "success",
        })
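The validation above (split the data URI, check the MIME prefix, base64-decode, then verify the PNG magic bytes) is easy to get subtly wrong, so here is the same logic isolated into a standalone function. parse_png_data_uri is a hypothetical helper name, not part of Cuckoo:

    import base64

    def parse_png_data_uri(data):
        # Return the decoded PNG bytes, or None if `data` is not a
        # well-formed base64 PNG data URI. (Hypothetical helper.)
        try:
            ftype, b64 = data.split(",", 1)
        except (AttributeError, ValueError):
            return None
        if ftype != "data:image/png;base64":
            return None
        try:
            raw = base64.b64decode(b64)
        except (TypeError, ValueError):
            return None
        # Every PNG file starts with the magic bytes \x89PNG.
        if raw[:4] != b"\x89PNG":
            return None
        return raw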
Example #4
    def run(self, results):
        """Writes report.
        @param results: analysis results dictionary.
        @raise CuckooReportError: if fails to connect or write to MongoDB.
        """
        # Work on a copy of the dictionary so that the original results
        # are not modified and later reporting modules are not affected.
        report = dict(results)
        if "network" not in report:
            report["network"] = {}

        # Materializing this list snapshots cuckoo.log as it is at this
        # point, which should be fine.
        if report.get("debug"):
            report["debug"]["cuckoo"] = list(report["debug"]["cuckoo"])

        # Store the path of the analysis directory.
        report["info"]["analysis_path"] = self.analysis_path

        # Store the sample in GridFS.
        if results.get("info", {}).get("category") == "file" and "target" in results:
            sample = File(self.file_path)
            if sample.valid():
                fname = results["target"]["file"]["name"]
                sample_id = self.store_file(sample, filename=fname)
                report["target"] = {"file_id": sample_id}
                report["target"].update(results["target"])

        # Store the PCAP file in GridFS and reference it back in the report.
        pcap_path = os.path.join(self.analysis_path, "dump.pcap")
        pcap = File(pcap_path)
        if pcap.valid():
            pcap_id = self.store_file(pcap)
            report["network"]["pcap_id"] = pcap_id

        sorted_pcap_path = os.path.join(self.analysis_path, "dump_sorted.pcap")
        spcap = File(sorted_pcap_path)
        if spcap.valid():
            spcap_id = self.store_file(spcap)
            report["network"]["sorted_pcap_id"] = spcap_id

        mitmproxy_path = os.path.join(self.analysis_path, "dump.mitm")
        mitmpr = File(mitmproxy_path)
        if mitmpr.valid():
            mitmpr_id = self.store_file(mitmpr)
            report["network"]["mitmproxy_id"] = mitmpr_id

        # Store the process memory dump file and extracted files in GridFS and
        # reference it back in the report.
        if "procmemory" in report and self.options.get("store_memdump", False):
            for idx, procmem in enumerate(report["procmemory"]):
                procmem_path = os.path.join(
                    self.analysis_path, "memory", "%s.dmp" % procmem["pid"]
                )
                procmem_file = File(procmem_path)
                if procmem_file.valid():
                    procmem_id = self.store_file(procmem_file)
                    procmem["procmem_id"] = procmem_id

                for extracted in procmem.get("extracted", []):
                    f = File(extracted["path"])
                    if f.valid():
                        extracted["extracted_id"] = self.store_file(f)

        # Walk through the dropped files, store them in GridFS and update the
        # report with the ObjectIds.
        new_dropped = []
        if "dropped" in report:
            for dropped in report["dropped"]:
                new_drop = dict(dropped)
                drop = File(dropped["path"])
                if drop.valid():
                    dropped_id = self.store_file(drop, filename=dropped["name"])
                    new_drop["object_id"] = dropped_id

                new_dropped.append(new_drop)

        report["dropped"] = new_dropped

        new_extracted = []
        if "extracted" in report:
            for extracted in report["extracted"]:
                new_extr = dict(extracted)
                extr = File(extracted[extracted["category"]])
                if extr.valid():
                    extr_id = self.store_file(extr)
                    new_extr["object_id"] = extr_id

                new_extracted.append(new_extr)

        report["extracted"] = new_extracted

        # Add screenshots.
        report["shots"] = []
        if os.path.exists(self.shots_path):
            # Walk through the files and select the JPGs.
            for shot_file in sorted(os.listdir(self.shots_path)):
                if not shot_file.endswith(".jpg") or "_" in shot_file:
                    continue

                shot_path = os.path.join(self.shots_path, shot_file)
                shot_path_dir = os.path.dirname(shot_path)
                shot_file_name, shot_file_ext = os.path.splitext(shot_file)
                shot_path_resized = os.path.join(shot_path_dir, "%s_small%s" % (shot_file_name, shot_file_ext))

                shot_blob = {}

                # If the screenshot path is a valid file, store it and
                # reference it back in the report.
                if os.path.isfile(shot_path):
                    shot = File(shot_path)
                    if shot.valid():
                        shot_id = self.store_file(shot)
                        shot_blob["original"] = shot_id

                # Try to get the alternative (small) size for this image,
                # store it and reference it back in the report.
                if os.path.isfile(shot_path_resized):
                    shot_small = File(shot_path_resized)
                    if shot_small.valid():
                        shot_id = self.store_file(shot_small)
                        shot_blob["small"] = shot_id

                if shot_blob:
                    report["shots"].append(shot_blob)

        paginate = self.options.get("paginate", 100)

        # Store chunks of API calls in a separate collection and reference
        # those chunks back in the report. This works around oversized
        # reports exceeding MongoDB's 16 MB document limit and also allows
        # paging of the reports.
        if "behavior" in report and "processes" in report["behavior"]:
            new_processes = []
            for process in report["behavior"]["processes"]:
                new_process = dict(process)

                chunk = []
                chunks_ids = []
                # Loop on each process call.
                for call in process["calls"]:
                    # Once the chunk reaches the page size, store it in
                    # MongoDB (leftovers are flushed after the loop).
                    if len(chunk) == paginate:
                        to_insert = {"pid": process["pid"], "calls": chunk}
                        chunk_id = self.db.calls.insert(to_insert)
                        chunks_ids.append(chunk_id)
                        # Reset the chunk.
                        chunk = []

                    # Append call to the chunk.
                    chunk.append(call)

                # Store leftovers.
                if chunk:
                    to_insert = {"pid": process["pid"], "calls": chunk}
                    chunk_id = self.db.calls.insert(to_insert)
                    chunks_ids.append(chunk_id)

                # Add list of chunks.
                new_process["calls"] = chunks_ids
                new_processes.append(new_process)

            # Store the results in the report.
            report["behavior"] = dict(report["behavior"])
            report["behavior"]["processes"] = new_processes

        if report.get("procmon"):
            procmon, chunk = [], []

            for entry in report["procmon"]:
                if len(chunk) == paginate:
                    procmon.append(self.db.procmon.insert(chunk))
                    chunk = []

                chunk.append(entry)

            if chunk:
                procmon.append(self.db.procmon.insert(chunk))

            report["procmon"] = procmon

        # Store the report and retrieve its object id.
        self.db.analysis.save(report)
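The chunking logic above splits a long list of API calls into pages of at most `paginate` items so that no single MongoDB document outgrows the 16 MB BSON limit. A minimal standalone sketch of the same pattern (chunked is my own name, not a Cuckoo helper):

    def chunked(items, size):
        # Yield consecutive pages of at most `size` items each.
        chunk = []
        for item in items:
            if len(chunk) == size:
                yield chunk
                chunk = []
            chunk.append(item)
        # Flush leftovers, mirroring the "Store leftovers" step above.
        if chunk:
            yield chunk

    # Usage mirroring the reporting code, one inserted document per page:
    # chunks_ids = [db.calls.insert({"pid": pid, "calls": page})
    #               for page in chunked(process["calls"], paginate)]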
Example #5
    def run(self, results):
        """Writes report.
        @param results: analysis results dictionary.
        @raise CuckooReportError: if fails to connect or write to MongoDB.
        """
        # Work on a copy of the dictionary so that the original results
        # are not modified and later reporting modules are not affected.
        report = dict(results)
        if "network" not in report:
            report["network"] = {}

        # Materializing this list snapshots cuckoo.log as it is at this
        # point, which should be fine.
        if report.get("debug"):
            report["debug"]["cuckoo"] = list(report["debug"]["cuckoo"])

        # Store the path of the analysis directory.
        report["info"]["analysis_path"] = self.analysis_path

        # Store the sample in GridFS.
        if results.get("info", {}).get("category") == "file" and "target" in results:
            sample = File(self.file_path)
            if sample.valid():
                fname = results["target"]["file"]["name"]
                sample_id = self.store_file(sample, filename=fname)
                report["target"] = {"file_id": sample_id}
                report["target"].update(results["target"])

        # Store the PCAP file in GridFS and reference it back in the report.
        pcap_path = os.path.join(self.analysis_path, "dump.pcap")
        pcap = File(pcap_path)
        if pcap.valid():
            pcap_id = self.store_file(pcap)
            report["network"]["pcap_id"] = pcap_id

        sorted_pcap_path = os.path.join(self.analysis_path, "dump_sorted.pcap")
        spcap = File(sorted_pcap_path)
        if spcap.valid():
            spcap_id = self.store_file(spcap)
            report["network"]["sorted_pcap_id"] = spcap_id

        mitmproxy_path = os.path.join(self.analysis_path, "dump.mitm")
        mitmpr = File(mitmproxy_path)
        if mitmpr.valid():
            mitmpr_id = self.store_file(mitmpr)
            report["network"]["mitmproxy_id"] = mitmpr_id

        # Store the process memory dump file and extracted files in GridFS and
        # reference it back in the report.
        if "procmemory" in report and self.options.get("store_memdump", False):
            for idx, procmem in enumerate(report["procmemory"]):
                procmem_path = os.path.join(
                    self.analysis_path, "memory", "%s.dmp" % procmem["pid"]
                )
                procmem_file = File(procmem_path)
                if procmem_file.valid():
                    procmem_id = self.store_file(procmem_file)
                    procmem["procmem_id"] = procmem_id

                for extracted in procmem.get("extracted", []):
                    f = File(extracted["path"])
                    if f.valid():
                        extracted["extracted_id"] = self.store_file(f)

        # Store the scripts that Floss generated in GridFS and reference
        # them back in the report.
        if "strings" in report:
            if "idapro_sct_name" in report["strings"]:
                idapro_sct_path = os.path.join(
                    self.analysis_path, "str_script", report["strings"]["idapro_sct_name"]
                )
                idapro_sct_file = File(idapro_sct_path)
                if idapro_sct_file.valid():
                    report["strings"]["idapro_sct_id"] = self.store_file(idapro_sct_file)

            if "radare_sct_name" in report["strings"]:
                radare_sct_path = os.path.join(
                    self.analysis_path, "str_script", report["strings"]["radare_sct_name"]
                )
                radare_sct_file = File(radare_sct_path)
                if radare_sct_file.valid():
                    report["strings"]["radare_sct_id"] = self.store_file(radare_sct_file)

            if "x64dbg_sct_name" in report["strings"]:
                x64dbg_sct_path = os.path.join(
                    self.analysis_path, "str_script", report["strings"]["x64dbg_sct_name"]
                )
                x64dbg_sct_file = File(x64dbg_sct_path)
                if x64dbg_sct_file.valid():
                    report["strings"]["x64dbg_sct_id"] = self.store_file(x64dbg_sct_file)

        # Walk through the dropped files, store them in GridFS and update the
        # report with the ObjectIds.
        new_dropped = []
        if "dropped" in report:
            for dropped in report["dropped"]:
                new_drop = dict(dropped)
                drop = File(dropped["path"])
                if drop.valid():
                    dropped_id = self.store_file(drop, filename=dropped["name"])
                    new_drop["object_id"] = dropped_id

                new_dropped.append(new_drop)

        report["dropped"] = new_dropped

        new_extracted = []
        if "extracted" in report:
            for extracted in report["extracted"]:
                new_extr = dict(extracted)
                extr = File(extracted["raw"])
                if extr.valid():
                    extr_id = self.store_file(extr)
                    new_extr["object_id"] = extr_id

                new_extracted.append(new_extr)

        report["extracted"] = new_extracted

        # Add screenshots.
        report["shots"] = []
        if os.path.exists(self.shots_path):
            # Walk through the files and select the JPGs.
            for shot_file in sorted(os.listdir(self.shots_path)):
                if not shot_file.endswith(".jpg") or "_" in shot_file:
                    continue

                shot_path = os.path.join(self.shots_path, shot_file)
                shot_path_dir = os.path.dirname(shot_path)
                shot_file_name, shot_file_ext = os.path.splitext(shot_file)
                shot_path_resized = os.path.join(shot_path_dir, "%s_small%s" % (shot_file_name, shot_file_ext))

                shot_blob = {}

                # If the screenshot path is a valid file, store it and
                # reference it back in the report.
                if os.path.isfile(shot_path):
                    shot = File(shot_path)
                    if shot.valid():
                        shot_id = self.store_file(shot)
                        shot_blob["original"] = shot_id

                # Try to get the alternative (small) size for this image,
                # store it and reference it back in the report.
                if os.path.isfile(shot_path_resized):
                    shot_small = File(shot_path_resized)
                    if shot_small.valid():
                        shot_id = self.store_file(shot_small)
                        shot_blob["small"] = shot_id

                if shot_blob:
                    report["shots"].append(shot_blob)

        paginate = self.options.get("paginate", 100)

        # Store chunks of API calls in a separate collection and reference
        # those chunks back in the report. This works around oversized
        # reports exceeding MongoDB's 16 MB document limit and also allows
        # paging of the reports.
        if "behavior" in report and "processes" in report["behavior"]:
            new_processes = []
            for process in report["behavior"]["processes"]:
                new_process = dict(process)

                chunk = []
                chunks_ids = []
                # Loop on each process call.
                for call in process["calls"]:
                    # Once the chunk reaches the page size, store it in
                    # MongoDB (leftovers are flushed after the loop).
                    if len(chunk) == paginate:
                        to_insert = {"pid": process["pid"], "calls": chunk}
                        chunk_id = self.db.calls.insert(to_insert)
                        chunks_ids.append(chunk_id)
                        # Reset the chunk.
                        chunk = []

                    # Append call to the chunk.
                    chunk.append(call)

                # Store leftovers.
                if chunk:
                    to_insert = {"pid": process["pid"], "calls": chunk}
                    chunk_id = self.db.calls.insert(to_insert)
                    chunks_ids.append(chunk_id)

                # Add list of chunks.
                new_process["calls"] = chunks_ids
                new_processes.append(new_process)

            # Store the results in the report.
            report["behavior"] = dict(report["behavior"])
            report["behavior"]["processes"] = new_processes

        if report.get("procmon"):
            procmon, chunk = [], []

            for entry in report["procmon"]:
                if len(chunk) == paginate:
                    procmon.append(self.db.procmon.insert(chunk))
                    chunk = []

                chunk.append(entry)

            if chunk:
                procmon.append(self.db.procmon.insert(chunk))

            report["procmon"] = procmon

        # Store the report and retrieve its object id.
        self.db.analysis.save(report)
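Every store_file() call in Examples #4 and #5 ultimately writes the bytes into MongoDB GridFS and returns an ObjectId that the report embeds. A hedged sketch of such a helper using plain pymongo/gridfs (this store_file is my own reimplementation under an assumed connection, not Cuckoo's actual method):

    import gridfs
    from pymongo import MongoClient

    db = MongoClient("mongodb://localhost:27017")["cuckoo"]  # assumed connection
    fs = gridfs.GridFS(db)

    def store_file(path, filename=None):
        # Store the file's bytes in GridFS and return the new ObjectId.
        with open(path, "rb") as fh:
            return fs.put(fh, filename=filename)

    # The returned ObjectId is what the report embeds, for example:
    # report["network"]["pcap_id"] = store_file(pcap_path)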