Example #1
    def handle(self):
        ip, port = self.client_address

        self.storagepath = self.server.build_storage_path(ip)
        if not self.storagepath:
            return

        task, _ = self.server.get_ctx_for_ip(ip)
        task_log_start(task.id)

        # Create all missing folders for this analysis.
        self.create_folders()

        try:
            # Initialize the protocol handler class for this connection.
            self.negotiate_protocol()

            for event in self.protocol:
                if isinstance(self.protocol, BsonParser) and event["type"] == "process":
                    self.open_process_log(event)
        except CuckooResultError as e:
            log.warning(
                "ResultServer connection stopping because of "
                "CuckooResultError: %s.", e
            )
        except (Disconnect, socket.error):
            pass
        except:
            log.exception("FIXME - exception in resultserver connection %s",
                          self.client_address)

        task_log_stop(task.id)
Example #2
def process_task(task):
    db = Database()

    try:
        task_log_start(task["id"])

        logger(
            "Starting task reporting",
            action="task.report", status="pending",
            target=task["target"], category=task["category"],
            package=task["package"], options=emit_options(task["options"]),
            custom=task["custom"]
        )

        if task["category"] == "file" and task.get("sample_id"):
            sample = db.view_sample(task["sample_id"])
            copy_path = cwd("storage", "binaries", sample.sha256)
        else:
            copy_path = None

        try:
            process(task["target"], copy_path, task)
            db.set_status(task["id"], TASK_REPORTED)
        except Exception as e:
            log.exception("Task #%d: error reporting: %s", task["id"], e)
            db.set_status(task["id"], TASK_FAILED_PROCESSING)

        log.info("Task #%d: reports generation completed", task["id"], extra={
            "action": "task.report", "status": "success",
        })
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
    finally:
        task_log_stop(task["id"])
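Example #2 above (like most of the examples that follow) brackets the per-task work between task_log_start() and task_log_stop(), with the stop call in a finally block so the per-task log handler is released even if reporting raises. Below is a minimal sketch of that shared pattern, assuming Cuckoo 2.x where both helpers live in cuckoo.core.log; run_with_task_logging and do_work are illustrative names, not part of any example on this page.

# Minimal sketch of the start/stop pattern shared by these examples.
# Assumes Cuckoo 2.x, where the helpers are provided by cuckoo.core.log;
# `do_work` is a hypothetical callback standing in for the handler or
# reporting code shown in the examples.
from cuckoo.core.log import task_log_start, task_log_stop

def run_with_task_logging(task_id, do_work):
    task_log_start(task_id)      # begin per-task log capture
    try:
        return do_work(task_id)  # any per-task processing/reporting
    finally:
        task_log_stop(task_id)   # always release the per-task log handler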
Example #3
    def handle(self):
        ip, port = self.client_address

        self.storagepath = self.server.build_storage_path(ip)
        if not self.storagepath:
            return

        task, _ = self.server.get_ctx_for_ip(ip)
        task_log_start(task.id)

        # Create all missing folders for this analysis.
        self.create_folders()

        try:
            # Initialize the protocol handler class for this connection.
            self.negotiate_protocol()

            for event in self.protocol:
                if isinstance(self.protocol, BsonParser) and event["type"] == "process":
                    self.open_process_log(event)
        except CuckooResultError as e:
            log.warning(
                "ResultServer connection stopping because of "
                "CuckooResultError: %s.", e
            )
        except (Disconnect, socket.error):
            pass
        except:
            log.exception("FIXME - exception in resultserver connection %s",
                          self.client_address)

        task_log_stop(task.id)
Example #4
def test_open_process_log_unicode(p):
    set_cwd(tempfile.mkdtemp())
    cuckoo_create()
    mkdir(cwd(analysis=1))
    mkdir(cwd("logs", analysis=1))

    request = server = mock.MagicMock()

    class Handler(ResultHandler):
        storagepath = cwd(analysis=1)

        def handle(self):
            pass

    init_logging(logging.DEBUG)

    try:
        task_log_start(1)
        Handler(request, (None, None), server).open_process_log({
            "pid": 1, "ppid": 2, "process_name": u"\u202e", "track": True,
        })
    finally:
        task_log_stop(1)
Example #5
    def handle(self, sock, addr):
        """Handle the incoming connection.
        Gevent will close the socket when the function returns."""
        ipaddr = addr[0]

        with self.task_mgmt_lock:
            task_id = self.tasks.get(ipaddr)
            if not task_id:
                log.warning("ResultServer did not have a task for IP %s",
                            ipaddr)
                return

        storagepath = cwd(analysis=task_id)
        ctx = HandlerContext(task_id, storagepath, sock)
        task_log_start(task_id)
        try:
            try:
                protocol = self.negotiate_protocol(task_id, ctx)
            except EOFError:
                return

            # Registering the context allows us to abort the handler by
            # shutting down its socket when the task is deleted; this should
            # prevent lingering sockets
            with self.task_mgmt_lock:
                # NOTE: the task may have been cancelled during the negotiation
                # protocol and a different task for that IP address may have
                # been registered
                if self.tasks.get(ipaddr) != task_id:
                    log.warning(
                        "Task #%s for IP %s was cancelled during "
                        "negotiation",
                        task_id,
                        ipaddr,
                    )
                    return
                s = self.handlers.setdefault(task_id, set())
                s.add(ctx)

            try:
                with protocol:
                    protocol.handle()
            except CuckooOperationalError as e:
                log.error(e)
            finally:
                with self.task_mgmt_lock:
                    s.discard(ctx)
                ctx.cancel()
                if ctx.buf:
                    # This is usually not a good sign
                    log.warning(
                        "Task #%s with protocol %s has unprocessed "
                        "data before getting disconnected",
                        task_id,
                        protocol,
                    )

        finally:
            task_log_stop(task_id)
Example #6
    def run(self):
        task_log_start(self.task.id)
        if not self.ev_client.start(maxtries=2):
            log.error(
                "Could not connect to Cuckoo event messaging client. Aborting")
            self.set_analysis_status(Analysis.FAILED)
            return

        # Tell the client to ask the event server to send all events of
        # type 'signature' and 'netflow'. These events will be sent by onemon.
        self.ev_client.subscribe(self.realtime_sig_cb, "signature")
        self.ev_client.subscribe(self.realtime_netflow_cb, "netflow")
        self.ev_client.subscribe(self.realtime_finished_cb, "finished")
        self.ev_client.subscribe(self.realtime_tlskeys_cb, "tlskeys")
        self.ev_client.subscribe(self.realtime_javascript_cb, "javascript")
        self.ev_client.subscribe(self.realtime_error_cb, "error")

        try:
            if self.start_run():
                self.set_analysis_status(Analysis.RUNNING)
                self.gm_wait_th.start()
                self.run_analysis()

        except Exception as e:
            log.error("Failure during analysis run of task #%s. %s. %s",
                      self.task.id, e, traceback.format_exc())
            try:
                self.ev_client.send_event(
                    "massurltaskfailure", {
                        "taskid": self.task.id,
                        "error": "%s" % traceback.format_exc(4),
                        "status": self.analysis.status
                    })
            except Exception as e:
                log.exception("Failed to send failure notification event")

        finally:
            try:
                self.stop_and_wait()
            except Exception as e:
                log.exception(
                    "Failure while stopping analysis run of task #%s: %s",
                    self.task.id, e)

        if self.completed or self.aborted:
            self.set_analysis_status(Analysis.STOPPED, wait=True)
        else:
            self.set_analysis_status(Analysis.FAILED, wait=True)
Example #7
    def test_dump_memory_unicode(self):
        p1 = mock.MagicMock()
        p1.communicate.return_value = "5.0.28r111378", ""
        p1.returncode = 0

        p2 = mock.MagicMock()
        p2.wait.return_value = None

        mkdir(cwd(analysis=1))
        task_log_start(1)
        init_logging(logging.DEBUG)

        with mock.patch("cuckoo.machinery.virtualbox.Popen") as p:
            p.side_effect = p1, p2
            self.m.dump_memory("label", u"mem\u202eory.dmp")
        task_log_stop(1)
Example #8
    def test_dump_memory_unicode(self):
        p1 = mock.MagicMock()
        p1.communicate.return_value = "5.0.28r111378", ""
        p1.returncode = 0

        p2 = mock.MagicMock()
        p2.wait.return_value = None

        mkdir(cwd(analysis=1))
        task_log_start(1)
        init_logging(logging.DEBUG)

        with mock.patch("cuckoo.machinery.virtualbox.Popen") as p:
            p.side_effect = p1, p2
            self.m.dump_memory("label", u"mem\u202eory.dmp")
        task_log_stop(1)
Example #9
def process_task(task):
    db = Database()
    if not task.dir_exists():
        log.error("Task #%s directory %s does not exist, cannot process it",
                  task.id, task.path)
        db.set_status(task.id, TASK_FAILED_PROCESSING)
        return

    task_log_start(task.id)

    if task.targets:
        target = task.targets[0]
    else:
        target = Target()

    logger("Starting task reporting",
           action="task.report",
           status="pending",
           target=target.target,
           category=target.category,
           package=task["package"],
           options=emit_options(task["options"]),
           custom=task["custom"])

    success = False
    try:
        success = task.process()
    except Exception as e:
        log.error("Failed to process task #%s. Error: %s", task.id, e)
    finally:
        if success:
            log.info("Task #%d: reports generation completed",
                     task.id,
                     extra={
                         "action": "task.report",
                         "status": "success",
                     })
            db.set_status(task.id, TASK_REPORTED)
        else:
            log.error("Failed to process task #%s",
                      task.id,
                      extra={
                          "action": "task.report",
                          "status": "failed",
                      })
            db.set_status(task.id, TASK_FAILED_PROCESSING)
        task_log_stop(task.id)
Example #10
def test_open_process_log_unicode(p):
    set_cwd(tempfile.mkdtemp())
    cuckoo_create()
    mkdir(cwd(analysis=1))
    mkdir(cwd("logs", analysis=1))

    request = server = mock.MagicMock()

    class Handler(ResultHandler):
        storagepath = cwd(analysis=1)

        def handle(self):
            pass

    init_logging(logging.DEBUG)

    try:
        task_log_start(1)
        Handler(request, (None, None), server).open_process_log({
            "pid": 1, "ppid": 2, "process_name": u"\u202e", "track": True,
        })
    finally:
        task_log_stop(1)
Example #11
    def init(self):
        """Initialize the analysis."""
        self.storage = cwd(analysis=self.task.id)

        # If the analysis storage folder already exists, we need to abort the
        # analysis or previous results will be overwritten and lost.
        if os.path.exists(self.storage):
            log.error(
                "Analysis results folder already exists at path \"%s\", "
                "analysis aborted", self.storage)
            return False

        # If we're not able to create the analysis storage folder, we have to
        # abort the analysis.
        # Also create all directories that the ResultServer can use for file
        # uploads.
        try:
            Folders.create(self.storage, RESULT_DIRECTORIES)
        except CuckooOperationalError:
            log.error("Unable to create analysis folder %s", self.storage)
            return False

        self.store_task_info()

        if self.task.category == "file" or self.task.category == "archive":
            # Check if we have permissions to access the file.
            # And fail this analysis if we don't have access to the file.
            if not os.access(self.task.target, os.R_OK):
                log.error(
                    "Unable to access target file, please check if we have "
                    "permissions to access the file: \"%s\"", self.task.target)
                return False

            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            # TODO Absorb the file upon submission.
            sample = self.db.view_sample(self.task.sample_id)
            sha256 = File(self.task.target).get_sha256()
            if sha256 != sample.sha256:
                log.error(
                    "Target file has been modified after submission: \"%s\"",
                    self.task.target)
                return False

            # Store a copy of the original file if it does not exist already.
            # TODO This should be done at submission time.
            self.binary = cwd("storage", "binaries", sha256)
            if not os.path.exists(self.binary):
                try:
                    shutil.copy(self.task.target, self.binary)
                except (IOError, shutil.Error):
                    log.error(
                        "Unable to store file from \"%s\" to \"%s\", "
                        "analysis aborted", self.task.target, self.binary)
                    return False

            # Each analysis directory contains a symlink/copy of the binary.
            try:
                self.storage_binary = os.path.join(self.storage, "binary")

                if hasattr(os, "symlink"):
                    os.symlink(self.binary, self.storage_binary)
                else:
                    shutil.copy(self.binary, self.storage_binary)
            except (AttributeError, OSError) as e:
                log.error(
                    "Unable to create symlink/copy from \"%s\" to "
                    "\"%s\": %s", self.binary, self.storage, e)
                return False

        # Initiates per-task logging.
        task_log_start(self.task.id)
        return True
Example #12
    def init(self):
        """Initialize the analysis."""
        self.storage = cwd(analysis=self.task.id)

        # If the analysis storage folder already exists, we need to abort the
        # analysis or previous results will be overwritten and lost.
        if os.path.exists(self.storage):
            log.error("Analysis results folder already exists at path \"%s\", "
                      "analysis aborted", self.storage)
            return False

        # If we're not able to create the analysis storage folder, we have to
        # abort the analysis.
        try:
            Folders.create(self.storage)
        except CuckooOperationalError:
            log.error("Unable to create analysis folder %s", self.storage)
            return False

        self.store_task_info()

        if self.task.category == "file" or self.task.category == "archive":
            # Check if we have permissions to access the file.
            # And fail this analysis if we don't have access to the file.
            if not os.access(self.task.target, os.R_OK):
                log.error(
                    "Unable to access target file, please check if we have "
                    "permissions to access the file: \"%s\"",
                    self.task.target
                )
                return False

            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            # TODO Absorb the file upon submission.
            sample = self.db.view_sample(self.task.sample_id)
            sha256 = File(self.task.target).get_sha256()
            if sha256 != sample.sha256:
                log.error(
                    "Target file has been modified after submission: \"%s\"",
                    self.task.target
                )
                return False

            # Store a copy of the original file if it does not exist already.
            # TODO This should be done at submission time.
            self.binary = cwd("storage", "binaries", sha256)
            if not os.path.exists(self.binary):
                try:
                    shutil.copy(self.task.target, self.binary)
                except (IOError, shutil.Error):
                    log.error(
                        "Unable to store file from \"%s\" to \"%s\", "
                        "analysis aborted", self.task.target, self.binary
                    )
                    return False

            # Each analysis directory contains a symlink/copy of the binary.
            try:
                self.storage_binary = os.path.join(self.storage, "binary")

                if hasattr(os, "symlink"):
                    os.symlink(self.binary, self.storage_binary)
                else:
                    shutil.copy(self.binary, self.storage_binary)
            except (AttributeError, OSError) as e:
                log.error("Unable to create symlink/copy from \"%s\" to "
                          "\"%s\": %s", self.binary, self.storage, e)
                return False

        # Initiates per-task logging.
        task_log_start(self.task.id)
        return True
Example #13
    def run(self):
        """Starts the analysis manager thread."""
        task_log_start(self.task.id)
        analysis_success = False

        try:
            analysis_success = self.start_and_wait()

            # Check that the analysis did not fail in the analysis manager
            # and that the guest manager did not set the status to failed.
            if analysis_success:
                if self.analysis.status == Analysis.FAILED:
                    analysis_success = False
        except Exception as e:
            log.exception("Failure during the starting of task #%s. Error: %s",
                          self.task.id,
                          e,
                          extra={
                              "action": "task.start",
                              "status": "error"
                          })
        finally:
            try:
                self.stop_and_wait()
            except Exception as e:
                log.exception(
                    "Failure during the stopping of task #%s. Error: %s",
                    self.task.id,
                    e,
                    extra={
                        "action": "task.stop",
                        "status": "error"
                    })

        if analysis_success:
            self.set_analysis_status(Analysis.STOPPED, wait=True)
        else:
            self.set_analysis_status(Analysis.FAILED, wait=True)

        if not config("cuckoo:cuckoo:process_results"):
            log.debug("Cuckoo process_results is set to 'no',"
                      " not processing results")
            return

        log.info("Processing and reporting results for task #%s",
                 self.task.id,
                 extra={
                     "action": "task.report",
                     "status": "pending"
                 })
        try:
            self.processing_success = self.task.process()
        except Exception as e:
            log.exception("Error during processing of task #%s. Error: %s",
                          self.task.id,
                          e,
                          extra={
                              "action": "task.report",
                              "status": "failed"
                          })
            return

        log.info("Task #%d: analysis procedure completed",
                 self.task.id,
                 extra={
                     "action": "task.report",
                     "status": "finished",
                 })