Example #1
    def get_task_id(self, label):
        analysistasks = ResultServer().analysistasks
        for task_ip in analysistasks:
            if analysistasks[task_ip][1].label == label:
                return analysistasks[task_ip][0].id

        return None
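The lookup above walks ResultServer().analysistasks, which (judging from the indexing) maps a machine IP to a (task, machine) tuple, presumably populated by ResultServer().add_task() as shown in the later examples. A minimal, self-contained sketch of the same logic, using plain namedtuples instead of the real singleton (all names and values below are illustrative assumptions):

from collections import namedtuple

Task = namedtuple("Task", "id")
Machine = namedtuple("Machine", "label")

# Stand-in for ResultServer().analysistasks: machine IP -> (task, machine).
analysistasks = {"192.168.56.101": (Task(id=1), Machine(label="cuckoo1"))}

def get_task_id(label):
    for task_ip in analysistasks:
        if analysistasks[task_ip][1].label == label:
            return analysistasks[task_ip][0].id
    return None

assert get_task_id("cuckoo1") == 1
assert get_task_id("missing") is None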
Example #2
    def _initialize(self, module_name):
        """Read configuration.
        @param module_name: module name.
        """
        self.module_name = module_name
        mmanager_opts = self.options.get(module_name)

        for machine_id in mmanager_opts["machines"].strip().split(","):
            try:
                machine_opts = self.options.get(machine_id.strip())
                machine = Dictionary()
                machine.id = machine_id.strip()
                machine.label = machine_opts[self.LABEL]
                machine.platform = machine_opts["platform"]
                machine.tags = machine_opts.get("tags")
                machine.ip = machine_opts["ip"]

                # If configured, use specific network interface for this
                # machine, else use the default value.
                machine.interface = machine_opts.get("interface")

                # If configured, use specific snapshot name, else leave it
                # empty and use default behaviour.
                machine.snapshot = machine_opts.get("snapshot")

                # If configured, use specific resultserver IP and port,
                # else use the default value.
                opt_resultserver = self.options_globals.resultserver

                # The resultserver port might have been dynamically changed,
                # so get the current one from the ResultServer singleton.
                opt_resultserver.port = ResultServer().port

                ip = machine_opts.get("resultserver_ip", opt_resultserver.ip)
                port = machine_opts.get("resultserver_port",
                                        opt_resultserver.port)

                machine.resultserver_ip = ip
                machine.resultserver_port = port

                # Strip parameters.
                for key, value in machine.items():
                    if value and isinstance(value, basestring):
                        machine[key] = value.strip()

                self.db.add_machine(name=machine.id,
                                    label=machine.label,
                                    ip=machine.ip,
                                    platform=machine.platform,
                                    tags=machine.tags,
                                    interface=machine.interface,
                                    snapshot=machine.snapshot,
                                    resultserver_ip=ip,
                                    resultserver_port=port)
            except (AttributeError, CuckooOperationalError) as e:
                log.warning(
                    "Configuration details about machine %s "
                    "are missing: %s", machine_id.strip(), e)
                continue
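For context, _initialize() above expects the machinery's own configuration section to list machine ids in a "machines" option, plus one section per machine id with the keys read in the loop (label via self.LABEL, platform, ip, and optional tags, interface, snapshot, resultserver_ip, resultserver_port). A hedged sketch of what such a configuration might look like; the section names and values are assumptions, only the option keys come from the code:

[virtualbox]
machines = cuckoo1

[cuckoo1]
label = cuckoo1
platform = windows
ip = 192.168.56.101
tags = x86
# Optional per-machine overrides read via .get():
# interface = vboxnet0
# snapshot = clean
# resultserver_ip = 192.168.56.1
# resultserver_port = 2042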
Example #3
    def start_analysis(self, options):
        """Start analysis.
        @param options: options.
        @return: operation status.
        """
        log.info("Starting analysis on guest (id=%s, ip=%s)", self.id, self.ip)

        # TODO: deal with unicode URLs.
        if options["category"] == "file":
            options["file_name"] = sanitize_filename(options["file_name"])

        # Get and set dynamically generated resultserver port.
        options["port"] = str(ResultServer().port)

        try:
            # Wait for the agent to respond. This is done to check the
            # availability of the agent and verify that it's ready to receive
            # data.
            self.wait(CUCKOO_GUEST_INIT)

            # Invoke the upload of the analyzer to the guest.
            self.upload_analyzer()

            # Give the analysis options to the guest, so it can generate the
            # analysis.conf inside the guest.
            try:
                self.server.add_config(options)
            except:
                raise CuckooGuestError("{0}: unable to upload config to "
                                       "analysis machine".format(self.id))

            # If the target of the analysis is a file, upload it to the guest.
            if options["category"] == "file":
                try:
                    file_data = open(options["target"], "rb").read()
                except (IOError, OSError) as e:
                    raise CuckooGuestError("Unable to read {0}, error: "
                                           "{1}".format(options["target"], e))

                data = xmlrpclib.Binary(file_data)

                try:
                    self.server.add_malware(data, options["file_name"])
                except Exception as e:
                    raise CuckooGuestError("{0}: unable to upload malware to "
                                           "analysis machine: {1}".format(self.id, e))

            # Launch the analyzer.
            pid = self.server.execute()
            log.debug("%s: analyzer started with PID %d", self.id, pid)
        # If something goes wrong when establishing the connection, raise an
        # exception and abort the analysis.
        except (socket.timeout, socket.error):
            raise CuckooGuestError("{0}: guest communication timeout, check "
                                   "networking or try to increase "
                                   "timeout".format(self.id))
Example #4
    def _allocate_new_machine(self):
        """
        Allocate/create a new EC2 instance (autoscale option).
        """
        # read configuration file
        machinery_options = self.options.get("aws")
        autoscale_options = self.options.get("autoscale")
        # If configured, use specific network interface for this
        # machine, else use the default value.
        interface = autoscale_options["interface"] if autoscale_options.get(
            "interface") else machinery_options.get("interface")
        resultserver_ip = autoscale_options[
            "resultserver_ip"] if autoscale_options.get(
                "resultserver_ip") else Config("cuckoo:resultserver:ip")
        if autoscale_options.get("resultserver_port"):
            resultserver_port = autoscale_options["resultserver_port"]
        else:
            # The ResultServer port might have been dynamically changed,
            # get it from the ResultServer singleton. Also avoid import
            # recursion issues by importing ResultServer here.
            from lib.cuckoo.core.resultserver import ResultServer
            resultserver_port = ResultServer().port

        log.info("All machines are busy, allocating new machine")
        self.dynamic_machines_sequence += 1
        self.dynamic_machines_count += 1
        new_machine_name = "cuckoo_autoscale_%03d" % self.dynamic_machines_sequence
        instance = self._create_instance(tags=[{
            "Key": "Name",
            "Value": new_machine_name
        }, {
            "Key": self.AUTOSCALE_CUCKOO,
            "Value": "True"
        }])
        if instance is None:
            return False

        self.ec2_machines[instance.id] = instance
        #  sets "new_machine" object in configuration object to avoid raising an exception
        setattr(self.options, new_machine_name, {})
        # add machine to DB
        self.db.add_machine(name=new_machine_name,
                            label=instance.id,
                            ip=instance.private_ip_address,
                            platform=autoscale_options["platform"],
                            options=autoscale_options["options"],
                            tags=autoscale_options["tags"],
                            interface=interface,
                            snapshot=None,
                            resultserver_ip=resultserver_ip,
                            resultserver_port=resultserver_port)
        return True
Example #5
def cuckoo_init(quiet=False, debug=False, artwork=False, test=False, ml=False):
    """Cuckoo initialization workflow.
    @param quiet: if set, enable silent mode (print nothing except warnings)
    @param debug: if set, enable debug mode (print all debug messages)
    @param artwork: if set, print only the artwork, forever
    @param test: enable integration test mode, used only for testing
    @param ml: run CuckooML analysis of locally stored samples
    """
    cur_path = os.getcwd()
    os.chdir(CUCKOO_ROOT)

    logo()
    check_working_directory()
    check_configs()
    check_version()
    create_structure()

    if artwork:
        import time
        try:
            while True:
                time.sleep(1)
                logo()
        except KeyboardInterrupt:
            return

    init_logging()

    if quiet:
        log.setLevel(logging.WARN)
    elif debug:
        log.setLevel(logging.DEBUG)

    if ml:
        init_cuckooml()
        return

    init_modules()
    init_tasks()
    init_yara()
    init_binaries()
    init_rooter()
    init_routing()

    # TODO: This is just a temporary hack, we need an actual test suite to
    # integrate with Travis-CI.
    if test:
        return

    ResultServer()

    os.chdir(cur_path)
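A hedged sketch of how this entry point might be driven from a command-line wrapper; the argparse flag names below are assumptions, only the cuckoo_init() keyword arguments come from the signature above:

import argparse

# Hypothetical CLI wrapper around cuckoo_init(); flag names are illustrative.
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--quiet", action="store_true", help="log warnings only")
parser.add_argument("-d", "--debug", action="store_true", help="log debug messages")
parser.add_argument("-a", "--artwork", action="store_true", help="print the logo forever")
args = parser.parse_args()

cuckoo_init(quiet=args.quiet, debug=args.debug, artwork=args.artwork)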
Example #6
def cuckoo_init(quiet=False, debug=False, artwork=False, test=False):
    cur_path = os.getcwd()
    os.chdir(CUCKOO_ROOT)

    logo()
    check_working_directory()
    check_configs()
    create_structure()

    if artwork:
        import time

        try:
            while True:
                time.sleep(1)
                logo()
        except KeyboardInterrupt:
            return

    if quiet:
        level = logging.WARN
    elif debug:
        level = logging.DEBUG
    else:
        level = logging.INFO
    log.setLevel(level)
    init_logging(level)

    check_webgui_mongo()
    init_modules()
    init_tasks()
    init_yara()
    init_rooter()
    init_routing()

    # This is just a temporary hack, we need an actual test suite to integrate
    # with Travis-CI.
    if test:
        return

    ResultServer()
    os.chdir(cur_path)
Example #7
    def start(self):
        # Get updated machine info
        self.machine = self.db.view_machine_by_label(self.machine.label)
        tcpdump = self.options.get("tcpdump", "/usr/sbin/tcpdump")
        bpf = self.options.get("bpf", "")
        remote = self.options.get("remote", False)
        remote_host = self.options.get("host", "")
        if remote:
            file_path = f"/tmp/tcp.dump.{self.task.id}"
        else:
            file_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                     str(self.task.id), "dump.pcap")
        host = self.machine.ip
        # Selects per-machine interface if available.
        if self.machine.interface:
            interface = self.machine.interface
        else:
            interface = self.options.get("interface")
        # Selects per-machine resultserver IP if available.
        if self.machine.resultserver_ip:
            resultserver_ip = str(self.machine.resultserver_ip)
        else:
            resultserver_ip = str(cfg.resultserver.ip)
        # Get resultserver port from its instance because it could change dynamically.
        resultserver_port = str(ResultServer().port)

        if self.machine.resultserver_port:
            resultserver_port = str(self.machine.resultserver_port)
        else:
            resultserver_port = str(cfg.resultserver.port)

        if not os.path.exists(tcpdump):
            log.error(
                'Tcpdump does not exist at path "%s", network capture aborted',
                tcpdump)
            return

        # https://github.com/cuckoosandbox/cuckoo/pull/2842/files
        mode = os.stat(tcpdump).st_mode
        if mode & S_ISUID:
            log.error(
                "Tcpdump is not accessible from this user network capture aborted"
            )
            return

        if not interface:
            log.error("Network interface not defined, network capture aborted")
            return

        pargs = [tcpdump, "-U", "-q", "-s", "0", "-i", interface, "-n"]

        # Trying to save pcap with the same user which cuckoo is running.
        try:
            user = getpass.getuser()
        except Exception:
            pass
        else:
            if not remote:
                pargs.extend(["-Z", user])

        pargs.extend(["-w", file_path])
        if remote:
            pargs.extend(["'", "host", host])
        else:
            pargs.extend(["host", host])
        # Do not capture XMLRPC agent traffic.
        pargs.extend([
            "and",
            "not",
            "(",
            "dst",
            "host",
            host,
            "and",
            "dst",
            "port",
            str(CUCKOO_GUEST_PORT),
            ")",
            "and",
            "not",
            "(",
            "src",
            "host",
            host,
            "and",
            "src",
            "port",
            str(CUCKOO_GUEST_PORT),
            ")",
        ])

        # Do not capture ResultServer traffic.
        pargs.extend([
            "and",
            "not",
            "(",
            "dst",
            "host",
            resultserver_ip,
            "and",
            "dst",
            "port",
            resultserver_port,
            ")",
            "and",
            "not",
            "(",
            "src",
            "host",
            resultserver_ip,
            "and",
            "src",
            "port",
            resultserver_port,
            ")",
        ])

        # TODO fix this, temp fix to not get all that noise
        # pargs.extend(["and", "not", "(", "dst", "host", resultserver_ip, "and", "src", "host", host, ")"])

        if remote and bpf:
            pargs.extend(["and", "("] + bpf.split(" ") + [")"])
            pargs.extend(["'"])
        elif bpf:
            pargs.extend(["and", "(", bpf, ")"])

        if remote and not remote_host:
            log.exception(
                "Failed to start sniffer, remote enabled but no ssh string has been specified"
            )
            return
        elif remote:

            try:
                from subprocess import DEVNULL  # py3k
            except ImportError:
                DEVNULL = open(os.devnull, "wb")

            f = open(f"/tmp/{self.task.id}.sh", "w")
            if f:
                f.write(f"{' '.join(pargs)} & PID=$!")
                f.write("\n")
                f.write(f"echo $PID > /tmp/{self.task.id}.pid")
                f.write("\n")
                f.close()

            remote_output = subprocess.check_output([
                "scp", "-q", f"/tmp/{self.task.id}.sh",
                remote_host + f":/tmp/{self.task.id}.sh"
            ],
                                                    stderr=DEVNULL)
            remote_output = subprocess.check_output(
                [
                    "ssh", remote_host, "nohup", "/bin/bash",
                    f"/tmp/{self.task.id}.sh", ">", "/tmp/log", "2>",
                    "/tmp/err"
                ],
                stderr=subprocess.STDOUT,
            )

            self.pid = subprocess.check_output(
                ["ssh", remote_host, "cat", f"/tmp/{self.task.id}.pid"],
                stderr=DEVNULL).strip()
            log.info(
                "Started remote sniffer @ %s with (interface=%s, host=%s, dump path=%s, pid=%s)",
                remote_host,
                interface,
                host,
                file_path,
                self.pid,
            )
            remote_output = subprocess.check_output([
                "ssh", remote_host, "rm", "-f", f"/tmp/{self.task.id}.pid",
                f"/tmp/{self.task.id}.sh"
            ],
                                                    stderr=DEVNULL)

        else:
            try:
                self.proc = subprocess.Popen(pargs,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)
            except (OSError, ValueError):
                log.exception(
                    "Failed to start sniffer (interface=%s, host=%s, dump path=%s)",
                    interface, host, file_path)
                return

            log.info(
                "Started sniffer with PID %d (interface=%s, host=%s, dump path=%s)",
                self.proc.pid, interface, host, file_path)
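For the local (non-remote) branch above, with hypothetical values (interface eth0, guest 192.168.56.101, resultserver 192.168.56.1:2042, agent port 8000, user cuckoo, task 1), the argument list handed to subprocess.Popen() amounts to roughly this command line:

tcpdump -U -q -s 0 -i eth0 -n -Z cuckoo -w $CUCKOO_ROOT/storage/analyses/1/dump.pcap \
    host 192.168.56.101 \
    and not ( dst host 192.168.56.101 and dst port 8000 ) \
    and not ( src host 192.168.56.101 and src port 8000 ) \
    and not ( dst host 192.168.56.1 and dst port 2042 ) \
    and not ( src host 192.168.56.1 and src port 2042 )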
Example #8
    def launch_analysis(self):
        """Start analysis."""
        succeeded = False
        dead_machine = False

        log.info("Starting analysis of %s \"%s\" (task=%d)",
                 self.task.category.upper(), self.task.target, self.task.id)

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category == "file":
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            machine_lock.release()
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # At this point we can tell the ResultServer about it.
        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            self.errors.put(e)

        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()

        # Generate the analysis configuration file.
        options = self.build_options()

        try:
            unlocked = False

            # Mark the selected analysis machine in the database as started.
            guest_log = self.db.guest_start(self.task.id,
                                            self.machine.name,
                                            self.machine.label,
                                            machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)

            # By the time start returns it will have fully started the Virtual
            # Machine. We can now safely release the machine lock.
            machine_lock.release()
            unlocked = True

            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform)

            # Start the analysis.
            guest.start_analysis(options)

            guest.wait_for_completion()
            succeeded = True
        except CuckooMachineError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
            dead_machine = True
        except CuckooGuestError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = os.path.join(self.storage, "memory.dmp")
                    machinery.dump_memory(self.machine.label, dump_path)
                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager.")
                except CuckooMachineError as e:
                    log.error(e)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)
            except CuckooMachineError as e:
                log.warning("Unable to stop machine %s: %s",
                            self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            self.db.guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                self.db.guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)
            except CuckooMachineError as e:
                log.error("Unable to release machine %s, reason %s. "
                          "You might need to restore it manually.",
                          self.machine.label, e)

        return succeeded
Example #9
    def launch_analysis(self):
        """Start analysis."""
        succeeded = False
        dead_machine = False
        self.socks5s = _load_socks5_operational()

        log.info("Task #{0}: Starting analysis of {1} '{2}'".format(
            self.task.id, self.task.category.upper(),
            convert_to_printable(self.task.target)))

        # Initialize the analysis folders.
        if not self.init_storage():
            log.debug("Failed to initialize the analysis folder")
            return False

        if self.task.category in ["file", "pcap", "static"]:
            sha256 = File(self.task.target).get_sha256()
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file(sha256):
                return False

            # Store a copy of the original file.
            if not self.store_file(sha256):
                return False

        if self.task.category in ("pcap", "static"):
            if self.task.category == "pcap":
                if hasattr(os, "symlink"):
                    os.symlink(self.binary,
                               os.path.join(self.storage, "dump.pcap"))
                else:
                    shutil.copy(self.binary,
                                os.path.join(self.storage, "dump.pcap"))
            # create the logs/files directories as
            # normally the resultserver would do it
            dirnames = ["logs", "files", "aux"]
            for dirname in dirnames:
                try:
                    os.makedirs(os.path.join(self.storage, dirname))
                except:
                    pass
            return True

        # Acquire analysis machine.
        try:
            self.acquire_machine()
            self.db.set_task_vm(self.task.id, self.machine.label,
                                self.machine.id)
        # At this point we can tell the ResultServer about it.
        except CuckooOperationalError as e:
            machine_lock.release()
            log.error("Task #{0}: Cannot acquire machine: {1}".format(
                self.task.id, e),
                      exc_info=True)
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            log.exception(e, exc_info=True)
            self.errors.put(e)

        aux = RunAuxiliary(task=self.task, machine=self.machine)

        try:
            unlocked = False

            # Mark the selected analysis machine in the database as started.
            guest_log = self.db.guest_start(self.task.id, self.machine.name,
                                            self.machine.label,
                                            machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)

            # Enable network routing.
            self.route_network()

            # By the time start returns it will have fully started the Virtual
            # Machine. We can now safely release the machine lock.
            machine_lock.release()
            unlocked = True

            aux.start()

            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform, self.task.id, self)

            options["clock"] = self.db.update_clock(self.task.id)
            self.db.guest_set_status(self.task.id, "starting")
            # Start the analysis.
            guest.start_analysis(options)
            if self.db.guest_get_status(self.task.id) == "starting":
                self.db.guest_set_status(self.task.id, "running")
                guest.wait_for_completion()

            self.db.guest_set_status(self.task.id, "stopping")
            succeeded = True
        except CuckooMachineError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
            dead_machine = True
        except CuckooGuestError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = get_memdump_path(self.task.id)
                    need_space, space_available = free_space_monitor(
                        os.path.dirname(dump_path), return_value=True)
                    if need_space:
                        log.error(
                            "Not enough free disk space! Could not dump ram (Only %d MB!)",
                            space_available)
                    else:
                        machinery.dump_memory(self.machine.label, dump_path)
                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager.")

                except CuckooMachineError as e:
                    log.error(e, exc_info=True)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)

            except CuckooMachineError as e:
                log.warning(
                    "Task #{0}: Unable to stop machine {1}: {2}".format(
                        self.task.id, self.machine.label, e))

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            self.db.guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)

            # Drop the network routing rules if any.
            self.unroute_network()

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                self.db.guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)

            except CuckooMachineError as e:
                log.error("Task #{0}: Unable to release machine {1}, reason "
                          "{2}. You might need to restore it manually.".format(
                              self.task.id, self.machine.label, e))

        return succeeded
Example #10
    def launch_analysis(self):
        """Start analysis."""
        succeeded = False
        dead_machine = False

        log.info("Starting analysis of %s \"%s\" (task=%d)",
                 self.task.category.upper(), self.task.target, self.task.id)

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category == "file":
            sample = Database().view_sample(self.task.sample_id)

            is_first_task = not self.task.experiment.times

            if is_first_task:
                # Check whether the file has been changed for some unknown reason.
                # And fail this analysis if it has been modified.
                if not self.check_file():
                    return False

                # Store a copy of the original file.
                if not self.store_file():
                    return False

            self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries",
                                       sample.sha256)

            self.create_symlink()

        # TODO Scheduling should happen at the end of the experiment.
        scheduled = False
        if self.task.repeat == TASK_RECURRENT:
            scheduled = Database().schedule(self.task.id)

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            machine_lock.release()
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        # At this point we can tell the ResultServer about it.
        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            self.errors.put(e)

        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()

        try:
            unlocked = False

            # Mark the selected analysis machine in the database as started.
            guest_log = Database().guest_start(self.task.id, self.machine.name,
                                               self.machine.label,
                                               machinery.__class__.__name__)

            # Starting the machinery takes some time. In order not to run into
            # race conditions with max_machines_count because acquiring new
            # machines takes place in parallel next to starting machines, for
            # which it takes a little delay before the machines' status turns
            # into "running", we hold the machine lock until the machine has
            # fully started (or gives an error, of course).
            try:
                # Start the machine, revert only if we are the first task in
                # the experiment.
                machinery.start(self.machine.label, revert=is_first_task)
            finally:
                machine_lock.release()
                unlocked = True

            # Initialize the guest manager.
            # FIXME - The critical timeout options is analysis_timeout + 60 sec
            #           should it be configurable?
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 options["timeout"] + 60,
                                 self.machine.platform)

            # Start the analysis if we are the first task of a series
            if is_first_task:
                guest.start_analysis(options)

            guest.wait_for_completion()
            succeeded = True
        except CuckooMachineError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
            dead_machine = True
        except CuckooGuestError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = os.path.join(self.storage, "memory.dmp")
                    machinery.dump_memory(self.machine.label, dump_path)
                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager.")
                except CuckooMachineError as e:
                    log.error(e)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)
            except CuckooMachineError as e:
                log.warning("Unable to stop machine %s: %s",
                            self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            Database().guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                Database().guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

            try:
                # Release the analysis machine.
                if not scheduled:
                    log.debug(
                        "Freeing the machine since we're the last task of this experiment"
                    )
                    machinery.release(self.machine.label)

                    # Unset the machine_name of this experiment.
                    if self.task.experiment:
                        Database().update_experiment(
                            None,
                            id=self.task.experiment.id,
                            machine_name=None)
            except CuckooMachineError as e:
                log.error(
                    "Unable to release machine %s, reason %s. "
                    "You might need to restore it manually.",
                    self.machine.label, e)

        return succeeded
Example #11
    def start(self):
        tcpdump = self.options.get("tcpdump", "/usr/sbin/tcpdump")
        bpf = self.options.get("bpf", "")
        file_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(self.task.id), "dump.pcap")
        host = self.machine.ip
        # Selects per-machine interface if available.
        if self.machine.interface:
            interface = self.machine.interface
        else:
            interface = self.options.get("interface")
        # Selects per-machine resultserver IP if available.
        if self.machine.resultserver_ip:
            resultserver_ip = self.machine.resultserver_ip
        else:
            resultserver_ip = str(Config().resultserver.ip)
        # Get resultserver port from its instance because it could change dynamically.
        resultserver_port = str(ResultServer().port)

        if not os.path.exists(tcpdump):
            log.error("Tcpdump does not exist at path \"%s\", network "
                      "capture aborted", tcpdump)
            return

        mode = os.stat(tcpdump)[stat.ST_MODE]
        if (mode & stat.S_ISUID) == 0:
            log.error("Tcpdump is not accessible from this user, "
                      "network capture aborted")
            return

        if not interface:
            log.error("Network interface not defined, network capture aborted")
            return

        pargs = [tcpdump, "-U", "-q", "-s", "0", "-i", interface, "-n"]

        # Trying to save pcap with the same user which cuckoo is running.
        try:
            user = getpass.getuser()
        except:
            pass
        else:
            pargs.extend(["-Z", user])

        pargs.extend(["-w", file_path])
        pargs.extend(["host", host])
        # Do not capture XMLRPC agent traffic.
        pargs.extend(["and", "not", "(", "dst", "host", host, "and", "dst", "port",
                      str(CUCKOO_GUEST_PORT), ")", "and", "not", "(", "src", "host",
                      host, "and", "src", "port", str(CUCKOO_GUEST_PORT), ")"])

        # Do not capture ResultServer traffic.
        pargs.extend(["and", "not", "(", "dst", "host", resultserver_ip,
                      "and", "dst", "port", resultserver_port, ")", "and",
                      "not", "(", "src", "host", resultserver_ip, "and",
                      "src", "port", resultserver_port, ")"])

        if bpf:
            pargs.extend(["and", bpf])

        try:
            self.proc = subprocess.Popen(pargs, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        except (OSError, ValueError):
            log.exception("Failed to start sniffer (interface=%s, host=%s, "
                          "dump path=%s)", interface, host, file_path)
            return

        log.info("Started sniffer with PID %d (interface=%s, host=%s, "
                 "dump path=%s)", self.proc.pid, interface, host, file_path)
Example #12
    def launch_analysis(self):
        """Start analysis."""
        succeeded = False

        target = self.task.target
        if self.task.category == "file":
            target = os.path.basename(target)

        log.info("Starting analysis of %s \"%s\" (task #%d, options \"%s\")",
                 self.task.category.upper(), target, self.task.id,
                 emit_options(self.task.options))

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        self.store_task_info()

        if self.task.category == "file":
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            machine_lock.release()
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # At this point we can tell the ResultServer about it.
        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            self.errors.put(e)

        # Generate the analysis configuration file.
        options = self.build_options()

        #####################################
        # THIS PART IS USED FOR PINDEMONIUM #
        #####################################
        if (options["package"][0:6] == 'exePIN'):
            print(options)
            log.info(
                "\033[91m\n\n\t\t ###### Will now proceed with PINDemonium #######\n\n\033[0m"
            )
            options = Unpack(options)
            print(options)
        #####################################
        # THIS PART IS USED FOR PINDEMONIUM #
        #####################################

        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()
        try:
            unlocked = False
            self.interface = None

            # Mark the selected analysis machine in the database as started.
            guest_log = self.db.guest_start(self.task.id, self.machine.name,
                                            self.machine.label,
                                            machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label, self.task)

            # Enable network routing.
            self.route_network()

            # By the time start returns it will have fully started the Virtual
            # Machine. We can now safely release the machine lock.
            machine_lock.release()
            unlocked = True

            # Run and manage the components inside the guest unless this
            # machine has the "noagent" option specified (please refer to the
            # wait_finish() function for more details on this function).
            if "noagent" not in self.machine.options:
                self.guest_manage(options)
            else:
                self.wait_finish()

            succeeded = True
        except CuckooMachineError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
            log.critical(
                "A critical error has occurred trying to use the machine "
                "with name %s during an analysis due to which it is no "
                "longer in a working state, please report this issue and all "
                "of the related environment details to the developers so we "
                "can improve this situation. (Note that before we would "
                "simply remove this VM from doing any more analyses, but as "
                "all the VMs will eventually be depleted that way, hopefully "
                "we'll find a better solution now).",
                self.machine.name,
            )
        except CuckooGuestError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = os.path.join(self.storage, "memory.dmp")
                    machinery.dump_memory(self.machine.label, dump_path)
                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager.")
                except CuckooMachineError as e:
                    log.error(e)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)
            except CuckooMachineError as e:
                log.warning("Unable to stop machine %s: %s",
                            self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            self.db.guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)

            # Drop the network routing rules if any.
            self.unroute_network()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)
            except CuckooMachineError as e:
                log.error(
                    "Unable to release machine %s, reason %s. "
                    "You might need to restore it manually.",
                    self.machine.label, e)

        return succeeded
Example #13
    def _get_resultserver_port(self):
        """Returns the ResultServer port."""
        # Avoid import recursion issues by importing ResultServer here.
        from lib.cuckoo.core.resultserver import ResultServer
        return ResultServer().port
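All of these snippets depend on ResultServer() returning the same live instance each time, so that .port reflects the port the server actually bound (which may differ from the configured one). A minimal, self-contained sketch of that singleton behaviour, assuming a metaclass-based implementation similar in spirit to Cuckoo's, but not its actual code:

class Singleton(type):
    """Metaclass that caches and reuses a single instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class ResultServerSketch(metaclass=Singleton):
    def __init__(self):
        # The real server would store the port its listening socket was
        # actually bound to; a fixed placeholder is used here.
        self.port = 2042

assert ResultServerSketch() is ResultServerSketch()
assert ResultServerSketch().port == 2042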
Example #14
    def start(self):
        # Get updated machine info
        self.machine = self.db.view_machine_by_label(self.machine.label)
        tcpdump = self.options.get("tcpdump", "/usr/sbin/tcpdump")
        bpf = self.options.get("bpf", "")
        file_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                 "%s" % self.task.id, "dump.pcap")
        host = self.machine.ip
        # Selects per-machine interface if available.
        if self.machine.interface:
            interface = self.machine.interface
        else:
            interface = self.options.get("interface")
        # Selects per-machine resultserver IP if available.
        if self.machine.resultserver_ip:
            resultserver_ip = str(self.machine.resultserver_ip)
        else:
            resultserver_ip = str(Config().resultserver.ip)
        # Get resultserver port from its instance because it could change dynamically.
        resultserver_port = str(ResultServer().port)

        if self.machine.resultserver_port:
            resultserver_port = str(self.machine.resultserver_port)
        else:
            resultserver_port = str(Config().resultserver.port)

        if not os.path.exists(tcpdump):
            log.error("Tcpdump does not exist at path \"%s\", network "
                      "capture aborted", tcpdump)
            return

        mode = os.stat(tcpdump)[stat.ST_MODE]
        if self.options.get("suid_check", True) and (mode & stat.S_ISUID) == 0 and os.geteuid() > 0:
            # now do a weak file capability check
            has_caps = False
            try:
                caplib = ctypes.cdll.LoadLibrary("libcap.so.2")
                if caplib:
                    caplist = caplib.cap_get_file(tcpdump)
                    if caplist:
                        has_caps = True
            except:
                pass
            if not has_caps:
                log.error("Tcpdump is not accessible from this user, "
                          "network capture aborted")
                return

        if not interface:
            log.error("Network interface not defined, network capture aborted")
            return

        pargs = [tcpdump, "-U", "-q", "-s", "0", "-i", interface, "-n"]

        # Trying to save pcap with the same user which cuckoo is running.
        try:
            user = getpass.getuser()
        except:
            pass
        else:
            pargs.extend(["-Z", user])

        pargs.extend(["-w", file_path])
        pargs.extend(["host", host])
        # Do not capture XMLRPC agent traffic.
        pargs.extend(["and", "not", "(", "dst", "host", host, "and", "dst", "port",
                      str(CUCKOO_GUEST_PORT), ")", "and", "not", "(", "src", "host",
                      host, "and", "src", "port", str(CUCKOO_GUEST_PORT), ")"])

        # Do not capture ResultServer traffic.
        pargs.extend(["and", "not", "(", "dst", "host", resultserver_ip,
                      "and", "dst", "port", resultserver_port, ")", "and",
                      "not", "(", "src", "host", resultserver_ip, "and",
                      "src", "port", resultserver_port, ")"])

        if bpf:
            pargs.extend(["and", "(", bpf, ")"])

        try:
            self.proc = subprocess.Popen(pargs, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        except (OSError, ValueError):
            log.exception("Failed to start sniffer (interface=%s, host=%s, "
                          "dump path=%s)", interface, host, file_path)
            return

        log.info("Started sniffer with PID %d (interface=%s, host=%s, "
                 "dump path=%s)", self.proc.pid, interface, host, file_path)
Example #15
    def start(self):
        # Get updated machine info
        self.machine = self.db.view_machine_by_label(self.machine.label)
        tcpdump = self.options.get("tcpdump", "/usr/sbin/tcpdump")
        bpf = self.options.get("bpf", "")
        remote = self.options.get("remote", False)
        remote_host = self.options.get("host", "")
        if remote:
            file_path = "/tmp/tcp.dump.%d" % self.task.id
        else:
            file_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(self.task.id), "dump.pcap")
        host = self.machine.ip
        # Selects per-machine interface if available.
        if self.machine.interface:
            interface = self.machine.interface
        else:
            interface = self.options.get("interface")
        # Selects per-machine resultserver IP if available.
        if self.machine.resultserver_ip:
            resultserver_ip = str(self.machine.resultserver_ip)
        else:
            resultserver_ip = str(Config().resultserver.ip)
        # Get resultserver port from its instance because it could change dynamically.
        resultserver_port = str(ResultServer().port)

        if self.machine.resultserver_port:
            resultserver_port = str(self.machine.resultserver_port)
        else:
            resultserver_port = str(Config().resultserver.port)

        if not os.path.exists(tcpdump):
            log.error("Tcpdump does not exist at path \"%s\", network "
                      "capture aborted", tcpdump)
            return

        mode = os.stat(tcpdump)[stat.ST_MODE]
        if self.options.get("suid_check", True) and (mode & stat.S_ISUID) == 0 and os.geteuid() > 0:
            # now do a weak file capability check
            has_caps = False
            try:
                caplib = ctypes.cdll.LoadLibrary("libcap.so.2")
                if caplib:
                    caplist = caplib.cap_get_file(tcpdump)
                    if caplist:
                        has_caps = True
            except:
                pass
            if not has_caps:
                log.error("Tcpdump is not accessible from this user, "
                          "network capture aborted")
                return

        if not interface:
            log.error("Network interface not defined, network capture aborted")
            return

        pargs = [tcpdump, "-U", "-q", "-s", "0", "-i", interface, "-n"]

        # Trying to save pcap with the same user which cuckoo is running.
        try:
            user = getpass.getuser()
        except:
            pass
        else:
            if not remote:
               pargs.extend(["-Z", user])

        pargs.extend(["-w", file_path])
        if remote:
            pargs.extend(["'", "host", host])
        else:
            pargs.extend(["host", host])
        # Do not capture XMLRPC agent traffic.
        pargs.extend(["and", "not", "(", "dst", "host", host, "and", "dst", "port",
                      str(CUCKOO_GUEST_PORT), ")", "and", "not", "(", "src", "host",
                      host, "and", "src", "port", str(CUCKOO_GUEST_PORT), ")"])

        # Do not capture ResultServer traffic.
        pargs.extend(["and", "not", "(", "dst", "host", resultserver_ip,
                      "and", "dst", "port", resultserver_port, ")", "and",
                      "not", "(", "src", "host", resultserver_ip, "and",
                      "src", "port", resultserver_port, ")"])

        if remote and bpf:
            pargs.extend(["and", "("] + bpf.split(' ') + [ ")" ] )
            pargs.extend(["'"])
        elif bpf:
            pargs.extend(["and", "(", bpf, ")"])

        if remote and not remote_host:
            log.exception("Failed to start sniffer, remote enabled but no ssh string has been specified")
            return
        elif remote:
            try:
                from subprocess import DEVNULL  # py3k
            except ImportError:
                DEVNULL = open(os.devnull, "wb")

            f = open("/tmp/%d.sh" % self.task.id, "w")
            if f:
                f.write(" ".join(pargs) + " & PID=$!")
                f.write("\n")
                f.write("echo $PID > /tmp/%d.pid" % self.task.id)
                f.write("\n")
                f.close()

            remote_output = subprocess.check_output(
                ["scp", "-q", "/tmp/%d.sh" % self.task.id,
                 remote_host + ":/tmp/%d.sh" % self.task.id],
                stderr=DEVNULL)
            remote_output = subprocess.check_output(
                ["ssh", remote_host, "nohup", "/bin/bash",
                 "/tmp/%d.sh" % self.task.id, ">", "/tmp/log", "2>", "/tmp/err"],
                stderr=subprocess.STDOUT)

            self.pid = subprocess.check_output(
                ["ssh", remote_host, "cat", "/tmp/%d.pid" % self.task.id],
                stderr=DEVNULL).strip()
            log.info("Started remote sniffer @ %s with (interface=%s, host=%s, "
                     "dump path=%s, pid=%s)", remote_host, interface, host, file_path, self.pid)
            remote_output = subprocess.check_output(
                ["ssh", remote_host, "rm", "-f", "/tmp/%d.pid" % self.task.id,
                 "/tmp/%d.sh" % self.task.id],
                stderr=DEVNULL)

        else:
            try:
                self.proc = subprocess.Popen(pargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except (OSError, ValueError):
                log.exception("Failed to start sniffer (interface=%s, host=%s, "
                          "dump path=%s)", interface, host, file_path)
                return

            log.info("Started sniffer with PID %d (interface=%s, host=%s, "
                    "dump path=%s)", self.proc.pid, interface, host, file_path)
Example #16
    def launch_analysis(self):
        """Start analysis."""
        succeeded = False
        dead_machine = False
        self.socks5s = _load_socks5_operational()

        log.info(
            "Task #%s: Starting analysis of %s '%s'",
            self.task.id,
            self.task.category.upper(),
            convert_to_printable(self.task.target),
        )

        # Initialize the analysis folders.
        if not self.init_storage():
            log.debug("Failed to initialize the analysis folder")
            return False

        category_early_escape = self.category_checks()
        if isinstance(category_early_escape, bool):
            return category_early_escape

        # Acquire analysis machine.
        try:
            self.acquire_machine()
            self.db.set_task_vm(self.task.id, self.machine.label, self.machine.id)
        # At this point we can tell the ResultServer about it.
        except CuckooOperationalError as e:
            machine_lock.release()
            log.error("Task #%s: Cannot acquire machine: %s", self.task.id, e, exc_info=True)
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            log.exception(e, exc_info=True)
            self.errors.put(e)

        aux = RunAuxiliary(task=self.task, machine=self.machine)

        try:
            unlocked = False

            # Mark the selected analysis machine in the database as started.
            guest_log = self.db.guest_start(self.task.id, self.machine.name, self.machine.label, machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)

            # Enable network routing.
            self.route_network()

            # By the time start returns it will have fully started the Virtual
            # Machine. We can now safely release the machine lock.
            machine_lock.release()
            unlocked = True

            aux.start()

            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip, self.machine.platform, self.task.id, self)

            options["clock"] = self.db.update_clock(self.task.id)
            self.db.guest_set_status(self.task.id, "starting")
            # Start the analysis.
            guest.start_analysis(options)
            if self.db.guest_get_status(self.task.id) == "starting":
                self.db.guest_set_status(self.task.id, "running")
                guest.wait_for_completion()

            self.db.guest_set_status(self.task.id, "stopping")
            succeeded = True
        except (CuckooMachineError, CuckooNetworkError) as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
            dead_machine = True
        except CuckooGuestError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = get_memdump_path(self.task.id)
                    need_space, space_available = free_space_monitor(os.path.dirname(dump_path), return_value=True)
                    if need_space:
                        log.error("Not enough free disk space! Could not dump ram (Only %d MB!)", space_available)
                    else:
                        machinery.dump_memory(self.machine.label, dump_path)
                except NotImplementedError:
                    log.error("The memory dump functionality is not available for the current machine manager")

                except CuckooMachineError as e:
                    log.error(e, exc_info=True)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)

            except CuckooMachineError as e:
                log.warning("Task #%s: Unable to stop machine %s: %s", self.task.id, self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            self.db.guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)

            # Drop the network routing rules if any.
            self.unroute_network()

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                self.db.guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)

            except CuckooMachineError as e:
                log.error(
                    "Task #%s: Unable to release machine %s, reason %s. You might need to restore it manually",
                    self.task.id,
                    self.machine.label,
                    e,
                )

        return succeeded
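The CuckooDeadMachine raised in the finally block above is meant to be handled by the caller, which then retries the task on another machine. A hedged sketch of that retry loop, assuming a run() wrapper around launch_analysis() (the wrapper name and structure are illustrative, not the project's exact code):

    def run(self):
        """Retry the analysis when the machine dies (illustrative sketch)."""
        while True:
            try:
                success = self.launch_analysis()
            except CuckooDeadMachine:
                # The machine died mid-analysis: loop again so that
                # launch_analysis() acquires a different machine and
                # recreates the analysis storage folder.
                continue
            break
        return success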
Example #17
0
    def start_analysis(self, options):
        """Start analysis.
        @param options: options.
        @return: operation status.
        """
        log.info("Starting analysis on guest (id=%s, ip=%s)", self.id, self.ip)

        if misc_config.ENABLE_CUCKOO_EXTRA_INFO:
            time.sleep(10)
            subprocess.call([misc_config.ADB_PATH, "connect", "192.168.56.10"])
            log.info("Starting to collect information")

            # Custom: collect the process list, listening ports and file
            # list from the guest over ADB.
            try:
                self.getProcessList()
                self.getListeningPorts()
                self.generateFileList()
            except Exception:
                log.info("ADB error occurred! Trying again...")
                try:
                    # Restart the ADB server and reconnect before retrying.
                    subprocess.Popen([misc_config.ADB_PATH, "kill-server"])
                    subprocess.Popen(["killall", "adb"])
                    time.sleep(2)
                    subprocess.call(
                        [misc_config.ADB_PATH, "connect", "192.168.56.10"])
                    time.sleep(5)
                    self.getProcessList()
                    self.getListeningPorts()
                    self.generateFileList()
                except Exception:
                    log.info("ADB error occurred for the second time!")

        # TODO: deal with unicode URLs.
        if options["category"] == "file":
            options["file_name"] = sanitize_filename(options["file_name"])

        # If the analysis timeout is higher than the critical timeout,
        # automatically increase the critical timeout by one minute.
        if options["timeout"] > self.timeout:
            log.debug("Automatically increased critical timeout to %s",
                      self.timeout)
            self.timeout = options["timeout"] + 60

        # Get and set dynamically generated resultserver port.
        options["port"] = str(ResultServer().port)

        try:
            # Wait for the agent to respond. This is done to check the
            # availability of the agent and verify that it's ready to receive
            # data.
            self.wait(CUCKOO_GUEST_INIT)

            # Invoke the upload of the analyzer to the guest.
            self.upload_analyzer()

            # Give the analysis options to the guest, so it can generate the
            # analysis.conf inside the guest.
            try:
                self.server.add_config(options)
            except Exception:
                raise CuckooGuestError("{0}: unable to upload config to "
                                       "analysis machine".format(self.id))

            # If the target of the analysis is a file, upload it to the guest.
            if options["category"] == "file":
                try:
                    with open(options["target"], "rb") as sample:
                        file_data = sample.read()
                except (IOError, OSError) as e:
                    raise CuckooGuestError("Unable to read {0}, error: "
                                           "{1}".format(options["target"], e))

                data = xmlrpclib.Binary(file_data)

                try:
                    self.server.add_malware(data, options["file_name"])
                except Exception as e:
                    raise CuckooGuestError("{0}: unable to upload malware to "
                                           "analysis machine: {1}".format(
                                               self.id, e))

            # Launch the analyzer.
            pid = self.server.execute()
            log.debug("%s: analyzer started with PID %d", self.id, pid)
        # If something goes wrong when establishing the connection, raise an
        # exception and abort the analysis.
        except (socket.timeout, socket.error):
            raise CuckooGuestError("{0}: guest communication timeout, check "
                                   "networking or try to increase "
                                   "timeout".format(self.id))

        # Custom
        # Give the app some time to start up
        log.debug("Starting to simulate user interaction")
        time.sleep(10)
        self.simulateUserInteraction()
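The self.wait(CUCKOO_GUEST_INIT) call above blocks until the in-guest agent reports the requested state. A hedged sketch of such a polling loop over the agent's XML-RPC server; the get_status() call, the one-second poll interval and the error message are assumptions rather than the exact upstream implementation:

    def wait(self, status):
        """Poll the agent until it reports `status` or the critical
        timeout expires (illustrative sketch)."""
        end = time.time() + self.timeout
        while time.time() < end:
            try:
                # The agent exposes its current state over XML-RPC.
                if self.server.get_status() == status:
                    return True
            except (socket.timeout, socket.error):
                pass
            time.sleep(1)
        raise CuckooGuestError("{0}: guest never reported the expected "
                               "status, analysis aborted".format(self.id))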