Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("id", type=str, help="ID of the analysis to process (auto for continuous processing of unprocessed tasks).")
    parser.add_argument("-d", "--debug", help="Display debug messages", action="store_true", required=False)
    parser.add_argument("-r", "--report", help="Re-generate report", action="store_true", required=False)
    parser.add_argument("-p", "--parallel", help="Number of parallel threads to use (auto mode only).", type=int, required=False, default=1)
    parser.add_argument("-u", "--user", type=str, help="Drop user privileges to this user")
    parser.add_argument("-m", "--modules", help="Path to signature and reporting modules - overrides default modules path.", type=str, required=False)

    args = parser.parse_args()

    if args.user:
        drop_privileges(args.user)

    if args.debug:
        log.setLevel(logging.DEBUG)

    if args.modules:
        sys.path.insert(0, args.modules)

    init_modules(machinery=False)

    if args.id == "auto":
        autoprocess(parallel=args.parallel)
    else:
        task = Database().view_task(int(args.id))
        if not task:
            process(task={"id": int(args.id), "category": "file", "target": ""}, report=args.report)
        else:
            process(task=task.to_dict(), report=args.report)
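
Example #1 calls a drop_privileges helper that the snippet does not define. A minimal sketch of what such a helper usually does (only the name comes from the example; the body is an assumption and the process must start as root for the calls to succeed):

import grp
import os
import pwd

def drop_privileges(username):
    # Sketch only: switch the process to an unprivileged user.
    user = pwd.getpwnam(username)
    # Keep only the supplementary groups the target user belongs to.
    os.setgroups([g.gr_gid for g in grp.getgrall() if username in g.gr_mem])
    os.setgid(user.pw_gid)
    os.setuid(user.pw_uid)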
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("path", type=str, help="Path to the file to analyze")
    parser.add_argument("--package", type=str, action="store", default="", help="Specify an analysis package", required=False)
    parser.add_argument("--custom", type=str, action="store", default="", help="Specify any custom value", required=False)
    parser.add_argument("--timeout", type=int, action="store", default=0, help="Specify an analysis timeout", required=False)
    parser.add_argument("--options", type=str, action="store", default="", help="Specify options for the analysis package (e.g. \"name=value,name2=value2\")", required=False)
    parser.add_argument("--priority", type=int, action="store", default=1, help="Specify a priority for the analysis represented by an integer", required=False)
    parser.add_argument("--machine", type=str, action="store", default="", help="Specify the identifier of a machine you want to use", required=False)
    parser.add_argument("--platform", type=str, action="store", default="", help="Specify the operating system platform you want to use (windows/darwin/linux)", required=False)

    try:
        args = parser.parse_args()
    except IOError as e:
        parser.error(e)
        return False

    if not os.path.exists(args.path):
        print("ERROR: the specified file does not exist at path \"%s\"" % args.path)
        return False

    db = Database()

    task_id = db.add(file_path=args.path,
                     md5=File(args.path).get_md5(),
                     package=args.package,
                     timeout=args.timeout,
                     options=args.options,
                     priority=args.priority,
                     machine=args.machine,
                     platform=args.platform,
                     custom=args.custom)

    print("SUCCESS: Task added with id %d" % task_id)
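
Example #2 leans on File(args.path).get_md5() for hashing, the same chunked-read idea Example #8 later spells out inline. A standalone sketch of that computation (this function is illustrative, not Cuckoo's actual File API):

import hashlib

def file_md5(path, chunk_size=65536):
    # Hash the file in fixed-size chunks so large samples stay memory-friendly.
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()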
Example #3
def start(request, task_id):
    db = Database()
    db.start_task(task_id)

    return render_to_response("success.html",
            {"message": "Task scheduled for NOW, thanks for all the fish."},
            context_instance=RequestContext(request))
Example #4
def remove_pending(request):
    db = Database()    
    tasks = db.list_tasks(status=TASK_PENDING)
    for task in tasks:
        db.delete_task(task.id)
        
    return redirect("analysis.views.pending")
Example #5
def attempt_to_start_analysis(binary):
    db = Database()
    tasks = db.list_tasks()

    filename = ntpath.basename(binary.file_path)
    output = ntpath.join('/tmp/', filename)

    # Skip submission if an analysis task already targets this file.
    for task in tasks:
        if task.to_dict()['target'] == output:
            return

    # Write the binary out once, then queue a single analysis task for it.
    with open(output, "wb") as handle:
        handle.write(binary.data)

    task_id = db.add_path(file_path=output,
                          package="",
                          timeout=120,
                          options="",
                          priority=1,
                          machine="",
                          custom="",
                          memory=False,
                          enforce_timeout=False,
                          tags=None)
    if not task_id:
        err = "Failed adding sandbox analysis for %s" % filename
        raise Exception(err)
Example #6
def show_reports(request, binary_sha1):
    db = Database()
    tasks_files = db.list_tasks_by_binary(binary_sha1, limit=50, category="file")
    analyses_files = []

    if tasks_files:
        for tup in tasks_files:
            sample, task = tup
            new = task.to_dict()
            #new["sample"] = db.view_sample(new["sample_id"]).to_dict()
            new["sample"] = sample.to_dict()
            if db.view_errors(task.id):
                new["errors"] = True

            # Obtain the station and file name from the target path.
            filepath = new["target"]
            filedata = filepath.split('/')
            new["file"] = filedata[3] if len(filedata) > 3 else filedata[2]
            new["station"] = filedata[2] if len(filedata) > 3 else ""
            analyses_files.append(new)

    return render_to_response("analysis/show_reports.html",
                              {"files": analyses_files, "urls": None},
                              context_instance=RequestContext(request))
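
The index arithmetic above assumes targets shaped like /tmp/<station>/<file> when a station directory is present and /tmp/<file> otherwise; a quick illustration with made-up paths:

# Illustration of the split() indexing above (paths are made up).
filedata = "/tmp/station1/sample.exe".split('/')   # ['', 'tmp', 'station1', 'sample.exe']
assert filedata[3] == "sample.exe" and filedata[2] == "station1"

filedata = "/tmp/sample.exe".split('/')            # ['', 'tmp', 'sample.exe']
assert filedata[2] == "sample.exe"                 # no station directory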
Example #7
def cuckoo_clean_failed_tasks():
    """Clean up failed tasks.
    It deletes all stored data from the file system and the configured
    databases (SQL and MongoDB) for failed tasks.
    """
    # Init logging.
    # This needs to init a console logger handler, because the standard
    # logger (init_logging()) logs to a file which will be deleted.
    create_structure()
    init_console_logging()

    # Initialize the database connection.
    db = Database()

    # Check if MongoDB reporting is enabled and drop that if it is.
    cfg = Config("reporting")
    if cfg.mongodb and cfg.mongodb.enabled:
        from pymongo import MongoClient
        host = cfg.mongodb.get("host", "127.0.0.1")
        port = cfg.mongodb.get("port", 27017)
        mdb = cfg.mongodb.get("db", "cuckoo")
        try:
            results_db = MongoClient(host, port)[mdb]
        except:
            log.warning("Unable to connect to MongoDB database: %s", mdb)
            return 

        failed_tasks_a = db.list_tasks(status=TASK_FAILED_ANALYSIS)
        failed_tasks_p = db.list_tasks(status=TASK_FAILED_PROCESSING)
        failed_tasks_r = db.list_tasks(status=TASK_FAILED_REPORTING)
        failed_tasks_rc = db.list_tasks(status=TASK_RECOVERED)
        for task_list in (failed_tasks_a, failed_tasks_p, failed_tasks_r, failed_tasks_rc):
            for task in task_list:
                remove_task(task.to_dict()["id"])
Example #8
def store_and_submit_fileobj(fobj, filename, package="", options="", timeout=0, priority=1, machine="", platform=""):
    # Do everything in tmppath/TMPSUBDIR
    tmppath = tempfile.gettempdir()
    targetpath = os.path.join(tmppath, TMPSUBDIR)
    if not os.path.exists(targetpath):
        os.mkdir(targetpath)

    # Upload will be stored in a tmpdir with the original name
    tmpdir = tempfile.mkdtemp(prefix="upload_", dir=targetpath)
    tmpf = open(os.path.join(tmpdir, filename), "wb")
    t = fobj.read(BUFSIZE)

    # While reading from the client, also compute the MD5 hash.
    md5h = hashlib.md5()
    while t:
        md5h.update(t)
        tmpf.write(t)
        t = fobj.read(BUFSIZE)

    tmpf.close()

    # Submit task to cuckoo db
    db = Database()
    task_id = db.add(file_path=tmpf.name,
                     md5=md5h.hexdigest(),
                     package=package,
                     timeout=timeout,
                     options=options,
                     priority=priority,
                     machine=machine,
                     platform=platform)

    return task_id
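
A minimal caller for the helper above. BUFSIZE and TMPSUBDIR are module-level constants the snippet assumes, so the values here are guesses:

# Hypothetical module-level constants assumed by the snippet.
TMPSUBDIR = "cuckoo-uploads"
BUFSIZE = 64 * 1024

with open("/tmp/sample.exe", "rb") as fobj:
    task_id = store_and_submit_fileobj(fobj, "sample.exe", timeout=120)
    print("SUCCESS: Task added with id %d" % task_id)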
Example #9
def main():
    """main function for standalone usage"""
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-a', '--archive-toplevel-dir', default='/mnt/cuckoo_archive',
                      help='Archive top-level directory [default: %default]')
    parser.add_option('-m', '--local-machine-dir', default=socket.gethostname(),
                      help='Machine-specific directory [default: $HOST]')

    (options, args) = parser.parse_args()

    if len(args) != 0:
        parser.print_help()
        return 2

    # do stuff
    archive_dir = os.path.join(options.archive_toplevel_dir, options.local_machine_dir)
    try:
        os.mkdir(archive_dir)
    except OSError: # already exists
        pass

    db = Database()

    for task in db.list_tasks(status=TASK_REPORTED):
        task_path_src = _analysis_dir(task.id)

        if not os.path.islink(task_path_src):
            task_path_dst = os.path.join(archive_dir, str(task.id))
            move(task_path_src, task_path_dst)
            os.symlink(task_path_dst, task_path_src)
            print(bold(green('Successfully')) + ' archived %s' % task_path_dst)
Example #10
def analysis_view(a_id):    
    db = Database()
    context = {}
    rows = db.get_analysis(a_id) 
    template = lookup.get_template("browse.html")
    context["cuckoo_root"] = CUCKOO_ROOT
    return template.render(os=os, rows=rows, **context)
Example #11
def autoprocess(parallel=1):
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pool = multiprocessing.Pool(parallel)
    pending_results = []

    # CAUTION - big ugly loop ahead.
    while count < maxcount or not maxcount:

        # Pending_results maintenance.
        for ar, tid, target, copy_path in list(pending_results):
            if ar.ready():
                if ar.successful():
                    log.info("Task #%d: reports generation completed", tid)
                else:
                    try:
                        ar.get()
                    except:
                        log.exception("Exception when processing task ID %u.", tid)
                        db.set_status(tid, TASK_FAILED_PROCESSING)

                pending_results.remove((ar, tid, target, copy_path))

        # If still full, don't add more (necessary despite pool).
        if len(pending_results) >= parallel:
            time.sleep(1)
            continue

        # If we're here, getting parallel tasks should at least
        # have one we don't know.
        tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel,
                              order_by="completed_on asc")

        # For loop to add only one, nice.
        for task in tasks:
            # Not-so-efficient lock.
            if task.id in [tid for ar, tid, target, copy_path
                           in pending_results]:
                continue

            log.info("Processing analysis data for Task #%d", task.id)

            sample = db.view_sample(task.sample_id)

            copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                     "binaries", sample.sha256)

            args = task.id, task.target, copy_path
            kwargs = dict(report=True, auto=True)
            result = pool.apply_async(process, args, kwargs)

            pending_results.append((result, task.id, task.target, copy_path))

            count += 1
            break

        # If there wasn't anything to add, sleep tight.
        if not tasks:
            time.sleep(5)
Example #12
class TestDatabase:
    def setUp(self):
        self.d = Database(dsn="sqlite://")

    def test_machine_add_clean(self):
        # Add.
        self.d.add_machine("a", "a", "1.1.1.1", "win", "", "", "", "", "")
        session = self.d.Session()
        assert_equal(session.query(Machine).count(), 1)
        # Delete.
        self.d.clean_machines()
        assert_equal(session.query(Machine).count(), 0)

    def test_task_add_del(self):
        # Add.
        sample_path = tempfile.mkstemp()[1]
        self.d.add_path(sample_path)
        session = self.d.Session()
        assert_equal(session.query(Sample).count(), 1)
        assert_equal(session.query(Task).count(), 1)
        # Drop tasks.
        self.d.drop_tasks()
        assert_equal(session.query(Task).count(), 0)
        assert_equal(session.query(Sample).count(), 1)
        # Drop samples.
        self.d.drop_samples()
        assert_equal(session.query(Sample).count(), 0)
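
The test class above assumes nose-style assertions and the Cuckoo ORM models; a header such as the following would be needed to run it (module paths are assumptions based on Cuckoo's usual layout):

import tempfile

from nose.tools import assert_equal

# Assumed locations; adjust to the actual checkout.
from lib.cuckoo.core.database import Database, Machine, Sample, Task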
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("vmname", type=str, help="Name of the Virtual Machine.")
    parser.add_argument("--debug", action="store_true", help="Debug log in case of errors.")
    parser.add_argument("--add", action="store_true", help="Add a Virtual Machine.")
    parser.add_argument("--ip", type=str, help="Static IP Address.")
    parser.add_argument("--platform", type=str, default="windows", help="Guest Operating System.")
    parser.add_argument("--tags", type=str, help="Tags for this Virtual Machine.")
    parser.add_argument("--interface", type=str, help="Sniffer interface for this machine.")
    parser.add_argument("--snapshot", type=str, help="Specific Virtual Machine Snapshot to use.")
    parser.add_argument("--resultserver", type=str, help="IP:Port of the Result Server.")
    args = parser.parse_args()

    logging.basicConfig()
    log = logging.getLogger()

    if args.debug:
        log.setLevel(logging.DEBUG)

    db = Database()

    # Load the configuration up front; update_conf() below needs it too.
    conf = Config()

    if args.resultserver:
        resultserver_ip, resultserver_port = args.resultserver.split(":")
    else:
        resultserver_ip = conf.resultserver.ip
        resultserver_port = conf.resultserver.port

    if args.add:
        db.add_machine(args.vmname, args.vmname, args.ip, args.platform,
                       args.tags, args.interface, args.snapshot,
                       resultserver_ip, int(resultserver_port))
        db.unlock_machine(args.vmname)

        update_conf(conf.cuckoo.machinery, args)
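
update_conf is referenced but not shown; presumably it persists the new VM in the machinery's .conf file. A rough sketch under that assumption (the body, option names, and CUCKOO_ROOT usage are mine):

import os

def update_conf(machinery_name, args):
    # Sketch: append a section for the new VM to conf/<machinery>.conf.
    # A real implementation would also extend the "machines =" list.
    conf_path = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name)
    lines = ["", "[%s]" % args.vmname,
             "label = %s" % args.vmname,
             "platform = %s" % args.platform,
             "ip = %s" % args.ip]
    if args.snapshot:
        lines.append("snapshot = %s" % args.snapshot)
    if args.tags:
        lines.append("tags = %s" % args.tags)
    with open(conf_path, "a") as f:
        f.write("\n".join(lines) + "\n")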
Example #14
def init_config(override=True):
    """Read configuration from the configuration files and update each entry
    in the database."""
    db = Database()

    log.debug("Initializing configuration...")
    config = db.config_all()
    if not override and config:
        return

    for fname in os.listdir(os.path.join(CUCKOO_ROOT, "conf")):
        basename, ext = os.path.splitext(fname)
        if ext != ".conf":
            continue

        cfg = Config(basename)

        for section, values in cfg.sections.items():
            for key, value in values.items():
                attr = "%s.%s.%s" % (basename, section, key)
                if attr in config and config[attr] == value:
                    continue

                log.debug("Updating configuration %s to '%s' (from '%s')",
                          attr, value, config.get(attr, ''))
                db.config_set(attr, value)
Example #15
    def run(self):
        """Run information gathering.
        @return: information dict.
        """
        self.key = "info"

        if "started_on" not in self.task:
            return dict(
                version=CUCKOO_VERSION,
                started="none",
                ended="none",
                duration=-1,
                id=int(self.task["id"]),
                category="unknown",
                custom="unknown",
                machine=None,
                package="unknown"
            )

        if self.task.get("started_on") and self.task.get("completed_on"):
            started = time.strptime(self.task["started_on"], "%Y-%m-%d %H:%M:%S")
            started = datetime.fromtimestamp(time.mktime(started))
            ended = time.strptime(self.task["completed_on"], "%Y-%m-%d %H:%M:%S")
            ended = datetime.fromtimestamp(time.mktime(ended))
            duration = (ended - started).seconds
        else:
            log.critical("Failed to get start/end time from Task.")
            started, ended, duration = None, None, -1

        db = Database()

        # Fetch sqlalchemy object.
        task = db.view_task(self.task["id"], details=True)

        if task and task.guest:
            # Get machine description.
            machine = task.guest.to_dict()
            # Remove superfluous fields.
            del machine["task_id"]
            del machine["id"]
        else:
            machine = None

        return dict(
            version=CUCKOO_VERSION,
            started=self.task["started_on"],
            ended=self.task.get("completed_on", "none"),
            duration=duration,
            id=int(self.task["id"]),
            category=self.task["category"],
            custom=self.task["custom"],
            owner=self.task["owner"],
            machine=machine,
            package=self.task["package"],
            platform=self.task["platform"],
            options=self.task["options"],
            route=self.task["route"],
        )
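
The started/ended parsing above detours through time.strptime and time.mktime; for these naive local timestamps, parsing directly into a datetime is equivalent. Note also that (ended - started).seconds discards whole days, so .total_seconds() is safer for analyses running longer than 24 hours. A sketch:

from datetime import datetime

def parse_task_time(value):
    # Direct equivalent of the strptime/mktime round-trip used above.
    return datetime.strptime(value, "%Y-%m-%d %H:%M:%S")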
Example #16
def cuckoo_clean():
    """Clean up the Cuckoo setup.
    It deletes logs and all stored data from the file system and the
    configured databases (SQL and MongoDB).
    """
    # Init logging.
    # This needs to init a console logger handler, because the standard
    # logger (init_logging()) logs to a file which will be deleted.
    create_structure()
    init_console_logging()

    # Initialize the database connection.
    db = Database()

    # Drop all tables.
    db.drop()

    # Check if MongoDB reporting is enabled and drop that if it is.
    cfg = Config("reporting")
    if cfg.mongodb and cfg.mongodb.enabled:
        from pymongo import MongoClient
        host = cfg.mongodb.get("host", "127.0.0.1")
        port = cfg.mongodb.get("port", 27017)
        mdb = cfg.mongodb.get("db", "cuckoo")
        try:
            conn = MongoClient(host, port)
            conn.drop_database(mdb)
            conn.disconnect()
        except:
            log.warning("Unable to drop MongoDB database: %s", mdb)

    # Paths to clean.
    paths = [
        os.path.join(CUCKOO_ROOT, "db"),
        os.path.join(CUCKOO_ROOT, "log"),
        os.path.join(CUCKOO_ROOT, "storage"),
    ]

    # Delete various directories.
    for path in paths:
        if os.path.isdir(path):
            try:
                shutil.rmtree(path)
            except (IOError, OSError) as e:
                log.warning("Error removing directory %s: %s", path, e)

    # Delete all compiled Python objects ("*.pyc").
    for dirpath, dirnames, filenames in os.walk(CUCKOO_ROOT):
        for fname in filenames:
            if not fname.endswith(".pyc"):
                continue

            path = os.path.join(CUCKOO_ROOT, dirpath, fname)

            try:
                os.unlink(path)
            except (IOError, OSError) as e:
                log.warning("Error removing file %s: %s", path, e)
Example #17
def pending(request):
    db = Database()
    tasks = db.list_tasks(status=TASK_PENDING)

    pending = []
    for task in tasks:
        pending.append(task.to_dict())

    return render_to_response("analysis/pending.html", {"tasks": pending}, context_instance=RequestContext(request))
Example #18
def pending(request):
    db = Database()
    tasks = db.list_tasks(status=TASK_PENDING)

    pending = []
    for task in tasks:
        pending.append(task.to_dict())

    return render(request, "analysis/pending.html", {"tasks": pending})
Example #19
    def sendFileToCuckoo(self, path):
        from lib.cuckoo.common.objects import Dictionary
        from lib.cuckoo.core.database import Database

        db = Database()
        a = db.add_path(path, package="")
        # a = db.add_path(path, 60, None, None, 3, None, 'cuckoo1', 'windows', False, False, None)
        print("Analyzing task: " + str(a))
        return self.isVirus(a)
Example #20
def remove(request, task_id):
    """Remove an analysis.
    @todo: remove folder from storage.
    """
    anals = results_db.analysis.find({"info.id": int(task_id)})

    # Check whether multiple analyses exist with the same ID (e.g. if process.py was run manually).
    if anals.count() > 1:
        message = "Multiple tasks with this ID deleted, thanks for all the fish. (The specified analysis was duplicated in mongo)"
    elif anals.count() == 1:
        message = "Task deleted, thanks for all the fish."

    if anals.count() > 0:
        # Delete dups too.
        for analysis in anals:
            # Delete sample if not used.
            if "file_id" in analysis["target"]:
                if results_db.analysis.find({"target.file_id": ObjectId(analysis["target"]["file_id"])}).count() == 1:
                    fs.delete(ObjectId(analysis["target"]["file_id"]))

            # Delete screenshots.
            for shot in analysis["shots"]:
                if results_db.analysis.find({"shots": ObjectId(shot)}).count() == 1:
                    fs.delete(ObjectId(shot))

            # Delete network pcap.
            if "pcap_id" in analysis["network"] and results_db.analysis.find({"network.pcap_id": ObjectId(analysis["network"]["pcap_id"])}).count() == 1:
                fs.delete(ObjectId(analysis["network"]["pcap_id"]))

            # Delete sorted pcap
            if "sorted_pcap_id" in analysis["network"] and results_db.analysis.find({"network.sorted_pcap_id": ObjectId(analysis["network"]["sorted_pcap_id"])}).count() == 1:
                fs.delete(ObjectId(analysis["network"]["sorted_pcap_id"]))

            # Delete dropped.
            for drop in analysis["dropped"]:
                if "object_id" in drop and results_db.analysis.find({"dropped.object_id": ObjectId(drop["object_id"])}).count() == 1:
                    fs.delete(ObjectId(drop["object_id"]))

            # Delete calls.
            for process in analysis.get("behavior", {}).get("processes", []):
                for call in process["calls"]:
                    results_db.calls.remove({"_id": ObjectId(call)})

            # Delete analysis data.
            results_db.analysis.remove({"_id": ObjectId(analysis["_id"])})
    else:
        return render_to_response("error.html",
                                  {"error": "The specified analysis does not exist"},
                                  context_instance=RequestContext(request))

    # Delete from SQL db.
    db = Database()
    db.delete_task(task_id)

    return render_to_response("success.html",
                              {"message": message},
                              context_instance=RequestContext(request))
Example #21
def unschedule(request, task_id):
    db = Database()
    task = db.view_task(task_id)
    if task.status == TASK_SCHEDULED:
        db.set_status(task_id, TASK_UNSCHEDULED)

    return render_to_response("success.html",
            {"message": "Task unscheduled, thanks for all the fish."},
            context_instance=RequestContext(request))
Example #22
def drop_report(request, task_id):
    """Delete the report and its task."""
    report = results_db.analysis.remove({"info.id": int(task_id)})

    if not report:
        db = Database()
        db.delete_task(task_id)

    return index(request)
Example #23
def delete_all():
    """Delete ALL tasks in Cuckoo's local processing queue."""
    db = Database()
    tasks = db.list_tasks()

    if not tasks:
        print(bold(red("Error")) + ": no tasks to be deleted")
    else:
        for task in tasks:
            db.delete_task(task.id)
Example #24
    def run(self):
        """Run information gathering.
        @return: information dict.
        """
        self.key = "info"

        if "started_on" not in self.task:
            return dict(
                version=CUCKOO_VERSION,
                started="none",
                ended="none",
                duration="none",
                id=int(self.task["id"]),
                category="unknown",
                custom="unknown",
                machine=None,
                package="unknown"
            )

        try:
            started = time.strptime(self.task["started_on"], "%Y-%m-%d %H:%M:%S")
            started = datetime.fromtimestamp(time.mktime(started))
            ended = time.strptime(self.task["completed_on"], "%Y-%m-%d %H:%M:%S")
            ended = datetime.fromtimestamp(time.mktime(ended))
        except:
            log.critical("Failed to get start/end time from Task.")
            duration = -1
        else:
            duration = (ended - started).seconds

        db = Database()

        # Fetch sqlalchemy object.
        task = db.view_task(self.task["id"], details=True)

        if task and task.guest:
            # Get machine description.
            machine = task.guest.to_dict()
            # Remove useless task_id.
            del machine["task_id"]
        else:
            machine = None

        return dict(
            version=CUCKOO_VERSION,
            started=self.task["started_on"],
            ended=self.task.get("completed_on", "none"),
            duration=duration,
            id=int(self.task["id"]),
            category=self.task["category"],
            custom=self.task["custom"],
            machine=machine,
            package=self.task["package"]
        )
Example #25
def drop_report_from_binary(request, task_id, binary_sha1):
    """Delete the report and its task."""
    report = results_db.analysis.remove({"info.id": int(task_id)})

    if not report:
        db = Database()
        db.delete_task(task_id)

    return show_reports(request, binary_sha1)
Example #26
def task_done(tid_list):
    """Count the completed/reported tasks in tid_list and return True
    once all of them are done."""
    db = Database()
    tasks_count = db.count_tasks(status=TASK_COMPLETED, tid_list=tid_list)
    tasks_count += db.count_tasks(status=TASK_REPORTED, tid_list=tid_list)

    return tasks_count >= len(tid_list)
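
A caller would typically poll task_done until a submitted batch finishes; the loop below is an illustration, not part of the source:

import time

def wait_for_tasks(tid_list, poll_seconds=10):
    # Block until every task in tid_list is completed or reported.
    while not task_done(tid_list):
        time.sleep(poll_seconds)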
Example #27
def cuckoo_clean_failed_url_tasks():
    """Clean up failed URL tasks.
    It deletes all stored data from the file system and the configured
    databases (SQL and MongoDB) for failed URL tasks.
    """
    # Init logging.
    # This needs to init a console logger handler, because the standard
    # logger (init_logging()) logs to a file which will be deleted.
    create_structure()
    init_console_logging()

    # Initialize the database connection.
    db = Database()

    # Check if MongoDB reporting is enabled and drop that if it is.
    cfg = Config("reporting")
    if cfg.mongodb and cfg.mongodb.enabled:
        from pymongo import MongoClient
        host = cfg.mongodb.get("host", "127.0.0.1")
        port = cfg.mongodb.get("port", 27017)
        mdb = cfg.mongodb.get("db", "cuckoo")
        try:
            results_db = MongoClient(host, port)[mdb]
        except:
            log.warning("Unable to connect to MongoDB database: %s", mdb)
            return

        done = False
        while not done:
            rtmp = results_db.analysis.find(
                {"info.category": "url", "network.http.0": {"$exists": False}},
                {"info.id": 1}, sort=[("_id", -1)]).limit(100)
            if rtmp and rtmp.count() > 0:
                for e in rtmp:
                    if e["info"]["id"]:
                        print(e["info"]["id"])
                        try:
                            results_db.suricata.remove({"info.id": int(e["info"]["id"])})
                        except:
                            print("failed to remove suricata data for %s" % (e["info"]["id"]))
                        if db.delete_task(e["info"]["id"]):
                            delete_folder(os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                                       "%s" % e["info"]["id"]))
                        else:
                            print("failed to remove task %s from DB" % (e["info"]["id"]))
                        try:
                            results_db.analysis.remove({"info.id": int(e["info"]["id"])})
                        except:
                            print("failed to remove analysis data for %s" % (e["info"]["id"]))
                    else:
                        done = True
            else:
                done = True
Example #28
def init_tasks():
    """Check tasks and reschedule uncompleted ones."""
    db = Database()
    cfg = Config()

    if cfg.cuckoo.reschedule:
        log.debug("Checking for locked tasks...")

        tasks = db.list_tasks(status=TASK_RUNNING)

        for task in tasks:
            db.reschedule(task.id)
            log.info("Rescheduled task with ID {0} and "
                     "target {1}".format(task.id, task.target))
Example #29
def autoprocess(parallel=1):
    cfg = Config()
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pool = multiprocessing.Pool(parallel)
    pending_results = []

    # CAUTION - big ugly loop ahead
    while count < maxcount or not maxcount:

        # pending_results maintenance
        for ar, tid in list(pending_results):
            if ar.ready():
                if ar.successful():
                    print("subtask success", tid, "returnvalue", ar.get())
                    log.info("Task #%d: reports generation completed", tid)
                else:
                    try:
                        ar.get()
                    except:
                        log.exception("Exception when processing task ID %u.", tid)
                        db.set_status(tid, TASK_FAILED_PROCESSING)

                pending_results.remove((ar, tid))

        # if still full, don't add more (necessary despite pool)
        if len(pending_results) >= parallel:
            time.sleep(1)
            continue

        # if we're here, getting #parallel tasks should at least have one we don't know
        tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel)

        # for loop to add only one, nice
        for task in tasks:
            # not-so-efficient lock
            if task.id in [tid for ar, tid in pending_results]:
                continue

            log.info("Processing analysis data for Task #%d", task.id)

            result = pool.apply_async(do, (task.id,), {"report": True})                
            pending_results.append((result, task.id))

            count += 1
            break

        # if there wasn't anything to add, sleep tight
        if not tasks:
            time.sleep(5)
Example #30
def cuckoo_clean_before_day(days=None):
    """Clean up old tasks.
    It deletes all stored data from the file system and the configured
    databases (SQL and MongoDB) for tasks completed before now - days.
    """
    # Init logging.
    # This needs to init a console logger handler, because the standard
    # logger (init_logging()) logs to a file which will be deleted.
    if not days:
        print("No days argument provided, bailing")
        return
    create_structure()
    init_console_logging()

    # Initialize the database connection.
    db = Database()

    # Check if MongoDB reporting is enabled and drop that if it is.
    cfg = Config("reporting")
    if cfg.mongodb and cfg.mongodb.enabled:
        from pymongo import MongoClient
        host = cfg.mongodb.get("host", "127.0.0.1")
        port = cfg.mongodb.get("port", 27017)
        mdb = cfg.mongodb.get("db", "cuckoo")
        try:
            results_db = MongoClient(host, port)[mdb]
        except:
            log.warning("Unable to connect to MongoDB database: %s", mdb)
            return

        added_before = datetime.datetime.now() - datetime.timedelta(days=int(days))
        old_tasks = db.list_tasks(added_before=added_before)
        for e in old_tasks:
            new = e.to_dict()
            print(int(new["id"]))
            try:
                results_db.suricata.remove({"info.id": int(new["id"])})
            except:
                print("failed to remove suricata info (may not exist) %s" % (int(new["id"])))
            try:
                results_db.analysis.remove({"info.id": int(new["id"])})
            except:
                print("failed to remove analysis info (may not exist) %s" % (int(new["id"])))
            if db.delete_task(new["id"]):
                delete_folder(os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                           "%s" % int(new["id"])))
            else:
                print("failed to remove failed task %s from DB" % (int(new["id"])))
Example #31
def ajax_submit_url(request):
    if request.method == "POST":
        package = request.POST.get("package", "")
        timeout = min(force_int(request.POST.get("timeout")), 60 * 60 * 24)
        options = request.POST.get("options", "")
        priority = force_int(request.POST.get("priority"))
        machine = request.POST.get("machine", "")
        gateway = request.POST.get("gateway", None)
        clock = request.POST.get("clock", None)
        custom = request.POST.get("custom", "")
        memory = bool(request.POST.get("memory", False))
        enforce_timeout = bool(request.POST.get("enforce_timeout", False))
        status = bool(request.POST.get("user_status", False))

        # user_status is consumed by db.add_url() below, so it has to be set.
        if not status:
            user_status = 0
        else:
            user_status = 1

        if request.user.id is None:
            user_id = 1
        else:
            user_id = request.user.id

        tags = request.POST.get("tags", None)

        if request.POST.get("free"):
            if options:
                options += ","
            options += "free=yes"

        if request.POST.get("nohuman"):
            if options:
                options += ","
            options += "nohuman=yes"

        if request.POST.get("tor"):
            if options:
                options += ","
            options += "tor=yes"

        if request.POST.get("process_memory"):
            if options:
                options += ","
            options += "procmemdump=yes"

        if request.POST.get("kernel_analysis"):
            if options:
                options += ","
            options += "kernel_analysis=yes"

        if gateway and gateway in settings.GATEWAYS:
            if "," in settings.GATEWAYS[gateway]:
                tgateway = random.choice(settings.GATEWAYS[gateway].split(","))
                ngateway = settings.GATEWAYS[tgateway]
            else:
                ngateway = settings.GATEWAYS[gateway]
            if options:
                options += ","
            options += "setgw=%s" % (ngateway)

        db = Database()
        task_ids = []
        task_machines = []

        if machine.lower() == "all":
            for entry in db.list_machines():
                task_machines.append(entry.label)
        else:
            task_machines.append(machine)

        if "url" in request.POST and request.POST.get("url").strip():
            url = request.POST.get("url").strip()
            if not url:
                return render_to_response("error.html",
                                          {"error": "You specified an invalid URL!"},
                                          context_instance=RequestContext(request))

            url = url.replace("hxxps://", "https://").replace("hxxp://", "http://").replace("[.]", ".")
            for entry in task_machines:
                task_id = db.add_url(url=url,
                                     package=package,
                                     timeout=timeout,
                                     options=options,
                                     priority=priority,
                                     machine=entry,
                                     custom=custom,
                                     memory=memory,
                                     enforce_timeout=enforce_timeout,
                                     tags=tags,
                                     clock=clock,
                                     user_status=user_status,
                                     user_id=user_id)
                if task_id:
                    task_ids.append(until.encrpt(task_id))

        tasks_count = len(task_ids)
        if tasks_count > 0:
            return HttpResponse(json.dumps({"correct": "%s" % task_ids[0]}), content_type="application/json")
        else:
            return HttpResponse(json.dumps({"error": "Error adding task to Cuckoo's database."}), content_type="application/json")
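
The repeated flag blocks above (free, nohuman, tor, process_memory, kernel_analysis) all follow the same append-with-comma pattern; a small helper collapses them. The helper below is mine, not part of the source:

def append_option(options, flag):
    # Append a "name=value" flag to a comma-separated options string.
    return options + ("," if options else "") + flag

# Usage, replacing each three-line block above:
# if request.POST.get("free"):
#     options = append_option(options, "free=yes")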
Example #32
# Standard imports for an Alembic env.py, added so the snippet stands alone.
import os
import sys

from logging.config import fileConfig

from alembic import context

# This is the Alembic Config object, which provides access to the values within
# the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# Get cuckoo root path.
curdir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curdir, "..", ".."))

from lib.cuckoo.core.database import Base, Database

# Get database connection string from cuckoo configuration.
url = Database(schema_check=False).engine.url.__to_string__(
    hide_password=False)
target_metadata = Base.metadata


def run_migrations_offline():
    """Run migrations in 'offline' mode.
    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.
    Calls to context.execute() here emit the given string to the
    script output.
    """
    context.configure(url=url)

    with context.begin_transaction():
        context.run_migrations()
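
A stock Alembic env.py pairs run_migrations_offline with an online mode that binds a live connection; the sketch below reuses the engine built above (whether Cuckoo's env.py does exactly this is an assumption):

def run_migrations_online():
    # Sketch: run migrations over a live DBAPI connection.
    engine = Database(schema_check=False).engine
    with engine.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()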
Example #33
def cuckoo_clean_before_day(args):
    """Clean up old tasks.
    It deletes all stored data from the file system and the configured
    databases (SQL and MongoDB) for tasks completed before now - days.
    """
    # Init logging.
    # This needs to init a console logger handler, because the standard
    # logger (init_logging()) logs to a file which will be deleted.
    if not args.delete_older_than_days:
        print("No days argument provided, bailing")
        return
    else:
        days = args.delete_older_than_days
    create_structure()
    init_console_logging()
    id_arr = []

    # Initialize the database connection.
    db = Database()

    # Check if MongoDB reporting is enabled and drop that if it is.
    cfg = Config("reporting")
    if cfg.mongodb and cfg.mongodb.enabled:
        from pymongo import MongoClient
        host = cfg.mongodb.get("host", "127.0.0.1")
        port = cfg.mongodb.get("port", 27017)
        mdb = cfg.mongodb.get("db", "cuckoo")
        try:
            results_db = MongoClient(host, port)[mdb]
        except:
            log.warning("Unable to connect to MongoDB database: %s", mdb)
            return

        added_before = datetime.now() - timedelta(days=int(days))
        if args.files_only_filter:
            print("file filter applied")
            old_tasks = db.list_tasks(added_before=added_before,
                                      category="file")
        elif args.urls_only_filter:
            print("url filter applied")
            old_tasks = db.list_tasks(added_before=added_before,
                                      category="url")
        else:
            old_tasks = db.list_tasks(added_before=added_before)

        for e in old_tasks:
            new = e.to_dict()
            print(int(new["id"]))
            id_arr.append({"info.id": (int(new["id"]))})

        print("number of matching records %s before suri/custom filter" % len(id_arr))
        if id_arr and args.suricata_zero_alert_filter:
            result = list(
                results_db.analysis.find(
                    {
                        "suricata.alerts.alert": {
                            "$exists": False
                        },
                        "$or": id_arr
                    }, {"info.id": 1}))
            tmp_arr = []
            for entry in result:
                tmp_arr.append(entry["info"]["id"])
            id_arr = tmp_arr
        if id_arr and args.custom_include_filter:
            result = list(
                results_db.analysis.find(
                    {
                        "info.custom": {
                            "$regex": args.custom_include_filter
                        },
                        "$or": id_arr
                    }, {"info.id": 1}))
            tmp_arr = []
            for entry in result:
                tmp_arr.append(entry["info"]["id"])
            id_arr = tmp_arr
        print("number of matching records %s" % len(id_arr))
        for e in id_arr:
            try:
                print("removing %s from analysis db" % (e))
                results_db.analysis.remove({"info.id": e})
            except:
                print("failed to remove analysis info (may not exist) %s" % (e))
            if db.delete_task(e):
                delete_folder(
                    os.path.join(CUCKOO_ROOT, "storage", "analyses", "%s" % e))
            else:
                print("failed to remove failed task %s from DB" % (e))
Example #34
def export(request, task_id):
    directories = request.POST.getlist("directories")
    task = Database().view_task(task_id)

    if not directories:
        print("directories is empty")
        return render_to_response(
            "error.html", {"error": "You have not selected any directory"},
            context_instance=RequestContext(request))

    report = results_db.analysis.find_one({"info.id": int(task_id)},
                                          sort=[("_id", pymongo.DESCENDING)])
    if not report:
        return render_to_response(
            "error.html", {"error": "The specified analysis does not exist"},
            context_instance=RequestContext(request))

    path = report["info"]["analysis_path"]

    # Create an analysis.json file with basic information about the
    # analysis; it is used when importing the archive.
    analysis_path = os.path.join(path, "analysis.json")
    with open(analysis_path, "w") as outfile:
        json.dump({'target': report["target"]}, outfile, indent=4)

    # Create a zip file with the selected contents of the analysis.
    zf = zipfile.ZipFile(path + ".zip", "w", zipfile.ZIP_DEFLATED)

    # file_path stores the sample that goes into the exported zip as "binary".
    if report["info"]["category"] == "file":
        file_path = task.target
        zf.write(file_path, "binary")
    elif report["info"]["category"] == "url":
        file_path = report["target"]["url"]
    else:
        return render_to_response(
            "error.html",
            {"error": "The category of the specified analysis isn't valid"},
            context_instance=RequestContext(request))

    for dirname, subdirs, files in os.walk(path):
        if os.path.basename(dirname) == task_id:
            for filename in files:
                zf.write(os.path.join(dirname, filename), filename)
        if os.path.basename(dirname) in directories:
            for filename in files:
                zf.write(os.path.join(dirname, filename),
                         os.path.join(os.path.basename(dirname), filename))

    zf.close()

    # Delete the analysis.json file from the original analysis directory.
    if os.path.isfile(analysis_path):
        os.remove(analysis_path)

    zfile = open(zf.filename, 'rb')

    try:
        zip_file_type = zf.content_type
    except AttributeError:
        zip_file_type = "application/zip"

    response = HttpResponse(zfile, content_type=zip_file_type)
    response["Content-Disposition"] = "attachment; filename=%s.zip" % task_id
    response["Content-Length"] = os.path.getsize(zf.filename)

    return response
Example #35
def submit_file(request):
    if request.method == "POST":
        package = request.POST.get("package", "")
        timeout = min(force_int(request.POST.get("timeout")), 60 * 60 * 24)
        options = request.POST.get("options", "")
        priority = force_int(request.POST.get("priority"))
        machine = request.POST.get("machine", "")
        gateway = request.POST.get("gateway", None)
        clock = request.POST.get("clock", None)
        custom = request.POST.get("custom", "")
        memory = bool(request.POST.get("memory", False))
        enforce_timeout = bool(request.POST.get("enforce_timeout", False))
        status = bool(request.POST.get("user_status", False))
        if not status:
            user_status = 0
        else:
            user_status = 1

        if request.user.id is None:
            user_id = 1
        else:
            user_id = request.user.id
        
        tags = request.POST.get("tags", None)

        if request.POST.get("free"):
            if options:
                options += ","
            options += "free=yes"

        if request.POST.get("nohuman"):
            if options:
                options += ","
            options += "nohuman=yes"

        if request.POST.get("tor"):
            if options:
                options += ","
            options += "tor=yes"

        if request.POST.get("process_memory"):
            if options:
                options += ","
            options += "procmemdump=yes"

        if request.POST.get("kernel_analysis"):
            if options:
                options += ","
            options += "kernel_analysis=yes"   

        if gateway and gateway in settings.GATEWAYS:
            if "," in settings.GATEWAYS[gateway]:
                tgateway = random.choice(settings.GATEWAYS[gateway].split(","))
                ngateway = settings.GATEWAYS[tgateway]
            else:
                ngateway = settings.GATEWAYS[gateway]
            if options:
                options += ","
            options += "setgw=%s" % (ngateway)

        db = Database()
        task_ids = []
        task_machines = []

        if machine.lower() == "all":
            for entry in db.list_machines():
                task_machines.append(entry.label)
        else:
            task_machines.append(machine)

        if "sample" in request.FILES:

            for sample in request.FILES.getlist("sample"):
                if sample.size == 0:
                    return render_to_response("error.html",
                                              {"error": "You uploaded an empty file."},
                                              context_instance=RequestContext(request))
                elif sample.size > settings.MAX_UPLOAD_SIZE:
                    return render_to_response("error.html",
                                              {"error": "You uploaded a file that exceeds the maximum allowed upload size."},
                                              context_instance=RequestContext(request))
    
                # Move the sample from the Django temporary file to Cuckoo's
                # temporary storage so it persists across reboots (if the user
                # has configured it that way).
                path = store_temp_file(sample.read(),
                                       sample.name)
                pp.pprint("\nFile Path is %s\n" % path)
                currentMD5 = until.getBigFileMD5(path)

                previous_analysis = results_db.analysis.find({"target.file.md5": currentMD5}).sort([["_id", -1]])

                task = []

                for single in previous_analysis:
                    single["info"]["base64"] = until.encrpt(single["info"]["id"])
                    single["info"]["filename"] = single["target"]["file"]["name"]
                    pp.pprint(single["info"])
                    task.append(single["info"])

                second_post = json.dumps({"file_path": path, "package": package, "timeout": timeout, "options": options, "machine": machine, "priority": priority, "custom": custom, "memory": memory, "enforce_timeout": enforce_timeout, "tags": tags, "clock": clock, "user_status": user_status, "user_id": user_id}, sort_keys=True)
                pp.pprint(second_post)

                if previous_analysis.count() >= 1:
                    return render_to_response("submission/ShowSimilar.html",
                                              {"tasks": task, "params": second_post},
                                              context_instance=RequestContext(request))
                else:
                    for entry in task_machines:
                        task_ids_new = db.demux_sample_and_add_to_db(file_path=path, package=package, timeout=timeout, options=options, priority=priority,
                                                                     machine=entry, custom=custom, memory=memory, enforce_timeout=enforce_timeout, tags=tags, clock=clock, user_status=user_status, user_id=user_id)
                        pp.pprint(task_ids_new)
                        # Collect the IDs from every machine, not just the last loop iteration.
                        for taskId in task_ids_new:
                            task_ids.append(until.encrpt(taskId))

                    tasks_count = len(task_ids)
                    pp.pprint(task_ids)

                    if tasks_count > 0:

                        return render_to_response("submission/complete.html",
                                                  {"tasks" : task_ids,
                                                   "tasks_count" : tasks_count},
                                                  context_instance=RequestContext(request))
                    else:
                        return render_to_response("error.html",
                                                  {"error": "Error adding task to Cuckoo's database."},
                                                  context_instance=RequestContext(request))
    else:
        enabledconf = dict()
        enabledconf["vt"] = settings.VTDL_ENABLED
        enabledconf["kernel"] = settings.OPT_ZER0M0N
        enabledconf["memory"] = Config("processing").memory.get("enabled")
        enabledconf["procmemory"] = Config("processing").procmemory.get("enabled")
        enabledconf["tor"] = Config("auxiliary").tor.get("enabled")
        if Config("auxiliary").gateways:
            enabledconf["gateways"] = True
        else:
            enabledconf["gateways"] = False
        enabledconf["tags"] = False
        # Get enabled machinery
        machinery = Config("cuckoo").cuckoo.get("machinery")
        # Get VM names for machinery config elements
        vms = [x.strip() for x in getattr(Config(machinery), machinery).get("machines").split(",")]
        # Check each VM config element for tags
        for vmtag in vms:
            if "tags" in getattr(Config(machinery), vmtag).keys():
                enabledconf["tags"] = True

        files = os.listdir(os.path.join(settings.CUCKOO_PATH, "analyzer", "windows", "modules", "packages"))

        packages = []
        for name in files:
            name = os.path.splitext(name)[0]
            if name == "__init__":
                continue

            packages.append(name)

        # Prepare a list of VM names, description label based on tags.
        machines = []
        for machine in Database().list_machines():
            tags = []
            for tag in machine.tags:
                tags.append(tag.name)

            if tags:
                label = machine.label + ": " + ", ".join(tags)
            else:
                label = machine.label

            machines.append((machine.label, label))

        # Prepend ALL/ANY options.
        machines.insert(0, ("", "First available"))
        machines.insert(1, ("all", "All"))

        return render_to_response("submission/submit_file.html",
                                  {"packages": sorted(packages),
                                   "machines": machines,
                                   "gateways": settings.GATEWAYS,
                                   "config": enabledconf},
                                  context_instance=RequestContext(request))
Example #36
def cuckoo_clean():
    """Clean up the Cuckoo setup.
    It deletes logs and all stored data from the file system and the
    configured databases (SQL and MongoDB).
    """
    # Init logging.
    # This needs to init a console logger handler, because the standard
    # logger (init_logging()) logs to a file which will be deleted.
    create_structure()
    init_console_logging()

    # Initialize the database connection.
    db = Database()

    # Drop all tables.
    db.drop()

    # Check if MongoDB reporting is enabled and drop that if it is.
    cfg = Config("reporting")
    if cfg.mongodb and cfg.mongodb.enabled:
        from pymongo import MongoClient
        host = cfg.mongodb.get("host", "127.0.0.1")
        port = cfg.mongodb.get("port", 27017)
        mdb = cfg.mongodb.get("db", "cuckoo")
        try:
            conn = MongoClient(host, port)
            conn.drop_database(mdb)
            conn.close()
        except:
            log.warning("Unable to drop MongoDB database: %s", mdb)

    # Check if ElasticSearch is enabled and delete that data if it is.
    if cfg.elasticsearchdb and cfg.elasticsearchdb.enabled and not cfg.elasticsearchdb.searchonly:
        from elasticsearch import Elasticsearch
        delidx = cfg.elasticsearchdb.index + "-*"
        # Initialize both names so a failed connection cannot raise NameError below.
        es = None
        analyses = []
        try:
            es = Elasticsearch(
                hosts=[{
                    "host": cfg.elasticsearchdb.host,
                    "port": cfg.elasticsearchdb.port,
                }],
                timeout=60)
        except:
            log.warning("Unable to connect to ElasticSearch")

        if es:
            analyses = es.search(index=delidx, doc_type="analysis",
                                 q="*")["hits"]["hits"]
        if analyses:
            for analysis in analyses:
                esidx = analysis["_index"]
                esid = analysis["_id"]
                # Check if behavior exists
                if analysis["_source"]["behavior"]:
                    for process in analysis["_source"]["behavior"][
                            "processes"]:
                        for call in process["calls"]:
                            es.delete(
                                index=esidx,
                                doc_type="calls",
                                id=call,
                            )
                # Delete the analysis results
                es.delete(
                    index=esidx,
                    doc_type="analysis",
                    id=esid,
                )

    # Paths to clean.
    paths = [
        os.path.join(CUCKOO_ROOT, "db"),
        os.path.join(CUCKOO_ROOT, "log"),
        os.path.join(CUCKOO_ROOT, "storage"),
    ]

    # Delete various directories.
    for path in paths:
        if os.path.isdir(path):
            try:
                shutil.rmtree(path)
            except (IOError, OSError) as e:
                log.warning("Error removing directory %s: %s", path, e)

    # Delete all compiled Python objects ("*.pyc").
    for dirpath, dirnames, filenames in os.walk(CUCKOO_ROOT):
        for fname in filenames:
            if not fname.endswith(".pyc"):
                continue

            path = os.path.join(CUCKOO_ROOT, dirpath, fname)

            try:
                os.unlink(path)
            except (IOError, OSError) as e:
                log.warning("Error removing file %s: %s", path, e)
Example #37
class Scheduler:
    """Tasks Scheduler.

    This class is responsible for the main execution loop of the tool. It
    prepares the analysis machines and keeps waiting for and loading new
    analysis tasks.
    Whenever a new task is available, it launches an AnalysisManager, which
    takes care of running the full analysis process and operating with the
    assigned analysis machine.
    """
    def __init__(self, maxcount=None):
        self.running = True
        self.cfg = Config()
        self.db = Database()
        self.maxcount = maxcount
        self.total_analysis_count = 0
        self.vpn_cfg = Config("vpn")

    def initialize(self):
        """Initialize the machine manager."""
        global machinery, machine_lock

        machinery_name = self.cfg.cuckoo.machinery

        max_vmstartup_count = self.cfg.cuckoo.max_vmstartup_count
        if max_vmstartup_count:
            machine_lock = threading.Semaphore(max_vmstartup_count)
        else:
            machine_lock = threading.Lock()

        log.info(
            "Using \"%s\" machine manager with max_analysis_count=%d, "
            "max_machines_count=%d, and max_vmstartup_count=%d",
            machinery_name, self.cfg.cuckoo.max_analysis_count,
            self.cfg.cuckoo.max_machines_count,
            self.cfg.cuckoo.max_vmstartup_count)

        # Get registered class name. Only one machine manager is imported,
        # therefore there should be only one class in the list.
        plugin = list_plugins("machinery")[0]
        # Initialize the machine manager.
        machinery = plugin()

        # Find its configuration file.
        conf = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name)

        if not os.path.exists(conf):
            raise CuckooCriticalError("The configuration file for machine "
                                      "manager \"{0}\" does not exist at path:"
                                      " {1}".format(machinery_name, conf))

        # Provide a dictionary with the configuration options to the
        # machine manager instance.
        machinery.set_options(Config(machinery_name))

        # Initialize the machine manager.
        try:
            machinery.initialize(machinery_name)
        except CuckooMachineError as e:
            raise CuckooCriticalError("Error initializing machines: %s" % e)

        # At this point all the available machines should have been identified
        # and added to the list. If none were found, Cuckoo needs to abort the
        # execution.
        if not len(machinery.machines()):
            raise CuckooCriticalError("No machines available.")
        else:
            log.info("Loaded %s machine/s", len(machinery.machines()))

        if len(machinery.machines()) > 1 and self.db.engine.name == "sqlite":
            log.warning("As you've configured Cuckoo to execute parallel "
                        "analyses, we recommend you to switch to a MySQL "
                        "a PostgreSQL database as SQLite might cause some "
                        "issues.")

        if len(machinery.machines()) > 4 and self.cfg.cuckoo.process_results:
            log.warning("When running many virtual machines it is recommended "
                        "to process the results in a separate process.py to "
                        "increase throughput and stability. Please read the "
                        "documentation about the `Processing Utility`.")

        # Drop all existing packet forwarding rules for each VM. Just in case
        # Cuckoo was terminated for some reason and various forwarding rules
        # have thus not been dropped yet.
        for machine in machinery.machines():
            if not machine.interface:
                log.info(
                    "Unable to determine the network interface for VM "
                    "with name %s, Cuckoo will not be able to give it "
                    "full internet access or route it through a VPN! "
                    "Please define a default network interface for the "
                    "machinery or define a network interface for each "
                    "VM.", machine.name)
                continue

            # Drop forwarding rule to each VPN.
            if self.vpn_cfg.vpn.enabled:
                for vpn in vpns.values():
                    rooter("forward_disable", machine.interface, vpn.interface,
                           machine.ip)

            # Drop forwarding rule to the internet / dirty line.
            if self.cfg.routing.internet != "none":
                rooter("forward_disable", machine.interface,
                       self.cfg.routing.internet, machine.ip)

    def stop(self):
        """Stop scheduler."""
        self.running = False
        # Shut down the machine manager (used to kill machines that are still alive).
        machinery.shutdown()

    def start(self):
        """Start scheduler."""
        self.initialize()

        log.info("Waiting for analysis tasks.")

        # Message queue with threads to transmit exceptions (used as IPC).
        errors = Queue.Queue()

        # Command-line overrides the configuration file.
        if self.maxcount is None:
            self.maxcount = self.cfg.cuckoo.max_analysis_count

        # This loop runs forever.
        while self.running:
            time.sleep(1)

            # Wait until the machine lock is not locked. This is only the case
            # when all machines are fully running, rather than about to start
            # or still busy starting. This way we won't have race conditions
            # with finding out there are no available machines in the analysis
            # manager or having two analyses pick the same machine.
            if not machine_lock.acquire(False):
                continue

            machine_lock.release()

            # If not enough free disk space is available, then we print an
            # error message and wait another round (this check is ignored
            # when the freespace configuration variable is set to zero).
            if self.cfg.cuckoo.freespace:
                # Resolve the full base path to the analysis folder, just in
                # case somebody decides to make a symbolic link out of it.
                dir_path = os.path.join(CUCKOO_ROOT, "storage", "analyses")

                # TODO: Windows support
                if hasattr(os, "statvfs"):
                    dir_stats = os.statvfs(dir_path)

                    # Calculate the free disk space in megabytes.
                    space_available = dir_stats.f_bavail * dir_stats.f_frsize
                    space_available /= 1024 * 1024

                    if space_available < self.cfg.cuckoo.freespace:
                        log.error("Not enough free disk space! (Only %d MB!)",
                                  space_available)
                        continue

            # Have we limited the number of concurrently executing machines?
            if self.cfg.cuckoo.max_machines_count > 0:
                # Are too many running?
                if len(machinery.running()) >= self.cfg.cuckoo.max_machines_count:
                    continue

            # If no machines are available, it's pointless to fetch
            # pending tasks. Loop over.
            if not machinery.availables():
                continue

            # Exits if max_analysis_count is defined in the configuration
            # file and has been reached.
            if self.maxcount and self.total_analysis_count >= self.maxcount:
                if active_analysis_count <= 0:
                    self.stop()
            else:
                # Fetch a pending analysis task.
                # TODO: this only fixes submissions made with --machine; other
                # attributes (tags etc.) still need to be handled.
                for machine in self.db.get_available_machines():
                    task = self.db.fetch(machine=machine.name)
                    if task:
                        break
                else:
                    task = self.db.fetch()
                if task:
                    log.debug("Task #{0}: Processing task".format(task.id))
                    self.total_analysis_count += 1
                    # Initialize and start the analysis manager.
                    analysis = AnalysisManager(task, errors)
                    analysis.daemon = True
                    analysis.start()

            # Deal with errors.
            try:
                raise errors.get(block=False)
            except Queue.Empty:
                pass
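For context, the scheduler above is normally driven from the daemon's entry point; a minimal usage sketch, with signal handling left out and assuming Scheduler is importable:

sched = Scheduler(maxcount=None)  # None: fall back to max_analysis_count from the config
try:
    sched.start()   # blocks in the polling loop shown above
except KeyboardInterrupt:
    sched.stop()    # flips self.running and shuts the machinery down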
Example #38
def __init__(self, maxcount=None):
    self.running = True
    self.cfg = Config()
    self.db = Database()
    self.maxcount = maxcount
    self.total_analysis_count = 0
Example #39
def __init__(self):
    self.running = True
    self.cfg = Config()
    self.db = Database()
Example #40
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT, CUCKOO_VERSION
from lib.cuckoo.common.exceptions import CuckooDisableModule
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.exceptions import CuckooProcessingError
from lib.cuckoo.common.exceptions import CuckooReportError
from lib.cuckoo.common.exceptions import CuckooDependencyError
from lib.cuckoo.core.database import Database

try:
    import re2 as re
except ImportError:
    import re

log = logging.getLogger(__name__)
db = Database()
_modules = defaultdict(dict)


def import_plugin(name):
    try:
        module = __import__(name, globals(), locals(), ["dummy"])
    except ImportError as e:
        log.warning('Unable to import plugin "%s": %s', name, e)
        return
    else:
        # ToDo remove for release
        try:
            load_plugins(module)
        except Exception as e:
            log.warning("Failed to load plugins from %s: %s", name, e)
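A brief usage sketch for the helper above; the dotted module path is hypothetical, and any importable module containing plugin classes would do:

# Import a single plugin module by dotted path and register its classes.
import_plugin("modules.processing.behavior")  # hypothetical path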
Example #41
    def run(self, results):
        self.task_options_stack = []
        self.task_options = None
        self.task_custom = None
        filesdict = {}
        report = dict(results)
        db = Database()
        detections = set()

        # self.task_options is still None at this point; read the task's
        # options directly so disable_cape=1 is actually honoured.
        if self.task["options"] and 'disable_cape=1' in self.task["options"]:
            return

        parent_package = report["info"].get("package")
        if parent_package in cape_package_list:
            # We only want to trigger detections from 'straight' runs,
            # behavioural packages or unpackers.
            if parent_package not in ("Extraction", "Injection", "Compression", "UPX"):
                return

        ##### Initial static hits from CAPE's yara signatures
        #####
        if "target" in results:
            target = results["target"]
            if "file" in target:
                file = target["file"]
                if "cape_yara" in file:
                    for entry in file["cape_yara"]:
                        self.process_cape_yara(entry, detections)

        if results["procdump"]:
            for file in results["procdump"]:
                if "cape_yara" in file:
                    for entry in file["cape_yara"]:
                        self.process_cape_yara(entry, detections)

        if results["CAPE"]:
            for file in results["CAPE"]:
                if "cape_yara" in file:
                    for entry in file["cape_yara"]:
                        self.process_cape_yara(entry, detections)

        if results["dropped"]:
            for file in results["dropped"]:
                if "cape_yara" in file:
                    for entry in file["cape_yara"]:
                        self.process_cape_yara(entry, detections)

        ##### Dynamic CAPE hits
        ##### Packers, injection or other generic dumping
        #####
        if "signatures" in results:
            for entry in results["signatures"]:
                if entry["name"] == "InjectionCreateRemoteThread" or entry[
                        "name"] == "InjectionProcessHollowing" or entry[
                            "name"] == "InjectionSetWindowLong":
                    if report["info"].has_key("package"):
                        if parent_package == 'doc':
                            detections.add('Injection_doc')
                            continue
                        if parent_package == 'dll' or parent_package == 'regsvr':
                            detections.add('Injection_dll')
                            continue
                        if parent_package == 'zip':
                            detections.add('Injection_zip')
                            continue
                        detections.add('Injection')

                elif entry["name"] == "Extraction":
                    if report["info"].has_key("package"):
                        if parent_package == 'doc':
                            #    detections.add('Extraction_doc')
                            # Word triggers this so removed
                            continue
                        if parent_package == 'zip':
                            detections.add('Extraction_zip')
                            continue
                        if parent_package == 'dll':
                            detections.add('Extraction_dll')
                            continue
                        if parent_package == 'regsvr':
                            detections.add('Extraction_regsvr')
                            continue
                        detections.add('Extraction')

                elif entry["name"] == "Compression":
                    if report["info"].has_key("package"):
                        if parent_package == 'dll' or parent_package == 'regsvr':
                            detections.add('Compression_dll')
                            continue
                        if parent_package == 'doc':
                            detections.add('Compression_doc')
                            continue
                        detections.add('Compression')

                ##### Specific malware family packages
                #####
                elif entry["name"] == "PlugX":
                    if report["info"].has_key("package"):
                        if parent_package == 'PlugXPayload':
                            detections.add('PlugXPayload')
                            continue
                        if parent_package == 'zip':
                            detections.add('PlugX_zip')
                            continue
                        if parent_package == 'doc':
                            detections.add('PlugX_doc')
                            continue
                        if parent_package == 'dll':
                            detections.add('PlugX_dll')
                            continue
                        detections.add('PlugX')

                elif entry["name"] == "PlugX fuzzy":
                    if report["info"].has_key("package"):
                        if parent_package == 'PlugXPayload':
                            detections.add('PlugXPayload_fuzzy')
                            continue
                        if parent_package == 'zip':
                            detections.add('PlugX_fuzzy_zip')
                            continue
                        if parent_package == 'doc':
                            detections.add('PlugX_fuzzy_doc')
                            continue
                        if parent_package == 'dll':
                            detections.add('PlugX_fuzzy_dll')
                            continue
                        detections.add('PlugX_fuzzy')

                elif entry["name"] == "Derusbi":
                    if report["info"].has_key("package"):
                        detections.add('Derusbi')

                elif entry["name"] == "EvilGrab":
                    if report["info"].has_key("package"):
                        detections.add('EvilGrab')

        # We only want to submit a single job if we have a
        # malware detection. A given package should do
        # everything we need for its respective family.
        package = None

        if 'PlugX_fuzzy' in detections:
            package = 'PlugX_fuzzy'
        elif 'PlugXPayload_fuzzy' in detections:
            package = 'PlugXPayload_fuzzy'
        elif 'PlugX_fuzzy_zip' in detections:
            package = 'PlugX_fuzzy_zip'
        elif 'PlugX_fuzzy_doc' in detections:
            package = 'PlugX_fuzzy_doc'
        elif 'PlugX_fuzzy_dll' in detections:
            package = 'PlugX_fuzzy_dll'

        # We may have both 'fuzzy' and non 'fuzzy'
        # but only want to submit non.
        if 'PlugX' in detections:
            package = 'PlugX'
        elif 'PlugXPayload' in detections:
            package = 'PlugXPayload'
        elif 'PlugX_zip' in detections:
            package = 'PlugX_zip'
        elif 'PlugX_doc' in detections:
            package = 'PlugX_doc'
        elif 'PlugX_dll' in detections:
            package = 'PlugX_dll'

        if 'Derusbi' in detections:
            package = 'Derusbi'

        if 'EvilGrab' in detections:
            package = 'EvilGrab'

        if 'Azzy' in detections:
            if parent_package == 'dll':
                package = 'Azzy_dll'
            else:
                package = 'Azzy'

        self.task_options = self.task["options"]
        # we want to switch off automatic process dumps in CAPE submissions
        if self.task_options and 'procmemdump=1' in self.task_options:
            self.task_options = self.task_options.replace(
                u"procmemdump=1", u"procmemdump=0", 1)
        if self.task_options_stack:
            self.task_options = ','.join(self.task_options_stack)

        if package:
            task_id = db.add_path(
                file_path=self.task["target"],
                package=package,
                timeout=self.task["timeout"],
                options=self.task_options,
                priority=self.task["priority"] +
                1,  # increase priority to expedite related submission
                machine=self.task["machine"],
                platform=self.task["platform"],
                memory=self.task["memory"],
                enforce_timeout=self.task["enforce_timeout"],
                clock=None,
                tags=None)
            if task_id:
                log.info(
                    u"CAPE detection on file \"{0}\": {1} - added as CAPE task with ID {2}"
                    .format(self.task["target"], package, task_id))
            else:
                log.warning(
                    "Error adding CAPE task to database: {0}".format(package))

        else:  # nothing submitted, only 'dumpers' left
            if parent_package == "Extraction" or parent_package == "Injection" or parent_package == "Compression":
                return
            for dumper in detections:
                task_id = db.add_path(
                    file_path=self.task["target"],
                    package=dumper,
                    timeout=self.task["timeout"],
                    options=self.task_options,
                    priority=self.task["priority"] +
                    1,  # increase priority to expedite related submission
                    machine=self.task["machine"],
                    platform=self.task["platform"],
                    memory=self.task["memory"],
                    enforce_timeout=self.task["enforce_timeout"],
                    clock=None,
                    tags=None)
                if task_id:
                    log.info(
                        u"CAPE detection on file \"{0}\": {1} - added as CAPE task with ID {2}"
                        .format(self.task["target"], dumper, task_id))
                else:
                    log.warn("Error adding CAPE task to database: {0}".format(
                        dumper))
        return
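The long if/elif precedence chains above could be made data-driven. A minimal sketch, assuming the same detections set; within each group the first hit wins, and the plain group is scanned second so it overrides fuzzy hits, as in the original:

FUZZY_ORDER = ["PlugX_fuzzy", "PlugXPayload_fuzzy", "PlugX_fuzzy_zip",
               "PlugX_fuzzy_doc", "PlugX_fuzzy_dll"]
PLAIN_ORDER = ["PlugX", "PlugXPayload", "PlugX_zip", "PlugX_doc", "PlugX_dll"]

package = None
for group in (FUZZY_ORDER, PLAIN_ORDER):
    for candidate in group:
        if candidate in detections:
            package = candidate
            break  # first hit within a group wins; a later group still overrides

# The family-specific overrides (Derusbi, EvilGrab, Azzy) would follow as before.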
Example #42
from lib.cuckoo.common.utils import store_temp_file
from lib.cuckoo.core.database import Database, TASK_COMPLETED, TASK_REPORTED, TASK_RUNNING, TASK_PENDING, TASK_FAILED_REPORTING, TASK_DISTRIBUTED_COMPLETED

from lib.cuckoo.common.dist_db import Node, StringList, Task, Machine, create_session

# We need the original db to reserve the task ID, so the report can
# later be stored from the master or a slave.
reporting_conf = Config("reporting")

# init
logging.getLogger("elasticsearch").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)

STATUSES = {}
main_db = Database()

dead_count = 5
if reporting_conf.distributed.dead_count:
    dead_count = reporting_conf.distributed.dead_count

INTERVAL = 10

# Per-node failure counters, used to declare nodes dead.
failed_count = dict()
# Per-node status-check counters, used to reset the failure count.
status_count = dict()

lock_retriever = threading.Lock()
dist_lock = threading.BoundedSemaphore(
    int(reporting_conf.distributed.dist_threads))
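dist_lock above caps how many threads may talk to remote nodes at once. A minimal usage sketch; the function name and body are hypothetical:

def submit_to_node(node, task):
    dist_lock.acquire()   # at most dist_threads concurrent node operations
    try:
        pass  # push the task to the node / fetch its report here
    finally:
        dist_lock.release()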
Example #43
def ajax_submit_file(request):
    if request.method == "POST":
        package = request.POST.get("package", "")
        timeout = min(force_int(request.POST.get("timeout")), 60 * 60 * 24)
        options = request.POST.get("options", "")
        priority = force_int(request.POST.get("priority"))
        machine = request.POST.get("machine", "")
        gateway = request.POST.get("gateway", None)
        clock = request.POST.get("clock", None)
        custom = request.POST.get("custom", "")
        memory = bool(request.POST.get("memory", False))
        enforce_timeout = bool(request.POST.get("enforce_timeout", False))
        status = request.POST.get("user_status", False)
        print "AJAX SUBMIT FILE USER STATUS %s" % status

        #if not status:
        #    user_status=0
        #else:
        #    user_status=1

        if request.user.id is None:
            user_id = 1
        else:
            user_id = request.user.id
        
        tags = request.POST.get("tags", None)

        if request.POST.get("free"):
            if options:
                options += ","
            options += "free=yes"

        if request.POST.get("nohuman"):
            if options:
                options += ","
            options += "nohuman=yes"

        if request.POST.get("tor"):
            if options:
                options += ","
            options += "tor=yes"

        if request.POST.get("process_memory"):
            if options:
                options += ","
            options += "procmemdump=yes"

        if request.POST.get("kernel_analysis"):
            if options:
                options += ","
            options += "kernel_analysis=yes"   

        if gateway and gateway in settings.GATEWAYS:
            if "," in settings.GATEWAYS[gateway]:
                tgateway = random.choice(settings.GATEWAYS[gateway].split(","))
                ngateway = settings.GATEWAYS[tgateway]
            else:
                ngateway = settings.GATEWAYS[gateway]
            if options:
                options += ","
            options += "setgw=%s" % (ngateway)

        db = Database()
        task_ids = []
        task_machines = []

        if machine.lower() == "all":
            for entry in db.list_machines():
                task_machines.append(entry.label)
        else:
            task_machines.append(machine)

        tempfilePath = request.POST.get("file_path", "")
        print "AJAX SUBMIT FILE TAMP FILE PATH %s" % tempfilePath
        if tempfilePath:
            for entry in task_machines:
                print "AJAX LIST MACHINE NAME %s" % entry
                task_ids_new = db.demux_sample_and_add_to_db(
                    file_path=tempfilePath, package=package, timeout=timeout,
                    options=options, priority=priority, machine=entry,
                    custom=custom, memory=memory,
                    enforce_timeout=enforce_timeout, tags=tags, clock=clock,
                    user_status=status, user_id=user_id)
            #pp.pprint(task_ids_new)
            final_task_ids = []
            for taskId in task_ids_new:
                final_task_ids.append(until.encrpt(taskId))
            task_ids.extend(final_task_ids)

            tasks_count = len(task_ids)
            pp.pprint(task_ids)
            # task_ids = ["YXNkZmRzZmFkc2YxMTVkc2Zhc2RmYXNkZg=="]
            # tasks_count = 1
            if tasks_count > 0:
                return HttpResponse(json.dumps({"correct": "%s" % task_ids[0]}), content_type="application/json")
            else:
                return HttpResponse(json.dumps({"error": "Error adding task to Cuckoo's database."}), content_type="application/json")
        else:
            return HttpResponse(json.dumps({"error": "Error adding task to Cuckoo's database."}), content_type="application/json")
Example #44
def index(request, task_id=None, sha1=None):
    if request.method == "POST":
        package = request.POST.get("package", "")
        timeout = force_int(request.POST.get("timeout"))
        options = request.POST.get("options", "")
        priority = force_int(request.POST.get("priority"))
        machine = request.POST.get("machine", "")
        custom = request.POST.get("custom", "")
        memory = bool(request.POST.get("memory", False))
        enforce_timeout = bool(request.POST.get("enforce_timeout", False))
        tags = request.POST.get("tags", None)

        if request.POST.get("route"):
            if options:
                options += ","
            options += "route=%s" % request.POST.get("route")

        if request.POST.get("free"):
            if options:
                options += ","
            options += "free=yes"

        if request.POST.get("process_memory"):
            if options:
                options += ","
            options += "procmemdump=yes"

        db = Database()
        task_ids = []
        task_machines = []

        if machine.lower() == "all":
            for entry in db.list_machines():
                task_machines.append(entry.label)
        else:
            task_machines.append(machine)

        # In case of resubmitting a file.
        if request.POST.get("category") == "file":
            task = Database().view_task(task_id)

            for entry in task_machines:
                task_id = db.add_path(file_path=task.target,
                                      package=package,
                                      timeout=timeout,
                                      options=options,
                                      priority=priority,
                                      machine=entry,
                                      custom=custom,
                                      memory=memory,
                                      enforce_timeout=enforce_timeout,
                                      tags=tags)
                if task_id:
                    task_ids.append(task_id)

        elif request.FILES.getlist("sample"):
            samples = request.FILES.getlist("sample")
            for sample in samples:
                # Error if there was only one submitted sample and it's empty.
                # But if there are multiple and one was empty, just ignore it.
                if not sample.size:
                    if len(samples) != 1:
                        continue

                    return render_to_response(
                        "error.html", {"error": "You uploaded an empty file."},
                        context_instance=RequestContext(request))
                elif sample.size > settings.MAX_UPLOAD_SIZE:
                    return render_to_response("error.html", {
                        "error":
                        "You uploaded a file that exceeds that maximum allowed upload size."
                    },
                                              context_instance=RequestContext(
                                                  request))

                # Move the sample from the Django temporary file to Cuckoo's
                # temporary storage so it persists between reboots (if the
                # user chooses to configure it that way).
                path = store_temp_file(sample.read(), sample.name)

                for entry in task_machines:
                    task_id = db.add_path(file_path=path,
                                          package=package,
                                          timeout=timeout,
                                          options=options,
                                          priority=priority,
                                          machine=entry,
                                          custom=custom,
                                          memory=memory,
                                          enforce_timeout=enforce_timeout,
                                          tags=tags)
                    if task_id:
                        task_ids.append(task_id)

        # When submitting a dropped file.
        elif request.POST.get("category") == "dropped_file":
            filepath = dropped_filepath(task_id, sha1)

            for entry in task_machines:
                task_id = db.add_path(file_path=filepath,
                                      package=package,
                                      timeout=timeout,
                                      options=options,
                                      priority=priority,
                                      machine=entry,
                                      custom=custom,
                                      memory=memory,
                                      enforce_timeout=enforce_timeout,
                                      tags=tags)
                if task_id:
                    task_ids.append(task_id)

        else:
            url = request.POST.get("url").strip()
            if not url:
                return render_to_response(
                    "error.html", {"error": "You specified an invalid URL!"},
                    context_instance=RequestContext(request))

            for entry in task_machines:
                task_id = db.add_url(url=url,
                                     package=package,
                                     timeout=timeout,
                                     options=options,
                                     priority=priority,
                                     machine=entry,
                                     custom=custom,
                                     memory=memory,
                                     enforce_timeout=enforce_timeout,
                                     tags=tags)
                if task_id:
                    task_ids.append(task_id)

        tasks_count = len(task_ids)
        if tasks_count > 0:
            return render_to_response(
                "submission/complete.html", {
                    "tasks": task_ids,
                    "tasks_count": tasks_count,
                    "baseurl": request.build_absolute_uri('/')[:-1]
                },
                context_instance=RequestContext(request))
        else:
            return render_to_response(
                "error.html",
                {"error": "Error adding task to Cuckoo's database."},
                context_instance=RequestContext(request))
    else:
        files = os.listdir(
            os.path.join(settings.CUCKOO_PATH, "analyzer", "windows",
                         "modules", "packages"))

        packages = []
        for name in files:
            name = os.path.splitext(name)[0]
            if name == "__init__":
                continue

            packages.append(name)

        # Prepare a list of VM names, description label based on tags.
        machines = []
        for machine in Database().list_machines():
            tags = []
            for tag in machine.tags:
                tags.append(tag.name)

            if tags:
                label = machine.label + ": " + ", ".join(tags)
            else:
                label = machine.label

            machines.append((machine.label, label))

        # Prepend ALL/ANY options.
        machines.insert(0, ("", "First available"))
        machines.insert(1, ("all", "All"))

        return render_to_response("submission/index.html", {
            "packages": sorted(packages),
            "machines": machines,
            "vpns": vpns.values(),
            "route": cfg.routing.route,
            "internet": cfg.routing.internet
        },
                                  context_instance=RequestContext(request))
Example #45
def import_analysis(request):
    if request.method == "POST":
        db = Database()
        task_ids = []
        samples = request.FILES.getlist("sample")

        for sample in samples:
            # Error if there was only one submitted sample and it's empty.
            # But if there are multiple and one was empty, just ignore it.
            if not sample.size:
                if len(samples) != 1:
                    continue

                return render_to_response(
                    "error.html", {"error": "You uploaded an empty file."},
                    context_instance=RequestContext(request))
            elif sample.size > settings.MAX_UPLOAD_SIZE:
                return render_to_response("error.html", {
                    "error":
                    "You uploaded a file that exceeds that maximum allowed upload size."
                },
                                          context_instance=RequestContext(
                                              request))

            if not sample.name.endswith(".zip"):
                return render_to_response(
                    "error.html",
                    {"error": "You uploaded a file that wasn't a .zip."},
                    context_instance=RequestContext(request))

            path = store_temp_file(sample.read(), sample.name)
            zf = zipfile.ZipFile(path)

            # Path to store the files extracted from the zip. Use os.path.join
            # so this also works on non-Windows hosts.
            extract_path = os.path.join(os.path.dirname(path),
                                        os.path.splitext(sample.name)[0])
            zf.extractall(extract_path)

            report = os.path.join(extract_path, "analysis.json")
            if os.path.isfile(report):
                with open(report) as json_file:
                    json_data = json.load(json_file)
                    category = json_data["Target"]["category"]

                    if category == "file":
                        binary = os.path.join(extract_path, "binary")

                        if os.path.isfile(binary):
                            task_id = db.add_path(file_path=binary,
                                                  package="",
                                                  timeout=0,
                                                  options="",
                                                  priority=0,
                                                  machine="",
                                                  custom="",
                                                  memory=False,
                                                  enforce_timeout=False,
                                                  tags=None)
                            if task_id:
                                task_ids.append(task_id)

                    elif category == "url":
                        url = json_data["Target"]["url"]
                        if not url:
                            return render_to_response(
                                "error.html",
                                {"error": "You specified an invalid URL!"},
                                context_instance=RequestContext(request))

                        task_id = db.add_url(url=url,
                                             package="",
                                             timeout=0,
                                             options="",
                                             priority=0,
                                             machine="",
                                             custom="",
                                             memory=False,
                                             enforce_timeout=False,
                                             tags=None)
                        if task_id:
                            task_ids.append(task_id)
            else:
                return render_to_response(
                    "error.html", {"error": "No analysis.json found!"},
                    context_instance=RequestContext(request))

        tasks_count = len(task_ids)
        if tasks_count > 0:
            return render_to_response(
                "submission/complete.html", {
                    "tasks": task_ids,
                    "tasks_count": tasks_count,
                    "baseurl": request.build_absolute_uri('/')[:-1]
                },
                context_instance=RequestContext(request))

    return render_to_response("analysis/import.html",
                              context_instance=RequestContext(request))
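Note that zipfile.extractall() trusts the member names in the archive; if the imported .zip comes from an untrusted source, paths should be validated first ("zip slip"). A minimal hardening sketch, assuming the same zf and extract_path:

import os

safe_root = os.path.realpath(extract_path)
for member in zf.namelist():
    target = os.path.realpath(os.path.join(safe_root, member))
    # Refuse members that would resolve outside the extraction directory.
    if target != safe_root and not target.startswith(safe_root + os.sep):
        raise ValueError("Unsafe path in archive: %s" % member)
zf.extractall(extract_path)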
Example #46
class Scheduler:
    """Tasks Scheduler.

    This class is responsible for the main execution loop of the tool. It
    prepares the analysis machines and keeps waiting for new analysis tasks
    to load.
    Whenever a new task is available, it launches an AnalysisManager, which
    takes care of running the full analysis process on the assigned analysis
    machine.
    """

    def __init__(self):
        self.running = True
        self.cfg = Config()
        self.db = Database()

    def initialize(self):
        """Initialize the machine manager."""
        global machinery

        machinery_name = self.cfg.cuckoo.machinery

        log.info("Using \"%s\" machine manager", machinery_name)

        # Get registered class name. Only one machine manager is imported,
        # therefore there should be only one class in the list.
        plugin = list_plugins("machinery")[0]
        # Initialize the machine manager.
        machinery = plugin()

        # Find its configuration file.
        conf = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name)

        if not os.path.exists(conf):
            raise CuckooCriticalError("The configuration file for machine "
                                      "manager \"{0}\" does not exist at path:"
                                      " {1}".format(machinery_name, conf))

        # Provide a dictionary with the configuration options to the
        # machine manager instance.
        machinery.set_options(Config(conf))

        # Initialize the machine manager.
        try:
            machinery.initialize(machinery_name)
        except CuckooMachineError as e:
            raise CuckooCriticalError("Error initializing machines: %s" % e)

        # At this point all the available machines should have been identified
        # and added to the list. If none were found, Cuckoo needs to abort the
        # execution.
        if not len(machinery.machines()):
            raise CuckooCriticalError("No machines available")
        else:
            log.info("Loaded %s machine/s", len(machinery.machines()))

    def stop(self):
        """Stop scheduler."""
        self.running = False
        # Shut down the machine manager (used to kill machines that are still alive).
        machinery.shutdown()

    def start(self):
        """Start scheduler."""
        global total_analysis_count
        self.initialize()

        log.info("Waiting for analysis tasks...")

        # Message queue with threads to transmit exceptions (used as IPC).
        errors = Queue.Queue()

        maxcount = self.cfg.cuckoo.max_analysis_count

        # This loop runs forever.
        while self.running:
            time.sleep(1)

            # If not enough free disk space is available, then we print an
            # error message and wait another round (this check is ignored
            # when the freespace configuration variable is set to zero).
            if self.cfg.cuckoo.freespace:
                # Resolve the full base path to the analysis folder, just in
                # case somebody decides to make a symbolic link out of it.
                dir_path = os.path.join(CUCKOO_ROOT, "storage", "analyses")

                # TODO: Windows support
                if hasattr(os, "statvfs"):
                    dir_stats = os.statvfs(dir_path)

                    # Calculate the free disk space in megabytes.
                    space_available = dir_stats.f_bavail * dir_stats.f_frsize
                    space_available /= 1024 * 1024

                    if space_available < self.cfg.cuckoo.freespace:
                        log.error("Not enough free disk space! (Only %d MB!)",
                                  space_available)
                        continue

            # If no machines are available, it's pointless to fetch
            # pending tasks. Loop over.
            if not machinery.availables():
                continue

            # Exits if max_analysis_count is defined in the configuration
            # file and has been reached.
            if maxcount and total_analysis_count >= maxcount:
                if active_analysis_count <= 0:
                    self.stop()
            else:
                # Fetch a pending analysis task.
                task = self.db.fetch()

                if task:
                    log.debug("Processing task #%s", task.id)
                    total_analysis_count += 1

                    # Initialize and start the analysis manager.
                    analysis = AnalysisManager(task, errors)
                    analysis.start()

            # Deal with errors.
            try:
                error = errors.get(block=False)
            except Queue.Empty:
                pass
            else:
                raise error
Example #47
def cuckoo_clean_sorted_pcap_dump():
    """Clean up sorted pcap dumps.
    It removes the sorted_pcap_id references from MongoDB and deletes the
    corresponding dump_sorted.pcap files from the analysis storage.
    """
    # Init logging.
    # This needs to init a console logger handler, because the standard
    # logger (init_logging()) logs to a file which will be deleted.
    create_structure()
    init_console_logging()

    # Initialize the database connection.
    db = Database()

    # Check if MongoDB reporting is enabled and drop that if it is.
    cfg = Config("reporting")
    if cfg.mongodb and cfg.mongodb.enabled:
        from pymongo import MongoClient
        host = cfg.mongodb.get("host", "127.0.0.1")
        port = cfg.mongodb.get("port", 27017)
        mdb = cfg.mongodb.get("db", "cuckoo")
        try:
            results_db = MongoClient(host, port)[mdb]
        except Exception:
            log.warning("Unable to connect to MongoDB database: %s", mdb)
            return

        done = False
        while not done:
            rtmp = results_db.analysis.find(
                {
                    "network.sorted_pcap_id": {
                        "$exists": True
                    }
                }, {
                    "info.id": 1
                },
                sort=[("_id", -1)]).limit(100)
            if rtmp and rtmp.count() > 0:
                for e in rtmp:
                    if e["info"]["id"]:
                        print e["info"]["id"]
                        try:
                            results_db.analysis.update(
                                {"info.id": int(e["info"]["id"])},
                                {"$unset": {
                                    "network.sorted_pcap_id": ""
                                }})
                        except Exception:
                            print "failed to remove sorted pcap from db for id %s" % (
                                e["info"]["id"])
                        try:
                            path = os.path.join(CUCKOO_ROOT, "storage",
                                                "analyses",
                                                "%s" % (e["info"]["id"]),
                                                "dump_sorted.pcap")
                            os.remove(path)
                        except Exception as exc:
                            print "failed to remove sorted_pcap from disk %s" % (
                                exc)
                    else:
                        done = True
            else:
                done = True
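With pymongo 3 or later, the field can also be stripped in bulk instead of paging through documents in batches of 100; a minimal sketch, which still leaves the per-id dump_sorted.pcap files to be removed from disk:

ids = [e["info"]["id"] for e in results_db.analysis.find(
    {"network.sorted_pcap_id": {"$exists": True}}, {"info.id": 1})]
results_db.analysis.update_many(
    {"network.sorted_pcap_id": {"$exists": True}},
    {"$unset": {"network.sorted_pcap_id": ""}})
# dump_sorted.pcap for each id in `ids` still needs removing from storage.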
Example #48
    def launch_analysis(self):
        """Start analysis."""
        succeeded = False
        dead_machine = False

        log.info("Starting analysis of %s \"%s\" (task=%d)",
                 self.task.category.upper(), self.task.target, self.task.id)

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category == "file":
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        # At this point we can tell the Resultserver about it.
        try:
            Resultserver().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            self.errors.put(e)

        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()

        try:
            # Mark the selected analysis machine in the database as started.
            guest_log = Database().guest_start(self.task.id,
                                               self.machine.name,
                                               self.machine.label,
                                               machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)
        except CuckooMachineError as e:
            log.error(str(e), extra={"task_id": self.task.id})
            dead_machine = True
        else:
            try:
                # Initialize the guest manager.
                guest = GuestManager(self.machine.name, self.machine.ip, self.machine.platform)
                # Start the analysis.
                guest.start_analysis(options)
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id": self.task.id})
            else:
                # Wait for analysis completion.
                try:
                    guest.wait_for_completion()
                    succeeded = True
                except CuckooGuestError as e:
                    log.error(str(e), extra={"task_id": self.task.id})
                    succeeded = False

        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    machinery.dump_memory(self.machine.label,
                                          os.path.join(self.storage, "memory.dmp"))
                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager")
                except CuckooMachineError as e:
                    log.error(e)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)
            except CuckooMachineError as e:
                log.warning("Unable to stop machine %s: %s",
                            self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            Database().guest_stop(guest_log)

            # After all this, we can make the Resultserver forget about the
            # internal state for this analysis task.
            Resultserver().del_task(self.task, self.machine)

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                Database().guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)
            except CuckooMachineError as e:
                log.error("Unable to release machine %s, reason %s. "
                          "You might need to restore it manually",
                          self.machine.label, e)

        return succeeded
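The CuckooDeadMachine exception raised above is meant to be caught by the caller so the task is retried on another machine; a minimal sketch of that loop, assuming a run() wrapper along the lines of the original AnalysisManager:

while True:
    try:
        succeeded = self.launch_analysis()
    except CuckooDeadMachine:
        continue  # the dead guest was removed; pick another machine and retry
    break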
Example #49
def index(request):
    if request.method == "POST":
        package = request.POST.get("package", "")
        timeout = min(force_int(request.POST.get("timeout")), 60 * 60 * 24)
        options = request.POST.get("options", "")
        priority = force_int(request.POST.get("priority"))
        machine = request.POST.get("machine", "")
        gateway = request.POST.get("gateway", None)
        clock = request.POST.get("clock", None)
        custom = request.POST.get("custom", "")
        memory = bool(request.POST.get("memory", False))
        enforce_timeout = bool(request.POST.get("enforce_timeout", False))
        user_status = bool(request.POST.get("user_status", False))

        tags = request.POST.get("tags", None)

        if request.POST.get("free"):
            if options:
                options += ","
            options += "free=yes"

        if request.POST.get("nohuman"):
            if options:
                options += ","
            options += "nohuman=yes"

        if request.POST.get("tor"):
            if options:
                options += ","
            options += "tor=yes"

        if request.POST.get("process_memory"):
            if options:
                options += ","
            options += "procmemdump=yes"

        if request.POST.get("kernel_analysis"):
            if options:
                options += ","
            options += "kernel_analysis=yes"   

        if gateway and gateway in settings.GATEWAYS:
            if "," in settings.GATEWAYS[gateway]:
                tgateway = random.choice(settings.GATEWAYS[gateway].split(","))
                ngateway = settings.GATEWAYS[tgateway]
            else:
                ngateway = settings.GATEWAYS[gateway]
            if options:
                options += ","
            options += "setgw=%s" % (ngateway)

        db = Database()
        task_ids = []
        task_machines = []

        if machine.lower() == "all":
            for entry in db.list_machines():
                task_machines.append(entry.label)
        else:
            task_machines.append(machine)

        if "sample" in request.FILES:
            for sample in request.FILES.getlist("sample"):
                if sample.size == 0:
                    return render_to_response("error.html",
                                              {"error": "You uploaded an empty file."},
                                              context_instance=RequestContext(request))
                elif sample.size > settings.MAX_UPLOAD_SIZE:
                    return render_to_response("error.html",
                                              {"error": "You uploaded a file that exceeds that maximum allowed upload size."},
                                              context_instance=RequestContext(request))
    
                # Move the sample from the Django temporary file to Cuckoo's
                # temporary storage so it persists between reboots (if the
                # user chooses to configure it that way).
                path = store_temp_file(sample.read(),
                                       sample.name)
    
                for entry in task_machines:
                    task_ids_new = db.demux_sample_and_add_to_db(file_path=path, package=package, timeout=timeout, options=options, priority=priority,
                                                                 machine=entry, custom=custom, memory=memory, enforce_timeout=enforce_timeout, tags=tags, clock=clock)
                    task_ids.extend(task_ids_new)
        elif "quarantine" in request.FILES:
            for sample in request.FILES.getlist("quarantine"):
                if sample.size == 0:
                    return render_to_response("error.html",
                                              {"error": "You uploaded an empty quarantine file."},
                                              context_instance=RequestContext(request))
                elif sample.size > settings.MAX_UPLOAD_SIZE:
                    return render_to_response("error.html",
                                              {"error": "You uploaded a quarantine file that exceeds that maximum allowed upload size."},
                                              context_instance=RequestContext(request))
    
                # Move the sample from the Django temporary file to Cuckoo's
                # temporary storage so it persists between reboots (if the
                # user chooses to configure it that way).
                tmp_path = store_temp_file(sample.read(),
                                       sample.name)

                path = unquarantine(tmp_path)
                try:
                    os.remove(tmp_path)
                except OSError:
                    pass

                if not path:
                    return render_to_response("error.html",
                                              {"error": "You uploaded an unsupported quarantine file."},
                                              context_instance=RequestContext(request))

                for entry in task_machines:
                    task_ids_new = db.demux_sample_and_add_to_db(file_path=path, package=package, timeout=timeout, options=options, priority=priority,
                                                                 machine=entry, custom=custom, memory=memory, enforce_timeout=enforce_timeout, tags=tags, clock=clock)
                    task_ids.extend(task_ids_new)
        elif "url" in request.POST and request.POST.get("url").strip():
            url = request.POST.get("url").strip()
            if not url:
                return render_to_response("error.html",
                                          {"error": "You specified an invalid URL!"},
                                          context_instance=RequestContext(request))

            url = url.replace("hxxps://", "https://").replace("hxxp://", "http://").replace("[.]", ".")
            for entry in task_machines:
                task_id = db.add_url(url=url,
                                     package=package,
                                     timeout=timeout,
                                     options=options,
                                     priority=priority,
                                     machine=entry,
                                     custom=custom,
                                     memory=memory,
                                     enforce_timeout=enforce_timeout,
                                     tags=tags,
                                     clock=clock)
                if task_id:
                    task_ids.append(task_id)
        elif settings.VTDL_ENABLED and "vtdl" in request.POST:
            vtdl = request.POST.get("vtdl").strip()
            if (not settings.VTDL_PRIV_KEY and not settings.VTDL_INTEL_KEY) or not settings.VTDL_PATH:
                    return render_to_response("error.html",
                                              {"error": "You specified VirusTotal but must edit the file and specify your VTDL_PRIV_KEY or VTDL_INTEL_KEY variable and VTDL_PATH base directory"},
                                              context_instance=RequestContext(request))
            else:
                base_dir = tempfile.mkdtemp(prefix='cuckoovtdl', dir=settings.VTDL_PATH)
                hashlist = []
                if "," in vtdl:
                    hashlist = vtdl.split(",")
                else:
                    hashlist.append(vtdl)
                onesuccess = False

                for h in hashlist:
                    filename = os.path.join(base_dir, h)
                    if settings.VTDL_PRIV_KEY:
                        url = 'https://www.virustotal.com/vtapi/v2/file/download'
                        params = {'apikey': settings.VTDL_PRIV_KEY, 'hash': h}
                    else:
                        url = 'https://www.virustotal.com/intelligence/download/'
                        params = {'apikey': settings.VTDL_INTEL_KEY, 'hash': h}

                    try:
                        r = requests.get(url, params=params, verify=True)
                    except requests.exceptions.RequestException as e:
                        return render_to_response("error.html",
                                              {"error": "Error completing connection to VirusTotal: {0}".format(e)},
                                              context_instance=RequestContext(request))
                    if r.status_code == 200:
                        try:
                            with open(filename, 'wb') as f:
                                f.write(r.content)
                        except (IOError, OSError):
                            return render_to_response("error.html",
                                              {"error": "Error writing VirusTotal download file to temporary path"},
                                              context_instance=RequestContext(request))

                        onesuccess = True

                        for entry in task_machines:
                            task_ids_new = db.demux_sample_and_add_to_db(file_path=filename, package=package, timeout=timeout, options=options, priority=priority,
                                                                         machine=entry, custom=custom, memory=memory, enforce_timeout=enforce_timeout, tags=tags, clock=clock)
                            task_ids.extend(task_ids_new)
                    elif r.status_code == 403:
                        return render_to_response("error.html",
                                                  {"error": "API key provided is not a valid VirusTotal key or is not authorized for VirusTotal downloads"},
                                                  context_instance=RequestContext(request))
                if not onesuccess:
                    return render_to_response("error.html",
                                              {"error": "Provided hash not found on VirusTotal"},
                                              context_instance=RequestContext(request))
        tasks_count = len(task_ids)
        if tasks_count > 0:
            return render_to_response("submission/complete.html",
                                      {"tasks" : task_ids,
                                       "tasks_count" : tasks_count},
                                      context_instance=RequestContext(request))
        else:
            return render_to_response("error.html",
                                      {"error": "Error adding task to Cuckoo's database."},
                                      context_instance=RequestContext(request))
    else:
        enabledconf = dict()
        enabledconf["vt"] = settings.VTDL_ENABLED
        enabledconf["kernel"] = settings.OPT_ZER0M0N
        enabledconf["memory"] = Config("processing").memory.get("enabled")
        enabledconf["procmemory"] = Config("processing").procmemory.get("enabled")
        enabledconf["tor"] = Config("auxiliary").tor.get("enabled")
        if Config("auxiliary").gateways:
            enabledconf["gateways"] = True
        else:
            enabledconf["gateways"] = False
        enabledconf["tags"] = False
        # Get enabled machinery
        machinery = Config("cuckoo").cuckoo.get("machinery")
        # Get VM names for machinery config elements
        vms = [x.strip() for x in getattr(Config(machinery), machinery).get("machines").split(",")]
        # Check each VM config element for tags
        for vmtag in vms:
            if "tags" in getattr(Config(machinery), vmtag).keys():
                enabledconf["tags"] = True

        files = os.listdir(os.path.join(settings.CUCKOO_PATH, "analyzer", "windows", "modules", "packages"))

        packages = []
        for name in files:
            name = os.path.splitext(name)[0]
            if name == "__init__":
                continue

            packages.append(name)

        # Prepare a list of VM names, description label based on tags.
        machines = []
        for machine in Database().list_machines():
            tags = []
            for tag in machine.tags:
                tags.append(tag.name)

            if tags:
                label = machine.label + ": " + ", ".join(tags)
            else:
                label = machine.label

            machines.append((machine.label, label))

        # Prepend ALL/ANY options.
        machines.insert(0, ("", "First available"))
        machines.insert(1, ("all", "All"))

        return render_to_response("submission/index.html",
                                  {"packages": sorted(packages),
                                   "machines": machines,
                                   "gateways": settings.GATEWAYS,
                                   "config": enabledconf},
                                  context_instance=RequestContext(request))
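The submission view above normalizes defanged URLs (hxxp schemes, bracketed dots) inline. The same normalization can be factored into a small helper; a minimal sketch (the refang_url name is an illustration, not part of the code above):

def refang_url(url):
    """Convert a defanged URL back into a fetchable one, mirroring the
    replacements done inline by the submission view above."""
    url = url.replace("hxxps://", "https://").replace("hxxp://", "http://")
    return url.replace("[.]", ".")

assert refang_url("hxxp://evil[.]example[.]com/a") == "http://evil.example.com/a"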
Example #50
class Machinery(object):
    """Base abstract class for machinery modules."""

    # Default key used in the machinery configuration file to supply the
    # virtual machine name/label/vmx path. Override it if you named it
    # differently.
    LABEL = "label"

    def __init__(self):
        self.module_name = ""
        self.options = None
        self.options_globals = Config()
        # Database pointer.
        self.db = Database()

        # Machine table is cleaned to be filled from configuration file
        # at each start.
        self.db.clean_machines()

    def set_options(self, options):
        """Set machine manager options.
        @param options: machine manager options dict.
        """
        self.options = options

    def initialize(self, module_name):
        """Read, load, and verify machines configuration.
        @param module_name: module name.
        """
        # Load.
        self._initialize(module_name)

        # Run initialization checks.
        self._initialize_check()

    def _initialize(self, module_name):
        """Read configuration.
        @param module_name: module name.
        """
        self.module_name = module_name
        mmanager_opts = self.options.get(module_name)

        for machine_id in mmanager_opts["machines"].strip().split(","):
            try:
                machine_opts = self.options.get(machine_id.strip())
                machine = Dictionary()
                machine.id = machine_id.strip()
                machine.label = machine_opts[self.LABEL]
                machine.platform = machine_opts["platform"]
                machine.tags = machine_opts.get("tags")
                machine.ip = machine_opts["ip"]

                # If configured, use specific network interface for this
                # machine, else use the default value.
                machine.interface = machine_opts.get("interface")

                # If configured, use specific snapshot name, else leave it
                # empty and use default behaviour.
                machine.snapshot = machine_opts.get("snapshot")

                # If configured, use specific resultserver IP and port,
                # else use the default value.
                opt_resultserver = self.options_globals.resultserver

                # The resultserver port might have been dynamically changed,
                # so get the current one from the ResultServer singleton.
                opt_resultserver.port = ResultServer().port

                ip = machine_opts.get("resultserver_ip", opt_resultserver.ip)
                port = machine_opts.get("resultserver_port", opt_resultserver.port)

                machine.resultserver_ip = ip
                machine.resultserver_port = port

                # Strip parameters.
                for key, value in machine.items():
                    if value and isinstance(value, basestring):
                        machine[key] = value.strip()

                self.db.add_machine(name=machine.id,
                                    label=machine.label,
                                    ip=machine.ip,
                                    platform=machine.platform,
                                    tags=machine.tags,
                                    interface=machine.interface,
                                    snapshot=machine.snapshot,
                                    resultserver_ip=ip,
                                    resultserver_port=port)
            except (AttributeError, CuckooOperationalError) as e:
                log.warning("Configuration details about machine %s "
                            "are missing: %s", machine_id, e)
                continue

    def _initialize_check(self):
        """Runs checks against virtualization software when a machine manager
        is initialized.
        @note: in machine manager modules you may override or extend
               this method.
        @raise CuckooMachineError: if a misconfiguration or an unknown vm
                                   state is found.
        """
        try:
            configured_vms = self._list()
        except NotImplementedError:
            return

        for machine in self.machines():
            # If this machine is already in the "correct" state, then we
            # go on to the next machine.
            if machine.label in configured_vms and \
                    self._status(machine.label) in [self.POWEROFF, self.ABORTED]:
                continue

            # This machine is currently not in its correct state, we're going
            # to try to shut it down. If that works, then the machine is fine.
            try:
                self.stop(machine.label)
            except CuckooMachineError as e:
                msg = "Please update your configuration. Unable to shut " \
                      "'{0}' down or find the machine in its proper state:" \
                      " {1}".format(machine.label, e)
                raise CuckooCriticalError(msg)

        if not self.options_globals.timeouts.vm_state:
            raise CuckooCriticalError("Virtual machine state change timeout "
                                      "setting not found, please add it to "
                                      "the config file.")

    def machines(self):
        """List virtual machines.
        @return: virtual machines list
        """
        return self.db.list_machines()

    def availables(self):
        """How many machines are free.
        @return: free machines count.
        """
        return self.db.count_machines_available()

    def acquire(self, machine_id=None, platform=None, tags=None):
        """Acquire a machine to start analysis.
        @param machine_id: machine ID.
        @param platform: machine platform.
        @param tags: machine tags
        @return: machine or None.
        """
        if machine_id:
            return self.db.lock_machine(label=machine_id)
        elif platform:
            return self.db.lock_machine(platform=platform, tags=tags)
        else:
            return self.db.lock_machine(tags=tags)

    def release(self, label=None):
        """Release a machine.
        @param label: machine name.
        """
        self.db.unlock_machine(label)

    def running(self):
        """Returns running virtual machines.
        @return: running virtual machines list.
        """
        return self.db.list_machines(locked=True)

    def shutdown(self):
        """Shutdown the machine manager. Kills all alive machines.
        @raise CuckooMachineError: if unable to stop machine.
        """
        if len(self.running()) > 0:
            log.info("Still %s guests alive. Shutting down...",
                     len(self.running()))
            for machine in self.running():
                try:
                    self.stop(machine.label)
                except CuckooMachineError as e:
                    log.warning("Unable to shutdown machine %s, please check "
                                "manually. Error: %s", machine.label, e)

    def set_status(self, label, status):
        """Set status for a virtual machine.
        @param label: virtual machine label
        @param status: new virtual machine status
        """
        self.db.set_machine_status(label, status)

    def start(self, label=None):
        """Start a machine.
        @param label: machine name.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

    def stop(self, label=None):
        """Stop a machine.
        @param label: machine name.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

    def _list(self):
        """Lists virtual machines configured.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

    def dump_memory(self, label, path):
        """Takes a memory dump of a machine.
        @param label: virtual machine name.
        @param path: path to where to store the memory dump.
        """
        raise NotImplementedError

    def _wait_status(self, label, state):
        """Waits for a vm status.
        @param label: virtual machine name.
        @param state: virtual machine status, accepts multiple states as list.
        @raise CuckooMachineError: if default waiting timeout expire.
        """
        # This block was originally suggested by Loic Jaquemet.
        waitme = 0
        try:
            current = self._status(label)
        except NameError:
            return

        if isinstance(state, str):
            state = [state]
        while current not in state:
            log.debug("Waiting %i cuckooseconds for machine %s to switch "
                      "to status %s", waitme, label, state)
            if waitme > int(self.options_globals.timeouts.vm_state):
                raise CuckooMachineError("Timeout hit while for machine {0} "
                                         "to change status".format(label))
            time.sleep(1)
            waitme += 1
            current = self._status(label)
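Concrete machinery modules are expected to fill in the abstract hooks (start, stop, _list, _status) that the Machinery base class above leaves unimplemented. A minimal hypothetical sketch (DummyMachinery and its local state dict are inventions for illustration; a real module would talk to a hypervisor):

class DummyMachinery(Machinery):
    """Illustrative machinery module; tracks VM state in memory."""

    RUNNING = "running"
    POWEROFF = "poweroff"
    ABORTED = "aborted"

    def __init__(self):
        super(DummyMachinery, self).__init__()
        self._states = {}

    def _list(self):
        # A real module would query the hypervisor for its VM inventory.
        return [vm.label for vm in self.machines()]

    def _status(self, label):
        # A real module would ask the hypervisor for the actual state.
        return self._states.get(label, self.POWEROFF)

    def start(self, label=None):
        # A real module would boot the VM here.
        self._states[label] = self.RUNNING
        self._wait_status(label, self.RUNNING)

    def stop(self, label=None):
        # A real module would power the VM off here.
        self._states[label] = self.POWEROFF
        self._wait_status(label, self.POWEROFF)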
Example #51
def submit_url(request):
    if request.method == "POST":
        package = request.POST.get("package", "")
        timeout = min(force_int(request.POST.get("timeout")), 60 * 60 * 24)
        options = request.POST.get("options", "")
        priority = force_int(request.POST.get("priority"))
        machine = request.POST.get("machine", "")
        gateway = request.POST.get("gateway", None)
        clock = request.POST.get("clock", None)
        custom = request.POST.get("custom", "")
        memory = bool(request.POST.get("memory", False))
        enforce_timeout = bool(request.POST.get("enforce_timeout", False))
        status = bool(request.POST.get("user_status", False))

        if not status:
            user_status = 0
        else:
            user_status = 1

        if request.user.id is None:
            user_id = 1
        else:
            user_id = request.user.id

        tags = request.POST.get("tags", None)

        if request.POST.get("free"):
            if options:
                options += ","
            options += "free=yes"

        if request.POST.get("nohuman"):
            if options:
                options += ","
            options += "nohuman=yes"

        if request.POST.get("tor"):
            if options:
                options += ","
            options += "tor=yes"

        if request.POST.get("process_memory"):
            if options:
                options += ","
            options += "procmemdump=yes"

        if request.POST.get("kernel_analysis"):
            if options:
                options += ","
            options += "kernel_analysis=yes"   

        if gateway and gateway in settings.GATEWAYS:
            if "," in settings.GATEWAYS[gateway]:
                tgateway = random.choice(settings.GATEWAYS[gateway].split(","))
                ngateway = settings.GATEWAYS[tgateway]
            else:
                ngateway = settings.GATEWAYS[gateway]
            if options:
                options += ","
            options += "setgw=%s" % (ngateway)

        db = Database()
        task_ids = []
        task_machines = []

        if machine.lower() == "all":
            for entry in db.list_machines():
                task_machines.append(entry.label)
        else:
            task_machines.append(machine)

        if "url" in request.POST and request.POST.get("url").strip():
            url = request.POST.get("url").strip()
            if not url:
                return render_to_response("error.html",
                                          {"error": "You specified an invalid URL!"},
                                          context_instance=RequestContext(request))

            previous_analysis = results_db.analysis.find({"target.url": url}).sort([["_id", -1]])

            task = []

            for single in previous_analysis:
                single["info"]["base64"] = until.encrpt(single["info"]["id"])
                single["info"]["url"] = single["target"]["url"]
                pp.pprint(single["info"])
                task.append(single["info"])

            second_post = json.dumps({"url": url, "package": package, "timeout": timeout,
                                      "options": options, "priority": priority, "custom": custom,
                                      "memory": memory, "enforce_timeout": enforce_timeout,
                                      "tags": tags, "clock": clock, "user_status": user_status,
                                      "user_id": user_id}, sort_keys=True)

            if previous_analysis.count() >= 1:
                return render_to_response("submission/ShowSimilarUrl.html",
                                          {"tasks": task, "params": second_post},
                                          context_instance=RequestContext(request))

            url = url.replace("hxxps://", "https://").replace("hxxp://", "http://").replace("[.]", ".")
            for entry in task_machines:
                task_id = db.add_url(url=url,
                                     package=package,
                                     timeout=timeout,
                                     options=options,
                                     priority=priority,
                                     machine=entry,
                                     custom=custom,
                                     memory=memory,
                                     enforce_timeout=enforce_timeout,
                                     tags=tags,
                                     clock=clock,
                                     user_status=user_status,
                                     user_id=user_id)
                if task_id:
                    task_ids.append(until.encrpt(task_id))


        tasks_count = len(task_ids)
        if tasks_count > 0:
            return render_to_response("submission/complete.html",
                                      {"tasks" : task_ids,
                                       "tasks_count" : tasks_count},
                                      context_instance=RequestContext(request))
        else:
            return render_to_response("error.html",
                                      {"error": "Error adding task to Cuckoo's database."},
                                      context_instance=RequestContext(request))
    else:
        enabledconf = dict()
        enabledconf["vt"] = settings.VTDL_ENABLED
        enabledconf["kernel"] = settings.OPT_ZER0M0N
        enabledconf["memory"] = Config("processing").memory.get("enabled")
        enabledconf["procmemory"] = Config("processing").procmemory.get("enabled")
        enabledconf["tor"] = Config("auxiliary").tor.get("enabled")
        if Config("auxiliary").gateways:
            enabledconf["gateways"] = True
        else:
            enabledconf["gateways"] = False
        enabledconf["tags"] = False
        # Get enabled machinery
        machinery = Config("cuckoo").cuckoo.get("machinery")
        # Get VM names for machinery config elements
        vms = [x.strip() for x in getattr(Config(machinery), machinery).get("machines").split(",")]
        # Check each VM config element for tags
        for vmtag in vms:
            if "tags" in getattr(Config(machinery), vmtag).keys():
                enabledconf["tags"] = True

        files = os.listdir(os.path.join(settings.CUCKOO_PATH, "analyzer", "windows", "modules", "packages"))

        packages = []
        for name in files:
            name = os.path.splitext(name)[0]
            if name == "__init__":
                continue

            packages.append(name)

        # Prepare a list of VM names, description label based on tags.
        machines = []
        for machine in Database().list_machines():
            tags = []
            for tag in machine.tags:
                tags.append(tag.name)

            if tags:
                label = machine.label + ": " + ", ".join(tags)
            else:
                label = machine.label

            machines.append((machine.label, label))

        # Prepend ALL/ANY options.
        machines.insert(0, ("", "First available"))
        machines.insert(1, ("all", "All"))

        return render_to_response("submission/submit_url.html",
                                  {"packages": sorted(packages),
                                   "machines": machines,
                                   "gateways": settings.GATEWAYS,
                                   "config": enabledconf},
                                  context_instance=RequestContext(request))
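submit_url() relies on a force_int() helper that is not part of this excerpt. A plausible reconstruction (an assumption; the real helper may differ) coerces form values to integers and falls back to 0 on bad input:

def force_int(value):
    """Coerce a form value to int, defaulting to 0 on bad or missing input.
    Hypothetical reconstruction of the helper used by submit_url()."""
    try:
        return int(value)
    except (TypeError, ValueError):
        return 0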
Example #52
    def emit(self, record):
        if hasattr(record, "task_id"):
            db = Database()
            db.add_error(record.msg, int(record.task_id))
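The emit() override above belongs to a logging.Handler subclass: any record logged with extra={"task_id": ...} (as the analysis managers below do) gets recorded as a task error in the database. A hedged wiring sketch, with the DatabaseHandler class name assumed for illustration:

import logging

class DatabaseHandler(logging.Handler):
    """Hypothetical handler class around the emit() shown above."""
    def emit(self, record):
        if hasattr(record, "task_id"):
            db = Database()
            db.add_error(record.msg, int(record.task_id))

log = logging.getLogger("analysis")
log.addHandler(DatabaseHandler())
log.error("machine died", extra={"task_id": 42})  # stored against task #42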
Example #53
    def submit_task(
        self,
        target,
        package,
        timeout,
        task_options,
        priority,
        machine,
        platform,
        memory,
        enforce_timeout,
        clock,
        tags,
        parent_id,
        tlp,
    ):

        db = Database()

        if os.path.exists(target):
            task_id = False
            if distributed:
                options = {
                    "package": package,
                    "timeout": timeout,
                    "options": task_options,
                    "priority": priority,
                    # "machine": machine,
                    "platform": platform,
                    "memory": memory,
                    "enforce_timeout": enforce_timeout,
                    "clock": clock,
                    "tags": tags,
                    "parent_id": parent_id,
                }
                try:
                    # Use a context manager so the file handle is closed
                    # even if the POST fails.
                    with open(target, "rb") as f:
                        multipart_file = [("file", (os.path.basename(target), f))]
                        res = requests.post(reporting_conf.submitCAPE.url, files=multipart_file, data=options)
                    if res and res.ok:
                        task_id = res.json()["data"]["task_ids"][0]
                except Exception as e:
                    log.error(e)
            else:
                task_id = db.add_path(
                    file_path=target,
                    package=package,
                    timeout=timeout,
                    options=task_options,
                    priority=priority,  # increase priority to expedite related submission
                    machine=machine,
                    platform=platform,
                    memory=memory,
                    enforce_timeout=enforce_timeout,
                    clock=None,
                    tags=None,
                    parent_id=parent_id,
                    tlp=tlp,
                )
            if task_id:
                log.info('CAPE detection on file "%s": %s - added as CAPE task with ID %s', target, package, task_id)
                return task_id
            else:
                log.warning("Error adding CAPE task to database: %s", package)
        else:
            log.info("File doesn't exist")
Example #54
from sqlalchemy.types import TypeDecorator

Base = declarative_base()


# We need the original database to reserve an ID in it, so that the
# report can be stored later, whether it comes from master or slave.
reporting_conf = Config("reporting")

# Silence noisy third-party loggers.
logging.getLogger("elasticsearch").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)

STATUSES = {}
main_db = Database()

dead_count = 5
if reporting_conf.distributed.dead_count:
    dead_count = reporting_conf.distributed.dead_count

INTERVAL = 10

# Per-node failure counter, used to detect dead nodes.
failed_count = dict()
# Per-node status-poll counter, used to reset the failure count.
status_count = dict()

lock_retriever = threading.Lock()
dist_lock = threading.BoundedSemaphore(int(reporting_conf.distributed.dist_threads))
remove_lock = threading.BoundedSemaphore(20)
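dist_lock above bounds how many distributed submissions may run concurrently (dist_threads from the reporting config). The consuming code is not shown in this excerpt; a sketch of the intended acquire/release pattern (submit_to_node is a hypothetical worker):

def submit_to_node(node_url, task_path):
    """Hypothetical worker body: at most dist_threads of these
    run their critical section at the same time."""
    dist_lock.acquire()
    try:
        pass  # POST the sample to the remote node here (omitted).
    finally:
        dist_lock.release()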
Example #55
class AnalysisManager(threading.Thread):
    """Analysis Manager.

    This class handles the full analysis process for a given task. It takes
    care of selecting the analysis machine, preparing the configuration and
    interacting with the guest agent and analyzer components to launch and
    complete the analysis and store, process and report its results.
    """
    def __init__(self, task, error_queue):
        """@param task: task object containing the details for the analysis."""
        threading.Thread.__init__(self)

        self.task = task
        self.errors = error_queue
        self.cfg = Config()
        self.storage = ""
        self.binary = ""
        self.machine = None
        self.db = Database()

        self.task.options = parse_options(self.task.options)

    def init_storage(self):
        """Initialize analysis storage folder."""
        self.storage = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                    str(self.task.id))

        # If the analysis storage folder already exists, we need to abort the
        # analysis or previous results will be overwritten and lost.
        if os.path.exists(self.storage):
            log.error(
                "Analysis results folder already exists at path \"%s\","
                " analysis aborted", self.storage)
            return False

        # If we're not able to create the analysis storage folder, we have to
        # abort the analysis.
        try:
            create_folder(folder=self.storage)
        except CuckooOperationalError:
            log.error("Unable to create analysis folder %s", self.storage)
            return False

        return True

    def check_file(self):
        """Checks the integrity of the file to be analyzed."""
        sample = self.db.view_sample(self.task.sample_id)

        sha256 = File(self.task.target).get_sha256()
        if sha256 != sample.sha256:
            log.error("Target file has been modified after submission: \"%s\"",
                      self.task.target)
            return False

        return True

    def store_file(self):
        """Store a copy of the file being analyzed."""
        if not os.path.exists(self.task.target):
            log.error(
                "The file to analyze does not exist at path \"%s\", "
                "analysis aborted", self.task.target)
            return False

        sha256 = File(self.task.target).get_sha256()
        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)

        if os.path.exists(self.binary):
            log.info("File already exists at \"%s\"", self.binary)
        else:
            # TODO: do we really need to abort the analysis in case we are not
            # able to store a copy of the file?
            try:
                shutil.copy(self.task.target, self.binary)
            except (IOError, shutil.Error) as e:
                log.error(
                    "Unable to store file from \"%s\" to \"%s\", "
                    "analysis aborted", self.task.target, self.binary)
                return False

        try:
            new_binary_path = os.path.join(self.storage, "binary")

            if hasattr(os, "symlink"):
                os.symlink(self.binary, new_binary_path)
            else:
                shutil.copy(self.binary, new_binary_path)
        except (AttributeError, OSError) as e:
            log.error(
                "Unable to create symlink/copy from \"%s\" to "
                "\"%s\": %s", self.binary, self.storage, e)

        return True

    def acquire_machine(self):
        """Acquire an analysis machine from the pool of available ones."""
        machine = None

        # Start a loop to acquire a machine to run the analysis on.
        while True:
            machine_lock.acquire()

            # In some cases it's possible that we enter this loop without
            # having any available machines. We should make sure this is not
            # such case, or the analysis task will fail completely.
            if not machinery.availables():
                machine_lock.release()
                time.sleep(1)
                continue

            # If the user specified a specific machine ID, a platform to be
            # used or machine tags acquire the machine accordingly.
            machine = machinery.acquire(machine_id=self.task.machine,
                                        platform=self.task.platform,
                                        tags=self.task.tags)

            # If no machine is available at this moment, wait for one second
            # and try again.
            if not machine:
                machine_lock.release()
                log.debug("Task #%d: no machine available yet", self.task.id)
                time.sleep(1)
            else:
                log.info("Task #%d: acquired machine %s (label=%s)",
                         self.task.id, machine.name, machine.label)
                break

        self.machine = machine

    def build_options(self):
        """Generate analysis options.
        @return: options dict.
        """
        options = {}

        if self.task.category == "file":
            options["file_name"] = File(self.task.target).get_name()
            options["file_type"] = File(self.task.target).get_type()
            options["pe_exports"] = \
                ",".join(File(self.task.target).get_exported_functions())

            package, activity = File(self.task.target).get_apk_entry()
            self.task.options["apk_entry"] = "%s:%s" % (package, activity)

        options["id"] = self.task.id
        options["ip"] = self.machine.resultserver_ip
        options["port"] = self.machine.resultserver_port
        options["category"] = self.task.category
        options["target"] = self.task.target
        options["package"] = self.task.package
        options["options"] = emit_options(self.task.options)
        options["enforce_timeout"] = self.task.enforce_timeout
        options["clock"] = self.task.clock
        options["terminate_processes"] = self.cfg.cuckoo.terminate_processes

        if not self.task.timeout:
            options["timeout"] = self.cfg.timeouts.default
        else:
            options["timeout"] = self.task.timeout

        # Copy in other analyzer-specific options (most likely temporary).
        vm_options = getattr(machinery.options, self.machine.name)
        for k in vm_options:
            if k.startswith("analyzer_"):
                options[k] = vm_options[k]

        return options

    def route_network(self):
        """Enable network routing if desired."""
        # Determine the desired routing strategy (none, internet, VPN).
        route = self.task.options.get("route", self.cfg.routing.route)

        if route == "none":
            self.interface = None
            self.rt_table = None
        elif route == "internet" and self.cfg.routing.internet != "none":
            self.interface = self.cfg.routing.internet
            self.rt_table = self.cfg.routing.rt_table
        elif route in vpns:
            self.interface = vpns[route].interface
            self.rt_table = vpns[route].rt_table
        else:
            log.warning(
                "Unknown network routing destination specified, "
                "ignoring routing for this analysis: %r", route)
            self.interface = None
            self.rt_table = None

        # Checking if network interface is still available. If VPN process
        # dies for some reason, tunX interface will be no longer available.
        if self.interface and not rooter("nic_available", self.interface):
            log.error(
                "Network interface '%s' configured for this analysis "
                "is not available in the system. Switching to route=none"
                " mode.", self.interface)
            route = "none"
            self.task.options["route"] = "none"
            self.interface = None
            self.rt_table = None

        if self.interface:
            rooter("forward_enable", self.machine.interface, self.interface,
                   self.machine.ip)

        if self.rt_table:
            rooter("srcroute_enable", self.rt_table, self.machine.ip)

        # Propagate the taken route to the database.
        self.db.set_route(self.task.id, route)

    def unroute_network(self):
        if self.interface:
            rooter("forward_disable", self.machine.interface, self.interface,
                   self.machine.ip)

        if self.rt_table:
            rooter("srcroute_disable", self.rt_table, self.machine.ip)

    def wait_finish(self):
        """Some VMs don't have an actual agent. Mainly those that are used as
        assistance for an analysis through the services auxiliary module. This
        method just waits until the analysis is finished rather than actively
        trying to engage with the Cuckoo Agent."""
        self.db.guest_set_status(self.task.id, "running")
        while self.db.guest_get_status(self.task.id) == "running":
            time.sleep(1)

    def guest_manage(self, options):
        # Handle a special case where we're creating a baseline report of this
        # particular virtual machine - a report containing all the results
        # that are gathered if no additional samples are run in the VM. These
        # results, such as loaded drivers and opened sockets in volatility, or
        # DNS requests to hostnames related to Microsoft Windows, etc., may be
        # omitted or at the very least given less priority when creating a
        # report for an analysis that ran on this VM later on.
        if self.task.category == "baseline":
            time.sleep(options["timeout"])
        else:
            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform, self.task.id)

            # Start the analysis.
            self.db.guest_set_status(self.task.id, "starting")
            monitor = self.task.options.get("monitor", "latest")
            guest.start_analysis(options, monitor)

            # In case the Agent didn't respond and we force-quit the analysis
            # at some point while it was still starting the analysis the state
            # will be "stop" (or anything but "running", really).
            if self.db.guest_get_status(self.task.id) == "starting":
                self.db.guest_set_status(self.task.id, "running")
                guest.wait_for_completion()

            self.db.guest_set_status(self.task.id, "stopping")

    def launch_analysis(self):
        """Start analysis."""
        succeeded = False

        target = self.task.target
        if self.task.category == "file":
            target = os.path.basename(target)

        log.info("Starting analysis of %s \"%s\" (task #%d, options \"%s\")",
                 self.task.category.upper(), target, self.task.id,
                 emit_options(self.task.options))

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category == "file":
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            machine_lock.release()
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # At this point we can tell the ResultServer about it.
        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            self.errors.put(e)

        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()

        # Generate the analysis configuration file.
        options = self.build_options()

        try:
            unlocked = False
            self.interface = None

            # Mark the selected analysis machine in the database as started.
            guest_log = self.db.guest_start(self.task.id, self.machine.name,
                                            self.machine.label,
                                            machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label, self.task)

            # Enable network routing.
            self.route_network()

            # By the time start returns it will have fully started the Virtual
            # Machine. We can now safely release the machine lock.
            machine_lock.release()
            unlocked = True

            # Run and manage the components inside the guest unless this
            # machine has the "noagent" option specified (please refer to the
            # wait_finish() function for more details on this function).
            if "noagent" not in self.machine.options:
                self.guest_manage(options)
            else:
                self.wait_finish()

            succeeded = True
        except CuckooMachineError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
            log.critical(
                "A critical error has occurred trying to use the machine "
                "with name %s during an analysis due to which it is no "
                "longer in a working state, please report this issue and all "
                "of the related environment details to the developers so we "
                "can improve this situation. (Note that before we would "
                "simply remove this VM from doing any more analyses, but as "
                "all the VMs will eventually be depleted that way, hopefully "
                "we'll find a better solution now).",
                self.machine.name,
            )
        except CuckooGuestError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = os.path.join(self.storage, "memory.dmp")
                    machinery.dump_memory(self.machine.label, dump_path)
                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager.")
                except CuckooMachineError as e:
                    log.error(e)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)
            except CuckooMachineError as e:
                log.warning("Unable to stop machine %s: %s",
                            self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            self.db.guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)

            # Drop the network routing rules if any.
            self.unroute_network()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)
            except CuckooMachineError as e:
                log.error(
                    "Unable to release machine %s, reason %s. "
                    "You might need to restore it manually.",
                    self.machine.label, e)

        return succeeded

    def process_results(self):
        """Process the analysis results and generate the enabled reports."""
        results = RunProcessing(task=self.task.to_dict()).run()
        RunSignatures(results=results).run()
        RunReporting(task=self.task.to_dict(), results=results).run()

        # If the target is a file and the user enabled the option,
        # delete the original copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_original:
            if not os.path.exists(self.task.target):
                log.warning(
                    "Original file does not exist anymore: \"%s\": "
                    "File not found.", self.task.target)
            else:
                try:
                    os.remove(self.task.target)
                except OSError as e:
                    log.error(
                        "Unable to delete original file at path "
                        "\"%s\": %s", self.task.target, e)

        # If the target is a file and the user enabled the delete copy of
        # the binary option, then delete the copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_bin_copy:
            if not os.path.exists(self.binary):
                log.warning(
                    "Copy of the original file does not exist anymore: \"%s\": File not found",
                    self.binary)
            else:
                try:
                    os.remove(self.binary)
                except OSError as e:
                    log.error(
                        "Unable to delete the copy of the original file at path \"%s\": %s",
                        self.binary, e)

        log.info("Task #%d: reports generation completed (path=%s)",
                 self.task.id, self.storage)

        return True

    def run(self):
        """Run manager thread."""
        global active_analysis_count
        active_analysis_count += 1
        try:
            self.launch_analysis()

            self.db.set_status(self.task.id, TASK_COMPLETED)

            # If the task is still available in the database, update our task
            # variable with what's in the database, as otherwise we're missing
            # out on the status and completed_on change. This would then in
            # turn throw an exception in the analysisinfo processing module.
            self.task = self.db.view_task(self.task.id) or self.task

            log.debug("Released database task #%d", self.task.id)

            if self.cfg.cuckoo.process_results:
                self.process_results()
                self.db.set_status(self.task.id, TASK_REPORTED)

            # We make a symbolic link ("latest") which links to the latest
            # analysis - this is useful for debugging purposes. This is only
            # supported under systems that support symbolic links.
            if hasattr(os, "symlink"):
                latest = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                      "latest")

                # First we have to remove the existing symbolic link, then we
                # have to create the new one.
                # Deal with race conditions using a lock.
                latest_symlink_lock.acquire()
                try:
                    # As per documentation, lexists() returns True for dead
                    # symbolic links.
                    if os.path.lexists(latest):
                        os.remove(latest)

                    os.symlink(self.storage, latest)
                except OSError as e:
                    log.warning("Error pointing latest analysis symlink: %s" %
                                e)
                finally:
                    latest_symlink_lock.release()

            log.info("Task #%d: analysis procedure completed", self.task.id)
        except Exception:
            log.exception("Failure in AnalysisManager.run")

        active_analysis_count -= 1
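The manager above round-trips task options through parse_options()/emit_options(), turning strings like "free=yes,nohuman=yes" (built by the submission views) into dicts and back. A hedged reconstruction of the pair, assuming exactly that comma-separated key=value format:

def parse_options(options):
    """Parse a "key=value,key2=value2" string into a dict.
    Hedged reconstruction; fields without '=' are skipped."""
    ret = {}
    for field in options.split(","):
        if "=" not in field:
            continue
        key, value = field.split("=", 1)
        ret[key.strip()] = value.strip()
    return ret

def emit_options(options):
    """Inverse of parse_options(): serialize a dict back into a string."""
    return ",".join("%s=%s" % (k, v) for k, v in options.items())

assert parse_options("free=yes,nohuman=yes") == {"free": "yes", "nohuman": "yes"}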
Example #56
class AnalysisManager(threading.Thread):
    """Analysis Manager.

    This class handles the full analysis process for a given task. It takes
    care of selecting the analysis machine, preparing the configuration and
    interacting with the guest agent and analyzer components to launch and
    complete the analysis and store, process and report its results.
    """
    def __init__(self, task, error_queue):
        """@param task: task object containing the details for the analysis."""
        threading.Thread.__init__(self)

        self.task = task
        self.errors = error_queue
        self.cfg = Config()
        self.storage = ""
        self.binary = ""
        self.machine = None
        self.db = Database()
        self.interface = None
        self.rt_table = None

    def init_storage(self):
        """Initialize analysis storage folder."""
        self.storage = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                    str(self.task.id))

        # If the analysis storage folder already exists, we need to abort the
        # analysis or previous results will be overwritten and lost.
        if os.path.exists(self.storage):
            log.error(
                "Task #{0}: Analysis results folder already exists at path '{1}', "
                "analysis aborted".format(self.task.id, self.storage))
            return False

        # If we're not able to create the analysis storage folder, we have to
        # abort the analysis.
        try:
            create_folder(folder=self.storage)
        except CuckooOperationalError:
            log.error("Task #{0}: Unable to create analysis folder {1}".format(
                self.task.id, self.storage))
            return False

        return True

    def check_file(self):
        """Checks the integrity of the file to be analyzed."""
        sample = self.db.view_sample(self.task.sample_id)

        sha256 = File(self.task.target).get_sha256()
        if sha256 != sample.sha256:
            log.error(
                "Task #{0}: Target file has been modified after submission: "
                "'{1}'".format(self.task.id, self.task.target))
            return False

        return True

    def store_file(self):
        """Store a copy of the file being analyzed."""
        if not os.path.exists(self.task.target):
            log.error(
                "Task #{0}: The file to analyze does not exist at path '{1}', "
                "analysis aborted".format(self.task.id, self.task.target))
            return False

        sha256 = File(self.task.target).get_sha256()
        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)

        if os.path.exists(self.binary):
            log.info("Task #{0}: File already exists at '{1}'".format(
                self.task.id, self.binary))
        else:
            # TODO: do we really need to abort the analysis in case we are not
            # able to store a copy of the file?
            try:
                shutil.copy(self.task.target, self.binary)
            except (IOError, shutil.Error) as e:
                log.error(
                    "Task #{0}: Unable to store file from '{1}' to '{2}', "
                    "analysis aborted".format(self.task.id, self.task.target,
                                              self.binary))
                return False

        try:
            new_binary_path = os.path.join(self.storage, "binary")

            if hasattr(os, "symlink"):
                os.symlink(self.binary, new_binary_path)
            else:
                shutil.copy(self.binary, new_binary_path)
        except (AttributeError, OSError) as e:
            log.error("Task #{0}: Unable to create symlink/copy from '{1}' to "
                      "'{2}': {3}".format(self.task.id, self.binary,
                                          self.storage, e))

        return True

    def acquire_machine(self):
        """Acquire an analysis machine from the pool of available ones."""
        machine = None

        # Start a loop to acquire a machine to run the analysis on.
        while True:
            machine_lock.acquire()

            # In some cases it's possible that we enter this loop without
            # having any available machines. We should make sure this is not
            # such case, or the analysis task will fail completely.
            if not machinery.availables():
                machine_lock.release()
                time.sleep(1)
                continue

            # If the user specified a specific machine ID, a platform to be
            # used or machine tags acquire the machine accordingly.
            machine = machinery.acquire(machine_id=self.task.machine,
                                        platform=self.task.platform,
                                        tags=self.task.tags)

            # If no machine is available at this moment, wait for one second
            # and try again.
            if not machine:
                machine_lock.release()
                log.debug("Task #{0}: no machine available yet".format(
                    self.task.id))
                time.sleep(1)
            else:
                log.info("Task #{0}: acquired machine {1} (label={2})".format(
                    self.task.id, machine.name, machine.label))
                break

        self.machine = machine

    def build_options(self):
        """Generate analysis options.
        @return: options dict.
        """
        options = {}

        options["id"] = self.task.id
        options["ip"] = self.machine.resultserver_ip
        options["port"] = self.machine.resultserver_port
        options["category"] = self.task.category
        options["target"] = self.task.target
        options["package"] = self.task.package

        if self.task.package == "service":
            if "service-dll-of-interest" not in self.task.options:
                if self.task.options == "":
                    self.task.options = "service-dll-of-interest=c:\\windows\\system32\\nwsapagent.dll"
                else:
                    self.task.options += ",service-dll-of-interest=c:\\windows\\system32\\nwsapagent.dll"
        options["options"] = self.task.options

        options["enforce_timeout"] = self.task.enforce_timeout
        options["clock"] = self.task.clock
        options["terminate_processes"] = self.cfg.cuckoo.terminate_processes

        if not self.task.timeout or self.task.timeout == 0:
            options["timeout"] = self.cfg.timeouts.default
        else:
            options["timeout"] = self.task.timeout

        if self.task.category == "file":
            options["file_name"] = File(self.task.target).get_name()
            options["file_type"] = File(self.task.target).get_type()
            # If it's a PE file, collect export information to help choose
            # the right analysis package.
            options["exports"] = ""
            if HAVE_PEFILE and ("PE32" in options["file_type"] or
                                "MS-DOS executable" in options["file_type"]):
                try:
                    pe = pefile.PE(self.task.target)
                    if hasattr(pe, "DIRECTORY_ENTRY_EXPORT"):
                        exports = []
                        for exported_symbol in pe.DIRECTORY_ENTRY_EXPORT.symbols:
                            exports.append(
                                re.sub(r'[^A-Za-z0-9_?@-]', '',
                                       exported_symbol.name))
                        options["exports"] = ",".join(exports)
                except Exception:
                    pass

        return options

    def launch_analysis(self):
        """Start analysis."""
        succeeded = False
        dead_machine = False

        log.info("Task #{0}: Starting analysis of {1} '{2}'".format(
            self.task.id, self.task.category.upper(), self.task.target))

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category in ["file", "pcap"]:
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        if self.task.category == "pcap":
            # symlink the "binary" to dump.pcap
            if hasattr(os, "symlink"):
                os.symlink(self.binary, os.path.join(self.storage,
                                                     "dump.pcap"))
            else:
                shutil.copy(self.binary, os.path.join(self.storage,
                                                      "dump.pcap"))
            # create the logs/files directories as
            # normally the resultserver would do it
            dirnames = ["logs", "files", "aux"]
            for dirname in dirnames:
                try:
                    os.makedirs(os.path.join(self.storage, dirname))
                except OSError:
                    pass
            return True

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            machine_lock.release()
            log.error("Task #{0}: Cannot acquire machine: {1}".format(
                self.task.id, e))
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        # At this point we can tell the ResultServer about it.
        try:
            ResultServer().add_task(self.task, self.machine)
        except Exception as e:
            machinery.release(self.machine.label)
            self.errors.put(e)

        aux = RunAuxiliary(task=self.task, machine=self.machine)

        try:
            unlocked = False

            # Mark the selected analysis machine in the database as started.
            guest_log = self.db.guest_start(self.task.id, self.machine.name,
                                            self.machine.label,
                                            machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)
            # Enable network routing.
            self.route_network()

            # By the time start returns it will have fully started the Virtual
            # Machine. We can now safely release the machine lock.
            machine_lock.release()
            unlocked = True

            aux.start()

            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform)

            options["clock"] = self.db.update_clock(self.task.id)
            # Start the analysis.
            guest.start_analysis(options)

            guest.wait_for_completion()
            succeeded = True
        except CuckooMachineError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
            dead_machine = True
        except CuckooGuestError as e:
            if not unlocked:
                machine_lock.release()
            log.error(str(e), extra={"task_id": self.task.id})
        finally:
            # Stop Auxiliary modules.
            aux.stop()

            # Take a memory dump of the machine before shutting it off.
            if self.cfg.cuckoo.memory_dump or self.task.memory:
                try:
                    dump_path = os.path.join(self.storage, "memory.dmp")
                    machinery.dump_memory(self.machine.label, dump_path)

                except NotImplementedError:
                    log.error("The memory dump functionality is not available "
                              "for the current machine manager.")

                except CuckooMachineError as e:
                    log.error(e)

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)

            except CuckooMachineError as e:
                log.warning(
                    "Task #{0}: Unable to stop machine {1}: {2}".format(
                        self.task.id, self.machine.label, e))

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            self.db.guest_stop(guest_log)

            # After all this, we can make the ResultServer forget about the
            # internal state for this analysis task.
            ResultServer().del_task(self.task, self.machine)
            # Drop the network routing rules if any.
            self.unroute_network()

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                self.db.guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

            try:
                # Release the analysis machine. But only if the machine has
                # not turned dead yet.
                machinery.release(self.machine.label)

            except CuckooMachineError as e:
                log.error("Task #{0}: Unable to release machine {1}, reason "
                          "{2}. You might need to restore it manually.".format(
                              self.task.id, self.machine.label, e))

        return succeeded

    def process_results(self):
        """Process the analysis results and generate the enabled reports."""
        # This is the results container. It's what will be used by all the
        # reporting modules to make it consumable by humans and machines.
        # It will contain all the results generated by every processing
        # module available. Its structure can be observed through the JSON
        # dump in the analysis' reports folder. (If jsondump is enabled.)
        results = {
            "statistics": {
                "processing": [],
                "signatures": [],
                "reporting": [],
            },
        }
        GetFeeds(results=results).run()
        RunProcessing(task=self.task.to_dict(), results=results).run()
        RunSignatures(task=self.task.to_dict(), results=results).run()
        RunReporting(task=self.task.to_dict(), results=results).run()

        # If the target is a file and the user enabled the option,
        # delete the original copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_original:
            if not os.path.exists(self.task.target):
                log.warning("Task #{0}: Original file does not exist anymore: "
                            "'{1}': File not found.".format(
                                self.task.id, self.task.target))
            else:
                try:
                    os.remove(self.task.target)

                except OSError as e:
                    log.error("Task #{0}: Unable to delete original file at "
                              "path '{1}': {2}".format(self.task.id,
                                                       self.task.target, e))

        # If the target is a file and the user enabled the delete copy of
        # the binary option, then delete the copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_bin_copy:
            if not os.path.exists(self.binary):
                log.warning(
                    "Task #{0}: Copy of the original file does not exist anymore: '{1}': "
                    "File not found".format(self.task.id, self.binary))
            else:
                try:
                    os.remove(self.binary)
                except OSError as e:
                    log.error(
                        "Task #{0}: Unable to delete the copy of the original file at path "
                        "'{1}': {2}".format(self.task.id, self.binary, e))

        log.info("Task #{0}: reports generation completed (path={1})".format(
            self.task.id, self.storage))

        return True

    def run(self):
        """Run manager thread."""
        global active_analysis_count
        active_analysis_count += 1
        try:
            while True:
                try:
                    success = self.launch_analysis()
                except CuckooDeadMachine:
                    continue

                break

            self.db.set_status(self.task.id, TASK_COMPLETED)

            # If the task is still available in the database, update our task
            # variable with what's in the database, as otherwise we're missing
            # out on the status and completed_on change. That would in turn
            # throw an exception in the analysisinfo processing module.
            self.task = self.db.view_task(self.task.id) or self.task

            log.debug(
                "Task #{0}: Released database task with status {1}".format(
                    self.task.id, success))

            if self.cfg.cuckoo.process_results:
                self.process_results()
                self.db.set_status(self.task.id, TASK_REPORTED)

            # We make a symbolic link ("latest") which links to the latest
            # analysis - this is useful for debugging purposes. This is only
            # supported under systems that support symbolic links.
            if hasattr(os, "symlink"):
                latest = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                      "latest")

                # First we have to remove the existing symbolic link, then we
                # have to create the new one.
                # Deal with race conditions using a lock.
                latest_symlink_lock.acquire()
                try:
                    # As per documentation, lexists() returns True for dead
                    # symbolic links.
                    if os.path.lexists(latest):
                        os.remove(latest)

                    os.symlink(self.storage, latest)
                except OSError as e:
                    log.warning(
                        "Task #{0}: Error pointing latest analysis symlink: {1}"
                        .format(self.task.id, e))
                finally:
                    latest_symlink_lock.release()

            log.info("Task #{0}: analysis procedure completed".format(
                self.task.id))
        except Exception as e:
            log.exception(
                "Task #{0}: Failure in AnalysisManager.run: {1}".format(
                    self.task.id, e))

        active_analysis_count -= 1

    def route_network(self):
        """Enable network routing if desired."""
        # Determine the desired routing strategy (none, internet, VPN).
        self.route = "none"
        if self.task.options:
            for option in self.task.options.split(","):
                key, value = option.split("=")
                if key == "route":
                    self.route = value
                    break
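        # Illustrative note (assumed option syntax, matching the parsing
        # above): task.options = "route=inetsim,foo=bar" selects the
        # "inetsim" strategy; the "foo=bar" pair is just a placeholder.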

        if self.route == "none":
            self.interface = None
            self.rt_table = None
        elif self.route == "inetsim":
            self.interface = self.cfg.routing.inetsim_interface
        elif self.route == "tor":
            self.interface = self.cfg.routing.tor_interface
        elif self.route == "internet" and self.cfg.routing.internet != "none":
            self.interface = self.cfg.routing.internet
            self.rt_table = self.cfg.routing.rt_table
        elif self.route in vpns:
            self.interface = vpns[self.route].interface
            self.rt_table = vpns[self.route].rt_table
        else:
            log.warning(
                "Unknown network routing destination specified, "
                "ignoring routing for this analysis: %r", self.route)
            self.interface = None
            self.rt_table = None
            self.route = "none"

        # Check if the network interface is still available. If a VPN dies for
        # some reason, its tunX interface will no longer be available.
        if self.interface and not rooter("nic_available", self.interface):
            log.error(
                "The network interface '%s' configured for this analysis is "
                "not available at the moment, switching to route=none mode.",
                self.interface)
            self.route = "tor"
            self.interface = self.cfg.routing.tor_interface
            self.rt_table = None

        if self.route == "inetsim":
            rooter("inetsim_enable", self.machine.ip,
                   self.cfg.routing.inetsim_server,
                   str(self.cfg.resultserver.port))

        if self.route == "tor":
            rooter("tor_enable", self.machine.ip,
                   str(self.cfg.resultserver.port),
                   str(self.cfg.routing.tor_dnsport),
                   str(self.cfg.routing.tor_proxyport))

        if self.route == "none":
            rooter("drop_enable", self.machine.ip,
                   str(self.cfg.resultserver.port))

        if self.interface:
            rooter("forward_enable", self.machine.interface, self.interface,
                   self.machine.ip)

        if self.rt_table:
            rooter("srcroute_enable", self.rt_table, self.machine.ip)

    def unroute_network(self):
        if self.interface:
            rooter("forward_disable", self.machine.interface, self.interface,
                   self.machine.ip)

        if self.rt_table:
            rooter("srcroute_disable", self.rt_table, self.machine.ip)

        if self.route == "inetsim":
            rooter("inetsim_disable", self.machine.ip,
                   self.cfg.routing.inetsim_server,
                   str(self.cfg.resultserver.port))

        if self.route == "tor":
            rooter("tor_disable", self.machine.ip,
                   str(self.cfg.resultserver.port),
                   str(self.cfg.routing.tor_dnsport),
                   str(self.cfg.routing.tor_proxyport))

        if self.route == "none":
            rooter("drop_disable", self.machine.ip,
                   str(self.cfg.resultserver.port))
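
The route_network()/unroute_network() pair above is deliberately symmetric: every rooter("*_enable", ...) call has a matching "*_disable" counterpart executed during teardown. A minimal sketch of that pattern as a context manager, assuming only the rooter() helper used above (the routed() name is illustrative, not part of the codebase):

import contextlib

@contextlib.contextmanager
def routed(rooter, machine_ip, resultserver_port):
    # Hypothetical helper: apply drop-routing for a guest and always undo
    # it on exit, mirroring the enable/disable symmetry shown above.
    rooter("drop_enable", machine_ip, str(resultserver_port))
    try:
        yield
    finally:
        rooter("drop_disable", machine_ip, str(resultserver_port))

# Usage sketch:
# with routed(rooter, machine.ip, cfg.resultserver.port):
#     ...  # run the analysis; teardown happens even on failure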
Example #57
0
def search(request):
    if "search" not in request.POST:
        return render_to_response("analysis/search.html", {
            "analyses": None,
            "term": None,
            "error": None
        },
                                  context_instance=RequestContext(request))

    search = request.POST["search"].strip()
    if ":" in search:
        term, value = search.split(":", 1)
    else:
        term, value = "", search

    if term:
        # Check on search size.
        if len(value) < 3:
            return render_to_response(
                "analysis/search.html", {
                    "analyses": None,
                    "term": request.POST["search"],
                    "error":
                    "Search term too short, minimum 3 characters required"
                },
                context_instance=RequestContext(request))
        # name:foo or name: foo
        value = value.lstrip()

        # Search logic.
        if term == "name":
            records = results_db.analysis.find({
                "target.file.name": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "type":
            records = results_db.analysis.find({
                "target.file.type": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "string":
            records = results_db.analysis.find({
                "strings": {
                    "$regex": value,
                    "$options": "-1"
                }
            }).sort([["_id", -1]])
        elif term == "ssdeep":
            records = results_db.analysis.find({
                "target.file.ssdeep": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "crc32":
            records = results_db.analysis.find({
                "target.file.crc32": value
            }).sort([["_id", -1]])
        elif term == "file":
            records = results_db.analysis.find({
                "behavior.summary.files": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "key":
            records = results_db.analysis.find({
                "behavior.summary.keys": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "mutex":
            records = results_db.analysis.find({
                "behavior.summary.mutexes": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "domain":
            records = results_db.analysis.find({
                "network.domains.domain": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "ip":
            records = results_db.analysis.find({
                "network.hosts": value
            }).sort([["_id", -1]])
        elif term == "signature":
            records = results_db.analysis.find({
                "signatures.description": {
                    "$regex": value,
                    "$options": "-i"
                }
            }).sort([["_id", -1]])
        elif term == "url":
            records = results_db.analysis.find({
                "target.url": value
            }).sort([["_id", -1]])
        elif term == "imphash":
            records = results_db.analysis.find({
                "static.pe_imphash": value
            }).sort([["_id", -1]])
        else:
            return render_to_response("analysis/search.html", {
                "analyses": None,
                "term": request.POST["search"],
                "error": "Invalid search term: %s" % term
            },
                                      context_instance=RequestContext(request))
    else:
        value = value.lower()

        if re.match(r"^([a-fA-F\d]{32})$", value):
            records = results_db.analysis.find({
                "target.file.md5": value
            }).sort([["_id", -1]])
        elif re.match(r"^([a-fA-F\d]{40})$", value):
            records = results_db.analysis.find({
                "target.file.sha1": value
            }).sort([["_id", -1]])
        elif re.match(r"^([a-fA-F\d]{64})$", value):
            records = results_db.analysis.find({
                "target.file.sha256": value
            }).sort([["_id", -1]])
        elif re.match(r"^([a-fA-F\d]{128})$", value):
            records = results_db.analysis.find({
                "target.file.sha512": value
            }).sort([["_id", -1]])
        else:
            return render_to_response(
                "analysis/search.html", {
                    "analyses": None,
                    "term": None,
                    "error": "Unable to recognize the search syntax"
                },
                context_instance=RequestContext(request))

    # Get data from cuckoo db.
    db = Database()
    analyses = []

    for result in records:
        new = db.view_task(result["info"]["id"])

        if not new:
            continue

        new = new.to_dict()

        if result["info"]["category"] == "file":
            if new["sample_id"]:
                sample = db.view_sample(new["sample_id"])
                if sample:
                    new["sample"] = sample.to_dict()

        analyses.append(new)

    return render_to_response("analysis/search.html", {
        "analyses": analyses,
        "term": request.POST["search"],
        "error": None
    },
                              context_instance=RequestContext(request))
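
The long elif chain above maps each search term onto a MongoDB field, using either a case-insensitive regex or an exact match. A possible table-driven sketch of the same dispatch, assuming the results_db handle and legacy cursor API used above (REGEX_TERMS, EXACT_TERMS and build_query are illustrative names):

REGEX_TERMS = {
    "name": "target.file.name",
    "type": "target.file.type",
    "string": "strings",
    "ssdeep": "target.file.ssdeep",
    "file": "behavior.summary.files",
    "key": "behavior.summary.keys",
    "mutex": "behavior.summary.mutexes",
    "domain": "network.domains.domain",
    "signature": "signatures.description",
}

EXACT_TERMS = {
    "crc32": "target.file.crc32",
    "ip": "network.hosts",
    "url": "target.url",
    "imphash": "static.pe_imphash",
}

def build_query(term, value):
    # Return a MongoDB filter dict for a search term, or None if unknown.
    # "$options": "i" requests a case-insensitive regex match.
    if term in REGEX_TERMS:
        return {REGEX_TERMS[term]: {"$regex": value, "$options": "i"}}
    if term in EXACT_TERMS:
        return {EXACT_TERMS[term]: value}
    return None

# query = build_query(term, value)
# records = results_db.analysis.find(query).sort([["_id", -1]]) if query else None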
Example #58
0
    def run(self):
        """Run information gathering.
        @return: information dict.
        """
        self.key = "info"

        db = Database()
        dbtask = db.view_task(self.task["id"], details=True)

        if dbtask:
            task = dbtask.to_dict()
        else:
            # task is gone from the database
            if os.path.isfile(self.taskinfo_path):
                # we've got task.json, so grab info from there
                task = json_decode(open(self.taskinfo_path).read())
            else:
                # we don't have any info on the task :(
                emptytask = Task()
                emptytask.id = self.task["id"]
                task = emptytask.to_dict()

        filepath = os.path.join(CUCKOO_ROOT, ".git", "refs", "heads", "master")

        if os.path.exists(filepath) and os.access(filepath, os.R_OK):
            with open(filepath, "rb") as f:
                git_head = f.read().strip()
        else:
            git_head = None

        filepath = os.path.join(CUCKOO_ROOT, ".git", "FETCH_HEAD")

        if os.path.exists(filepath) and os.access(filepath, os.R_OK):
            with open(filepath, "rb") as f:
                git_fetch_head = f.read().strip()

            # Only obtain the hash.
            if git_fetch_head:
                git_fetch_head = git_fetch_head.split()[0]
        else:
            git_fetch_head = None

        monitor = os.path.join(CUCKOO_ROOT, "data", "monitor",
                               task["options"].get("monitor", "latest"))

        if os.path.islink(monitor):
            monitor = os.readlink(monitor)
        elif os.path.isfile(monitor):
            with open(monitor, "rb") as f:
                monitor = f.read().strip()
        elif os.path.isdir(monitor):
            monitor = os.path.basename(monitor)
        else:
            monitor = None

        return dict(
            version=CUCKOO_VERSION,
            git={
                "head": git_head,
                "fetch_head": git_fetch_head,
            },
            monitor=monitor,
            started=task["started_on"],
            ended=task.get("completed_on", "none"),
            duration=task.get("duration", -1),
            id=int(task["id"]),
            category=task["category"],
            custom=task["custom"],
            owner=task["owner"],
            machine=task["guest"],
            package=task["package"],
            platform=task["platform"],
            options=emit_options(task["options"]),
            route=task["route"],
        )
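
The two .git reads above repeat the same exists/access/read/strip pattern. A small sketch factoring it into a helper, assuming the CUCKOO_ROOT constant imported by this module (read_git_ref is an illustrative name):

import os

def read_git_ref(*path_parts):
    # Sketch: return the stripped contents of a file under <CUCKOO_ROOT>/.git,
    # or None when the file is missing or unreadable.
    filepath = os.path.join(CUCKOO_ROOT, ".git", *path_parts)
    if not (os.path.exists(filepath) and os.access(filepath, os.R_OK)):
        return None
    with open(filepath, "rb") as f:
        return f.read().strip()

# git_head = read_git_ref("refs", "heads", "master")
# fetch_head = read_git_ref("FETCH_HEAD")
# git_fetch_head = fetch_head.split()[0] if fetch_head else None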
Example #59
0
def remove(request, task_id):
    """Remove an analysis.
    @todo: remove folder from storage.
    """
    anals = results_db.analysis.find({"info.id": int(task_id)})

    # Check whether multiple analyses exist with the same ID, e.g. when
    # process.py was run manually more than once.
    if anals.count() > 1:
        message = "Multiple tasks with this ID deleted, thanks for all the fish. (The specified analysis was duplicated in mongo)"
    elif anals.count() == 1:
        message = "Task deleted, thanks for all the fish."

    if anals.count() > 0:
        # Delete dups too.
        for analysis in anals:
            # Delete sample if not used.
            if "file_id" in analysis["target"]:
                if results_db.analysis.find({
                        "target.file_id":
                        ObjectId(analysis["target"]["file_id"])
                }).count() == 1:
                    fs.delete(ObjectId(analysis["target"]["file_id"]))

            # Delete screenshots.
            for shot in analysis["shots"]:
                if results_db.analysis.find({
                        "shots": ObjectId(shot)
                }).count() == 1:
                    fs.delete(ObjectId(shot))

            # Delete network pcap.
            if "pcap_id" in analysis["network"] and results_db.analysis.find({
                    "network.pcap_id":
                    ObjectId(analysis["network"]["pcap_id"])
            }).count() == 1:
                fs.delete(ObjectId(analysis["network"]["pcap_id"]))

            # Delete sorted pcap.
            sorted_pcap_id = analysis["network"].get("sorted_pcap_id")
            if sorted_pcap_id and results_db.analysis.find({
                "network.sorted_pcap_id": ObjectId(sorted_pcap_id)
            }).count() == 1:
                fs.delete(ObjectId(sorted_pcap_id))

            # Delete mitmproxy dump.
            mitmproxy_id = analysis["network"].get("mitmproxy_id")
            if mitmproxy_id and results_db.analysis.find({
                "network.mitmproxy_id": ObjectId(mitmproxy_id)
            }).count() == 1:
                fs.delete(ObjectId(mitmproxy_id))

            # Delete dropped.
            for drop in analysis["dropped"]:
                if "object_id" in drop and results_db.analysis.find({
                        "dropped.object_id":
                        ObjectId(drop["object_id"])
                }).count() == 1:
                    fs.delete(ObjectId(drop["object_id"]))

            # Delete calls.
            for process in analysis.get("behavior", {}).get("processes", []):
                for call in process["calls"]:
                    results_db.calls.remove({"_id": ObjectId(call)})

            # Delete analysis data.
            results_db.analysis.remove({"_id": ObjectId(analysis["_id"])})
    else:
        return render_to_response(
            "error.html", {"error": "The specified analysis does not exist"},
            context_instance=RequestContext(request))

    # Delete from SQL db.
    db = Database()
    db.delete_task(task_id)

    return render_to_response("success.html", {"message": message},
                              context_instance=RequestContext(request))
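
Every GridFS deletion above applies the same rule: a stored file is removed only when exactly one analysis document still references it, so artifacts shared by duplicate tasks survive. A compact sketch of that rule, assuming the results_db/fs handles and the legacy pymongo cursor.count() used above (delete_if_unreferenced is an illustrative name):

from bson.objectid import ObjectId

def delete_if_unreferenced(results_db, fs, field, file_id):
    # Sketch: delete a GridFS file only if a single analysis document
    # still references it, mirroring the checks in the view above.
    if results_db.analysis.find({field: ObjectId(file_id)}).count() == 1:
        fs.delete(ObjectId(file_id))

# delete_if_unreferenced(results_db, fs, "network.pcap_id",
#                        analysis["network"]["pcap_id"])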
Example #60
0
def import_analysis(request):
    if request.method == "GET":
        return render(request, "analysis/import.html")

    db = Database()
    task_ids = []
    analyses = request.FILES.getlist("sample")

    for analysis in analyses:
        # Reset per-archive state: without this, the "if not task_id" check
        # below raises NameError when the first archive creates no task.
        task_id = None

        if not analysis.size:
            return render(request, "error.html", {
                "error": "You uploaded an empty analysis.",
            })

        # if analysis.size > settings.MAX_UPLOAD_SIZE:
        #     return render(request, "error.html", {
        #         "error": "You uploaded a file that exceeds the maximum allowed upload size.",
        #     })

        if not analysis.name.endswith(".zip"):
            return render(request, "error.html", {
                "error": "You uploaded an analysis that wasn't a .zip.",
            })

        zf = zipfile.ZipFile(analysis)

        # As the Python documentation warns, zipfile does not sanitize member
        # names, so reject absolute paths, traversal and drive separators.
        for filename in zf.namelist():
            if filename.startswith("/") or ".." in filename or ":" in filename:
                return render(request, "error.html", {
                    "error": "The zip file contains incorrect filenames, "
                             "please provide a legitimate .zip file.",
                })

        analysis_info = json.loads(zf.read("analysis.json"))
        category = analysis_info["target"]["category"]

        if category == "file":
            binary = store_temp_file(zf.read("binary"), "binary")

            if os.path.isfile(binary):
                task_id = db.add_path(file_path=binary,
                                      package="",
                                      timeout=0,
                                      options="",
                                      priority=0,
                                      machine="",
                                      custom="",
                                      memory=False,
                                      enforce_timeout=False,
                                      tags=None)
                if task_id:
                    task_ids.append(task_id)

        elif category == "url":
            url = analysis_info["target"]["url"]
            if not url:
                return render(request, "error.html", {
                    "error": "You specified an invalid URL!",
                })

            task_id = db.add_url(url=url,
                                 package="",
                                 timeout=0,
                                 options="",
                                 priority=0,
                                 machine="",
                                 custom="",
                                 memory=False,
                                 enforce_timeout=False,
                                 tags=None)
            if task_id:
                task_ids.append(task_id)

        if not task_id:
            continue

        # Extract all of the files related to this analysis. This probably
        # requires some hacks depending on the user/group the Web
        # Interface is running under.
        analysis_path = os.path.join(
            CUCKOO_ROOT, "storage", "analyses", "%d" % task_id
        )

        if not os.path.exists(analysis_path):
            os.mkdir(analysis_path)

        zf.extractall(analysis_path)

        # We set this analysis as completed so that it will be processed
        # automatically (assuming process.py / process2.py is running).
        db.set_status(task_id, TASK_COMPLETED)

    if task_ids:
        return render(request, "submission/complete.html", {
            "tasks": task_ids,
            "baseurl": request.build_absolute_uri("/")[:-1],
        })
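
The member-name check above is the load-bearing safety step before zf.extractall(), since zipfile performs no sanitization of its own. A standalone sketch of the same rule (safe_zip_members is an illustrative name):

import zipfile

def safe_zip_members(zf):
    # Sketch: True only if no member name is absolute, traverses upwards
    # with "..", or carries a drive/stream separator -- the rule above.
    for filename in zf.namelist():
        if filename.startswith("/") or ".." in filename or ":" in filename:
            return False
    return True

# with zipfile.ZipFile(path) as zf:
#     if safe_zip_members(zf):
#         zf.extractall(analysis_path)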