Example #1
0
def _generate_test_job_with_file(params, filename):
    """Create and save a Job whose file field comes from a test fixture.

    Args:
        params: dict of Job field values; mutated in place — "file" and
            "md5" entries are added before constructing the Job.
        filename: name of the fixture file under
            ``<PROJECT_LOCATION>/test_files``.

    Returns:
        The saved Job instance.
    """
    # BUG FIX: the path previously hard-coded the literal "(unknown)"
    # instead of interpolating `filename`, so the parameter was ignored
    # and open() always failed.
    test_file = f"{settings.PROJECT_LOCATION}/test_files/{filename}"
    with open(test_file, "rb") as f:
        django_file = File(f)
        params["file"] = django_file
        # md5 is computed over the full file content
        params["md5"] = hashlib.md5(django_file.file.read()).hexdigest()
        test_job = Job(**params)
        test_job.save()
    return test_job
Example #2
0
def _generate_test_job_with_file(params, filename):
    """Build and persist a Job with the given test fixture attached.

    Adds "file" and "md5" entries to ``params`` (mutating it), then
    constructs and saves the Job. Returns the saved instance.
    """
    fixture_path = "{}/test_files/{}".format(settings.PROJECT_LOCATION, filename)
    with open(fixture_path, "rb") as fixture:
        wrapped_file = File(fixture)
        params["file"] = wrapped_file
        params["md5"] = hashlib.md5(wrapped_file.file.read()).hexdigest()
        job = Job(**params)
        job.save()
    return job
Example #3
0
 def setUp(self):
     """Create and save an observable test Job, caching its key fields."""
     job_params = self.get_params()
     # md5 is derived from the observable name itself
     digest = hashlib.md5(job_params["observable_name"].encode("utf-8"))
     job_params["md5"] = digest.hexdigest()
     job = Job(**job_params)
     job.save()
     self.job_id = job.id
     self.observable_name = job.observable_name
     self.observable_classification = job.observable_classification
Example #4
0
 def setUp(self):
     """Prepare a file-based test Job; analyzer list may come from env."""
     super().setUp()
     job_params = self.get_params()
     self.test_job = Job(**job_params)
     # an explicit analyzer list from the environment takes precedence
     if self.analyzers_to_test:
         self.test_job.analyzers_to_execute = self.analyzers_to_test
     self._read_file_save_job(filename=job_params["file_name"])
Example #5
0
class _FileAnalyzersScriptsTestCase(_AbstractAnalyzersScriptTestCase):
    """Test case for file-sample analyzers."""

    # per-analyzer runtime configuration overrides used during these tests
    runtime_configuration = {
        "VirusTotal_v2_Scan_File": {"wait_for_scan_anyway": True, "max_tries": 1},
        "VirusTotal_v3_Scan_File": {"max_tries": 1, "poll_distance": 1},
        "VirusTotal_v3_Get_File": {"max_tries": 1, "poll_distance": 1},
        "VirusTotal_v3_Get_File_And_Scan": {
            "max_tries": 1,
            "poll_distance": 1,
            "force_active_scan": True,
            "force_active_scan_if_old": False,
        },
        "Cuckoo_Scan": {"max_poll_tries": 1, "max_post_tries": 1},
        "PEframe_Scan": {"max_tries": 1},
        "MWDB_Scan": {
            "upload_file": True,
            "max_tries": 1,
        },
        "Doc_Info_Experimental": {
            "additional_passwords_to_check": ["testpassword"],
            "experimental": True,
        },
    }

    @classmethod
    def get_params(cls):
        """Return the base params, marked as a file sample."""
        return {
            **super().get_params(),
            "is_sample": True,
        }

    def setUp(self):
        super().setUp()
        # get params
        params = self.get_params()
        # save job instance
        self.test_job = Job(**params)
        # overwrite if set in env var
        if len(self.analyzers_to_test):
            self.test_job.analyzers_to_execute = self.analyzers_to_test
        self._read_file_save_job(filename=params["file_name"])

    def _read_file_save_job(self, filename: str):
        """Attach the named fixture file to ``self.test_job``, compute its
        md5 and save the job."""
        # BUG FIX: the path previously hard-coded the literal "(unknown)"
        # instead of interpolating `filename`, so the parameter was ignored
        # and open() always failed.
        test_file = f"{settings.PROJECT_LOCATION}/test_files/{filename}"
        with open(test_file, "rb") as f:
            self.test_job.file = File(f)
            self.test_job.md5 = hashlib.md5(f.read()).hexdigest()
            self.test_job.save()
Example #6
0
def get_observable_data(job_id):
    """Return ``(observable_name, observable_classification)`` for a job."""
    job = Job.object_by_job_id(job_id)
    return job.observable_name, job.observable_classification
Example #7
0
def get_filepath_filename(job_id):
    """Return ``(file_path, filename)`` for a job.

    Both values are fetched in one DB access so that analyzers do not
    need to touch the database again until the end of the analysis.
    """
    job = Job.object_by_job_id(job_id)
    return job.file.path, job.file_name
Example #8
0
 def setUp(self):
     """Save a hash-observable test Job built from the TEST_MD5 env var."""
     job_params = {
         "source": "test",
         "is_sample": False,
         "observable_name": os.environ.get("TEST_MD5", ""),
         "observable_classification": "hash",
         "force_privacy": False,
         "analyzers_requested": ["test"],
     }
     encoded_name = job_params["observable_name"].encode("utf-8")
     job_params["md5"] = hashlib.md5(encoded_name).hexdigest()
     job = Job(**job_params)
     job.save()
     self.job_id = job.id
     self.observable_name = job.observable_name
     self.observable_classification = job.observable_classification
Example #9
0
 def setUp(self):
     """Save a domain-observable test Job (TEST_DOMAIN env var or google)."""
     job_params = {
         "source": "test",
         "is_sample": False,
         "observable_name": os.environ.get("TEST_DOMAIN", "www.google.com"),
         "observable_classification": "domain",
         "force_privacy": False,
         "analyzers_requested": ["test"],
     }
     encoded_name = job_params["observable_name"].encode("utf-8")
     job_params["md5"] = hashlib.md5(encoded_name).hexdigest()
     job = Job(**job_params)
     job.save()
     self.job_id = job.id
     self.observable_name = job.observable_name
     self.observable_classification = job.observable_classification
Example #10
0
def set_job_status(job_id, status, errors=None):
    """Persist a new status (and optional extra errors) on the job.

    Failures are logged at error level, everything else at info level.
    """
    message = f"setting job_id {job_id} to status {status}"
    log = logger.error if status == "failed" else logger.info
    log(message)
    job = Job.object_by_job_id(job_id)
    if errors:
        job.errors.extend(errors)
    job.status = status
    job.save()
Example #11
0
class _ObservableAnalyzersScriptsTestCase(_AbstractAnalyzersScriptTestCase):
    """Test case for observable (non-sample) analyzers."""

    # per-analyzer runtime configuration overrides used during these tests
    runtime_configuration = {
        "Triage_Search": {
            "max_tries": 1,
        },
        "VirusTotal_v3_Get_Observable": {
            "max_tries": 1,
            "poll_distance": 1,
        },
    }

    @classmethod
    def get_params(cls):
        """Return the base params, marked as a non-sample observable."""
        return dict(super().get_params(), is_sample=False)

    def setUp(self):
        """Build and save the test Job, choosing which analyzers to run."""
        super().setUp()
        params = self.get_params()
        digest = hashlib.md5(params["observable_name"].encode("utf-8"))
        params["md5"] = digest.hexdigest()
        self.test_job = Job(**params)
        # an env-provided analyzer list wins; otherwise run every analyzer
        # that supports this observable classification
        if self.analyzers_to_test:
            self.test_job.analyzers_to_execute = self.analyzers_to_test
        else:
            classification = params["observable_classification"]
            self.test_job.analyzers_to_execute = [
                cfg.name
                for cfg in self.analyzer_configs.values()
                if cfg.is_observable_type_supported(classification)
            ]
        self.test_job.save()
Example #12
0
 def setUp(self):
     """Save a hash-observable Job (TEST_MD5 env var or a fixed default)."""
     default_md5 = "446c5fbb11b9ce058450555c1c27153c"
     job_params = {
         "source": "test",
         "is_sample": False,
         "observable_name": os.environ.get("TEST_MD5", default_md5),
         "observable_classification": "hash",
         "force_privacy": False,
         "analyzers_requested": ["test"],
     }
     encoded_name = job_params["observable_name"].encode("utf-8")
     job_params["md5"] = hashlib.md5(encoded_name).hexdigest()
     job = Job(**job_params)
     job.save()
     self.job_id = job.id
     self.observable_name = job.observable_name
     self.observable_classification = job.observable_classification
Example #13
0
 def setUp(self):
     """Build and save the observable test Job, selecting analyzers."""
     super().setUp()
     params = self.get_params()
     digest = hashlib.md5(params["observable_name"].encode("utf-8"))
     params["md5"] = digest.hexdigest()
     self.test_job = Job(**params)
     # an env-provided analyzer list wins; otherwise run every analyzer
     # that supports this observable classification
     if self.analyzers_to_test:
         self.test_job.analyzers_to_execute = self.analyzers_to_test
     else:
         classification = params["observable_classification"]
         self.test_job.analyzers_to_execute = [
             cfg.name
             for cfg in self.analyzer_configs.values()
             if cfg.is_observable_type_supported(classification)
         ]
     self.test_job.save()
Example #14
0
def set_report_and_cleanup(job_id, report):
    """Append an analyzer report to the job and finalize its status.

    The report is appended atomically. When every requested analyzer has
    reported, the job status is set to "reported_without_fails",
    "reported_with_fails" or "failed" (all analyzers failed) and the
    finish time is recorded.

    Args:
        job_id: identifier of the Job to update.
        report: dict produced by one analyzer run; "name" and "success"
            keys are read if present.
    """
    analyzer_name = report.get("name", "")
    job_repr = f"({analyzer_name}, job_id: #{job_id})"
    logger.info(f"STARTING set_report_and_cleanup for <-- {job_repr}.")
    job_object = None

    try:
        with transaction.atomic():
            job_object = Job.object_by_job_id(job_id, transaction=True)
            job_object.analysis_reports.append(report)
            job_object.save(update_fields=["analysis_reports"])
            # a job already marked failed must not accept further reports
            if job_object.status == "failed":
                raise AlreadyFailedJobException()

        num_analysis_reports = len(job_object.analysis_reports)
        num_analyzers_to_execute = len(job_object.analyzers_to_execute)
        logger.info(
            f"REPORT: num analysis reports:{num_analysis_reports}, "
            f"num analyzer to execute:{num_analyzers_to_execute}"
            f" <-- {job_repr}."
        )

        # check if it was the last analysis...
        # ..In case, set the analysis as "reported" or "failed"
        if num_analysis_reports == num_analyzers_to_execute:
            failed_analyzers = sum(
                1
                for analysis_report in job_object.analysis_reports
                if not analysis_report.get("success", False)
            )
            if failed_analyzers == num_analysis_reports:
                # every analyzer failed
                status_to_set = "failed"
            elif failed_analyzers >= 1:
                status_to_set = "reported_with_fails"
            else:
                status_to_set = "reported_without_fails"
            set_job_status(job_id, status_to_set)
            job_object.finished_analysis_time = get_now()
            job_object.save(update_fields=["finished_analysis_time"])

    except AlreadyFailedJobException:
        logger.error(
            f"job_id {job_id} status failed. Do not process the report {report}"
        )

    except Exception as e:
        logger.exception(f"job_id: {job_id}, Error: {e}")
        set_job_status(job_id, "failed", errors=[str(e)])
        # BUG FIX: job_object is still None if the initial lookup raised;
        # guard before touching it to avoid a secondary AttributeError
        # inside the error handler.
        if job_object is not None:
            job_object.finished_analysis_time = get_now()
            job_object.save(update_fields=["finished_analysis_time"])