Code example #1
    def test_mhtml_analysis(self):

        root = create_root_analysis(analysis_mode='test_groups')
        root.initialize_storage()
        shutil.copy(os.path.join('test_data', 'mhtml', 'Invoice_PDF.mht'), root.storage_dir)
        file_observable = root.add_observable(F_FILE, 'Invoice_PDF.mht')
        root.save()
        root.schedule()
    
        engine = TestEngine(pool_size_limit=1)
        engine.enable_alerting()
        engine.enable_module('analysis_module_mhtml', 'test_groups')
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        file_observable = root.get_observable(file_observable.id)
        self.assertIsNotNone(file_observable)

        from saq.modules.file_analysis import MHTMLAnalysis
        analysis = file_observable.get_analysis(MHTMLAnalysis)
        self.assertIsNotNone(analysis)
        # should have extracted a single file
        self.assertEquals(len(analysis.details), 1)
        self.assertEquals(len(analysis.get_observables_by_type(F_FILE)), 1)
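
Every example on this page follows the same test scaffold: build a RootAnalysis, save and schedule it, run a controlled TestEngine with the modules under test enabled, then reload the analysis from storage and assert on the results. The sketch below only distills that recurring pattern; it is not taken verbatim from the project, and the module name and observable type in it are placeholders borrowed from other examples on this page.

    def test_scaffold_sketch(self):
        # build and persist a root analysis with a single observable
        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        observable = root.add_observable(F_TEST, 'test')
        root.save()
        root.schedule()

        # run the engine until the workload is empty, with one module enabled
        engine = TestEngine(local_analysis_modes=[ANALYSIS_MODE_ANALYSIS])
        engine.enable_module('analysis_module_basic_test', ANALYSIS_MODE_ANALYSIS)
        engine.controlled_stop()
        engine.start()
        engine.wait()

        # reload from storage and verify the observable survived the run
        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        observable = root.get_observable(observable.id)
        self.assertIsNotNone(observable)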
Code example #2
def get_file(uuid, file_uuid_or_name):
    storage_dir = storage_dir_from_uuid(uuid)
    if saq.CONFIG['service_engine']['work_dir'] and not os.path.isdir(storage_dir):
        storage_dir = workload_storage_dir(uuid)

    root = RootAnalysis(storage_dir=storage_dir)
    root.load()

    # is this a UUID?
    try:
        validate_uuid(file_uuid_or_name)
        file_observable = root.get_observable(file_uuid_or_name)
        if file_observable is None:
            abort(Response("invalid file_uuid {}".format(file_uuid_or_name), 400))

    except ValueError:
        file_observable = root.find_observable(lambda o: o.type == F_FILE and o.value == file_uuid_or_name)
        if file_observable is None:
            abort(Response("invalid file name {}".format(file_uuid_or_name), 400))
        

    # NOTE we use an absolute path here because if we don't then
    # send_from_directory makes it relative to the app root path
    # (which is /opt/ace/aceapi)

    target_path = os.path.join(saq.SAQ_HOME, root.storage_dir, file_observable.value)
    if not os.path.exists(target_path):
        abort(Response("file path {} does not exist".format(target_path), 400))

    # XXX revisit how we save (name) files
    return send_from_directory(os.path.dirname(target_path), 
                               os.path.basename(target_path), 
                               as_attachment=True,
                               attachment_filename=os.path.basename(target_path).encode().decode('latin-1', errors='ignore'))
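
The try/except branch in get_file() relies on validate_uuid() raising ValueError for anything that is not a UUID string, which is what pushes the lookup down the filename path. A hypothetical sketch of such a helper, given as an assumption for illustration rather than the project's actual implementation:

import uuid

def validate_uuid(value):
    # hypothetical helper: uuid.UUID() raises ValueError on malformed input,
    # which is the signal get_file() catches to fall back to a name lookup
    uuid.UUID(value)
    return True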
Code example #3
    def test_intel_analysis(self):
        if not saq.CONFIG['sip'].getboolean('enabled'):
            return

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_CORRELATION)
        root.initialize_storage()
        i = root.add_observable(F_INDICATOR,
                                'sip:{}'.format(self.test_indicator_id))
        self.assertIsNotNone(i)
        root.save()
        root.schedule()

        engine = TestEngine(local_analysis_modes=[ANALYSIS_MODE_CORRELATION])
        engine.enable_module('analysis_module_intel_analyzer',
                             ANALYSIS_MODE_CORRELATION)
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        i = root.get_observable(i.id)
        self.assertIsNotNone(i)

        from saq.modules.intel import IntelAnalysis
        analysis = i.get_analysis(IntelAnalysis)
        self.assertIsNotNone(analysis)

        # what we get here should be the same as what we got when we inserted it
        self.assertEquals(analysis.details, self.test_indicator)
Code example #4
    def test_submit_alert(self):

        # disable cleanup for analysis mode analysis
        saq.CONFIG['analysis_mode_analysis']['cleanup'] = 'no'

        self.start_api_server()

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        url = root.add_observable(F_URL, TEST_URL)
        url.add_directive(DIRECTIVE_CRAWL)
        root.save()
        root.schedule()

        engine = TestEngine(analysis_pools={
            ANALYSIS_MODE_ANALYSIS: 1,
            ANALYSIS_MODE_CLOUDPHISH: 1
        },
                            local_analysis_modes=[
                                ANALYSIS_MODE_ANALYSIS,
                                ANALYSIS_MODE_CLOUDPHISH
                            ])

        engine.enable_module('analysis_module_cloudphish',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_cloudphish_request_analyzer',
                             ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_crawlphish',
                             ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_forced_detection',
                             ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_detection',
                             ANALYSIS_MODE_CLOUDPHISH)

        engine.start()

        # should see cloudphish module complete
        wait_for_log_count('analysis CloudphishAnalysis is completed', 1, 10)

        engine.controlled_stop()
        engine.wait()

        # check the results
        root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
        root.load()
        url = root.get_observable(url.id)
        self.assertIsNotNone(url)

        # this url should now have 3 analysis objects attached to it (cloudphish, crawlphish and forced detection)
        self.assertEquals(len(url.analysis), 3)

        from saq.modules.cloudphish import CloudphishAnalysis
        cloudphish_analysis = url.get_analysis(CloudphishAnalysis)
        self.assertIsNotNone(cloudphish_analysis)
        self.assertEquals(cloudphish_analysis.analysis_result,
                          SCAN_RESULT_ALERT)

        from saq.modules.url import CrawlphishAnalysisV2
        crawlphish_analysis = url.get_analysis(CrawlphishAnalysisV2)
        self.assertIsNotNone(crawlphish_analysis)
Code example #5
File: test_cloudphish.py Project: krayzpipes/ACE-1
    def test_submit_timeout_with_alert(self, db, c):

        # any cloudphish submission we make can turn into an alert
        # here we test a cloudphish submission that quickly times out
        # followed by cloudphish alerting on the submission

        # set the timeouts really low
        saq.CONFIG['analysis_module_cloudphish']['frequency'] = '1'
        saq.CONFIG['analysis_module_cloudphish']['query_timeout'] = '1'

        # disable cleanup for analysis mode analysis
        saq.CONFIG['analysis_mode_analysis']['cleanup'] = 'no'
        
        self.start_api_server()

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        url = root.add_observable(F_URL, TEST_URL)
        url.add_directive(DIRECTIVE_CRAWL)
        root.save()
        root.schedule()

        engine = TestEngine(analysis_pools={},
                            local_analysis_modes=[ANALYSIS_MODE_ANALYSIS,
                                                  ANALYSIS_MODE_CLOUDPHISH,
                                                  ANALYSIS_MODE_CORRELATION])

        engine.enable_alerting()
        engine.enable_module('analysis_module_cloudphish', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_cloudphish_delayed_test', ANALYSIS_MODE_CLOUDPHISH)

        engine.start()

        # watch for the original analysis to time out
        wait_for_log_count('has timed out', 1, 10)

        # we should see cloudphish eventually complete and alert though
        engine.controlled_stop()
        engine.wait()

        # check the results
        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        url = root.get_observable(url.id)
        self.assertIsNotNone(url)

        # should see an error here
        from saq.modules.cloudphish import CloudphishAnalysis
        cloudphish_analysis = url.get_analysis(CloudphishAnalysis)
        self.assertIsNotNone(cloudphish_analysis)
        self.assertEquals(cloudphish_analysis.result, SCAN_RESULT_ERROR)

        # however we should have an alert generated
        c.execute("SELECT COUNT(*) FROM alerts")
        self.assertEquals(c.fetchone()[0], 1)
Code example #6
File: test_hal9000.py Project: krayzpipes/ACE-1
    def test_hal9000_alert_no_disposition(self, db, c):

        # same as above except we end up alerting

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        test_observable = root.add_observable(F_TEST, 'test')
        root.save()
        root.schedule()
    
        engine = TestEngine(local_analysis_modes=[ ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION ])
        engine.enable_alerting()
        engine.set_cleanup(ANALYSIS_MODE_ANALYSIS, False)
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_hal9000', [ ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION ])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
        root.load()

        # make sure we alerted
        self.assertEquals(root.analysis_mode, ANALYSIS_MODE_CORRELATION)

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # total count and mal count should both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)
        
        # compute the hal9000 id for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)
        
        # since we have NOT set a disposition yet we should have nothing in the database about it
        c.execute("SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)", (hal9000_id,))
        result = c.fetchone()
        self.assertIsNone(result)

        # verify the correct state is kept
        state = root.state['hal9000']
        
        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)
        
        # we have not made any changes yet either
        self.assertIsNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNone(tracking_info[KEY_MAL_COUNT])
Code example #7
File: test_cloudphish.py Project: krayzpipes/ACE-1
    def test_submit_double_alert(self, db, c):

        # in this scenario we alert both with the original submission
        # and with the cloudphish submission

        self.start_api_server()

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        url = root.add_observable(F_URL, TEST_URL)
        url.add_directive(DIRECTIVE_CRAWL)
        root.save()
        root.schedule()

        engine = TestEngine(local_analysis_modes=[ANALYSIS_MODE_ANALYSIS,
                                                  ANALYSIS_MODE_CLOUDPHISH,
                                                  ANALYSIS_MODE_CORRELATION])

        engine.enable_alerting()
        engine.enable_module('analysis_module_cloudphish', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_CLOUDPHISH)

        engine.start()

        # should see cloudphish module complete
        wait_for_log_count('analysis CloudphishAnalysis is completed', 1, 10)

        engine.controlled_stop()
        engine.wait()

        # check the results
        root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
        root.load()
        url = root.get_observable(url.id)
        self.assertIsNotNone(url)

        # this url should now have 3 analysis objects attached to it (cloudphish, crawlphish and forced detection)
        self.assertEquals(len(url.analysis), 3)

        from saq.modules.cloudphish import CloudphishAnalysis
        cloudphish_analysis = url.get_analysis(CloudphishAnalysis)
        self.assertIsNotNone(cloudphish_analysis)
        self.assertEquals(cloudphish_analysis.analysis_result, SCAN_RESULT_ALERT)

        from saq.modules.url import CrawlphishAnalysisV2
        crawlphish_analysis = url.get_analysis(CrawlphishAnalysisV2)
        self.assertIsNotNone(crawlphish_analysis)

        # there should be two alerts generated in the database
        c.execute("SELECT COUNT(*) FROM alerts")
        self.assertEquals(c.fetchone()[0], 2)

        # the cloudphish alert should have a reference back to the original alert
        self.assertEquals(cloudphish_analysis.context['c'], root.uuid)
Code example #8
    def test_hal9000_no_alert(self, db, c):

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        test_observable = root.add_observable(F_TEST, 'test')
        root.save()
        root.schedule()

        engine = TestEngine(local_analysis_modes=[ANALYSIS_MODE_ANALYSIS])
        engine.set_cleanup(ANALYSIS_MODE_ANALYSIS, False)
        engine.enable_module('analysis_module_hal9000', ANALYSIS_MODE_ANALYSIS)
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()

        # make sure we did NOT alert
        self.assertEquals(root.analysis_mode, ANALYSIS_MODE_ANALYSIS)

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # total count and mal count should both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)

        # we should have a single entry in the database for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)

        c.execute(
            "SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)",
            (hal9000_id, ))
        result = c.fetchone()
        self.assertIsNotNone(result)
        self.assertEquals(result[0], 1)
        self.assertEquals(result[1], 0)

        # verify the correct state is kept
        state = root.state['hal9000']

        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)

        # since this doesn't become an alert we don't bother tracking the changes
        self.assertIsNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNone(tracking_info[KEY_MAL_COUNT])
Code example #9
    def test_download(self):

        # first create something to download
        root = create_root_analysis(uuid=str(uuid.uuid4()))
        root.initialize_storage()
        root.details = {'hello': 'world'}
        with open(os.path.join(root.storage_dir, 'test.dat'), 'w') as fp:
            fp.write('test')
        file_observable = root.add_observable(F_FILE, 'test.dat')
        root.save()

        # ask for a download
        result = self.client.get(url_for('engine.download', uuid=root.uuid))

        # we should get back a tar file
        tar_path = os.path.join(saq.TEMP_DIR, 'download.tar')
        output_dir = os.path.join(saq.TEMP_DIR, 'download')

        try:
            with open(tar_path, 'wb') as fp:
                for chunk in result.response:
                    fp.write(chunk)

            with tarfile.open(name=tar_path, mode='r|') as tar:
                tar.extractall(path=output_dir)

            root = RootAnalysis(storage_dir=output_dir)
            root.load()

            self.assertTrue('hello' in root.details)
            self.assertEquals('world', root.details['hello'])

            file_observable = root.get_observable(file_observable.id)
            self.assertTrue(
                os.path.exists(
                    os.path.join(root.storage_dir, file_observable.value)))
            with open(os.path.join(root.storage_dir, file_observable.value),
                      'r') as fp:
                self.assertEquals(fp.read(), 'test')

        finally:
            try:
                os.remove(tar_path)
            except:
                pass

            try:
                shutil.rmtree(output_dir)
            except:
                pass
Code example #10
File: test_cloudphish.py Project: krayzpipes/ACE-1
    def test_submit_forced_download(self):
        # disable cleanup for analysis mode analysis
        saq.CONFIG['analysis_mode_analysis']['cleanup'] = 'no'

        self.start_api_server()

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        url = root.add_observable(F_URL, TEST_URL)
        url.add_directive(DIRECTIVE_CRAWL)
        url.add_directive(DIRECTIVE_FORCE_DOWNLOAD)
        root.save()
        root.schedule()

        engine = TestEngine(analysis_pools={ANALYSIS_MODE_ANALYSIS: 1,
                                            ANALYSIS_MODE_CLOUDPHISH: 1}, 
                            local_analysis_modes=[ANALYSIS_MODE_ANALYSIS,
                                                  ANALYSIS_MODE_CLOUDPHISH])

        engine.enable_module('analysis_module_cloudphish', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)

        engine.start()

        # should see cloudphish module complete
        wait_for_log_count('analysis CloudphishAnalysis is completed', 1, 10)

        engine.controlled_stop()
        engine.wait()

        # check the results
        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        url = root.get_observable(url.id)
        self.assertIsNotNone(url)

        # should only have 1 analysis attached to the url
        self.assertEquals(len(url.analysis), 1)

        from saq.modules.cloudphish import CloudphishAnalysis
        cloudphish_analysis = url.get_analysis(CloudphishAnalysis)
        self.assertIsNotNone(cloudphish_analysis)
        self.assertEquals(cloudphish_analysis.analysis_result, SCAN_RESULT_CLEAR)

        # however there should be a file attached
        self.assertEquals(len(cloudphish_analysis.observables), 1)
        self.assertEquals(cloudphish_analysis.observables[0].type, F_FILE)
        self.assertEquals(cloudphish_analysis.observables[0].value, 'Payment_Advice.pdf')
        self.assertTrue(os.path.exists(os.path.join(root.storage_dir, cloudphish_analysis.observables[0].value)))
Code example #11
    def test_request_limit(self):

        # only allow one request
        saq.CONFIG['analysis_module_cloudphish'][
            'cloudphish_request_limit'] = '1'

        # don't clear the analysis
        saq.CONFIG['analysis_mode_analysis']['cleanup'] = 'no'

        self.start_api_server()

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        url_1 = root.add_observable(F_URL, TEST_URL)
        url_2 = root.add_observable(F_URL,
                                    'http://invalid_domain.local/some/path')
        root.save()
        root.schedule()

        engine = TestEngine(analysis_pools={},
                            local_analysis_modes=[
                                ANALYSIS_MODE_ANALYSIS,
                                ANALYSIS_MODE_CLOUDPHISH
                            ])

        engine.enable_module('analysis_module_cloudphish',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_cloudphish_request_analyzer',
                             ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_crawlphish',
                             ANALYSIS_MODE_CLOUDPHISH)

        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        url_1 = root.get_observable(url_1.id)
        url_2 = root.get_observable(url_2.id)

        from saq.modules.cloudphish import CloudphishAnalysis
        analysis_1 = url_1.get_analysis(CloudphishAnalysis)
        analysis_2 = url_2.get_analysis(CloudphishAnalysis)

        self.assertTrue(
            (isinstance(analysis_1, Analysis) and analysis_2 is False)
            or (analysis_1 is False and isinstance(analysis_2, Analysis)))
        self.assertEquals(log_count('reached cloudphish limit'), 1)
Code example #12
File: test_email.py Project: krayzpipes/ACE
    def test_bro_smtp_stream_analysis_no_end_command(self):
        import saq
        import saq.modules.email

        # test the same thing as test_bro_smtp_stream_analysis except the SMTP stream is missing the terminating . (end of DATA) command

        saq.CONFIG['analysis_mode_email']['cleanup'] = 'no'
        
        root = create_root_analysis(alert_type=ANALYSIS_TYPE_BRO_SMTP, analysis_mode=ANALYSIS_MODE_EMAIL)
        root.initialize_storage()
        root.details = { }
        shutil.copy(os.path.join('test_data', 'smtp_streams', 'CBmtfvapmTMqCEUw6.missing_end'), 
                    os.path.join(root.storage_dir, 'CBmtfvapmTMqCEUw6'))
        
        file_observable = root.add_observable(F_FILE, 'CBmtfvapmTMqCEUw6')
        file_observable.add_directive(DIRECTIVE_ORIGINAL_SMTP)
        file_observable.add_directive(DIRECTIVE_NO_SCAN)
        root.save()
        root.schedule()

        engine = TestEngine(local_analysis_modes=[ANALYSIS_MODE_EMAIL])
        engine.enable_module('analysis_module_file_type', 'test_groups')
        engine.enable_module('analysis_module_email_analyzer', 'test_groups')
        engine.enable_module('analysis_module_bro_smtp_analyzer', 'test_groups')
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        file_observable = root.get_observable(file_observable.id)
        self.assertIsNotNone(file_observable)
        analysis = file_observable.get_analysis(saq.modules.email.BroSMTPStreamAnalysis)
        self.assertIsNotNone(analysis)
        self.assertEquals(len(analysis.get_observables_by_type(F_FILE)), 1)
        self.assertEquals(len(analysis.get_observables_by_type(F_EMAIL_ADDRESS)), 2)
        self.assertEquals(len(analysis.get_observables_by_type(F_IPV4)), 1)
        self.assertEquals(len(analysis.get_observables_by_type(F_EMAIL_CONVERSATION)), 1)
        self.assertTrue(saq.modules.email.KEY_CONNECTION_ID in analysis.details)
        self.assertTrue(saq.modules.email.KEY_SOURCE_IPV4 in analysis.details)
        self.assertTrue(saq.modules.email.KEY_SOURCE_PORT in analysis.details)
        self.assertTrue(saq.modules.email.KEY_ENV_MAIL_FROM in analysis.details)
        self.assertTrue(saq.modules.email.KEY_ENV_RCPT_TO in analysis.details)
        email_file = analysis.find_observable(lambda o: o.type == F_FILE)
        self.assertIsNotNone(email_file)
        self.assertEquals(email_file.value, 'email.rfc822')
        email_analysis = email_file.get_analysis(saq.modules.email.EmailAnalysis)
        self.assertIsNotNone(email_analysis)
Code example #13
    def test_open_office_extraction(self):

        root = create_root_analysis()
        root.initialize_storage()
        shutil.copy('test_data/openoffice/demo.odt', root.storage_dir)
        _file = root.add_observable(F_FILE, 'demo.odt')
        root.save()
        root.schedule()

        engine = TestEngine()
        engine.enable_module('analysis_module_archive', 'test_groups')
        engine.enable_module('analysis_module_file_type', 'test_groups')
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        _file = root.get_observable(_file.id)
        self.assertIsNotNone(_file)

        analysis = _file.get_analysis('ArchiveAnalysis')
        self.assertIsNotNone(analysis)
        self.assertEquals(len(analysis.find_observables(F_FILE)), 12)
Code example #14
File: test_hal9000.py Project: krayzpipes/ACE-1
    def test_hal9000_alert_mal_disposition(self, db, c):

        # same as above except we end up alerting and disposition as malicious

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        test_observable = root.add_observable(F_TEST, 'test')
        root.save()
        root.schedule()
    
        engine = TestEngine(local_analysis_modes=[ ANALYSIS_MODE_ANALYSIS, 
                                                   ANALYSIS_MODE_CORRELATION, 
                                                   ANALYSIS_MODE_DISPOSITIONED ])
        engine.set_cleanup(ANALYSIS_MODE_ANALYSIS, False)
        engine.enable_alerting()
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_hal9000', [ ANALYSIS_MODE_ANALYSIS, 
                                                          ANALYSIS_MODE_CORRELATION, 
                                                          ANALYSIS_MODE_DISPOSITIONED ])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        # set the disposition for the alert
        set_dispositions([root.uuid], DISPOSITION_DELIVERY, UNITTEST_USER_ID)

        # run the engine again so that it processes the alert in correlation mode with the disposition set
        engine = TestEngine(local_analysis_modes=[ ANALYSIS_MODE_ANALYSIS, 
                                                   ANALYSIS_MODE_CORRELATION, 
                                                   ANALYSIS_MODE_DISPOSITIONED ])
        engine.enable_alerting()
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_hal9000', [ ANALYSIS_MODE_ANALYSIS, 
                                                          ANALYSIS_MODE_CORRELATION, 
                                                          ANALYSIS_MODE_DISPOSITIONED ])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        # storage dir changes when it turns into an alert
        root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
        root.load()

        # make sure we alerted
        self.assertEquals(root.analysis_mode, ANALYSIS_MODE_DISPOSITIONED)

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # these should still both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)
        
        # we should have a single entry in the database for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)
        
        # with the disposition set we should have the corresponding values
        c.execute("SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)", (hal9000_id,))
        result = c.fetchone()
        db.commit()
        self.assertIsNotNone(result)
        self.assertEquals(result[0], 1)
        self.assertEquals(result[1], 1)

        # verify the correct state is kept
        state = root.state['hal9000']
        
        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)
        
        # we should be tracking the change we made in here
        self.assertIsNotNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNotNone(tracking_info[KEY_MAL_COUNT])

        # now we change it to FP
        set_dispositions([root.uuid], DISPOSITION_FALSE_POSITIVE, UNITTEST_USER_ID)

        # run the engine again so that it processes the alert in the new correlation mode with the disposition changed
        engine = TestEngine(local_analysis_modes=[ ANALYSIS_MODE_ANALYSIS, 
                                                   ANALYSIS_MODE_CORRELATION, 
                                                   ANALYSIS_MODE_DISPOSITIONED ])
        engine.enable_alerting()
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_hal9000', [ ANALYSIS_MODE_ANALYSIS, 
                                                          ANALYSIS_MODE_CORRELATION, 
                                                          ANALYSIS_MODE_DISPOSITIONED ])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
        root.load()

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # these should still both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)
        
        # we should have a single entry in the database for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)
        
        # with the disposition set we should have the corresponding values
        c.execute("SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)", (hal9000_id,))
        result = c.fetchone()
        db.commit()
        self.assertIsNotNone(result)
        self.assertEquals(result[0], 1)
        self.assertEquals(result[1], 0) # <-- should be 0 now that it's set to FP

        # verify the correct state is kept
        state = root.state['hal9000']
        
        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)
        
        # we should be tracking the change we made in here
        self.assertIsNotNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNone(tracking_info[KEY_MAL_COUNT])

        # finally we change it to ignore, which should entirely remove the counters (set them to 0 anyways)
        set_dispositions([root.uuid], DISPOSITION_IGNORE, UNITTEST_USER_ID)

        # run the engine again so that it processes the alert in the new correlation mode with the disposition changed
        engine = TestEngine(local_analysis_modes=[ ANALYSIS_MODE_ANALYSIS, 
                                                   ANALYSIS_MODE_CORRELATION, 
                                                   ANALYSIS_MODE_DISPOSITIONED ])
        engine.enable_alerting()
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_hal9000', [ ANALYSIS_MODE_ANALYSIS, 
                                                          ANALYSIS_MODE_CORRELATION, 
                                                          ANALYSIS_MODE_DISPOSITIONED ])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
        root.load()

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # these should still both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)
        
        # we should have a single entry in the database for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)
        
        # with the disposition set we should have the corresponding values
        c.execute("SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)", (hal9000_id,))
        result = c.fetchone()
        db.commit()
        self.assertIsNotNone(result)
        self.assertEquals(result[0], 0) # <-- now both should be set to 0
        self.assertEquals(result[1], 0) 

        # verify the correct state is kept
        state = root.state['hal9000']
        
        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)
        
        # with the ignore disposition no counter changes are tracked
        self.assertIsNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNone(tracking_info[KEY_MAL_COUNT])
Code example #15
    def test_hal9000_alert_mal_disposition(self, db, c):

        # same as above except we end up alerting and disposition as malicious

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        test_observable = root.add_observable(F_TEST, 'test')
        root.save()
        root.schedule()

        engine = TestEngine(local_analysis_modes=[
            ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION
        ])
        engine.set_cleanup(ANALYSIS_MODE_ANALYSIS, False)
        engine.enable_module('analysis_module_forced_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module(
            'analysis_module_hal9000',
            [ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        # XXX - fix this when you implement the gui api
        # right now the set_disposition function in the API is what both sets the disposition
        # and re-inserts the alert back into the workload

        # set the disposition for the alert
        with get_db_connection() as ace_db:
            ace_c = ace_db.cursor()
            ace_c.execute(
                """
                UPDATE alerts SET 
                    disposition = %s, 
                    disposition_user_id = %s, 
                    disposition_time = NOW(),
                    owner_id = %s, 
                    owner_time = NOW()
                WHERE 
                    uuid = %s
                    AND ( disposition IS NULL OR disposition != %s )""",
                (DISPOSITION_DELIVERY, UNITTEST_USER_ID, UNITTEST_USER_ID,
                 root.uuid, DISPOSITION_DELIVERY))

            ace_c.execute(
                """
                INSERT INTO workload ( uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir ) 
                SELECT 
                    alerts.uuid, 
                    nodes.id,
                    %s, 
                    NOW(),
                    alerts.company_id, 
                    NULL, 
                    alerts.storage_dir 
                FROM 
                    alerts JOIN nodes ON alerts.location = nodes.name
                WHERE 
                    uuid = %s""", (ANALYSIS_MODE_CORRELATION, root.uuid))
            ace_db.commit()

        # run the engine again so that it processes the alert in correlation mode with the disposition set
        engine = TestEngine(local_analysis_modes=[
            ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION
        ])
        engine.enable_module('analysis_module_forced_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module(
            'analysis_module_hal9000',
            [ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()

        # make sure we alerted
        self.assertEquals(root.analysis_mode, ANALYSIS_MODE_CORRELATION)

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # these should still both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)

        # we should have a single entry in the database for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)

        # with the disposition set we should have the corresponding values
        c.execute(
            "SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)",
            (hal9000_id, ))
        result = c.fetchone()
        db.commit()
        self.assertIsNotNone(result)
        self.assertEquals(result[0], 1)
        self.assertEquals(result[1], 1)

        # verify the correct state is kept
        state = root.state['hal9000']

        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)

        # we should be tracking the change we made in here
        self.assertIsNotNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNotNone(tracking_info[KEY_MAL_COUNT])

        # now we change it to FP
        with get_db_connection() as ace_db:
            ace_c = ace_db.cursor()
            ace_c.execute(
                """
                UPDATE alerts SET 
                    disposition = %s, 
                    disposition_user_id = %s, 
                    disposition_time = NOW(),
                    owner_id = %s, 
                    owner_time = NOW()
                WHERE 
                    uuid = %s
                    AND ( disposition IS NULL OR disposition != %s )""",
                (DISPOSITION_FALSE_POSITIVE, UNITTEST_USER_ID,
                 UNITTEST_USER_ID, root.uuid, DISPOSITION_FALSE_POSITIVE))

            ace_c.execute(
                """
                INSERT INTO workload ( uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir ) 
                SELECT 
                    alerts.uuid, 
                    nodes.id,
                    %s, 
                    NOW(),
                    alerts.company_id, 
                    NULL, 
                    alerts.storage_dir 
                FROM 
                    alerts JOIN nodes ON alerts.location = nodes.name
                WHERE 
                    uuid = %s""", (ANALYSIS_MODE_CORRELATION, root.uuid))
            ace_db.commit()

        # run the engine again so that it processes the alert in the new correlation mode with the disposition changed
        engine = TestEngine(local_analysis_modes=[
            ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION
        ])
        engine.enable_module('analysis_module_forced_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module(
            'analysis_module_hal9000',
            [ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # these should still both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)

        # we should have a single entry in the database for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)

        # with the disposition set we should have the corresponding values
        c.execute(
            "SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)",
            (hal9000_id, ))
        result = c.fetchone()
        db.commit()
        self.assertIsNotNone(result)
        self.assertEquals(result[0], 1)
        self.assertEquals(result[1],
                          0)  # <-- should be 0 now that it's set to FP

        # verify the correct state is kept
        state = root.state['hal9000']

        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)

        # we should be tracking the change we made in here
        self.assertIsNotNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNone(tracking_info[KEY_MAL_COUNT])

        # finally we change it to ignore, which should entirely remove the counters (set them to 0 anyways)
        with get_db_connection() as ace_db:
            ace_c = ace_db.cursor()
            ace_c.execute(
                """
                UPDATE alerts SET 
                    disposition = %s, 
                    disposition_user_id = %s, 
                    disposition_time = NOW(),
                    owner_id = %s, 
                    owner_time = NOW()
                WHERE 
                    uuid = %s
                    AND ( disposition IS NULL OR disposition != %s )""",
                (DISPOSITION_IGNORE, UNITTEST_USER_ID, UNITTEST_USER_ID,
                 root.uuid, DISPOSITION_IGNORE))

            ace_c.execute(
                """
                INSERT INTO workload ( uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir ) 
                SELECT 
                    alerts.uuid, 
                    nodes.id,
                    %s, 
                    NOW(),
                    alerts.company_id, 
                    NULL, 
                    alerts.storage_dir 
                FROM 
                    alerts JOIN nodes ON alerts.location = nodes.name
                WHERE 
                    uuid = %s""", (ANALYSIS_MODE_CORRELATION, root.uuid))
            ace_db.commit()

        # run the engine again so that it processes the alert in the new correlation mode with the disposition changed
        engine = TestEngine(local_analysis_modes=[
            ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION
        ])
        engine.enable_module('analysis_module_forced_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_detection',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module(
            'analysis_module_hal9000',
            [ANALYSIS_MODE_ANALYSIS, ANALYSIS_MODE_CORRELATION])
        engine.controlled_stop()
        engine.start()
        engine.wait()

        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()

        test_observable = root.get_observable(test_observable.id)
        self.assertIsNotNone(test_observable)
        analysis = test_observable.get_analysis(HAL9000Analysis)
        self.assertIsNotNone(analysis)

        # these should still both be 0
        self.assertEquals(analysis.total_count, 0)
        self.assertEquals(analysis.mal_count, 0)

        # we should have a single entry in the database for this observable
        hal9000_id = _compute_hal9000_md5(test_observable)

        # with the disposition set we should have the corresponding values
        c.execute(
            "SELECT total_count, mal_count FROM observables WHERE id = UNHEX(%s)",
            (hal9000_id, ))
        result = c.fetchone()
        db.commit()
        self.assertIsNotNone(result)
        self.assertEquals(result[0], 0)  # <-- now both should be set to 0
        self.assertEquals(result[1], 0)

        # verify the correct state is kept
        state = root.state['hal9000']

        self.assertTrue(STATE_KEY_ID_TRACKING in state)
        tracking = state[STATE_KEY_ID_TRACKING]
        self.assertTrue(hal9000_id in tracking)
        tracking_info = tracking[hal9000_id]
        self.assertTrue('id' in tracking_info)
        self.assertEquals(tracking_info['id'], test_observable.id)
        self.assertTrue(KEY_TOTAL_COUNT in tracking_info)
        self.assertTrue(KEY_MAL_COUNT in tracking_info)

        # with the ignore disposition no counter changes are tracked
        self.assertIsNone(tracking_info[KEY_TOTAL_COUNT])
        self.assertIsNone(tracking_info[KEY_MAL_COUNT])
Code example #16
    def test_cloudphish_tracking(self, db, c):

        from saq.modules.email import EmailAnalysis

        saq.CONFIG['analysis_mode_email']['cleanup'] = 'no'
        self.start_api_server()

        root = create_root_analysis(alert_type='mailbox',
                                    analysis_mode=ANALYSIS_MODE_EMAIL)
        root.initialize_storage()
        shutil.copy(
            os.path.join('test_data', 'emails', 'splunk_logging.email.rfc822'),
            os.path.join(root.storage_dir, 'email.rfc822'))
        file_observable = root.add_observable(F_FILE, 'email.rfc822')
        file_observable.add_directive(DIRECTIVE_ORIGINAL_EMAIL)
        test_observable = root.add_observable(F_TEST, 'test_detection')
        test_observable.add_directive(DIRECTIVE_TRACKED)
        root.save()
        root.schedule()

        analysis_modes = [
            ANALYSIS_MODE_EMAIL, ANALYSIS_MODE_CLOUDPHISH,
            ANALYSIS_MODE_CORRELATION
        ]
        analysis_modules = [
            'analysis_module_file_type', 'analysis_module_email_analyzer',
            'analysis_module_mailbox_email_analyzer',
            'analysis_module_cloudphish',
            'analysis_module_cloudphish_request_analyzer',
            'analysis_module_crawlphish', 'analysis_module_url_extraction',
            'analysis_module_detection'
        ]

        engine = TestEngine(local_analysis_modes=analysis_modes)
        for module in analysis_modules:
            engine.enable_module(module, analysis_modes)

        # we only enable the BasicTestAnalyzer for the cloudphish mode so that cloudphish generates an alert
        engine.enable_module('analysis_module_basic_test',
                             ANALYSIS_MODE_CLOUDPHISH)

        engine.controlled_stop()
        engine.start()
        engine.wait()

        # get the message_id observable generated by the EmailAnalysis
        root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
        root.load()

        file_observable = root.get_observable(file_observable.id)
        self.assertIsNotNone(file_observable)
        email_analysis = file_observable.get_analysis(EmailAnalysis)
        self.assertTrue(bool(email_analysis))
        message_id = email_analysis.find_observable(
            lambda o: o.type == F_MESSAGE_ID)
        self.assertIsNotNone(message_id)

        # we should have a number of cloudphish alerts now
        c.execute("SELECT uuid FROM alerts WHERE tool != 'test_tool' LIMIT 1")
        row = c.fetchone()
        target_uuid = row[0]

        root = RootAnalysis(storage_dir=storage_dir_from_uuid(target_uuid))
        root.load()

        # this cloudphish alert should have the message_id observable
        # and it should be tagged as tracked
        self.assertIsNotNone(
            root.find_observable(lambda o: o.type == F_MESSAGE_ID and o.value
                                 == message_id.value and o.has_tag('tracked')))
Code example #17
    def test_submit(self):

        # disable cleanup for analysis mode analysis
        saq.CONFIG['analysis_mode_analysis']['cleanup'] = 'no'
        saq.CONFIG['analysis_mode_cloudphish']['cleanup'] = 'no'

        self.start_api_server()

        root = create_root_analysis(analysis_mode=ANALYSIS_MODE_ANALYSIS)
        root.initialize_storage()
        url = root.add_observable(F_URL, TEST_URL)
        url.add_directive(DIRECTIVE_CRAWL)
        root.save()
        root.schedule()

        engine = TestEngine(analysis_pools={
            ANALYSIS_MODE_ANALYSIS: 1,
            ANALYSIS_MODE_CLOUDPHISH: 1
        },
                            local_analysis_modes=[
                                ANALYSIS_MODE_ANALYSIS,
                                ANALYSIS_MODE_CLOUDPHISH
                            ])

        engine.enable_module('analysis_module_cloudphish',
                             ANALYSIS_MODE_ANALYSIS)
        engine.enable_module('analysis_module_cloudphish_request_analyzer',
                             ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_crawlphish',
                             ANALYSIS_MODE_CLOUDPHISH)

        engine.start()

        # wait for delayed analysis to be added
        wait_for_log_count('added delayed analysis', 1, 5)
        # and then wait for analysis to complete
        wait_for_log_count('completed analysis RootAnalysis', 1, 5)

        # watch for the download request by cloudphish
        wait_for_log_count('requesting url', 1, 5)
        # watch for crawlphish to finish
        wait_for_log_count('analysis CrawlphishAnalysisV2 is completed', 1, 5)

        # the cloudphish request analyzer should see the downloaded file
        wait_for_log_count('found downloaded file', 1, 5)
        # and should update the database
        wait_for_log_count('executing cloudphish update', 1, 5)

        # should see cloudphish module complete
        wait_for_log_count('analysis CloudphishAnalysis is completed', 1, 10)

        # we should see a work request for the original request and one for the cloudphish request
        wait_for_log_count('got work item RootAnalysis', 2, 5)

        # and we should see at least one request to handle a delayed analysis request
        wait_for_log_count('got work item DelayedAnalysisRequest', 1, 5)

        engine.controlled_stop()
        engine.wait()

        # check the results
        root = RootAnalysis(storage_dir=root.storage_dir)
        root.load()
        url = root.get_observable(url.id)
        self.assertIsNotNone(url)

        # this url should only have a single analysis object attached to it (the cloudphish analysis)
        self.assertEquals(len(url.analysis), 1)

        from saq.modules.cloudphish import CloudphishAnalysis
        cloudphish_analysis = url.get_analysis(CloudphishAnalysis)
        self.assertIsNotNone(cloudphish_analysis)
        for key in [
                KEY_ANALYSIS_RESULT, KEY_DETAILS, KEY_FILE_NAME,
                KEY_HTTP_MESSAGE, KEY_HTTP_RESULT, KEY_LOCATION, KEY_RESULT,
                KEY_SHA256_CONTENT, KEY_SHA256_URL, KEY_STATUS, KEY_UUID
        ]:
            with self.subTest(key=key):
                self.assertTrue(key in cloudphish_analysis.query_result)

        q = cloudphish_analysis.query_result
        self.assertEquals(q[KEY_ANALYSIS_RESULT], SCAN_RESULT_CLEAR)

        for key in [
                KEY_DETAILS_CONTEXT, KEY_DETAILS_SHA256_URL, KEY_DETAILS_URL
        ]:
            with self.subTest(key=key):
                self.assertTrue(key in q[KEY_DETAILS])

        # this is what we should have for context
        self.assertEquals(q[KEY_DETAILS][KEY_DETAILS_CONTEXT]['c'], root.uuid)
        self.assertEquals(
            q[KEY_DETAILS][KEY_DETAILS_CONTEXT]['ignore_filters'], '0')
        self.assertEquals(q[KEY_DETAILS][KEY_DETAILS_CONTEXT]['reprocess'],
                          '0')

        self.assertEquals(
            q[KEY_DETAILS][KEY_DETAILS_SHA256_URL].upper(),
            'B009F8821B162674B819A2365B07A536645A42657E75BB3996C8B6127E993806')
        self.assertEquals(q[KEY_DETAILS][KEY_DETAILS_URL], TEST_URL)

        self.assertEquals(q[KEY_FILE_NAME], 'Payment_Advice.pdf')
        self.assertEquals(q[KEY_HTTP_MESSAGE], 'OK')
        self.assertEquals(q[KEY_HTTP_RESULT], 200)
        self.assertEquals(q[KEY_LOCATION], saq.SAQ_NODE)
        self.assertEquals(q[KEY_RESULT], RESULT_OK)
        self.assertEquals(
            q[KEY_SHA256_CONTENT].upper(),
            'FA13C652534F9207BEEC811A50948860F5B3194AEAE686FCDECAC645FAE65D15')
        self.assertEquals(
            q[KEY_SHA256_URL].upper(),
            'B009F8821B162674B819A2365B07A536645A42657E75BB3996C8B6127E993806')
        self.assertEquals(q[KEY_STATUS], STATUS_ANALYZED)
        self.assertIsNotNone(q[KEY_UUID])

        # these properties should match the query_result key-value pairs
        self.assertEquals(cloudphish_analysis.result, q[KEY_RESULT])
        self.assertEquals(cloudphish_analysis.result_details, q[KEY_DETAILS])
        self.assertEquals(cloudphish_analysis.status, q[KEY_STATUS])
        self.assertEquals(cloudphish_analysis.analysis_result,
                          q[KEY_ANALYSIS_RESULT])
        self.assertEquals(cloudphish_analysis.http_result, q[KEY_HTTP_RESULT])
        self.assertEquals(cloudphish_analysis.http_message,
                          q[KEY_HTTP_MESSAGE])
        self.assertEquals(cloudphish_analysis.sha256_content,
                          q[KEY_SHA256_CONTENT])
        self.assertEquals(cloudphish_analysis.location, q[KEY_LOCATION])
        self.assertEquals(cloudphish_analysis.file_name, q[KEY_FILE_NAME])
Code example #18
File: test_file_analysis.py Project: jpressnell/ACE
    def test_file_analysis_001_oletools_000(self):

        #from saq.modules.file_analysis import OLEVBA_Analysis_v1_2

        KEY_STORAGE_DIR = 'storage_dir'
        KEY_TAGS = 'tags'
        KEY_MACRO_COUNT = 'macro_count'
        KEY_OID = 'oid'
        KEY_SANDBOX = 'sandbox'

        # expected results for the various files
        results = {
            'Past Due Invoices.doc': {
                KEY_OID: None,
                KEY_STORAGE_DIR: None,
                KEY_TAGS: [ 'microsoft_office', 'ole' ],
                KEY_MACRO_COUNT: 4,
                KEY_SANDBOX: True,
            }, 
            'Outstanding Invoices.doc': {
                KEY_OID: None,
                KEY_STORAGE_DIR: None,
                KEY_TAGS: [ 'microsoft_office', 'ole' ],
                KEY_MACRO_COUNT: 3,
                KEY_SANDBOX: True,
            }, 
            'Paid Invoice.doc': {
                KEY_OID: None,
                KEY_STORAGE_DIR: None,
                KEY_TAGS: [ 'microsoft_office', 'ole' ],
                KEY_MACRO_COUNT: 3,
                KEY_SANDBOX: True,
            }, 
            'mortgage_payment-0873821-0565.docm': {
                KEY_OID: None,
                KEY_STORAGE_DIR: None,
                KEY_TAGS: [ 'microsoft_office' ],
                KEY_MACRO_COUNT: 1,
                KEY_SANDBOX: True,
            }, 
            'receipt_687790.doc': {
                KEY_OID: None,
                KEY_STORAGE_DIR: None,
                KEY_TAGS: [ 'microsoft_office' ],
                KEY_MACRO_COUNT: 5,
                KEY_SANDBOX: True,
            }, 
        }

        for file_name in results.keys():
            root = create_root_analysis(uuid=str(uuid.uuid4()))
            root.initialize_storage()
            target_path = os.path.join('test_data/ole_files', file_name)
            shutil.copy(target_path, root.storage_dir)
            file_observable = root.add_observable(F_FILE, file_name)
            root.save()
            root.schedule()

            results[file_name][KEY_OID] = file_observable.id
            results[file_name][KEY_STORAGE_DIR] = root.storage_dir

        engine = TestEngine()
        engine.enable_module('analysis_module_olevba_v1_2', 'test_groups')
        engine.enable_module('analysis_module_file_type', 'test_groups')
        engine.controlled_stop()
        engine.start()
        engine.wait()

        for file_name in results.keys():
            with self.subTest(storage_dir=results[file_name][KEY_STORAGE_DIR], file_name=file_name):
                root = RootAnalysis(storage_dir=results[file_name][KEY_STORAGE_DIR])
                root.load()
                file_observable = root.get_observable(results[file_name][KEY_OID])
                self.assertIsNotNone(file_observable)
                if results[file_name][KEY_SANDBOX]:
                    self.assertTrue(file_observable.has_directive(DIRECTIVE_SANDBOX))
                for tag in results[file_name][KEY_TAGS]:
                    with self.subTest(storage_dir=results[file_name][KEY_STORAGE_DIR], file_name=file_name, tag=tag):
                        self.assertTrue(file_observable.has_tag(tag))

                macro_count = len([f for f in root.all_observables if f.type == F_FILE and f.has_tag('macro')])
                self.assertEquals(macro_count, results[file_name][KEY_MACRO_COUNT])