Example #1
def request_handler_1_3():
    assert saq.CONFIG is not None

    # TODO actually use the library
    # protocol constants copied over from the client library ;)
    KEY_ID = 'id'
    KEY_UUID = 'uuid'
    KEY_TOOL = 'tool'
    KEY_TOOL_INSTANCE = 'tool_instance'
    KEY_TYPE = 'type'
    KEY_DESCRIPTION = 'description'
    KEY_EVENT_TIME = 'event_time'
    KEY_DETAILS = 'details'
    KEY_OBSERVABLES = 'observables'
    KEY_TAGS = 'tags'
    KEY_NAME = 'name'
    KEY_COMPANY_NAME = 'company_name'

    # client passes in the JSON contents of the alert
    contents = json.loads(request.form['alert'])

    alert = Alert()
    alert.uuid = contents[KEY_UUID]
    alert.storage_dir = os.path.join(saq.CONFIG['global']['data_dir'], saq.SAQ_NODE, alert.uuid[0:3], alert.uuid)
    alert.initialize_storage()
    alert.tool = contents[KEY_TOOL]
    alert.tool_instance = contents[KEY_TOOL_INSTANCE]
    alert.alert_type = contents[KEY_TYPE]
    alert.description = contents[KEY_DESCRIPTION]
    alert.event_time = contents[KEY_EVENT_TIME]
    alert.details = contents[KEY_DETAILS]

    if KEY_NAME in contents:
        alert.name = contents[KEY_NAME]

    if KEY_COMPANY_NAME in contents and contents[KEY_COMPANY_NAME]:
        alert.company_name = contents[KEY_COMPANY_NAME]
    else:
        alert.company_name = saq.CONFIG['global']['company_name']

    # add all the specified observables
    # each key in the observable dictionary is the type
    for o_type in contents[KEY_OBSERVABLES].keys():
        # protocol version 1.2 only had two elements (value, time)
        # version 1.3 has four (value, time, is_suspect, directives)
        for values in contents[KEY_OBSERVABLES][o_type]:
            o_value = values[0]
            o_time = values[1]
            is_suspect = values[2] # DEPRECATED
            directives = values[3]

            o = alert.add_observable(o_type, o_value, o_time)
            if o:
                for directive in directives:
                    o.add_directive(directive)

    # add all the specified tags
    for tag in contents[KEY_TAGS]:
        alert.add_tag(tag)

    # save the files to disk and add them as observables of type file
    for f in request.files.getlist('data'):
        logging.debug("recording file {}".format(f.filename))
        temp_dir = tempfile.mkdtemp(dir=saq.CONFIG.get('server', 'incoming_dir'))
        _path = os.path.join(temp_dir, secure_filename(f.filename))
        try:
            if os.path.exists(_path):
                logging.error("duplicate file name {}".format(_path))
                raise RuntimeError("duplicate file name {}".format(_path))

            logging.debug("saving file to {}".format(_path))
            try:
                f.save(_path)
            except Exception as e:
                logging.error("unable to save file to {}: {}".format(_path, e))
                raise e

            full_path = os.path.join(alert.storage_dir, f.filename)

            try:
                dest_dir = os.path.dirname(full_path)
                if not os.path.isdir(dest_dir):
                    try:
                        os.makedirs(dest_dir)
                    except Exception as e:
                        logging.error("unable to create directory {}: {}".format(dest_dir, e))
                        raise e

                logging.debug("copying file {} to {}".format(_path, full_path))
                shutil.copy(_path, full_path)

                # add this as a F_FILE type observable
                alert.add_observable(F_FILE, os.path.relpath(full_path, start=alert.storage_dir))

            except Exception as e:
                logging.error("unable to copy file from {} to {} for alert {}: {}".format(
                              _path, full_path, alert, e))
                raise e

        except Exception as e:
            logging.error("unable to deal with file {}: {}".format(f, e))
            report_exception()
            return "", 500

        finally:
            try:
                shutil.rmtree(temp_dir)
            except Exception as e:
                logging.error("unable to delete temp dir {}: {}".format(temp_dir, e))

    try:
        if not alert.sync():
            logging.error("unable to sync alert")
            return "", 500

        # send the alert to the automated analysis engine
        alert.request_correlation()

    except Exception as e:
        logging.error("unable to sync to database: {}".format(e))
        report_exception()
        return "", 500

    return str(alert.id), 200
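
For reference, here is a minimal client-side sketch of what this handler expects. The URL and field values are assumptions; only the form field names ('alert', 'data') and the KEY_* names come from the handler above, and each observable entry carries the four elements the handler unpacks.

# hypothetical protocol 1.3 client sketch -- endpoint URL and values are assumptions
import json
import uuid

import requests  # assumed third-party dependency

alert_json = {
    'uuid': str(uuid.uuid4()),
    'tool': 'example_tool',
    'tool_instance': 'example_host',
    'type': 'generic',
    'description': 'example alert',
    'event_time': '2018-01-01 00:00:00',
    'details': {},
    # each observable entry: [value, time, is_suspect (deprecated), directives]
    'observables': {'ipv4': [['1.2.3.4', None, False, []]]},
    'tags': ['example'],
    'name': 'example name',          # optional
    'company_name': 'example corp',  # optional
}

# each uploaded file goes in a multipart field named 'data'
files = [('data', ('sample.txt', b'hello', 'application/octet-stream'))]
response = requests.post('https://ace.example.com/submit_alert_1_3',  # assumed URL
                         data={'alert': json.dumps(alert_json)},
                         files=files)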
Example #2
def request_handler_1_0():
    assert saq.CONFIG is not None

    logging.warning("using deprecated network protocol")

    # client passes in the JSON contents of the alert
    contents = json.loads(request.form['alert'])

    # create a new Alert object - this already comes with an ID we can use
    # note that we use network_json here since this is a new alert coming across the wire
    alert = Alert(network_json=contents)

    # we need somewhere to store this alert
    alert.storage_dir = os.path.join(saq.CONFIG['global']['data_dir'], saq.SAQ_NODE, alert.uuid[0:3], alert.uuid)
    alert.initialize_storage()
    alert._materialize()

    # save the attachments to disk
    # XXX I think that this will only allow attachments into the main (root) directory of the alert
    # TODO - support relative directories here
    for f in request.files.getlist('data'):
        logging.debug("recording file {0}".format(f.filename))
        temp_dir = tempfile.mkdtemp(dir=saq.CONFIG.get('server', 'incoming_dir'))
        _path = os.path.join(temp_dir, secure_filename(f.filename))
        try:
            if os.path.exists(_path):
                logging.error("duplicate file name {0}".format(_path))
                raise RuntimeError("duplicate file name {0}".format(_path))

            logging.debug("saving file to {0}".format(_path))
            try:
                f.save(_path)
            except Exception as e:
                logging.error("unable to save file to {0}: {1}".format(_path, str(e)))
                raise e

            full_path = os.path.join(alert.storage_dir, f.filename)

            try:
                dest_dir = os.path.dirname(full_path)
                if not os.path.isdir(dest_dir):
                    os.makedirs(dest_dir)

                logging.debug("copying file {0} to {1}".format(_path, full_path))
                shutil.copy(_path, full_path)

            except Exception as e:
                logging.error("unable to copy file from {0} to {1} for alert {2}: {3}".format(
                    _path, full_path, alert, str(e)))
                raise e

        except Exception as e:
            logging.error("unable to deal with file {0}: {1}".format(f, str(e)))
            report_exception()
            return "", 500

        finally:
            try:
                shutil.rmtree(temp_dir)
            except Exception as e:
                logging.error("unable to delete temp dir {0}: {1}".format(temp_dir, str(e)))

    # update the attachment paths
    for attachment in alert.attachments:
        logging.debug("moving reference for {0} to {1}".format(attachment.path, alert.storage_dir))
        attachment.path = os.path.join(alert.storage_dir, attachment.path)

    attempt = 0
    while attempt < 3:
        try:
            if not alert.sync():
                logging.error("unable to submit alert")
                return "", 500
            else:
                # send the alert to the automated analysis engine
                alert.request_correlation()

                break
        except Exception as e:
            logging.error("unable to sync to database: {0}".format(str(e)))
            attempt += 1
            if attempt < 3:
                continue

            return "", 500

    return str(alert.id), 200
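
The retry loop above mixes the attempt counter with the control flow. A behavior-equivalent sketch using a plain for loop, assuming Alert exposes sync() and request_correlation() as used above:

import logging

MAX_SYNC_ATTEMPTS = 3  # matches the hard-coded limit above

def sync_with_retry(alert, max_attempts=MAX_SYNC_ATTEMPTS):
    """Try alert.sync() up to max_attempts times; return True on success."""
    for attempt in range(1, max_attempts + 1):
        try:
            if not alert.sync():
                logging.error("unable to submit alert")
                return False
            # send the alert to the automated analysis engine
            alert.request_correlation()
            return True
        except Exception as e:
            logging.error("sync attempt {0} failed: {1}".format(attempt, e))
    return False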
Example #3
def request_handler_1_2():
    assert saq.CONFIG is not None

    # TODO actually use the library
    # protocol constants copied over from the client library ;)
    KEY_ID = 'id'
    KEY_UUID = 'uuid'
    KEY_TOOL = 'tool'
    KEY_TOOL_INSTANCE = 'tool_instance'
    KEY_TYPE = 'type'
    KEY_DESCRIPTION = 'description'
    KEY_EVENT_TIME = 'event_time'
    KEY_DETAILS = 'details'
    KEY_OBSERVABLES = 'observables'
    KEY_TAGS = 'tags'
    KEY_ATTACHMENTS = 'attachments'
    KEY_NAME = 'name'

    # client passes in the JSON contents of the alert
    contents = json.loads(request.form['alert'])

    alert = Alert()

    # set all of the properties individually
    # XXX fix me
    # it looks like the construction logic doesn't quite work here:
    # when loading from the arguments to the constructor, the internal
    # variables with leading underscores get set rather than the properties
    # representing the database columns. it was designed that way to allow
    # the JSON stuff to work correctly, so I'll need to revisit that later

    alert.uuid = contents[KEY_UUID]
    alert.storage_dir = os.path.join(saq.CONFIG['global']['data_dir'], saq.SAQ_NODE, alert.uuid[0:3], alert.uuid)
    alert.initialize_storage()
    alert.tool = contents[KEY_TOOL]
    alert.tool_instance = contents[KEY_TOOL_INSTANCE]
    alert.alert_type = contents[KEY_TYPE]
    alert.description = contents[KEY_DESCRIPTION]
    alert.event_time = contents[KEY_EVENT_TIME]
    alert.details = contents[KEY_DETAILS]

    # XXX shame on me for not testing well enough
    if KEY_NAME in contents:
        alert.name = contents[KEY_NAME]

    # add all the specified observables
    # each key in the observable dictionary is the type
    for o_type in contents[KEY_OBSERVABLES].keys():
        # protocol version 1.2 only had two elements (value, time)
        # version 1.3 has three (value, time, is_suspect)
        for values in contents[KEY_OBSERVABLES][o_type]:
            o_value = values[0]
            o_time = values[1]
            is_suspect = False # deprecated
            if len(values) > 2:
                is_suspect = values[2]

            alert.add_observable(o_type, o_value, o_time)

    # add all the specified tags
    for tag in contents[KEY_TAGS]:
        alert.add_tag(tag)

    #alert._materialize()

    # save the attachments to disk and add them as observables of type file
    for f in request.files.getlist('data'):
        logging.debug("recording file {0}".format(f.filename))
        # XXX why not just save straight to the destination address?
        temp_dir = tempfile.mkdtemp(dir=saq.CONFIG.get('server', 'incoming_dir'))
        _path = os.path.join(temp_dir, secure_filename(f.filename))
        try:
            if os.path.exists(_path):
                logging.error("duplicate file name {0}".format(_path))
                raise RuntimeError("duplicate file name {0}".format(_path))

            logging.debug("saving file to {0}".format(_path))
            try:
                f.save(_path)
            except Exception as e:
                logging.error("unable to save file to {0}: {1}".format(_path, str(e)))
                raise e

            full_path = os.path.join(alert.storage_dir, f.filename)

            try:
                dest_dir = os.path.dirname(full_path)
                if not os.path.isdir(dest_dir):
                    try:
                        os.makedirs(dest_dir)
                    except Exception as e:
                        logging.error("unable to create directory {0}: {1}".format(dest_dir, str(e)))
                        raise e

                logging.debug("copying file {0} to {1}".format(_path, full_path))
                shutil.copy(_path, full_path)

                # add this as a F_FILE type observable
                alert.add_observable(F_FILE, os.path.relpath(full_path, start=alert.storage_dir))

            except Exception as e:
                logging.error("unable to copy file from {0} to {1} for alert {2}: {3}".format(
                    _path, full_path, alert, str(e)))
                raise e

        except Exception as e:
            logging.error("unable to deal with file {0}: {1}".format(f, str(e)))
            report_exception()
            return "", 500

        finally:
            try:
                shutil.rmtree(temp_dir)
            except Exception as e:
                logging.error("unable to delete temp dir {0}: {1}".format(temp_dir, str(e)))

    try:
        if not alert.sync():
            logging.error("unable to sync alert")
            return "", 500

        # send the alert to the automated analysis engine
        alert.request_correlation()

    except Exception as e:
        logging.error("unable to sync to database: {0}".format(str(e)))
        report_exception()
        return "", 500

    return str(alert.id), 200
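
For comparison, the observable payload shapes across protocol versions, reconstructed from the comments in the handlers above (the values themselves are placeholders):

# protocol 1.2: two elements per observable entry (value, time)
observables_1_2 = {
    'ipv4': [['1.2.3.4', None]],
}

# later 1.2 clients may append is_suspect as a third element; the handler
# reads it when present but the flag is deprecated and unused
observables_1_2_extended = {
    'ipv4': [['1.2.3.4', None, False]],
}

# protocol 1.3: four elements (value, time, is_suspect, directives);
# the directive name here is a placeholder
observables_1_3 = {
    'ipv4': [['1.2.3.4', None, False, ['example_directive']]],
}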
Example #4
    def handle_network_item(self, analysis_path):
        logging.info("got network item {}".format(analysis_path))

        # create a temporary directory to extract the tar file
        temp_dir = tempfile.mkdtemp(suffix='.ace_submission')

        try:
            # extract the tar file inside this temporary directory
            p = Popen(['tar', 'xf', analysis_path, '-C', temp_dir],
                      stdout=PIPE,
                      stderr=PIPE)
            # communicate() also waits for the process to exit
            _stdout, _stderr = p.communicate()

            if p.returncode != 0:
                logging.warning("tar returned non-zero status for {}".format(
                    analysis_path))

            if _stderr:
                logging.warning(
                    "tar command printed text to stderr for {}: {}".format(
                        analysis_path, _stderr))

            # load the analysis
            root = Alert()
            root.storage_dir = temp_dir
            try:
                root.load()
            except Exception as e:
                logging.error("unable to load from {}: {}".format(
                    analysis_path, e))
                report_exception()
                return

            # move the storage_dir into ACE
            try:
                dest_dir = os.path.join(saq.CONFIG['global']['data_dir'],
                                        saq.SAQ_NODE, root.uuid[0:3],
                                        root.uuid)
                shutil.move(root.storage_dir, dest_dir)
            except Exception as e:
                logging.error("unable to move {} to {}: {}".format(
                    root.storage_dir, dest_dir, e))
                report_exception()
                return

            # change the location of the alert to this receiving system
            root.location = saq.SAQ_NODE

            # insert the alert into the database
            root.storage_dir = dest_dir
            if root.id:
                logging.debug(
                    "removed previous id {} from forwarded alert {}".format(
                        root.id, root))
                root.id = None

            try:
                root.sync()
                root.request_correlation()
            except Exception as e:
                logging.error("unable to save alert from {}: {}".format(
                    analysis_path, e))
                report_exception()
                return

            # if we got to this point then we're done with this input file
            try:
                os.remove(analysis_path)
            except Exception as e:
                logging.error("unable to remove {}: {}".format(
                    analysis_path, e))
                report_exception()

        except Exception as e:
            logging.error("unable to process {}: {}".format(analysis_path, e))
            report_exception()
            raise e

        finally:
            try:
                if os.path.exists(temp_dir):
                    shutil.rmtree(temp_dir)
            except Exception as e:
                logging.error(
                    "unable to delete temporary directory {}: {}".format(
                        temp_dir, e))
                report_exception()
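
The extraction above shells out to the system tar binary. As an alternative (not what the code above does), the same step could use the standard-library tarfile module; a sketch, assuming the archive comes from a trusted forwarding node:

import logging
import tarfile

def extract_submission(analysis_path, temp_dir):
    """Extract the submission archive into temp_dir without spawning tar."""
    try:
        with tarfile.open(analysis_path) as tar:
            # extractall trusts member paths -- acceptable only because the
            # archive is assumed to come from a trusted peer
            tar.extractall(path=temp_dir)
    except tarfile.TarError as e:
        logging.warning("unable to extract {}: {}".format(analysis_path, e))
        raise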
Example #5
    def test_ace_engine_002_persistent_engine(self):

        engine = CustomACEEngine()
        if os.path.exists(engine.delayed_analysis_path):
            os.remove(engine.delayed_analysis_path)

        engine.enable_module('analysis_module_test_delayed_analysis')
        self.start_engine(engine)
        root = create_root_analysis(uuid=str(uuid.uuid4()))
        root.initialize_storage()
        o_uuid = root.add_observable(F_TEST, '0:05|0:10').id
        root.save()

        alert = Alert()
        alert.storage_dir = root.storage_dir
        alert.load()
        alert.sync()
        alert.request_correlation()

        def callback():
            return os.path.exists(os.path.join(root.storage_dir, '.delayed'))

        self.assertTrue(self.wait_for_condition(callback))
        self.kill_engine(engine)

        self.assertTrue(os.path.exists(engine.delayed_analysis_path))
        with open(engine.delayed_analysis_path, 'rb') as fp:
            delayed_analysis = pickle.load(fp)

        if len(delayed_analysis) > 1:
            for item in delayed_analysis:
                print(item[1])
            self.fail("more than one delayed analysis request is available")

        next_time, dar = delayed_analysis[0]  # dar == delayed_analysis_request
        from saq.engine import DelayedAnalysisRequest
        self.assertIsInstance(dar, DelayedAnalysisRequest)
        self.assertEqual(dar.storage_dir, root.storage_dir)
        self.assertEqual(dar.target_type, type(alert))
        self.assertEqual(dar.observable_uuid, o_uuid)
        self.assertEqual(dar.analysis_module,
                         'analysis_module_test_delayed_analysis')
        self.assertEqual(dar.uuid, root.uuid)
        self.assertFalse(dar.lock_proxy.is_locked())

        from saq.modules.test import DelayedAnalysisTestAnalysis

        root = create_root_analysis(storage_dir=root.storage_dir)
        root.load()
        analysis = root.get_observable(o_uuid).get_analysis(
            DelayedAnalysisTestAnalysis)

        self.assertTrue(analysis.initial_request)
        self.assertFalse(analysis.delayed_request)
        self.assertEqual(analysis.request_count, 1)
        self.assertFalse(analysis.completed)

        engine = CustomACEEngine()
        engine.enable_module('analysis_module_test_delayed_analysis')
        self.start_engine(engine)
        engine.queue_work_item(TerminatingMarker())
        self.wait_engine(engine)

        root = create_root_analysis(storage_dir=root.storage_dir)
        root.load()
        analysis = root.get_observable(o_uuid).get_analysis(
            DelayedAnalysisTestAnalysis)

        self.assertTrue(analysis.initial_request)
        self.assertTrue(analysis.delayed_request)
        self.assertEqual(analysis.request_count, 2)
        self.assertTrue(analysis.completed)

        self.assertFalse(os.path.exists(engine.delayed_analysis_path))
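
wait_for_condition is a test-harness helper not shown here; a minimal polling sketch of what it presumably does (the timeout and interval values are assumptions):

import time

def wait_for_condition(callback, timeout=5.0, interval=0.1):
    """Poll callback until it returns True or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if callback():
            return True
        time.sleep(interval)
    return False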
Example #6
    def test_ace_engine_003_persistent_engine_multiple(self):
        """Multiple delayed analysis requests are saved at shutdown and reloaded at startup."""

        engine = CustomACEEngine()
        if os.path.exists(engine.delayed_analysis_path):
            os.remove(engine.delayed_analysis_path)

        tracking = {}  # key = storage_dir, value = observable uuid

        engine.enable_module('analysis_module_test_delayed_analysis')
        self.start_engine(engine)
        for _ in range(3):
            root = create_root_analysis(uuid=str(uuid.uuid4()))
            root.initialize_storage()
            tracking[root.storage_dir] = root.add_observable(
                F_TEST, '0:10|0:15').id
            root.save()

            alert = Alert()
            alert.storage_dir = root.storage_dir
            alert.load()
            alert.sync()
            alert.request_correlation()

            def callback():
                return os.path.exists(
                    os.path.join(root.storage_dir, '.delayed'))

            self.assertTrue(self.wait_for_condition(callback))

        self.kill_engine(engine)

        self.assertTrue(os.path.exists(engine.delayed_analysis_path))
        with open(engine.delayed_analysis_path, 'rb') as fp:
            delayed_analysis = pickle.load(fp)

        self.assertEqual(len(delayed_analysis), 3)

        from saq.modules.test import DelayedAnalysisTestAnalysis

        for storage_dir in tracking.keys():
            root = create_root_analysis(storage_dir=storage_dir)
            root.load()

            analysis = root.get_observable(tracking[storage_dir]).get_analysis(
                DelayedAnalysisTestAnalysis)

            self.assertTrue(analysis.initial_request)
            self.assertFalse(analysis.delayed_request)
            self.assertEqual(analysis.request_count, 1)
            self.assertFalse(analysis.completed)

        engine = CustomACEEngine()
        engine.enable_module('analysis_module_test_delayed_analysis')
        self.start_engine(engine)
        engine.queue_work_item(TerminatingMarker())
        self.wait_engine(engine)

        for storage_dir in tracking.keys():
            root = create_root_analysis(storage_dir=storage_dir)
            root.load()
            analysis = root.get_observable(tracking[storage_dir]).get_analysis(
                DelayedAnalysisTestAnalysis)

            self.assertTrue(analysis.initial_request)
            self.assertTrue(analysis.delayed_request)
            self.assertEqual(analysis.request_count, 2)
            self.assertTrue(analysis.completed)

        self.assertFalse(os.path.exists(engine.delayed_analysis_path))