Example #1
    def load_package_from_dict(self, package_definition: dict,
                               source: str) -> ACEPackage:
        _package = ACEPackage(
            source=source,
            name=package_definition["name"],
            description=package_definition["description"],
            version=package_definition["version"],
        )

        get_logger().debug(f"loading package from {source}")

        # load any defined modules
        if "modules" in package_definition:
            for module_spec in package_definition["modules"]:
                module_name, class_name = module_spec.rsplit(".", 1)
                _module = importlib.import_module(module_name)
                _package.modules.append(getattr(_module, class_name))

        # load any defined services
        if "services" in package_definition:
            for service_spec in package_definition["services"]:
                module_name, class_name = service_spec.rsplit(".", 1)
                _module = importlib.import_module(module_name)
                _type = getattr(_module, class_name)
                # sanity check type definition
                if not issubclass(_type, ACEService):
                    raise RuntimeError(
                        f"service definition {service_spec} in {source} does not extend ACEService"
                    )

                _package.services.append(_type)

        return _package
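
A minimal usage sketch for the loader above, assuming a hypothetical registry object that exposes load_package_from_dict; the dict keys mirror what the method reads, and the dotted module paths are placeholders:

    # hypothetical package definition; keys match what load_package_from_dict reads
    package_definition = {
        "name": "example_package",
        "description": "an example ACE package",
        "version": "1.0.0",
        # dotted paths resolved via importlib; placeholder names only
        "modules": ["my_package.modules.MyAnalysisModule"],
        # each entry must resolve to an ACEService subclass
        "services": ["my_package.services.MyService"],
    }

    package = registry.load_package_from_dict(package_definition, source="example.json")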
Example #2
    async def delete_analysis_module_type(
            self, amt: Union[AnalysisModuleType, str]) -> bool:
        """Deletes (unregisters) the given AnalysisModuleType from the system.
        Any outstanding requests for this type are discarded.
        Returns True if the analysis module type was deleted, False otherwise.
        If the type does not exist then False is returned."""

        if isinstance(amt, str):
            amt = await self.get_analysis_module_type(amt)
            # the name did not resolve to a registered analysis module type
            if amt is None:
                return False

        if not await self.get_analysis_module_type(amt.name):
            return False

        get_logger().info(f"deleting analysis module type {amt}")

        # remove the work queue for the module
        await self.delete_work_queue(amt.name)
        # remove the module
        await self.i_delete_analysis_module_type(amt)
        # remove any outstanding requests from tracking
        await self.clear_tracking_by_analysis_module_type(amt)
        # remove any cached analysis results for this type
        await self.delete_cached_analysis_results_by_module_type(amt)

        await self.fire_event(EVENT_AMT_DELETED, amt)
        return True
Example #3
    async def i_load_file(self, sha256: str, path: str) -> Union[ContentMetadata, None]:
        meta = await self.get_content_meta(sha256)
        if meta is None:
            raise UnknownFileError()

        # if storage encryption is NOT enabled then we have an option to "copy" the data super fast
        # on systems that support hard links
        if not await self.storage_encryption_enabled():
            try:
                # fastest way to "copy" data is to just create a new link to it
                src_path = os.path.join(self.storage_root, meta.location)
                await asyncio.get_running_loop().run_in_executor(None, os.link, src_path, path)
                get_logger().debug(f"hard linked {src_path} to {path}")
                return meta
            except IOError:
                pass

        # NOTE in theory it makes sense to fall back to symlinks, but there are two problems with that:
        # 1) you're referencing the actual file rather than a copy of it
        # 2) external tooling and analysis may not work, or may get invalid results, if the file is a symlink

        # if that didn't work then we just do a byte-for-byte copy as normal
        # this also works if the data is encrypted
        async with aiofiles.open(path, "wb") as fp:
            async for chunk in await self.iter_content(sha256):
                await fp.write(chunk)

        return meta
Example #4
    def kill_executor(self):
        if self.concurrency_mode != CONCURRENCY_MODE_PROCESS:
            return

        for process in psutil.Process(os.getpid()).children():
            get_logger().warning(f"sending TERM to {process}")
            process.send_signal(signal.SIGTERM)
Example #5
    async def clear_tracking_by_analysis_module_type(self,
                                                     amt: AnalysisModuleType):
        """Deletes tracking for any requests assigned to the given analysis module type."""
        get_logger().debug(
            f"clearing analysis request tracking for analysis module type {amt}"
        )
        return await self.i_clear_tracking_by_analysis_module_type(amt)
Example #6
    async def track_analysis_details(self, root: RootAnalysis, uuid: str,
                                     value: Any) -> bool:
        assert isinstance(root, RootAnalysis)
        assert isinstance(uuid, str)

        # we don't save Analysis that doesn't have the details set
        if value is None:
            return False

        get_logger().debug(f"tracking {root} analysis details {uuid}")
        exists = await self.analysis_details_exists(root.uuid)

        # the thing to be tracked must be able to serialize into json
        json_value = json.dumps(value, sort_keys=True)

        if await self.analysis_encryption_enabled():
            encoded_value = await encrypt_chunk(
                self.encryption_settings.aes_key, json_value.encode())
        else:
            encoded_value = json_value.encode()

        await self.i_track_analysis_details(root.uuid, uuid, encoded_value)

        if not exists:
            await self.fire_event(EVENT_ANALYSIS_DETAILS_NEW,
                                  [root, root.uuid])
        else:
            await self.fire_event(EVENT_ANALYSIS_DETAILS_MODIFIED,
                                  [root, root.uuid])

        return True
Example #7
    def process_exception(
            self,
            module: AnalysisModule,
            request: AnalysisRequest,
            e: Exception,
            error_message: Optional[str] = None) -> AnalysisRequest:
        assert isinstance(module, AnalysisModule)
        assert isinstance(request, AnalysisRequest)
        assert isinstance(e, Exception)

        # use existing analysis if it already exists
        analysis = request.modified_observable.get_analysis(module.type)
        if analysis is None:
            analysis = request.modified_observable.add_analysis(
                Analysis(type=module.type))

        # set the error message and stack trace details
        if not error_message:
            analysis.error_message = f"{type(e).__name__}: {e}"
        else:
            analysis.error_message = error_message

        analysis.stack_trace = format_error_report(e)
        get_logger().error(analysis.error_message)
        return request
Example #8
    async def close_redis_connections(self):
        pool_key = _pool_key()
        get_logger().info(f"closing connection pool to redis ({pool_key})")
        if pool_key in self.pools:
            self.pools[pool_key].close()
            await self.pools[pool_key].wait_closed()
            del self.pools[pool_key]
Example #9
    def execute_analysis(self, module_type: str, request_json: str) -> str:
        """Processes the request with the analysis module."""
        amt = AnalysisModuleType.from_json(module_type)
        module = self.module_map[amt.name]

        # run_until_complete just keeps going until it returns
        # there's no way to "cancel" it if something gets stuck
        # the only way out is to kill the process
        # so we start a thread to monitor the timeout
        def _module_timeout():
            get_logger().critical(
                f"analysis module {module} timed out analyzing request {request_json}"
            )
            if self.concurrency_mode == CONCURRENCY_MODE_PROCESS:
                # and then die if we hit it
                # NOTE that if we're only running threads then there's really no way out other than to log it
                sys.exit(1)

        get_logger().debug(f"starting timer for {module.timeout} seconds")
        t = threading.Timer(module.timeout, _module_timeout)
        t.start()

        try:
            result = self.event_loop.run_until_complete(
                self.execute_analysis_async(module_type, request_json))
        finally:
            # if we didn't time out make sure we cancel the timer
            t.cancel()

        return result
Example #10
    async def _get_redis_connection(self):
        # if the pid or tid change then we create a new pool
        pool_key = _pool_key()
        if pool_key not in self.pools:
            host = await self.get_config_value(CONFIG_REDIS_HOST,
                                               env=ACE_REDIS_HOST)
            port = await self.get_config_value(CONFIG_REDIS_PORT,
                                               env=ACE_REDIS_PORT,
                                               env_type=int)
            db = await self.get_config_value(CONFIG_REDIS_DB, default=0)
            pool_size = await self.get_config_value(CONFIG_REDIS_DB,
                                                    default=100)

            if host and port:
                connection_info = (host, port)
            else:
                connection_info = host

            if not connection_info:
                raise ValueError("missing redis connection settings")

            get_logger().info(
                f"connecting to redis {connection_info} ({pool_key})")
            self.pools[pool_key] = await aioredis.create_redis_pool(
                connection_info)
            get_logger().debug(
                f"connected to redis {connection_info} ({pool_key})")

        return self.pools[pool_key]
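
The _pool_key() helper referenced above is not shown in these examples. A minimal sketch of what it could look like, assuming the key only needs to change when the pid or thread id changes (as the comment at the top of the method suggests):

    import os
    import threading

    def _pool_key() -> str:
        # one redis pool per (process, thread) pair; a fork or a new thread gets its own pool
        return f"{os.getpid()}:{threading.get_ident()}"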
Example #11
    async def create_database(self):
        from ace.system.database.schema import Base

        get_logger().info(f"creating database {self.db_url}")
        Base.metadata.bind = self.engine
        async with self.engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)
Example #12
    async def i_track_content_root(self, sha256: str, uuid: str):
        try:
            async with self.get_db() as db:
                await db.merge(StorageRootTracking(sha256=sha256, root_uuid=uuid))
                await db.commit()
        except IntegrityError as e:
            get_logger().warning(f"unable to track root {uuid}: {e}")
Example #13
    async def i_fire_event(self, event: Event):
        try:
            async with self.get_redis_connection() as rc:
                await rc.publish(REDIS_CHANNEL_EVENTS,
                                 event.json(encoder=custom_json_encoder))
        except Exception as e:
            get_logger().error(f"unable to submit event {event} to redis: {e}")
Example #14
    async def delete_content(self, sha256: str) -> bool:
        get_logger().debug(f"deleting content {sha256}")
        result = await self.i_delete_content(sha256)
        if result:
            await self.fire_event(EVENT_STORAGE_DELETED, sha256)

        return result
Example #15
    def _module_timeout():
        get_logger().critical(
            f"analysis module {module} timed out analyzing request {request_json}"
        )
        if self.concurrency_mode == CONCURRENCY_MODE_PROCESS:
            # and then die if we hit it
            # NOTE that if we're only running threads then there's really no way out other than to log it
            sys.exit(1)
Example #16
    async def event_reader(self, channel):
        while await channel.wait_message():
            message = await channel.get()
            if message:
                await self.redis_message_handler(message)

        get_logger().debug("event reader stopped")
        self.event_reader_stopped_event.set()
Example #17
    async def delete_analysis_details(self, uuid: str) -> bool:
        assert isinstance(uuid, str)

        get_logger().debug(f"deleting analysis details {uuid}")
        result = await self.i_delete_analysis_details(uuid)
        if result:
            await self.fire_event(EVENT_ANALYSIS_DETAILS_DELETED, uuid)

        return result
Example #18
    async def initialize_event_reader(self):
        """Starts the event reader loop if it isn't already running."""
        if self.event_reader_connection is None:
            get_logger().debug("starting event reader loop")
            self.event_reader_stopped_event = asyncio.Event()
            self.event_reader_connection = await self._get_redis_connection()
            (channel, ) = await self.event_reader_connection.subscribe(
                REDIS_CHANNEL_EVENTS)
            asyncio.get_running_loop().create_task(self.event_reader(channel))
Example #19
    async def delete_config(self, key: str) -> bool:
        """Deletes the configuration setting. Returns True if the setting was deleted."""
        assert isinstance(key, str) and key

        get_logger().debug(f"deleting config key {key}")
        result = await self.i_delete_config(key)
        if result:
            await self.fire_event(EVENT_CONFIG_DELETE, key)

        return result
Example #20
    async def track_content_root(self, sha256: str, root: Union[RootAnalysis,
                                                                str]):
        assert isinstance(sha256, str)
        assert isinstance(root, RootAnalysis) or isinstance(root, str)

        if isinstance(root, RootAnalysis):
            root = root.uuid

        get_logger().debug(f"tracking content {sha256} to root {root}")
        await self.i_track_content_root(sha256, root)
Example #21
    async def i_delete_content(self, sha256: str) -> bool:
        file_path = await self.get_file_path(sha256)
        try:
            if await asyncio.get_running_loop().run_in_executor(None, os.path.exists, file_path):
                await asyncio.get_running_loop().run_in_executor(None, os.remove, file_path)
        except Exception as e:
            get_logger().exception(f"unable to delete {file_path}")

        if not await DatabaseStorageInterface.i_delete_content(self, sha256):
            return False

        return True
Example #22
    async def add_work_queue(self, amt: Union[AnalysisModuleType,
                                              str]) -> bool:
        assert isinstance(amt, AnalysisModuleType) or isinstance(amt, str)
        if isinstance(amt, AnalysisModuleType):
            amt = amt.name

        get_logger().debug(f"adding work queue for {amt}")
        result = await self.i_add_work_queue(amt)
        if result:
            await self.fire_event(EVENT_WORK_QUEUE_NEW, amt)

        return result
Example #23
    async def delete_analysis_request(
            self, target: Union[AnalysisRequest, str]) -> bool:
        assert isinstance(target, AnalysisRequest) or isinstance(target, str)
        if isinstance(target, AnalysisRequest):
            target = target.id

        get_logger().debug(f"deleting analysis request {target}")
        result = await self.i_delete_analysis_request(target)
        if result:
            await self.fire_event(EVENT_AR_DELETED, target)

        return result
Example #24
    async def link_analysis_requests(self, source_request: AnalysisRequest,
                                     dest_request: AnalysisRequest) -> bool:
        """Links the source to the dest such that when the dest has completed,
        failed or expired, the source is then processed again."""
        assert isinstance(source_request, AnalysisRequest)
        assert isinstance(dest_request, AnalysisRequest)
        assert source_request != dest_request
        get_logger().debug(
            f"linking analysis request source {source_request} to dest {dest_request}"
        )
        return await self.i_link_analysis_requests(source_request,
                                                   dest_request)
Example #25
    async def track_analysis_request(self, request: AnalysisRequest):
        """Begins tracking the given AnalysisRequest."""
        assert isinstance(request, AnalysisRequest)

        if request.type and await self.get_analysis_module_type(
                request.type.name) is None:
            raise UnknownAnalysisModuleTypeError()

        get_logger().debug(f"tracking analysis request {request}")
        result = await self.i_track_analysis_request(request)
        await self.fire_event(EVENT_AR_NEW, request)
        return result
Example #26
    async def put_work(self, amt: Union[AnalysisModuleType, str],
                       analysis_request: AnalysisRequest):
        assert isinstance(amt, AnalysisModuleType) or isinstance(amt, str)
        assert isinstance(analysis_request, AnalysisRequest)

        if isinstance(amt, AnalysisModuleType):
            amt = amt.name

        get_logger().debug(
            f"adding request {analysis_request} to work queue for {amt}")
        result = await self.i_put_work(amt, analysis_request)
        await self.fire_event(EVENT_WORK_ADD, [amt, analysis_request])
        return result
Example #27
    async def delete_root_analysis(self, root: Union[RootAnalysis,
                                                     str]) -> bool:
        assert isinstance(root, RootAnalysis) or isinstance(root, str)

        if isinstance(root, RootAnalysis):
            root = root.uuid

        get_logger().debug(f"deleting root {root}")
        result = await self.i_delete_root_analysis(root)
        if result:
            await self.fire_event(EVENT_ANALYSIS_ROOT_DELETED, root)

        return result
Example #28
    async def set_config(self, key: str, value: Any, documentation: Optional[str] = None):
        """Sets the configuration setting. This function updates the setting if it already exists, or creates a new one if
        it does not."""
        assert isinstance(key, str) and key
        assert documentation is None or isinstance(documentation, str) and documentation

        if value is None and documentation is None:
            raise ValueError("cannot set configuration value to None")

        get_logger().debug(f"modified config key {key} value {value}")
        result = await self.i_set_config(key, value, documentation)
        await self.fire_event(EVENT_CONFIG_SET, [key, value, documentation])
        return result
Example #29
    async def submit_alert(self, root: Union[RootAnalysis, str]) -> bool:
        """Submits the given RootAnalysis uuid as an alert to any registered alert systems.
        Returns True if at least one system is registered, False otherwise."""
        assert isinstance(root, str) or isinstance(root, RootAnalysis)
        if isinstance(root, RootAnalysis):
            root = root.uuid

        get_logger().info(f"submitting alert {root}")
        result = await self.i_submit_alert(root)
        if result:
            await self.fire_event(EVENT_ALERT, root)

        return result
Example #30
    def set_service_info(self,
                         name: str,
                         status: str,
                         pid: Optional[int] = None):
        with sqlite3.connect(self.db_path) as db:
            c = db.cursor()
            c.execute(
                "INSERT OR REPLACE INTO services(name, status, pid) VALUES ( ?, ?, ? )",
                (name, status, pid))
            db.commit()

            get_logger().info(
                f"service {name} changed status to {status} on pid {pid}")
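
The INSERT OR REPLACE above assumes a services table already exists. A minimal sketch of a schema that would satisfy it, with column names taken from the query and the types assumed:

    import sqlite3

    def create_services_table(db_path: str):
        # assumed schema: name is the unique key that INSERT OR REPLACE conflicts on
        with sqlite3.connect(db_path) as db:
            db.execute(
                """CREATE TABLE IF NOT EXISTS services (
                       name TEXT PRIMARY KEY,
                       status TEXT NOT NULL,
                       pid INTEGER )"""
            )
            db.commit()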