Example #1
    def __init__(self,
                 datastore=None,
                 redis=None,
                 redis_persist=None,
                 logger=None):
        super().__init__('assemblyline.dispatcher.file', logger)

        config: Config = forge.get_config()
        datastore: AssemblylineDatastore = datastore or forge.get_datastore(
            config)
        self.dispatcher = Dispatcher(redis=redis,
                                     redis_persist=redis_persist,
                                     datastore=datastore,
                                     logger=self.log)

        if config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=config.core.metrics.apm_server.server_url,
                service_name="dispatcher")
        else:
            self.apm_client = None
Example #2
    def __init__(self,
                 logger=None,
                 datastore=None,
                 redis=None,
                 persistent_redis=None):
        super().__init__('assemblyline.ingester.internals', logger=logger)
        config = forge.get_config()
        # Connect to all sorts of things
        datastore = datastore or forge.get_datastore(config)
        classification_engine = forge.get_classification()

        # Initialize the ingester specific resources
        self.ingester = Ingester(datastore=datastore,
                                 classification=classification_engine,
                                 logger=self.log,
                                 redis=redis,
                                 persistent_redis=persistent_redis)

        if config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=config.core.metrics.apm_server.server_url,
                service_name="ingester")
        else:
            self.apm_client = None
Example #3
def redis():
    config = forge.get_config()
    client = get_client(config.core.metrics.redis.host,
                        config.core.metrics.redis.port, False)
    client.flushdb()
    yield client
    client.flushdb()
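Assuming the function above is registered as a pytest fixture (as Example #29 does with its datastore fixture), a minimal test sketch consuming it could look like this; the queue name and assertions are purely illustrative:

def test_metrics_redis(redis):
    # The fixture yields a freshly flushed client and flushes again on teardown (hypothetical test).
    redis.lpush('illustrative-queue', 'value')
    assert redis.llen('illustrative-queue') == 1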
Example #4
    def __init__(self,
                 name,
                 host=None,
                 export_interval_secs=None,
                 counter_type=None,
                 config=None,
                 redis=None,
                 counter_names=None,
                 timer_names=None,
                 export_zero=True):
        config = config or forge.get_config()
        self.channel = forge.get_metrics_sink(redis)
        self.export_interval = export_interval_secs or config.core.metrics.export_interval
        self.name = name
        self.host = host or get_random_id()
        self.type = counter_type or name
        self.export_zero = export_zero

        self.counter_schema = set(counter_names)
        self.timer_schema = set(timer_names)

        self.counts = Counters({key: 0 for key in self.counter_schema})
        self.values = {}
        self.lock = threading.Lock()
        self.scheduler = None
        self.reset()

        assert self.channel
        assert (self.export_interval > 0)
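A hedged sketch of constructing and starting this counter directly; Example #26 wires it up the same way from MetricsFactory. The component and field names below are placeholders, not real schema fields:

from assemblyline.common import forge

counter = AutoExportingCounters(
    name='my_component',                # hypothetical component name
    counter_names=['items_processed'],  # placeholder counter field
    timer_names=['execution'],          # placeholder timer field
    config=forge.get_config(),
    export_zero=True)
counter.start()  # begins periodic export, as done in Example #26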
Example #5
    def __init__(self, config=None):
        super().__init__('assemblyline.heartbeat_manager')
        self.config = config or forge.get_config()
        self.datastore = forge.get_datastore()
        self.metrics_queue = CommsQueue(METRICS_QUEUE)
        self.scheduler = BackgroundScheduler(daemon=True)
        self.hm = HeartbeatFormatter("heartbeat_manager",
                                     self.log,
                                     config=self.config)

        self.counters_lock = Lock()
        self.counters = {}
        self.rolling_window = {}
        self.window_ttl = {}
        self.ttl = self.config.core.metrics.export_interval * 2
        self.window_size = int(60 / self.config.core.metrics.export_interval)
        if self.window_size != 60 / self.config.core.metrics.export_interval:
            self.log.warning(
                "Cannot calculate a proper window size for reporting heartbeats. "
                "Metrics reported during heartbeat will be wrong.")

        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=self.config.core.metrics.apm_server.server_url,
                service_name="heartbeat_manager")
        else:
            self.apm_client = None
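The window-size check above only passes cleanly when 60 is divisible by the export interval; a quick illustration with an assumed interval of 7 seconds:

export_interval = 7                         # hypothetical config value, in seconds
window_size = int(60 / export_interval)     # 8
assert window_size != 60 / export_interval  # truncation occurred, so the warning above is logged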
Example #6
    def __init__(self,
                 datastore=None,
                 redis=None,
                 redis_persist=None,
                 logger=None):
        self.config = forge.get_config()

        self.redis = redis or get_client(
            host=self.config.core.redis.nonpersistent.host,
            port=self.config.core.redis.nonpersistent.port,
            private=False,
        )

        redis_persist = redis_persist or get_client(
            host=self.config.core.redis.persistent.host,
            port=self.config.core.redis.persistent.port,
            private=False,
        )

        self.timeout_watcher = WatcherClient(redis_persist)

        self.submission_queue = NamedQueue(SUBMISSION_QUEUE, self.redis)
        self.file_queue = NamedQueue(FILE_QUEUE, self.redis)
        self.ds = datastore or forge.get_datastore(self.config)
        self.log = logger or logging.getLogger(
            "assemblyline.dispatching.client")
        self.results = self.ds.result
        self.errors = self.ds.error
        self.files = self.ds.file
        self.active_submissions = ExpiringHash(DISPATCH_TASK_HASH,
                                               host=redis_persist)
        self.running_tasks = ExpiringHash(DISPATCH_RUNNING_TASK_HASH,
                                          host=self.redis)
        self.service_data = cast(Dict[str, Service],
                                 CachedObject(self._get_services))
Example #7
    def __init__(self, config=None):
        super().__init__('assemblyline.es_metrics', shutdown_timeout=15)
        self.config = config or forge.get_config()
        self.target_hosts = self.config.core.metrics.elasticsearch.hosts

        self.index_interval = 10.0
        self.old_node_data = {}
        self.old_cluster_data = {}
        self.old_index_data = {}
        self.old_index_time = 0.0
        self.old_node_time = 0.0
        self.old_cluster_time = 0.0

        if not self.target_hosts:
            self.log.error(
                "No elasticsearch cluster defined to store metrics. All gathered stats will be ignored..."
            )
            sys.exit(1)

        self.input_es = None
        self.target_es = None
        self.is_datastream = False

        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=self.config.core.metrics.apm_server.server_url,
                service_name="es_metrics")
        else:
            self.apm_client = None
Example #8
    def __init__(self, config=None):
        super().__init__('assemblyline.metrics_aggregator',
                         shutdown_timeout=65)
        self.config = config or forge.get_config()
        self.elastic_hosts = self.config.core.metrics.elasticsearch.hosts
        self.is_datastream = False

        if not self.elastic_hosts:
            self.log.error(
                "No elasticsearch cluster defined to store metrics. All gathered stats will be ignored..."
            )
            sys.exit(1)

        self.scheduler = BackgroundScheduler(daemon=True)
        self.metrics_queue = None
        self.es = None
        self.counters_lock = Lock()
        self.counters = {}

        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=self.config.core.metrics.apm_server.server_url,
                service_name="metrics_aggregator")
        else:
            self.apm_client = None
Example #9
def config():
    config = forge.get_config()
    config.logging.log_level = 'INFO'
    config.logging.log_as_json = False
    config.core.metrics.apm_server.server_url = None
    config.core.metrics.export_interval = 1
    config.datastore.ilm.enabled = True
    return config
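A minimal sketch of a test consuming the fixture above, assuming it is registered with pytest; the assertions simply restate the overrides:

def test_config_overrides(config):
    # Hypothetical test: the fixture disables APM and shortens the export interval.
    assert config.core.metrics.apm_server.server_url is None
    assert config.core.metrics.export_interval == 1
    assert config.datastore.ilm.enabled is True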
Example #10
    def __init__(self, show_prompt=True, logger_class=PrintLogger):
        cmd.Cmd.__init__(self)
        self.logger = logger_class()
        self.prompt = ""
        self.intro = ""
        self.datastore = forge.get_datastore(archive_access=True)
        self.config = forge.get_config()
        if show_prompt:
            self._update_context()
Example #11
def get_hostip() -> str:
    ip = None
    try:
        from assemblyline.common import forge
        config = forge.get_config()
        ip = get_route_to(config.datastore.hosts[0])
    except Exception:
        pass

    return ip or get_default_gateway_ip()
Example #12
    def __init__(self, log, alert_fqs=None, submission_fqs=None, lookback_time='*'):
        # Setup datastore
        config = forge.get_config()
        redis = get_client(config.core.redis.nonpersistent.host, config.core.redis.nonpersistent.port, False)

        self.datastore = forge.get_datastore(config=config)
        self.alert_queue = NamedQueue("replay_alert", host=redis)
        self.file_queue = NamedQueue("replay_file", host=redis)
        self.submission_queue = NamedQueue("replay_submission", host=redis)

        super().__init__(log, alert_fqs=alert_fqs, submission_fqs=submission_fqs, lookback_time=lookback_time)
Example #13
    def __init__(self, sender, log, config=None, redis=None):
        self.sender = sender
        self.log = log

        self.config = config or forge.get_config()
        self.datastore = forge.get_datastore(self.config)

        self.redis = redis or get_client(
            host=self.config.core.redis.nonpersistent.host,
            port=self.config.core.redis.nonpersistent.port,
            private=False,
        )
        self.redis_persist = get_client(
            host=self.config.core.redis.persistent.host,
            port=self.config.core.redis.persistent.port,
            private=False,
        )
        self.status_queue = CommsQueue(STATUS_QUEUE, self.redis)
        self.dispatch_active_hash = Hash(DISPATCH_TASK_HASH,
                                         self.redis_persist)
        self.dispatcher_submission_queue = NamedQueue(SUBMISSION_QUEUE,
                                                      self.redis)
        self.ingest_scanning = Hash('m-scanning-table', self.redis_persist)
        self.ingest_unique_queue = PriorityQueue('m-unique',
                                                 self.redis_persist)
        self.ingest_queue = NamedQueue(INGEST_QUEUE_NAME, self.redis_persist)
        self.ingest_complete_queue = NamedQueue(COMPLETE_QUEUE_NAME,
                                                self.redis)
        self.alert_queue = NamedQueue(ALERT_QUEUE_NAME, self.redis_persist)

        constants = forge.get_constants(self.config)
        self.c_rng = constants.PRIORITY_RANGES['critical']
        self.h_rng = constants.PRIORITY_RANGES['high']
        self.m_rng = constants.PRIORITY_RANGES['medium']
        self.l_rng = constants.PRIORITY_RANGES['low']
        self.c_s_at = self.config.core.ingester.sampling_at['critical']
        self.h_s_at = self.config.core.ingester.sampling_at['high']
        self.m_s_at = self.config.core.ingester.sampling_at['medium']
        self.l_s_at = self.config.core.ingester.sampling_at['low']

        self.to_expire = {k: 0 for k in metrics.EXPIRY_METRICS}
        if self.config.core.expiry.batch_delete:
            self.delete_query = f"expiry_ts:[* TO {self.datastore.ds.now}-{self.config.core.expiry.delay}" \
                f"{self.datastore.ds.hour}/DAY]"
        else:
            self.delete_query = f"expiry_ts:[* TO {self.datastore.ds.now}-{self.config.core.expiry.delay}" \
                f"{self.datastore.ds.hour}]"

        self.scheduler = BackgroundScheduler(daemon=True)
        self.scheduler.add_job(
            self._reload_expiry_queues,
            'interval',
            seconds=self.config.core.metrics.export_interval * 4)
        self.scheduler.start()
Example #14
    def __init__(self, redis_persist=None):
        config = forge.get_config()

        self.redis = redis_persist or get_client(
            host=config.core.redis.persistent.host,
            port=config.core.redis.persistent.port,
            private=False,
        )
        self.hash = ExpiringHash(name=WATCHER_HASH,
                                 ttl=MAX_TIMEOUT,
                                 host=self.redis)
        self.queue = UniquePriorityQueue(WATCHER_QUEUE, self.redis)
Example #15
    def __init__(self, component: str, config=None, datastore=None):
        if not component:
            raise ValueError("Cannot instantiate a cachestore without providing a component name.")

        if not COMPONENT_VALIDATOR.match(component):
            raise ValueError("Invalid component name. (Only letters, numbers, underscores and dots allowed)")

        if config is None:
            config = forge.get_config()

        self.component = component
        self.datastore = datastore or forge.get_datastore(config=config)
        self.filestore = FileStore(*config.filestore.cache)
Example #16
def export_metrics_once(name,
                        schema,
                        metrics,
                        host=None,
                        counter_type=None,
                        config=None,
                        redis=None):
    """Manually publish metric counts to the metrics system.

    This was built for when the service server is reporting metrics for execution and caching
    on behalf of many services. At the moment the metrics system uses the hosts to count the number
    of instances of each service. This could be done with a single auto exporting counter for
    the service server, but that may require significant downstream changes in the metrics system.
    """
    config = config or forge.get_config()
    redis = redis or get_client(config.core.metrics.redis.host,
                                config.core.metrics.redis.port, False)

    # Separate out the timers and normal counters
    timer_schema = set()
    counter_schema = set()

    for _k, field_type in schema.fields().items():
        if isinstance(field_type, PerformanceTimer):
            timer_schema.add(_k)
        else:
            counter_schema.add(_k)

    for _k in timer_schema:
        counter_schema.discard(_k + '_count')

    channel = forge.get_metrics_sink(redis)

    counts = Counters({key: 0 for key in counter_schema})
    counts.update({key + '.t': 0 for key in timer_schema})
    counts.update({key + '.c': 0 for key in timer_schema})

    for metric, value in metrics.items():
        if metric in counter_schema:
            counts[metric] += value
        elif metric in timer_schema:
            counts[metric + ".c"] += 1
            counts[metric + ".t"] += value
        else:
            raise ValueError(f"{metric} is not an accepted counter")

    counts['type'] = counter_type or name
    counts['name'] = name
    counts['host'] = host

    channel.publish(dict(counts.items()))
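A hedged usage sketch of the function above; ServiceMetrics stands in for whichever ODM model defines the schema, and the field names in the metrics dict are illustrative rather than real fields:

from assemblyline.common import forge

config = forge.get_config()
export_metrics_once(
    name='service-server',                        # reported metric source
    schema=ServiceMetrics,                        # hypothetical ODM model with PerformanceTimer fields
    metrics={'cache_hit': 3, 'execution': 1.27},  # counters accumulate; timers also bump their '.c' count
    host='worker-01',                             # stands in for one service instance
    counter_type='service',
    config=config)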
Example #17
    def __init__(self, config=None):
        super().__init__('assemblyline.statistics_aggregator')
        self.config = config or forge.get_config()
        self.cache = forge.get_statistics_cache(config=self.config)
        self.datastore = forge.get_datastore(archive_access=True)
        self.scheduler = BackgroundScheduler(daemon=True)

        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                                service_name="metrics_aggregator")
        else:
            self.apm_client = None
Example #18
def get_client(host, port, private):
    # In case a structure is passed a client as host
    if isinstance(host, (redis.Redis, redis.StrictRedis)):
        return host

    if not host or not port:
        config = forge.get_config(static=True)

        host = host or config.core.redis.nonpersistent.host
        port = int(port or config.core.redis.nonpersistent.port)

    if private:
        return redis.StrictRedis(host=host, port=port)
    else:
        return redis.StrictRedis(connection_pool=get_pool(host, port))
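A short sketch of the three call patterns the branches above allow:

from assemblyline.common import forge

config = forge.get_config()

# Explicit host and port, shared connection pool (private=False)
shared = get_client(config.core.redis.nonpersistent.host,
                    config.core.redis.nonpersistent.port, False)

# Missing host/port fall back to the static config; private=True builds a dedicated client
dedicated = get_client(None, None, True)

# An existing client passed as `host` is returned unchanged
assert get_client(shared, None, False) is shared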
Example #19
    def __init__(self, force_ilm=False):
        self.config = forge.get_config()
        if force_ilm:
            self.config.datastore.ilm.enabled = True

        super().__init__('assemblyline.expiry',
                         shutdown_timeout=self.config.core.expiry.sleep_time +
                         5)
        self.datastore = forge.get_datastore(config=self.config,
                                             archive_access=True)
        self.hot_datastore = forge.get_datastore(config=self.config,
                                                 archive_access=False)
        self.filestore = forge.get_filestore(config=self.config)
        self.cachestore = FileStore(*self.config.filestore.cache)
        self.expirable_collections = []
        self.archiveable_collections = []
        self.counter = MetricsFactory('expiry', Metrics)
        self.counter_archive = MetricsFactory('archive', Metrics)

        if self.config.datastore.ilm.enabled:
            self.fs_hashmap = {
                'file': self.archive_filestore_delete,
                'cached_file': self.archive_cachestore_delete
            }
        else:
            self.fs_hashmap = {
                'file': self.filestore_delete,
                'cached_file': self.cachestore_delete
            }

        for name, definition in self.datastore.ds.get_models().items():
            if hasattr(definition, 'archive_ts'):
                self.archiveable_collections.append(
                    getattr(self.datastore, name))
            if hasattr(definition, 'expiry_ts'):
                self.expirable_collections.append(getattr(
                    self.datastore, name))

        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=self.config.core.metrics.apm_server.server_url,
                service_name="expiry")
        else:
            self.apm_client = None
Example #20
    def __init__(self, hosts, collection_class=ESCollection, archive_access=True):
        config = forge.get_config()
        if config.datastore.ilm.enabled:
            ilm_config = config.datastore.ilm.indexes.as_primitives()
        else:
            ilm_config = {}

        super(ESStore, self).__init__(hosts, collection_class, ilm_config=ilm_config)
        tracer = logging.getLogger('elasticsearch')
        tracer.setLevel(logging.CRITICAL)

        self.client = elasticsearch.Elasticsearch(hosts=hosts,
                                                  connection_class=elasticsearch.RequestsHttpConnection,
                                                  max_retries=0)
        self.archive_access = archive_access
        self.url_path = 'elastic'
Example #21
    def __init__(self):
        super().__init__('assemblyline.workflow')

        self.config = forge.get_config()
        self.datastore = forge.get_datastore(self.config)
        self.start_ts = f"{self.datastore.ds.now}/{self.datastore.ds.day}-1{self.datastore.ds.day}"

        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=self.config.core.metrics.apm_server.server_url,
                service_name="workflow")
        else:
            self.apm_client = None
Example #22
    def __init__(self,
                 component_name: str,
                 logger: logging.Logger = None,
                 shutdown_timeout: float = SHUTDOWN_SECONDS_LIMIT,
                 config=None):
        super().__init__(name=component_name)
        al_log.init_logging(component_name)
        self.config = config or forge.get_config()

        self.running = None
        self.log = logger or logging.getLogger(component_name)
        self._exception = None
        self._traceback = None
        self._shutdown_timeout = shutdown_timeout if shutdown_timeout is not None else SHUTDOWN_SECONDS_LIMIT
        self._old_sigint = None
        self._old_sigterm = None
        self._stopped = False
        self._last_heartbeat = 0
Example #23
    def __init__(self, datastore=None, filestore=None):
        super().__init__('assemblyline.randomservice')
        self.config = forge.get_config()
        self.datastore = datastore or forge.get_datastore()
        self.filestore = filestore or forge.get_filestore()
        self.client_id = get_random_id()
        self.service_state_hash = ExpiringHash(SERVICE_STATE_HASH, ttl=30 * 60)

        self.counters = {
            n: MetricsFactory('service', Metrics, name=n, config=self.config)
            for n in self.datastore.service_delta.keys()
        }
        self.queues = [
            forge.get_service_queue(name)
            for name in self.datastore.service_delta.keys()
        ]
        self.dispatch_client = DispatchClient(self.datastore)
        self.service_info = CachedObject(self.datastore.list_all_services,
                                         kwargs={'as_obj': False})
Example #24
    def __init__(self,
                 use_cache=True,
                 config=None,
                 datastore=None,
                 log=None) -> None:
        self.log = log or logging.getLogger('assemblyline.identify')
        self.config = None
        self.datastore = None
        self.use_cache = use_cache
        self.custom = re.compile(r"^custom: ", re.IGNORECASE)
        self.lock = threading.Lock()
        self.yara_default_externals = {'mime': '', 'magic': '', 'type': ''}

        # If cache is use, load the config and datastore objects to load potential items from cache
        if self.use_cache:
            self.log.info("Using cache with identify")
            self.config = config or get_config()
            self.datastore = datastore or get_datastore(config)

        # Load all data for the first time
        self._load_magic_file()
        self._load_yara_file()
        self._load_magic_patterns()
        self._load_trusted_mimes()

        # Register hot reloader
        if self.use_cache:
            self.reload_map = {
                'magic': self._load_magic_file,
                'mimes': self._load_trusted_mimes,
                'patterns': self._load_magic_patterns,
                'yara': self._load_yara_file
            }
            self.reload_watcher = EventWatcher()
            self.reload_watcher.register('system.identify',
                                         self._handle_reload_event)
            self.reload_watcher.start()
        else:
            self.reload_watcher = None
            self.reload_map = {}
Example #25
    def __init__(self, datastore, redis, redis_persist, logger, counter_name='dispatcher'):
        # Load the datastore collections that we are going to be using
        self.datastore: AssemblylineDatastore = datastore
        self.log: logging.Logger = logger
        self.submissions: Collection = datastore.submission
        self.results: Collection = datastore.result
        self.errors: Collection = datastore.error
        self.files: Collection = datastore.file

        # Create a config cache that will refresh config values periodically
        self.config: Config = forge.get_config()

        # Connect to all of our persistent redis structures
        self.redis = redis or get_client(
            host=self.config.core.redis.nonpersistent.host,
            port=self.config.core.redis.nonpersistent.port,
            private=False,
        )
        self.redis_persist = redis_persist or get_client(
            host=self.config.core.redis.persistent.host,
            port=self.config.core.redis.persistent.port,
            private=False,
        )

        # Build some utility classes
        self.scheduler = Scheduler(datastore, self.config, self.redis)
        self.classification_engine = forge.get_classification()
        self.timeout_watcher = WatcherClient(self.redis_persist)

        self.submission_queue = NamedQueue(SUBMISSION_QUEUE, self.redis)
        self.file_queue = NamedQueue(FILE_QUEUE, self.redis)
        self._nonper_other_queues = {}
        self.active_submissions = ExpiringHash(DISPATCH_TASK_HASH, host=self.redis_persist)
        self.running_tasks = ExpiringHash(DISPATCH_RUNNING_TASK_HASH, host=self.redis)

        # Publish counters to the metrics sink.
        self.counter = MetricsFactory(metrics_type='dispatcher', schema=Metrics, name=counter_name,
                                      redis=self.redis, config=self.config)
Example #26
    def __init__(self,
                 metrics_type,
                 schema,
                 name=None,
                 redis=None,
                 config=None,
                 export_zero=True):
        self.config = config or forge.get_config()
        self.redis = redis or get_client(self.config.core.metrics.redis.host,
                                         self.config.core.metrics.redis.port,
                                         False)

        # Separate out the timers and normal counters
        timer_schema = set()
        counter_schema = set()

        for _k, field_type in schema.fields().items():
            if isinstance(field_type, PerformanceTimer):
                timer_schema.add(_k)
            else:
                counter_schema.add(_k)

        for _k in timer_schema:
            counter_schema.discard(_k + '_count')

        self.type = metrics_type
        self.name = name or metrics_type

        # Initialize legacy metrics
        self.metrics_handler = AutoExportingCounters(
            self.name,
            redis=self.redis,
            config=self.config,
            counter_type=metrics_type,
            timer_names=timer_schema,
            counter_names=counter_schema,
            export_zero=export_zero)
        self.metrics_handler.start()
Example #27
    def __init__(self, hosts, collection_class=ESCollection, archive_access=True):
        config = forge.get_config()
        if config.datastore.ilm.enabled:
            ilm_config = config.datastore.ilm.indexes.as_primitives()
        else:
            ilm_config = {}

        self._hosts = hosts
        self._closed = False
        self._collections = {}
        self._models = {}
        self.ilm_config = ilm_config
        self.validate = True

        tracer = logging.getLogger('elasticsearch')
        tracer.setLevel(logging.CRITICAL)

        self.client = elasticsearch.Elasticsearch(hosts=hosts,
                                                  connection_class=elasticsearch.RequestsHttpConnection,
                                                  max_retries=0,
                                                  timeout=TRANSPORT_TIMEOUT)
        self.archive_access = archive_access
        self.url_path = 'elastic'
Example #28
def init_logging(name: str, config: Optional[Config] = None, log_level=None):
    logger = logging.getLogger('assemblyline')

    # Test if we've initialized the log handler already.
    if len(logger.handlers) != 0:
        return

    if name.startswith("assemblyline."):
        name = name[13:]

    if config is None:
        config = forge.get_config()

    if log_level is None:
        log_level = log_level_map[config.logging.log_level]

    logging.root.setLevel(logging.CRITICAL)
    logger.setLevel(log_level)

    if config.logging.log_level == "DISABLED":
        # While log_level is set to disable, we will not create any handlers
        return

    if config.logging.log_to_file:
        if not os.path.isdir(config.logging.log_directory):
            print(
                'Warning: log directory does not exist. Will try to create %s'
                % config.logging.log_directory)
            os.makedirs(config.logging.log_directory)

        if log_level <= logging.DEBUG:
            dbg_file_handler = logging.handlers.RotatingFileHandler(
                os.path.join(config.logging.log_directory, f'{name}.dbg'),
                maxBytes=10485760,
                backupCount=5)
            dbg_file_handler.setLevel(logging.DEBUG)
            if config.logging.log_as_json:
                dbg_file_handler.setFormatter(JsonFormatter(AL_JSON_FORMAT))
            else:
                dbg_file_handler.setFormatter(logging.Formatter(AL_LOG_FORMAT))
            logger.addHandler(dbg_file_handler)

        if log_level <= logging.INFO:
            op_file_handler = logging.handlers.RotatingFileHandler(
                os.path.join(config.logging.log_directory, f'{name}.log'),
                maxBytes=10485760,
                backupCount=5)
            op_file_handler.setLevel(logging.INFO)
            if config.logging.log_as_json:
                op_file_handler.setFormatter(JsonFormatter(AL_JSON_FORMAT))
            else:
                op_file_handler.setFormatter(logging.Formatter(AL_LOG_FORMAT))
            logger.addHandler(op_file_handler)

        if log_level <= logging.ERROR:
            err_file_handler = logging.handlers.RotatingFileHandler(
                os.path.join(config.logging.log_directory, f'{name}.err'),
                maxBytes=10485760,
                backupCount=5)
            err_file_handler.setLevel(logging.ERROR)
            if config.logging.log_as_json:
                err_file_handler.setFormatter(JsonFormatter(AL_JSON_FORMAT))
            else:
                err_file_handler.setFormatter(logging.Formatter(AL_LOG_FORMAT))
            logger.addHandler(err_file_handler)

    if config.logging.log_to_console:
        console = logging.StreamHandler()
        if config.logging.log_as_json:
            console.setFormatter(JsonFormatter(AL_JSON_FORMAT))
        else:
            console.setFormatter(logging.Formatter(AL_LOG_FORMAT))
        logger.addHandler(console)

    if config.logging.log_to_syslog and config.logging.syslog_host and config.logging.syslog_port:
        syslog_handler = logging.handlers.SysLogHandler(
            address=(config.logging.syslog_host, config.logging.syslog_port))
        syslog_handler.formatter = logging.Formatter(AL_SYSLOG_FORMAT)
        logger.addHandler(syslog_handler)
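A minimal sketch of bootstrapping a component with this helper; the component name is illustrative:

import logging

init_logging('assemblyline.my_component')             # hypothetical component name
log = logging.getLogger('assemblyline.my_component')  # propagates to the handlers attached above
log.info("Logging handlers configured from the Assemblyline config")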
Example #29
import hashlib
import json
import os
import pytest
import random
import tempfile

from conftest import get_api_data

from assemblyline.common import forge
from assemblyline.odm.random_data import create_users, wipe_users, create_submission, wipe_submissions
from assemblyline.odm.randomizer import get_random_phrase
from assemblyline.remote.datatypes.queues.named import NamedQueue
from assemblyline_core.dispatching.dispatcher import SubmissionTask

config = forge.get_config()
sq = NamedQueue('dispatch-submission-queue', host=config.core.redis.persistent.host,
                port=config.core.redis.persistent.port)
submission = None


@pytest.fixture(scope="module")
def datastore(datastore_connection, filestore):
    global submission
    try:
        create_users(datastore_connection)
        submission = create_submission(datastore_connection, filestore)
        yield datastore_connection
    finally:
        wipe_users(datastore_connection)
        wipe_submissions(datastore_connection, filestore)
Example #30
from assemblyline.common import forge
from assemblyline.odm.models.alert import Alert
from assemblyline.odm.models.cached_file import CachedFile
from assemblyline.odm.models.emptyresult import EmptyResult
from assemblyline.odm.models.error import Error
from assemblyline.odm.models.file import File
from assemblyline.odm.models.filescore import FileScore
from assemblyline.odm.models.heuristic import Heuristic
from assemblyline.odm.models.result import Result
from assemblyline.odm.models.service import Service
from assemblyline.odm.models.service_delta import ServiceDelta
from assemblyline.odm.models.signature import Signature
from assemblyline.odm.models.submission import Submission
from assemblyline.odm.models.submission_summary import SubmissionSummary
from assemblyline.odm.models.submission_tree import SubmissionTree
from assemblyline.odm.models.user import User
from assemblyline.odm.models.user_favorites import UserFavorites
from assemblyline.odm.models.user_settings import UserSettings
from assemblyline.odm.models.vm import VM
from assemblyline.odm.models.workflow import Workflow
from assemblyline.remote.datatypes.lock import Lock

days_until_archive = forge.get_config().datastore.ilm.days_until_archive


class AssemblylineDatastore(object):
    def __init__(self, datastore_object):

        self.ds = datastore_object
        self.ds.register('alert', Alert)
        self.ds.register('cached_file', CachedFile)
        self.ds.register('emptyresult', EmptyResult)
        self.ds.register('error', Error)
        self.ds.register('file', File)
        self.ds.register('filescore', FileScore)
        self.ds.register('heuristic', Heuristic)
        self.ds.register('result', Result)
        self.ds.register('service', Service)