Example 1
def main():
    log.init_logging('run_service')

    if len(sys.argv) != 2:
        usage()
        exit(1)

    name = sys.argv[1]

    try:
        if '.' in name:
            svc_class = class_by_name(name)
        else:
            svc_class = service_by_name(name)
    except:
        print 'Could not load service "%s".\n%s' % (name,
                                                    get_valid_service_list())
        raise

    logger.info('Running service in stand-alone mode. CTRL-C to exit.')
    # noinspection PyBroadException
    try:
        cfg = forge.get_datastore().get_service(svc_class.SERVICE_NAME).get(
            "config", {})
    except:  # pylint: disable=W0702
        cfg = {}
    service_driver = ServiceDriver(svc_class, cfg, 86400, NUM_WORKERS)
    service_driver.start()

    try:
        while True:
            send_minimal_heartbeat(svc_class.SERVICE_NAME, NUM_WORKERS)
            time.sleep(config.system.update_interval)
    except KeyboardInterrupt:
        print 'Exiting.'
    finally:
        service_driver.stop_hard()
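
Every example on this page follows the same two-step pattern: call init_logging() once with a component name at process start, then fetch a child logger under the assemblyline namespace. A minimal self-contained sketch of that pattern (the handler setup below is illustrative; the real init_logging derives its handlers, and presumably its file and syslog targets, from the forge config):

import logging


def init_logging(name):
    # Illustrative stand-in for assemblyline.al.common.log.init_logging:
    # attach handlers once to the shared 'assemblyline' logger so that every
    # 'assemblyline.*' child logger created afterwards inherits them.
    root = logging.getLogger('assemblyline')
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s'))
    root.addHandler(handler)
    root.setLevel(logging.INFO)


if __name__ == '__main__':
    init_logging('demo')
    logger = logging.getLogger('assemblyline.demo')
    logger.info('Running service in stand-alone mode.')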
Example 2
def main(shard):
    log.init_logging('dispatcher')

    ds = forge.get_datastore()

    service_proxies = ServiceProxyManager(ds.list_service_keys())
    dispatcher = Dispatcher(service_proxies, shard=shard, debug=False)
    dispatcher.start()
Example 3
    def __init__(self, externals=None, logger=None):
        if not logger:
            from assemblyline.al.common import log as al_log
            al_log.init_logging('YaraValidator')
            logger = logging.getLogger('assemblyline.yara_validator')
            logger.setLevel(logging.WARNING)
        if not externals:
            externals = {'dummy': ''}
        self.log = logger
        self.externals = externals
        self.rulestart = re.compile(r'^(?:global )?(?:private )?rule ', re.MULTILINE)
        self.rulename = re.compile('rule ([^{^:]+)')
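
For illustration, here is how those two precompiled patterns behave on a small rule; the sample rule and this demo are additions for this page, not part of YaraValidator:

import re

rulestart = re.compile(r'^(?:global )?(?:private )?rule ', re.MULTILINE)
rulename = re.compile('rule ([^{^:]+)')

sample = 'global private rule ExampleRule : banker {\n    condition:\n        true\n}'

print(rulestart.search(sample) is not None)      # True: the text opens a rule
print(rulename.search(sample).group(1).strip())  # ExampleRule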
Example 4
    def __init__(self, logger=None):
        if not logger:
            from assemblyline.al.common import log as al_log
            al_log.init_logging('yara_importer')
            logger = logging.getLogger('assemblyline.yara_importer')
            logger.setLevel(logging.INFO)

        yara_parser_class = forge.get_yara_parser()
        self.ds = forge.get_datastore()
        self.yp = yara_parser_class()
        self.log = logger
        self._id_cache = {}
        self._name_cache = []
Example 5
def main():
    log.init_logging('test')
    logger = logging.getLogger('assemblyline.test')

    store = forge.get_datastore()

    sids = []
    for x in store.stream_search(
            'submission',
            'times.completed:[2015-01-30T00:00:00.000Z TO 2015-01-30T00:59:59.999Z]'
    ):
        sid = x['submission.sid']
        sids.append(sid)

    count = 0
    submissions = store.get_submissions(sids)
    for submission in submissions:
        if submission.get('state', '') != 'completed':
            continue
        if len(submission['files']) != 1:
            continue
        _, srl = submission['files'][0]
        fileinfo = store.get_file(srl)
        if not fileinfo:
            continue
        submission = submission.get('submission', {})
        if not submission:
            continue
        metadata = submission.get('metadata', {})
        if not metadata:
            continue
        metadata['ignore_submission_cache'] = True
        metadata['ignore_cache'] = False
        metadata['md5'] = fileinfo['md5']
        metadata['sha1'] = fileinfo['sha1']
        metadata['sha256'] = fileinfo['sha256']
        metadata['size'] = fileinfo['size']
        resubmit(metadata)
        count += 1
        if count >= 1000:
            break

    logger.info('Resubmitted %d submissions for testing', count)
Example 6
def main():
    log.init_logging('hostagent')
    agent = HostAgent()
    if len(sys.argv) > 1:
        if len(sys.argv) == 2:
            if sys.argv[1] in ('--sysprep', '--updateandexit'):
                result = agent.sysprep()
                print 'SysPrep: %s' % str(result)
                exit(0)
            elif sys.argv[1] == '--register':
                result = agent.register_host()
                print "Registration Result: %s" % str(result)
                exit(0)
            else:
                usage()
                exit(1)
        else:
            usage()
            exit(1)

    agent.serve_forever()
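
The nested len(sys.argv) checks above are easy to misread. An equivalent sketch using argparse follows; this is a rewrite for illustration, not AssemblyLine's code, and HostAgent is assumed importable as in the original main():

import argparse

from assemblyline.al.common import log


def parse_args():
    parser = argparse.ArgumentParser(description='AssemblyLine host agent')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--sysprep', '--updateandexit', action='store_true',
                       dest='sysprep', help='prepare the host, then exit')
    group.add_argument('--register', action='store_true',
                       help='register this host, then exit')
    return parser.parse_args()


def main():
    log.init_logging('hostagent')
    args = parse_args()
    agent = HostAgent()  # assumed available from the surrounding module
    if args.sysprep:
        print('SysPrep: %s' % agent.sysprep())
    elif args.register:
        print('Registration Result: %s' % agent.register_host())
    else:
        agent.serve_forever()

Unknown flags are handled for free: argparse prints a usage message and exits non-zero, matching the original usage()/exit(1) branches.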
Example 7
    def subprocess_entry_point(self):
        """ The Worker's initial entry point after being spawned."""
        # Ignore signal.SIGINT. Let parent handle it and let us know when to stop via self.should_run.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        try:
            if platform.system() == 'Windows':
                # Logging init doesn't survive the 'fork' on Windows; redo it.
                log.init_logging('hostagent')

            # Name our process after the service type. Makes 'ps' listings easier to read.
            try_setproctitle(self.service_cls.SERVICE_NAME)
            self.log = logging.getLogger('assemblyline.svc.worker.%s' %
                                         self.service_cls.SERVICE_NAME.lower())

            msgs = forge.apply_overrides(self.config_overrides)
            if msgs:
                self.log.info("Using %s.", " and ".join(msgs))

            self.ingest_queue = forge.get_service_queue(
                self.service_cls.SERVICE_NAME)  # remote job queue.
            self._stats_sink = forge.get_metrics_sink()

            self.log.info("Instantiating supervisor thread")

            self._current_work_items_lock = threading.Lock()
            self._supervisor_thread = threading.Thread(
                name='service-drainer', target=self._supervisor_thread_main)
            self._supervisor_thread.start()

            self.log.info("Supervisor thread instantiated")

            self._run_service_until_shutdown()

            self.log.info('_run_service has exited. we must be stopping')
        except KeyboardInterrupt:
            self.should_run = False
            # This should happen if the signal.signal call above is working as expected.
            return EXIT_INTERRUPTED
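
The signal.SIG_IGN line above is the classic parent-owns-Ctrl-C idiom: the worker ignores SIGINT and relies on the parent to tell it when to stop. A standalone sketch of the idiom (all names here are illustrative, not AssemblyLine's):

import multiprocessing
import signal
import time


def worker(should_run):
    # The child ignores SIGINT; only the parent reacts to Ctrl-C.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while should_run.is_set():
        time.sleep(0.1)  # stand-in for real service work


if __name__ == '__main__':
    should_run = multiprocessing.Event()
    should_run.set()
    child = multiprocessing.Process(target=worker, args=(should_run,))
    child.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:  # Ctrl-C is delivered to the parent,
        should_run.clear()     # which then tells the child to stop
    child.join()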
Example 8
def main(shard):
    log.init_logging('dispatcher')
    logger = logging.getLogger('assemblyline.dispatch')

    r = redis.StrictRedis(config.core.redis.nonpersistent.host,
                          config.core.redis.nonpersistent.port,
                          config.core.redis.nonpersistent.db)

    r.delete('ingest-queue-' + shard)

    store = forge.get_datastore()
    store.commit_index('submission')

    query = 'state:submitted AND times.submitted:[NOW-1DAY TO *]'
    sids = []
    for x in store.stream_search('submission', query):
        sid = x['submission.sid']
        if str(forge.determine_dispatcher(sid)) == shard:
            sids.append(sid)

    count = 0
    submissions = store.get_submissions(sids)
    for submission in submissions:
        if submission.get('state', '') != 'submitted':
            sid = submission.get('sid', '')
            if sid:
                store.save_submission(sid, submission)
            continue
        submission['request'] = {}
        for path, srl in submission['files']:
            submission['fileinfo'] = store.get_file(srl)
            submission['request']['path'] = path
            submission['request']['srl'] = srl
            resubmit(submission)
        count += 1

    logger.info('Resubmitted %d submissions to dispatcher %s.', count, shard)
Example 9
class ControllerClient(AgentClient):

    def __init__(self, sender=None, async=False):
        super(ControllerClient, self).__init__(async, sender)

    def heartbeat(self, mac):
        return self._send_agent_rpc('Controller.' + mac, ControllerRequest.HEARTBEAT)

    # noinspection PyUnusedLocal
    def stop(self, mac):
        return self._send_agent_rpc('Controller.' + mac, ControllerRequest.STOP)

    # noinspection PyUnusedLocal
    def start(self, mac):
        return self._send_agent_rpc('Controller.' + mac, ControllerRequest.START)

    # noinspection PyUnusedLocal
    def restart(self, mac):
        return self._send_agent_rpc('Controller.' + mac, ControllerRequest.RESTART)

    # noinspection PyUnusedLocal
    def status(self, mac):
        return self._send_agent_rpc('Controller.' + mac, ControllerRequest.STATUS)

if __name__ == '__main__':
    al_log.init_logging('controller')

    hc = HostController()
    hc.serve_forever()
    hc.log.info("Controller Terminated")
Example 10
#!/usr/bin/env python

import logging

from assemblyline.common.isotime import now_as_local
from assemblyline.al.common import forge, log as al_log

config = forge.get_config()
Classification = forge.get_classification()

al_log.init_logging('signature_statistics')
log = logging.getLogger('assemblyline.signature_statistics')

log.info("Generating signature statistics")
store = forge.get_datastore()

output = {"timestamp": None, "stats": None}
stats = {}

sig_list = [(x['meta.id'], x['meta.rule_version'], x['name'],
             x.get('meta.classification', Classification.UNRESTRICTED))
            for x in store.stream_search(
                "signature",
                "name:*",
                fl="name,meta.id,meta.rule_version,meta.classification")]

for sid, rev, name, classification in sig_list:
    key = "%sr.%s" % (sid, rev)
    res = store.stats_search("result",
                             query='result.tags.value:"%s"' % name,
                             stats_fields=["result.score"])["result.score"]
Example 11
#!/usr/bin/env python

import logging

from assemblyline.al.common import forge
from assemblyline.al.common import log as al_log
from assemblyline.al.common import queue
config = forge.get_config()

DATABASE_NUM = 4

al_log.init_logging('expiry_worker')
log = logging.getLogger('assemblyline.expiry_worker')


# noinspection PyBroadException
def main():
    ds = forge.get_datastore()
    fs = forge.get_filestore()
    submission_queue = queue.NamedQueue('d-submission', db=DATABASE_NUM)
    result_queue = queue.NamedQueue('d-result', db=DATABASE_NUM)
    file_queue = queue.NamedQueue('d-file', db=DATABASE_NUM)
    error_queue = queue.NamedQueue('d-error', db=DATABASE_NUM)
    dynamic_queue = queue.NamedQueue('d-dynamic', db=DATABASE_NUM)
    alert_queue = queue.NamedQueue('d-alert', db=DATABASE_NUM)
    filescore_queue = queue.NamedQueue('d-filescore', db=DATABASE_NUM)
    emptyresult_queue = queue.NamedQueue('d-emptyresult', db=DATABASE_NUM)

    log.info("Ready!")
    queues = [
        submission_queue, result_queue, file_queue, error_queue, dynamic_queue,
Example 12
#!/usr/bin/env python

import json
import logging
import redis
import time

from assemblyline.common import isotime
from assemblyline.al.common import forge, log as al_log

al_log.init_logging("quota_sniper")
logger = logging.getLogger('assemblyline.quota_sniper')
logger.setLevel(logging.INFO)

config = forge.get_config()

client = redis.StrictRedis(host=config.core.redis.nonpersistent.host,
                           port=config.core.redis.nonpersistent.port,
                           db=config.core.redis.nonpersistent.db)

persist = redis.StrictRedis(host=config.core.redis.persistent.host,
                            port=config.core.redis.persistent.port,
                            db=config.core.redis.persistent.db)

time_diff = 60 * 5  # Anything older than 5 minutes...
quota_time_diff = 60 * 60  # Anything older than 1 hour...

while True:
    # API Quota tracking
    data = client.hgetall('c-tracker-quota')
    if data:
Example 13
import logging
import os
import time

from threading import Thread

from assemblyline.common.isotime import now_as_iso, now, iso_to_epoch
from assemblyline.al.common import forge, log as al_log, queue
from assemblyline.al.core.datastore import SearchException

DATABASE_NUM = 4
QUERY = "__expiry_ts__:[* TO NOW-12HOUR]"  # Delay expiry by 12 hours so we expire peak data during offpeak hours
SLEEP_TIME = 5
MAX_QUEUE_LENGTH = 100000

config = forge.get_config()
al_log.init_logging('expiry')
log = logging.getLogger('assemblyline.expiry')


def load_expired(datastore, name, delete_queue):
    # Keep track of the last 60 minutes we've processed and don't reprocess them until they fall out of the
    # ignored window, giving the index time to be committed to memory.
    max_rows = 500
    max_depth = 5000
    ignored_window = []
    max_window_size = 60

    hosts = datastore.hosts
    host_id = 0

    log.debug("Expiry will cycle through the following hosts: %s" %
Example 14
#!/usr/bin/env python
import time
import logging

from assemblyline.al.common import forge, log as al_log, queue
from assemblyline.al.core.datastore import SearchException
from assemblyline.common.charset import safe_str

ds = forge.get_datastore()

DATABASE_NUM = 4
config = forge.get_config()
al_log.init_logging('workflow_filter')
log = logging.getLogger('assemblyline.workflow_filter')
action_queue = queue.PriorityQueue('alert-actions', db=DATABASE_NUM)
QUEUE_PRIORITY = -1


def get_last_reporting_ts(p_start_ts):
    log.info("Finding reporting timestamp for the last alert since "
             "{start_ts}...".format(start_ts=p_start_ts))
    args = [('sort', 'reporting_ts desc'), ('rows', '1'),
            ('fl', 'reporting_ts')]
    result = ds.direct_search(
        "alert",
        "reporting_ts:[{start_ts} TO *]".format(start_ts=p_start_ts),
        args=args)
    docs = result.get('response', {}).get('docs', [{}]) or [{}]
    ret_ts = docs[0].get("reporting_ts", p_start_ts)
    return ret_ts
Example 15
#!/usr/bin/env python

import os
import subprocess
import logging

from assemblyline.common.net import get_hostname, get_hostip
from assemblyline.al.common import log as al_log, queue
from assemblyline.al.common import message

al_log.init_logging('harddrive_monitor')
log = logging.getLogger('assemblyline.harddrive_monitor')


def is_drive_ok(smart_output):
    for line in smart_output.splitlines():
        if "SMART Health Status" in line:
            status = line.split("SMART Health Status:")[1].strip()
            return status == "OK"
        elif "SMART overall-health self-assessment test result" in line:
            status = line.split(
                "SMART overall-health self-assessment test result:")[1].strip()
            return status == "PASSED"
    return False
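
A quick sanity check of is_drive_ok against representative smartctl -H output; the sample lines below are illustrative, modeled on typical SCSI and ATA health reports, and assume the function defined above:

healthy = 'SMART Health Status: OK'
failing = 'SMART overall-health self-assessment test result: FAILED!'

print(is_drive_ok(healthy))   # True
print(is_drive_ok(failing))   # False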
Example 16
#!/usr/bin/env python
"""
Alerter

Alerter is responsible for monitoring the alert queue and creating alerts.
"""
import logging
import signal

from assemblyline.common import net
from assemblyline.common.isotime import now
from assemblyline.al.common import forge
from assemblyline.al.common import log

config = forge.get_config()
log.init_logging("alerter")

from assemblyline.al.common import counter
from assemblyline.al.common import queue

persistent_settings = {
    'db': config.core.redis.persistent.db,
    'host': config.core.redis.persistent.host,
    'port': config.core.redis.persistent.port,
}

alertq_name = 'm-alert'
commandq_name = 'a-command'
create_alert = forge.get_create_alert()
datastore = forge.get_datastore()
exit_msgs = ['server closed the connection unexpectedly']
Example 17
#!/usr/bin/env python

import logging
import os

from assemblyline.common.isotime import now_as_iso, epoch_to_iso, iso_to_epoch
from assemblyline.al.common import forge
from assemblyline.al.common import log
from assemblyline.al.common import queue

config = forge.get_config()
datastore = forge.get_datastore()

log.init_logging('idx_to_journal')

directory = config.core.expiry.journal.directory
emptyresult_queue = queue.NamedQueue(
    "ds-emptyresult",
    db=config.core.redis.persistent.db,
    host=config.core.redis.persistent.host,
    port=config.core.redis.persistent.port,
)
logger = logging.getLogger('assemblyline.idx_to_journal')
max_open_files = 1
path_and_filehandle = []
path_to_filehandle = {}
previous = []


def get_filehandle(path):
    fh = path_to_filehandle.get(path, None)
Example 18
#!/usr/bin/env python
import logging
import time

from threading import Thread

from assemblyline.al.common import forge, log as al_log, queue
from al_ui.helper.user import add_access_control

config = forge.get_config()
al_log.init_logging('alert_actions')
log = logging.getLogger('assemblyline.alert_actions')

DATABASE_NUM = 4
DEFAULT_QUEUE_PRIORITY = -2
EXTENDED_SCAN_QUEUE_PRIORITY = 0
TASKER_COUNT = config.core.alert_actions.tasker_count
WORKER_COUNT = config.core.alert_actions.worker_count


def determine_worker_id(event_id):
    return int(event_id, 16) % WORKER_COUNT


class AlertAction(object):
    def __init__(self):
        self.worker_threads = {
            x: Thread(target=run_worker_thread,
                      args=(x, ),
                      name="worker-%s" % x)
            for x in range(WORKER_COUNT)
Example 19
            my_logger.info(output_metrics)
            try:
                self.es.create(
                    "al_metrics-%s" % timestamp[:10].replace("-", "."),
                    component_type, output_metrics)
            except Exception as e:
                my_logger.exception(e)

        my_logger.info("Metrics aggregated... Waiting for next run.")


if __name__ == '__main__':
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    config = forge.get_config()
    al_log.init_logging('metricsd')
    log = logging.getLogger('assemblyline.metricsd')

    logserver = config.get('logging', {}).get('logserver', {})
    elastic_ip = logserver.get('node', None)
    elastic_port = logserver.get('elastic', {}).get('port', 9200)

    if not elastic_ip or not elastic_port:
        log.error(
            "Elasticsearch cluster not configured in the seed. There is no need to gather stats on this box."
        )
        sys.exit(1)

    mserver = MetricsServer('SsMetrics', log, elastic_ip, elastic_port)
Example 20
import logging

from assemblyline.al.common import forge, log as al_log, queue
config = forge.get_config()

# Run config
DATABASE_NUM = 3
RETRY_PRINT_THRESHOLD = 1000
PROCESSES_COUNT = 50
COUNT_INCREMENT = 1000
LOW_THRESHOLD = 10000
HIGH_THRESHOLD = 50000
DEBUG = False
DO_SYS_BUCKETS = True

# Logger
al_log.init_logging('reindex')
log = logging.getLogger('assemblyline.reindex')

# Globals
ds = forge.get_datastore()
reindex_queue = queue.NamedQueue('r-index', db=DATABASE_NUM)
done_queue = queue.NamedQueue("r-done", db=DATABASE_NUM)
bucket_error = []

bucket_map = {
    "node": ds.nodes,
    "profile": ds.profiles,
    "signature": ds.signatures,
    "user": ds.users,
    "alert": ds.alerts,
    "file": ds.files,
Example 21
    def __exit__(self, unused1, unused2, unused3):
        with self.SCAN_LOCK_LOCK:
            l = self.SCAN_LOCK[self.scan_key]
            l[0] -= 1
            if l[0] == 0:
                del self.SCAN_LOCK[self.scan_key]
        l[1].release()


Timeout = namedtuple('Timeout', ['time', 'scan_key'])

Classification = forge.get_classification()
config = forge.get_config()
constants = forge.get_constants()

log.init_logging("middleman")
logger = logging.getLogger('assemblyline.middleman')

persistent = {
    'db': config.core.redis.persistent.db,
    'host': config.core.redis.persistent.host,
    'port': config.core.redis.persistent.port,
}

shards = 1
try:
    shards = int(config.core.middleman.shards)
except AttributeError:
    logger.warning("No shards setting. Defaulting to %d.", shards)

shard = '0'
Example 22
import elasticsearch
import json
import logging
import psutil
import os
import sys
import urllib2

from apscheduler.scheduler import Scheduler

from assemblyline.common.net import get_hostname, get_hostip
from assemblyline.al.common import forge, log as al_log
from assemblyline.common.isotime import now_as_local, now_as_iso

config = forge.get_config(static_seed=os.getenv("AL_SEED_STATIC"))
al_log.init_logging('system_metrics')
log = logging.getLogger('assemblyline.system_metrics')

previous_net_io = None
previous_disk_io = None

SOLR_ADMIN_URL = '%s/admin/mbeans?stats=true&wt=json&cat=QUERYHANDLER&cat=CORE&key=/update&key=/select&key=searcher'
SOLR_CORE_URL = 'admin/cores?wt=json&indexInfo=false'
SOLR_URL_BUILDER = 'http://localhost:8093/internal_solr/%s'
RIAK_STATS_URL = 'http://localhost:8098/stats'


def calculate_system_metrics(es, cur_ip, cur_host):
    global previous_disk_io, previous_net_io
    log.info("Starting system metrics calculation...")
Example 23
#!/usr/bin/env python

from assemblyline.al.common import forge, queue
from assemblyline.al.common import log

import logging
import os

config = forge.get_config()
config.logging.log_to_console = False
config.logging.log_to_syslog = False
config.logging.log_to_file = True

pid = str(os.getpid())
log.init_logging('reindex_worker.%s' % pid)
logger = logging.getLogger('assemblyline.reindex_worker')

# Run config
DATABASE_NUM = 3

# Globals
ds = forge.get_datastore()
reindex_queue = queue.NamedQueue('r-index', db=DATABASE_NUM)
done_queue = queue.NamedQueue("r-done", db=DATABASE_NUM)


def do_reindex(bucket_name, key):
    try:
        data = ds._get_bucket_item(ds.get_bucket(bucket_name), key)
        data = ds.sanitize(bucket_name, data, key)
        ds._save_bucket_item(ds.get_bucket(bucket_name), key, data)
Example 24
#!/usr/bin/env python

import logging
import time

from assemblyline.al.common import forge
from assemblyline.al.common import log
from assemblyline.al.common.task import Task
from assemblyline.al.service.list_queue_sizes import get_service_queue_lengths

log.init_logging('plumber')
logger = logging.getLogger('assemblyline.plumber')

dispatch_queue = forge.get_dispatch_queue()
store = forge.get_datastore()
config = forge.get_config()
service_queue = {}
threshold = {}


def get_queue(n):
    q = service_queue.get(n, None)
    if not q:
        service_queue[n] = q = forge.get_service_queue(n)

    return q

for service in store.list_services():
    # noinspection PyBroadException
    try:
        name = service.get('name')
Example 25
import json
import logging

from flask import Flask, request, session
from flask_socketio import SocketIO, emit

from assemblyline.al.common import forge, log as al_log
from assemblyline.al.common.queue import CommsQueue, NamedQueue
from assemblyline.al.common.remote_datatypes import Hash

config = forge.get_config()
datastore = forge.get_datastore()
classification = forge.get_classification()

app = Flask(__name__)
app.config['SECRET_KEY'] = config.ui.secret_key
socketio = SocketIO(app)

al_log.init_logging("ui")
AUDIT = config.ui.audit
AUDIT_LOG = logging.getLogger('assemblyline.ui.audit')
LOGGER = logging.getLogger('assemblyline.ui.socketio')

KV_SESSION = Hash("flask_sessions",
                  host=config.core.redis.nonpersistent.host,
                  port=config.core.redis.nonpersistent.port,
                  db=config.core.redis.nonpersistent.db)


def get_user_info(request_p, session_p):
    uname = None
    current_session = KV_SESSION.get(session_p.get("session_id", None))
    if current_session:
        current_session = json.loads(current_session)
Example 26
#!/usr/bin/env python

import logging
import os
import signal

from assemblyline.al.common import forge
from assemblyline.al.common import log
from assemblyline.al.common import queue

config = forge.get_config()

log.init_logging('journalist')

directory = config.core.expiry.journal.directory
emptyresult_queue = queue.NamedQueue(
    "ds-emptyresult",
    db=config.core.redis.persistent.db,
    host=config.core.redis.persistent.host,
    port=config.core.redis.persistent.port,
)
logger = logging.getLogger('assemblyline.journalist')
max_open_files = 8
path_and_filehandle = []
path_to_filehandle = {}
previous = []
running = True


# noinspection PyUnusedLocal
def interrupt(unused1, unused2):  # pylint:disable=W0613
Example 27
#!/usr/bin/env python

import json
import logging
import signal
import sys

from assemblyline.al.common import forge
from assemblyline.al.common import log
from assemblyline.al.common import queue

config = forge.get_config()
log.init_logging('ingest-bridge')

PRODUCTION_DB = config.core.ingest_bridge.db
PRODUCTION_HOST = config.core.ingest_bridge.host
PRODUCTION_PORT = config.core.ingest_bridge.port

SEND_EVERY = int(config.core.ingest_bridge.send_every)

logger = logging.getLogger('assemblyline.ingest_bridge')
running = True


# noinspection PyUnusedLocal
def interrupt(unused1, unused2):  # pylint:disable=W0613
    global running  # pylint:disable=W0603
    logger.info("Caught signal. Coming down...")
    running = False

Example 28
#!/usr/bin/env python

import logging

from assemblyline.common.isotime import now_as_local
from assemblyline.al.common import forge, log as al_log
from assemblyline.al.common.heuristics import list_all_heuristics
config = forge.get_config()

al_log.init_logging('heuristic_statistics')
log = logging.getLogger('assemblyline.heuristic_statistics')

log.info("Generating heuristic statistics")
store = forge.get_datastore()

output = {"timestamp": None, "stats": None}
stats = {}

HEUR, _ = list_all_heuristics(store.list_services())

for heur in HEUR:
    heur_key = heur["id"]
    results = store.stats_search("result",
                                 query='result.tags.value:"%s"' % heur["id"],
                                 stats_fields=["result.score"])["result.score"]

    if results:
        heur_stat = [
            results["count"], results["min"],
            int(results["mean"]), results["max"]
        ]