Code Example #1
    def __init__(self, wid, worker_type, working_dir, instance_id):
        self.working_dir = working_dir
        self.worker_id = wid
        self.ds = forge.get_datastore()
        self.worker_type = worker_type
        self.instance_id = instance_id

        if worker_type == TYPE_BACKUP:
            self.hash_queue = remote_datatypes.Hash("r-hash_%s" %
                                                    self.instance_id,
                                                    db=DATABASE_NUM)
            self.follow_queue = queue.NamedQueue("r-follow_%s" %
                                                 self.instance_id,
                                                 db=DATABASE_NUM,
                                                 ttl=1800)
            self.queue = queue.NamedQueue("r-backup_%s" % self.instance_id,
                                          db=DATABASE_NUM,
                                          ttl=1800)
            self.done_queue = queue.NamedQueue("r-backup-done_%s" %
                                               self.instance_id,
                                               db=DATABASE_NUM,
                                               ttl=1800)
        else:
            self.hash_queue = None
            self.follow_queue = None
            self.queue = None
            self.done_queue = queue.NamedQueue("r-restore-done_%s" %
                                               self.instance_id,
                                               db=DATABASE_NUM,
                                               ttl=1800)
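
All of these examples revolve around assemblyline's queue.NamedQueue, a named, Redis-backed FIFO. As a minimal sketch of the round trip, assuming pop() is the counterpart of the push() and delete() calls that do appear in the excerpts on this page:

# Minimal NamedQueue round trip (sketch; the pop() call is an assumption,
# only push() and delete() are shown in the excerpts on this page).
q = queue.NamedQueue("example", db=DATABASE_NUM, ttl=1800)
q.push({"key": "value"})  # enqueue a JSON-serializable payload
item = q.pop()            # dequeue it on the consumer side
q.delete()                # drop the queue and anything still in it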
Code Example #2
File: expiry.py Project: wgwjifeng/cyberweapons
def main(bucket_list, journal_queues):
    ds = forge.get_datastore()
    queues = {
        x: queue.NamedQueue('d-%s' % x, db=DATABASE_NUM)
        for x in set(journal_queues).union(set(bucket_list))
    }

    Thread(target=track_status, name="queues_status", args=(queues, )).start()

    log.info("Ready!")
    loader_threads = {
        x: Thread(target=load_expired,
                  name="loader_%s" % x,
                  args=(ds, x, queues[x]))
        for x in bucket_list
    }

    loader_threads.update({
        'journal_%s' % x: Thread(target=load_journal,
                                 name="journal_loader_%s" % x,
                                 args=(x, queues[x]))
        for x in journal_queues
    })

    for thread in loader_threads.values():
        thread.start()

    for thread in loader_threads.values():
        thread.join()
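
load_expired and load_journal themselves are not part of this excerpt. From the wiring above, a loader receives the datastore, a bucket name, and that bucket's 'd-<bucket>' queue, and feeds keys to deletion workers like the one in Code Example #7. A hypothetical sketch, with the expiry query itself an assumption:

# Hypothetical loader body; only its signature is implied by the code above.
def load_expired_sketch(ds, bucket, q):
    # stream_search appears in Code Example #8; this Solr-style filter on
    # __expiry_ts__ (the field used in Code Example #7) is an assumption.
    for key in ds.stream_search(bucket, "__expiry_ts__:[* TO NOW]"):
        q.push(key)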
Code Example #3
    def __init__(self, working_dir, worker_count=50, spawn_workers=True):
        self.working_dir = working_dir
        self.ds = forge.get_datastore()
        self.plist = []
        self.instance_id = str(uuid.uuid4())
        self.follow_queue = queue.NamedQueue("r-follow_%s" % self.instance_id,
                                             db=DATABASE_NUM,
                                             ttl=1800)
        self.hash_queue = remote_datatypes.Hash("r-hash_%s" % self.instance_id,
                                                db=DATABASE_NUM)
        self.backup_queue = queue.NamedQueue('r-backup_%s' % self.instance_id,
                                             db=DATABASE_NUM,
                                             ttl=1800)
        self.backup_done_queue = queue.NamedQueue("r-backup-done_%s" %
                                                  self.instance_id,
                                                  db=DATABASE_NUM,
                                                  ttl=1800)
        self.restore_done_queue = queue.NamedQueue("r-restore-done_%s" %
                                                   self.instance_id,
                                                   db=DATABASE_NUM,
                                                   ttl=1800)
        self.bucket_error = []

        self.BUCKET_MAP = {
            "alert": self.ds.alerts,
            "blob": self.ds.blobs,
            "emptyresult": self.ds.emptyresults,
            "error": self.ds.errors,
            "file": self.ds.files,
            "filescore": self.ds.filescores,
            "node": self.ds.nodes,
            "profile": self.ds.profiles,
            "result": self.ds.results,
            "signature": self.ds.signatures,
            "submission": self.ds.submissions,
            "user": self.ds.users,
        }
        self.VALID_BUCKETS = sorted(self.BUCKET_MAP.keys())
        self.worker_count = worker_count
        self.spawn_workers = spawn_workers
        self.current_type = None
Code Example #4
def send_notification(notice, failure=None, logfunc=logger.info):
    if failure:
        notice.set('failure', failure)

    failure = notice.get('failure', None)
    if failure:
        logfunc("%s: %s", failure, str(notice.raw))

    queue_name = notice.get('notification_queue', False)
    if not queue_name:
        return

    score = notice.get('al_score', 0)
    threshold = notice.get('notification_threshold', None)
    if threshold and score < int(threshold):
        return

    q = notificationq.get(queue_name, None)
    if not q:
        notificationq[queue_name] = q = \
            queue.NamedQueue(queue_name, **persistent)
    q.push(notice.raw)
Code Example #5
File: vm.py Project: wgwjifeng/cyberweapons
    def pre_registration(self, name, mac, service, num_workers):
        self.log.info('preregistering VM: %s mac:%s service:%s host:%s.' % (name, mac, service, self.host_mac))
        reg = DEFAULT_REGISTRATION.copy()
        reg['hostname'] = name
        reg['mac_address'] = mac
        reg['is_vm'] = True
        reg['vm_host'] = self.host_ip
        reg['vm_host_mac'] = self.host_mac
        reg['profile'] = service
        reg['profile_definition'] = {
            'services': {
                service: {
                    'workers': num_workers,
                    'service_overrides': {}
                }
            }
        }
        reg['roles'] = ["hostagent"]
        if config.workers.virtualmachines.use_parent_as_datastore or config.workers.virtualmachines.use_parent_as_queue:
            reg['config_overrides'] = {'parent_ip': self.host_ip}

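        # Reset the per-VM queue so the guest sees only this newest
        # registration; the consuming side of 'vm-<mac>' is not shown here.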
        vm_queue = queue.NamedQueue('vm-%s' % mac, db=DATABASE_NUM)
        vm_queue.delete()
        vm_queue.push(reg)
Code Example #6
shards = 1
try:
    shards = int(config.core.middleman.shards)
except AttributeError:
    logger.warning("No shards setting. Defaulting to %d.", shards)

shard = '0'
opts, _ = getopt.getopt(sys.argv[1:], 's:', ['shard='])
for opt, arg in opts:
    if opt in ('-s', '--shard'):
        shard = arg

# Globals
alertq = queue.NamedQueue('m-alert', **persistent)  # df line queue
cache = {}
cache_lock = RLock()
chunk_size = 1000
completeq_name = 'm-complete-' + shard
date_fmt = '%Y-%m-%dT%H:%M:%SZ'
default_prefix = config.core.middleman.default_prefix
dup_prefix = 'w-' + shard + '-'
dupq = queue.MultiQueue(**persistent)  # df line queue
expire_after_seconds = config.core.middleman.expire_after
get_whitelist_verdict = forge.get_get_whitelist_verdict()
hostinfo = {
    'ip': get_hostip(),
    'mac_address': get_mac_address(),
    'host': get_hostname(),
}
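
completeq_name and dup_prefix are defined but unused within this excerpt; presumably the rest of the file builds the corresponding per-shard queues the same way alertq is built above. A hedged sketch of that continuation:

# Hypothetical continuation: one completion queue per middleman shard,
# reusing the same persistent Redis settings as alertq.
completeq = queue.NamedQueue(completeq_name, **persistent)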
Code Example #7
def main():
    ds = forge.get_datastore()
    fs = forge.get_filestore()
    submission_queue = queue.NamedQueue('d-submission', db=DATABASE_NUM)
    result_queue = queue.NamedQueue('d-result', db=DATABASE_NUM)
    file_queue = queue.NamedQueue('d-file', db=DATABASE_NUM)
    error_queue = queue.NamedQueue('d-error', db=DATABASE_NUM)
    dynamic_queue = queue.NamedQueue('d-dynamic', db=DATABASE_NUM)
    alert_queue = queue.NamedQueue('d-alert', db=DATABASE_NUM)
    filescore_queue = queue.NamedQueue('d-filescore', db=DATABASE_NUM)
    emptyresult_queue = queue.NamedQueue('d-emptyresult', db=DATABASE_NUM)

    log.info("Ready!")
    queues = [
        submission_queue, result_queue, file_queue, error_queue, dynamic_queue,
        alert_queue, filescore_queue, emptyresult_queue
    ]
    while True:
        queue_name, key = queue.select(*queues)

        try:
            rewrite = False
            expiry = None
            if isinstance(key, (tuple, list)):
                key, rewrite, expiry = key

            if rewrite:
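                # queue_name[2:] strips the 'd-' prefix to recover the
                # bucket name before updating the item's __expiry_ts__.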
                # noinspection PyProtectedMember
                ds._save_bucket_item(ds.get_bucket(queue_name[2:]), key,
                                     {"__expiry_ts__": expiry})

            if queue_name == "d-submission":
                ds.delete_submission(key)
                log.debug("Submission %s (DELETED)" % key)
            elif queue_name == "d-result":
                ds.delete_result(key)
                log.debug("Result %s (DELETED)" % key)
            elif queue_name == "d-error":
                ds.delete_error(key)
                log.debug("Error %s (DELETED)" % key)
            elif queue_name == "d-file":
                ds.delete_file(key)
                if config.core.expiry.delete_storage and fs.exists(
                        key, location='far'):
                    fs.delete(key, location='far')
                log.debug("File %s (DELETED)" % key)
            elif queue_name == "d-alert":
                ds.delete_alert(key)
                log.debug("Alert %s (DELETED)" % key)
            elif queue_name == "d-filescore":
                ds.delete_filescore(key)
                log.debug("FileScore %s (DELETED)" % key)
            elif queue_name == "d-emptyresult":
                ds.delete_result(key)
                log.debug("EmptyResult %s (DELETED)" % key)
            else:
                log.warning("Unknown message: %s (%s)" % (key, queue_name))
        except Exception:  # keep the deletion loop alive on per-key failures
            log.exception("Failed deleting key %s from bucket %s:", key,
                          queue_name)

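        # Rotate the list so queue.select() gives every queue a turn
        # rather than always favouring the first one.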
        queues = queues[1:] + queues[0:1]
Code Example #8
import logging

from assemblyline.al.common import forge
from assemblyline.al.common import log
from assemblyline.al.common import queue

config = forge.get_config()

persistent_settings = {
    'db': config.core.redis.persistent.db,
    'host': config.core.redis.persistent.host,
    'port': config.core.redis.persistent.port,
}

ingestq = queue.NamedQueue('m-ingest-test', **persistent_settings)


def resubmit(metadata):
    ingestq.push(metadata)


def main():
    log.init_logging('test')
    logger = logging.getLogger('assemblyline.test')

    store = forge.get_datastore()

    sids = []
    for x in store.stream_search(
            'submission',
Code Example #9
File: journalist.py Project: wgwjifeng/cyberweapons
import logging
import os
import signal

from assemblyline.al.common import forge
from assemblyline.al.common import log
from assemblyline.al.common import queue

config = forge.get_config()

log.init_logging('journalist')

directory = config.core.expiry.journal.directory
emptyresult_queue = queue.NamedQueue(
    "ds-emptyresult",
    db=config.core.redis.persistent.db,
    host=config.core.redis.persistent.host,
    port=config.core.redis.persistent.port,
)
logger = logging.getLogger('assemblyline.journalist')
max_open_files = 8
path_and_filehandle = []
path_to_filehandle = {}
previous = []
running = True


# noinspection PyUnusedLocal
def interrupt(unused1, unused2):  # pylint:disable=W0613
    global running  # pylint:disable=W0603
    logger.info("Caught signal. Coming down...")
    running = False
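
The handler just flips the module-level running flag; its registration is not part of this excerpt, but with the signal module already imported it would presumably look like:

# Hypothetical registration (not in this excerpt).
signal.signal(signal.SIGINT, interrupt)
signal.signal(signal.SIGTERM, interrupt)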
Code Example #10
config = forge.get_config()
config.logging.log_to_console = False
config.logging.log_to_syslog = False
config.logging.log_to_file = True

pid = str(os.getpid())
log.init_logging('reindex_worker.%s' % pid)
logger = logging.getLogger('assemblyline.reindex_worker')

# Run config
DATABASE_NUM = 3

# Globals
ds = forge.get_datastore()
reindex_queue = queue.NamedQueue('r-index', db=DATABASE_NUM)
done_queue = queue.NamedQueue("r-done", db=DATABASE_NUM)


def do_reindex(bucket_name, key):
    try:
        data = ds._get_bucket_item(ds.get_bucket(bucket_name), key)
        data = ds.sanitize(bucket_name, data, key)
        ds._save_bucket_item(ds.get_bucket(bucket_name), key, data)
    except Exception:
        done_queue.push({
            "is_done": False,
            "success": False,
            "bucket_name": bucket_name,
            "key": key
        })
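
The worker loop that feeds do_reindex is not part of this excerpt. Given the r-index and r-done queues above, a hypothetical sketch, with the message layout inferred from the failure record pushed in do_reindex:

# Hypothetical worker loop; a blocking pop() is an assumption.
def run_sketch():
    while True:
        msg = reindex_queue.pop()
        if msg is None:  # assumed shutdown sentinel
            break
        do_reindex(msg['bucket_name'], msg['key'])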
Code Example #11
persistent_settings = {
    'db': config.core.redis.persistent.db,
    'host': config.core.redis.persistent.host,
    'port': config.core.redis.persistent.port,
}

alertq_name = 'm-alert'
commandq_name = 'a-command'
create_alert = forge.get_create_alert()
datastore = forge.get_datastore()
exit_msgs = ['server closed the connection unexpectedly']
interval = 3 * 60 * 60
logger = logging.getLogger('assemblyline.alerter')
max_consecutive_errors = 100
max_retries = 10
running = True

alertq = queue.NamedQueue(alertq_name, **persistent_settings)
commandq = queue.NamedQueue(commandq_name, **persistent_settings)

# Publish counters to the metrics sink.
counts = counter.AutoExportingCounters(name='alerter',
                                       host=net.get_hostip(),
                                       export_interval_secs=5,
                                       channel=forge.get_metrics_sink(),
                                       auto_log=True,
                                       auto_flush=True)
counts.start()


# noinspection PyUnusedLocal
def interrupt(unused1, unused2):  # pylint:disable=W0613
    global running  # pylint:disable=W0603
    running = False