def send_raw(self, raw, shards=None):
    if not shards:
        config = forge.get_config()
        shards = config.core.dispatcher.shards

    task = Task(raw)
    self.send(task, shards)
def init_logging(name='al'):
    config = forge.get_config()
    logging.root.setLevel(logging.CRITICAL)
    logger = logging.getLogger('assemblyline')
    logger.setLevel(logging.INFO)

    if config.logging.log_to_file:
        if not os.path.isdir(config.logging.directory):
            print 'Warning: log directory does not exist. Will try to create %s' % config.logging.directory
            os.makedirs(config.logging.directory)

        op_file_handler = logging.handlers.RotatingFileHandler(
            os.path.join(config.logging.directory, name + '.log'),
            maxBytes=10485760, backupCount=5)
        op_file_handler.setLevel(logging.INFO)
        op_file_handler.setFormatter(logging.Formatter(AL_LOG_FORMAT))
        logger.addHandler(op_file_handler)

        err_file_handler = logging.handlers.RotatingFileHandler(
            os.path.join(config.logging.directory, name + '.err'),
            maxBytes=10485760, backupCount=5)
        err_file_handler.setLevel(logging.ERROR)
        err_file_handler.setFormatter(logging.Formatter(AL_LOG_FORMAT))
        logger.addHandler(err_file_handler)

    if config.logging.log_to_console:
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter(AL_LOG_FORMAT))
        logger.addHandler(console)

    if config.logging.log_to_syslog and config.logging.syslog_ip:
        syslog_handler = logging.handlers.SysLogHandler(address=(config.logging.syslog_ip, 514))
        syslog_handler.formatter = logging.Formatter(AL_SYSLOG_FORMAT)
        logger.addHandler(syslog_handler)
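# Hedged usage sketch (not part of the original module): a hypothetical daemon
# initializes Assemblyline logging once at startup, then pulls a child logger off
# the 'assemblyline' root so its records flow through the handlers configured
# above. The 'my_daemon' name is illustrative only; the al_log import path follows
# the pattern used elsewhere in this codebase.
import logging

from assemblyline.al.common import log as al_log

al_log.init_logging('my_daemon')
logger = logging.getLogger('assemblyline.my_daemon')
logger.info("my_daemon started")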
def __init__(self, stub_name, log_name):
    config = forge.get_config()
    self.registry_host = config.installation.docker.get('private_registry', None)
    self.log = logging.getLogger(log_name)
    self.docker_count = 0
    self.docker_contexts = {}
    self.project_id = str(uuid.uuid4()).replace("-", "")
    self.stub_name = "%s_%s_%%i" % (self.project_id, stub_name)
def get_client(host, port, db, private):
    if not host or not port or not db:
        config = forge.get_config()
        host = host or config.core.redis.nonpersistent.host
        port = int(port or config.core.redis.nonpersistent.port)
        db = int(db or config.core.redis.nonpersistent.db)

    if private:
        return redis.StrictRedis(host=host, port=port, db=db)
    else:
        return redis.StrictRedis(connection_pool=get_pool(host, port, db))
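# Hedged usage sketch (not from the original file): when host/port/db are falsy,
# get_client() falls back to the nonpersistent Redis settings from the seed, so
# callers can pass None for all three. private=False reuses a shared connection
# pool; private=True opens a dedicated connection. The key name is illustrative.
client = get_client(None, None, None, False)
client.set('al_example_key', 'value')
print client.get('al_example_key')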
def __init__(self, metrics_channel_name, logger, elastic_ip_p, elastic_port_p):
    self.metrics_channel_name = metrics_channel_name
    self.elastic_ip = elastic_ip_p
    self.elastic_port = elastic_port_p
    self.scheduler = Scheduler()
    self.metrics_queue = None
    self.es = None
    self.log = logger
    self.METRIC_TYPES.update(forge.get_config().core.metricsd.extra_metrics)
    self.counters_lock = Lock()
    self.counters = {}
def validate_rules(self, rulefile, datastore=False):
    change = False
    while True:
        try:
            self.paranoid_rule_check(rulefile)
            return change
        # If something goes wrong, clean rules until valid file given
        except Exception as e:
            change = True
            if e.message.startswith('yara.SyntaxError'):
                e_line = int(e.message.split('):', 1)[0].split("(", -1)[1])
                e_message = e.message.split("): ", 1)[1]
                try:
                    invalid_rule, reline = self.clean(rulefile, e_line, e_message)
                except Exception as ve:
                    raise ve

                # If datastore object given, change status of signature to INVALID in Riak
                if datastore:
                    from assemblyline.al.common import forge
                    store = forge.get_datastore()
                    config = forge.get_config()
                    signature_user = config.services.master_list.Yara.config.SIGNATURE_USER
                    # Get the offending sig ID
                    sig_query = "name:{} AND meta.al_status:(DEPLOYED OR NOISY)".format(invalid_rule)
                    sigl = store.list_filtered_signature_keys(sig_query)
                    # Mark and update Riak
                    store = forge.get_datastore()
                    for sig in sigl:
                        sigdata = store.get_signature(sig)
                        # Check this in case someone already marked it as invalid
                        try:
                            if sigdata['meta']['al_status'] == 'INVALID':
                                continue
                        except KeyError:
                            pass
                        sigdata['meta']['al_status'] = 'INVALID'
                        today = datetime.date.today().isoformat()
                        sigdata['meta']['al_state_change_date'] = today
                        sigdata['meta']['al_state_change_user'] = signature_user
                        sigdata['comments'].append("AL ERROR MSG:{0}. Line:{1}".format(e_message.rstrip().strip(), reline))
                        store.save_signature(sig, sigdata)
            else:
                raise e

            continue
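# Hedged usage sketch, assuming the method above belongs to a YARA rule validator
# class (called YaraValidator here purely for illustration) and that the rule file
# path exists. validate_rules() returns True only if it had to strip broken rules
# from the file to make it compile.
validator = YaraValidator()
if validator.validate_rules('/tmp/rules.yar', datastore=False):
    print "Some rules were removed from /tmp/rules.yar to make it compile."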
def send(self, task, shards=None, queue_name=None):
    if queue_name is None:
        queue_name = {}

    if not shards:
        config = forge.get_config()
        shards = config.core.dispatcher.shards

    if not task.dispatch_queue:
        n = forge.determine_dispatcher(task.sid, shards)
        name = queue_name.get(n, None)
        if not name:
            queue_name[n] = name = 'ingest-queue-' + str(n)
        task.dispatch_queue = name

    if not task.priority:
        task.priority = 0

    self._get_queue(task.dispatch_queue).push(task.priority, task.raw)
def main():
    if len(sys.argv) == 1:
        print "Usage: %s <One or more prepared VM tarballs>" % sys.argv[0]
        sys.exit(7)

    try:
        svc_class = service_by_name("Cuckoo")
    except:
        print 'Could not load service "%s".\n' \
              'Valid options:\n%s' % ("Cuckoo", [s['name'] for s in forge.get_datastore().list_services()])
        sys.exit(7)

    cfg = forge.get_datastore().get_service(svc_class.SERVICE_NAME).get("config", {})
    config = forge.get_config()
    local_meta_root = os.path.join(config.system.root, cfg['REMOTE_DISK_ROOT'])
    vm_meta_path = os.path.join(local_meta_root, cfg['vm_meta'])

    out_config = vm_meta_path
    out_directory = os.path.dirname(out_config)

    vm_list = sys.argv[1:]
    cuckoo_config = []
    for vm in vm_list:
        for js in install_vm_meta(out_directory, vm, ['']):
            cuckoo_config.append(js)

    with open(out_config, "w") as fh:
        json.dump(cuckoo_config, fh, sort_keys=True, indent=4, separators=(',', ': '))

    print "Wrote %i Definitions to %s!" % (len(cuckoo_config), out_config)
import json
import logging
import platform
import re
import threading
import time

from collections import namedtuple

from assemblyline.common.importing import class_by_path
from assemblyline.al.common import forge
from assemblyline.al.common.message import Message, MT_SVCHEARTBEAT
from assemblyline.al.common.queue import CommsQueue
from assemblyline.al.service.service_driver import ServiceDriver

config = forge.get_config()
log = logging.getLogger('assemblyline.svc.mgr')

DONE = len(config.services.stages) + 1
NAME = dict([(x + 1, config.services.stages[x]) for x in xrange(len(config.services.stages))])
ORDER = dict([(config.services.stages[x], x + 1) for x in xrange(len(config.services.stages))])

ServiceEntry = namedtuple(
    'ServiceEntry', [
        'name',
        'accepts',
        'category',
        'proxy',
        'rejects',
        'skip',
        'stage',
    Args:
        c12n: Classification to normalize
        long_format: True/False in long format
        skip_auto_select: True/False skip group auto adding, use True when dealing with user's classifications

    Returns:
        A normalized version of the original classification
    """
    if not self.enforce:
        return self.UNRESTRICTED

    lvl_idx, req, groups, subgroups = self._get_classification_parts(c12n, long_format=long_format)

    return self._get_normalized_classification_text(lvl_idx, req, groups, subgroups,
                                                    long_format=long_format,
                                                    skip_auto_select=skip_auto_select)


if __name__ == "__main__":
    import json

    from assemblyline.al.common import forge
    config = forge.get_config(static_seed="assemblyline.al.install.seeds.assemblyline_appliance.seed")

    c = Classification(config.system.classification.definition)

    print json.dumps(c.get_parsed_classification_definition(), indent=4)
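# Hedged extension of the __main__ demo above (the marking string is illustrative
# and must exist in the loaded classification definition): once a Classification
# object 'c' is built, normalize_classification() re-renders a marking in long or
# short form.
print c.normalize_classification("U", long_format=True)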
from assemblyline.common.charset import safe_str
from assemblyline.common.isotime import epoch_to_iso
from assemblyline.al.common import forge
from assemblyline.al.common import task
from assemblyline.al.common.forge import get_constants, get_config

constants = get_constants()
config = get_config()


class NoticeException(Exception):
    pass


alert = [
    'sid',
]

aliases = config.core.alerter.metadata_aliases

meta_fields = {
    "al_score": "number",
    "filename": "text",
    "size": "number",
    "ts": "date"
}
meta_fields.update(config.core.alerter.metadata_fields)

metadata = meta_fields.keys()

overrides = task.submission_overrides + [
    'completed_queue',
    'description',
def submit(self, task, shards=None):
    if not shards:
        config = forge.get_config()
        shards = config.core.dispatcher.shards

    task.dispatch_queue = None
    self.send(task, shards)
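# Hedged usage sketch: submit(), send() and send_raw() above appear to belong to
# the same dispatch client class (named DispatchClient here only for illustration).
# A caller wraps a raw submission in a Task and lets the client pick the
# ingest-queue-N shard from the task's sid. The raw fields shown are illustrative.
client = DispatchClient()
task = Task({'sid': 'example-sid', 'priority': 0})
client.submit(task)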
def __init__(self, host=None, port=None, db=None):
    config = forge.get_config()
    self.host = host or config.core.redis.nonpersistent.host
    self.port = port or config.core.redis.nonpersistent.port
    self.db = db or config.core.redis.nonpersistent.db
    self.q = {}
#!/usr/bin/env python

import sys
import libvirt
import logging
import lxml
import lxml.etree
import os
import subprocess

from assemblyline.al.common import forge

config = forge.get_config(static_seed=os.environ.get("VMEDIT_SEED", None))

from assemblyline.al.common import vm

QCOW2_EXT = 'qcow2'
LOCAL_VMDISK_ROOT = '/opt/al/var/masterdisks/'

log = logging.getLogger('assemblyline.al.vme')


class VmEditor(object):
    def __init__(self, vmname, cfg=None):
        self.ds = forge.get_datastore()
        self.vm_name = vmname + '.001'
        if cfg:
            self.vm_cfg = cfg.workers.virtualmachines.master_list.get(vmname, {}).get('cfg', None)
        else:
            self.vm_cfg = self.ds.get_virtualmachine(vmname)

        if not self.vm_cfg:
            raise Exception("Could not find VM %s in the seed" % vmname)

        self.vmm = libvirt.open(None)
import elasticsearch
import json
import logging
import psutil
import os
import sys
import urllib2

from apscheduler.scheduler import Scheduler
from assemblyline.common.net import get_hostname, get_hostip
from assemblyline.al.common import forge, log as al_log
from assemblyline.common.isotime import now_as_local, now_as_iso

config = forge.get_config(static_seed=os.getenv("AL_SEED_STATIC"))

al_log.init_logging('system_metrics')
log = logging.getLogger('assemblyline.system_metrics')

previous_net_io = None
previous_disk_io = None

SOLR_ADMIN_URL = '%s/admin/mbeans?stats=true&wt=json&cat=QUERYHANDLER&cat=CORE&key=/update&key=/select&key=searcher'
SOLR_CORE_URL = 'admin/cores?wt=json&indexInfo=false'
SOLR_URL_BUILDER = 'http://localhost:8093/internal_solr/%s'
RIAK_STATS_URL = 'http://localhost:8098/stats'


def calculate_system_metrics(es, cur_ip, cur_host):
    global previous_disk_io, previous_net_io

    log.info("Starting system metrics calculation...")