Code Example #1
def store_service_config(name, store_config=True, config_overrides=None, enabled=True):
    cls = class_by_name(name) if '.' in name else service_by_name(name)
    if not hasattr(cls, "get_default_config"):
        raise Exception(name + " is not an AL service. Make sure the class path you've entered is valid.")
    cfg = cls.get_default_config()
    cfg['enabled'] = enabled

    if '.' in name:
        cfg['classpath'] = name

    if config_overrides:
        for cfg_key, cfg_value in config_overrides.iteritems():
            if cfg_key not in cfg['config'] and cfg_key != 'PLUMBER_MAX_QUEUE_SIZE':
                raise Exception("Config override %s is not a valid configuration option for %s" % (cfg_key, name))
            cfg['config'][cfg_key] = cfg_value
    if store_config:
        srv_config = forge.get_datastore().get_service(cfg['name'])
        if srv_config:
            srv_config.update(cfg)
        else:
            srv_config = cfg

        forge.get_datastore().save_service(cfg['name'], srv_config)
        return srv_config
    return cfg
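For context, a hypothetical dry-run call to this helper might look as follows; the class path and the override key are illustrative names, not taken from a real AL seed:

# Hypothetical usage sketch (illustrative names): build, but do not persist,
# a service config. Assumes the service's default config defines MAX_RETRIES.
cfg = store_service_config("al_services.alsvc_example.Example",
                           store_config=False,
                           config_overrides={"MAX_RETRIES": 3},
                           enabled=False)
print cfg['classpath'], cfg['enabled']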
Code Example #2
    def validate_rules(self, rulefile, datastore=False):
        change = False
        while True:
            try:
                self.paranoid_rule_check(rulefile)
                return change
            # If something goes wrong, keep cleaning rules until the file is valid
            except Exception as e:
                change = True
                if e.message.startswith('yara.SyntaxError'):

                    e_line = int(e.message.split('):', 1)[0].split("(", -1)[1])
                    e_message = e.message.split("): ", 1)[1]
                    invalid_rule, reline = self.clean(rulefile, e_line, e_message)

                    # If datastore object given, change status of signature to INVALID in Riak
                    if datastore:
                        from assemblyline.al.common import forge
                        store = forge.get_datastore()
                        config = forge.get_config()
                        signature_user = config.services.master_list.Yara.config.SIGNATURE_USER
                        # Get the offending sig ID
                        sig_query = "name:{} AND meta.al_status:(DEPLOYED OR NOISY)".format(invalid_rule)
                        sigl = store.list_filtered_signature_keys(sig_query)
                        # Mark and update Riak
                        for sig in sigl:
                            sigdata = store.get_signature(sig)
                            # Check this in case someone already marked it as invalid
                            try:
                                if sigdata['meta']['al_status'] == 'INVALID':
                                    continue
                            except KeyError:
                                pass
                            sigdata['meta']['al_status'] = 'INVALID'
                            today = datetime.date.today().isoformat()
                            sigdata['meta']['al_state_change_date'] = today
                            sigdata['meta']['al_state_change_user'] = signature_user
                            sigdata['comments'].append(
                                "AL ERROR MSG:{0}. Line:{1}".format(e_message.strip(), reline))
                            store.save_signature(sig, sigdata)

                else:
                    raise e

                continue
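To make the string parsing above concrete, here is a small self-contained illustration; the exception text is invented but follows the usual "file(line): message" shape of a yara.SyntaxError:

# Stand-alone illustration of the parsing logic in validate_rules();
# the message below is made up for demonstration purposes.
msg = 'yara.SyntaxError: rules(42): undefined identifier "foo"'
e_line = int(msg.split('):', 1)[0].split("(", -1)[1])  # -> 42
e_message = msg.split("): ", 1)[1]                     # -> undefined identifier "foo"
print e_line, e_message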
Code Example #3
def install(alsi):
    import os  # used below for the /etc/init.d existence check
    from copy import deepcopy
    from assemblyline.al.common import forge
    from assemblyline.common.user_defaults import ACCOUNT_DEFAULT, SETTINGS_DEFAULT

    mm_user = alsi.config['core']['middleman']['user']
    ds = forge.get_datastore()
    mm_user_data = ds.get_user(mm_user)
    if not mm_user_data:
        mm_user_data = deepcopy(ACCOUNT_DEFAULT)
        mm_user_data['api_quota'] = 256
        mm_user_data['classification'] = alsi.config['core']['middleman'][
            'classification']
        mm_user_data['groups'] = ["MIDDLEMAN"]
        mm_user_data['name'] = mm_user
        mm_user_data['uname'] = mm_user
        ds.save_user(mm_user, mm_user_data)

        mm_options = deepcopy(SETTINGS_DEFAULT)
        mm_options['classification'] = alsi.config['core']['middleman'][
            'classification']
        ds.save_user(mm_user + "_options", mm_options)

    alsi.sudo_install_file(
        'assemblyline/al/install/etc/init/middleman_instance.conf',
        '/etc/init/middleman_instance.conf')

    alsi.sudo_install_file('assemblyline/al/install/etc/init/middleman.conf',
                           '/etc/init/middleman.conf')

    if not os.path.exists('/etc/init.d/middleman'):
        alsi.runcmd('sudo ln -s /lib/init/upstart-job /etc/init.d/middleman')

    alsi.milestone("middleman install complete")
Code Example #4
def add_to_profile(profile_name, service_key):
    ds = forge.get_datastore()
    profile = ds.get_profile(profile_name)
    if not profile:
        raise Exception("Could not find profile: %s")
    profile['services'][service_key] = {'service_overrides': {}, 'workers': 1}
    ds.save_profile(profile_name, profile)
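A minimal usage sketch; the profile and service names below are illustrative and assume both already exist in the datastore:

# Hypothetical call: attach one worker of a registered service to a profile.
# "workers-default" and "Example" are made-up names.
add_to_profile("workers-default", "Example")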
Code Example #5
File: expiry.py Project: wgwjifeng/cyberweapons
def main(bucket_list, journal_queues):
    ds = forge.get_datastore()
    queues = {
        x: queue.NamedQueue('d-%s' % x, db=DATABASE_NUM)
        for x in set(journal_queues).union(set(bucket_list))
    }

    Thread(target=track_status, name="queues_status", args=(queues, )).start()

    log.info("Ready!")
    loader_threads = {
        x: Thread(target=load_expired,
                  name="loader_%s" % x,
                  args=(ds, x, queues[x]))
        for x in bucket_list
    }

    loader_threads.update({
        'journal_%s' % x: Thread(target=load_journal,
                                 name="journal_loader_%s" % x,
                                 args=(x, queues[x]))
        for x in journal_queues
    })

    for thread in loader_threads.itervalues():
        thread.start()

    for thread in loader_threads.itervalues():
        thread.join()
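A sketch of how this loader might be invoked; the bucket and journal names are illustrative and would normally come from the expiry configuration:

# Hypothetical invocation: expire two buckets and replay one journal queue.
main(bucket_list=['submission', 'result'], journal_queues=['submission'])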
Code Example #6
    def start(self):
        self.vmm = CuckooVmManager(self.cfg)
        self.cm = CuckooContainerManager(self.cfg, self.vmm)

        self._register_cleanup_op({
            'type': 'shell',
            'args': shlex.split("docker rm --force %s" % self.cm.name)
        })

        self.log.debug("VMM and CM started!")
        # Start the container
        self.cuckoo_ip = self.cm.start_container(self.cm.name)
        self.restart_interval = random.randint(45, 55)
        self.file_name = None
        self.set_urls()
        self.ssdeep_match_pct = int(self.cfg.get("dedup_similar_percent", 80))

        self.enabled_routes = None  # set below if a "routing" parameter exists
        for param in forge.get_datastore().get_service(
                self.SERVICE_NAME)['submission_params']:
            if param['name'] == "routing":
                self.enabled_routes = param['list']
                if self.enabled_routes[0] != param['default']:
                    self.enabled_routes.remove(param['default'])
                    self.enabled_routes.insert(0, param['default'])

        if self.enabled_routes is None:
            raise ValueError("No routing submission_parameter.")
        self.log.debug("Cuckoo started!")
Code Example #7
def ingester():  # df node def # pylint:disable=R0912
    datastore = forge.get_datastore()
    user_groups = {}

    # Move from ingest to unique and waiting queues.
    # While there are entries in the ingest queue we consume chunk_size
    # entries at a time and move unique entries to uniqueq / queued and
    # duplicates to their own queues / waiting.
    while running:
        while True:
            result = completeq.pop(blocking=False)  # df pull pop
            if not result:
                break

            completed(Task(result))  # df push calls

        entry = ingestq.pop(timeout=1)  # df pull pop
        if not entry:
            continue

        trafficq.push(entry)  # df push push

        sha256 = entry.get('sha256', '')
        if not sha256 or len(sha256) != 64:
            logger.error("Invalid sha256: %s", entry)
            continue

        entry['md5'] = entry.get('md5', '').lower()
        entry['sha1'] = entry.get('sha1', '').lower()
        entry['sha256'] = sha256.lower()

        ingest(datastore, user_groups, entry)  # df push calls

    datastore.close()
Code Example #8
    def undrain(self, msg):
        self.store = forge.get_datastore()
        if self.service_manager:
            self.service_manager.undrain()
        if self.vm_manager:
            self.vm_manager.undrain()
        return True
Code Example #9
    def __init__(self, wid, worker_type, working_dir, instance_id):
        self.working_dir = working_dir
        self.worker_id = wid
        self.ds = forge.get_datastore()
        self.worker_type = worker_type
        self.instance_id = instance_id

        if worker_type == TYPE_BACKUP:
            self.hash_queue = remote_datatypes.Hash("r-hash_%s" %
                                                    self.instance_id,
                                                    db=DATABASE_NUM)
            self.follow_queue = queue.NamedQueue("r-follow_%s" %
                                                 self.instance_id,
                                                 db=DATABASE_NUM,
                                                 ttl=1800)
            self.queue = queue.NamedQueue("r-backup_%s" % self.instance_id,
                                          db=DATABASE_NUM,
                                          ttl=1800)
            self.done_queue = queue.NamedQueue("r-backup-done_%s" %
                                               self.instance_id,
                                               db=DATABASE_NUM,
                                               ttl=1800)
        else:
            self.hash_queue = None
            self.follow_queue = None
            self.queue = None
            self.done_queue = queue.NamedQueue("r-restore-done_%s" %
                                               self.instance_id,
                                               db=DATABASE_NUM,
                                               ttl=1800)
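Note that the Redis keys all follow an "r-<role>_<instance_id>" convention, which is how workers rendezvous with the coordinator shown in Code Example #25 below; a tiny illustration with a made-up instance id:

# Illustration of the key naming scheme only; real instance ids are the
# uuid4() strings generated by the coordinator.
instance_id = "demo"
for role in ("hash", "follow", "backup", "backup-done", "restore-done"):
    print "r-%s_%s" % (role, instance_id)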
Code Example #10
def install(alsi=None):
    from assemblyline.al.common import forge
    ds = forge.get_datastore()

    ip = net.get_hostip()
    mac = net.get_mac_for_ip(ip)

    existing_reg = ds.get_node(mac)
    if existing_reg:
        alsi.info("Registration already exist. Skipping...")
        return
    reg = DEFAULT_CORE_REGISTRATION.copy()
    reg['hostname'] = net.get_hostname()
    reg['ip'] = ip
    reg['mac_address'] = mac
    reg['machine_info'] = sysinfo.get_machine_info()
    reg['platform'] = sysinfo.get_platform()
    if 'roles' not in reg:
        reg['roles'] = []
    if "dispatcher" not in reg["roles"]:
        reg['roles'].append("dispatcher")
    if "middleman" not in reg["roles"]:
        reg['roles'].append("middleman")
    ds.save_node(mac, reg)
    alsi.info("Core server registered!")
Code Example #11
def main():
    log.init_logging('run_service')

    if len(sys.argv) != 2:
        usage()
        exit(1)

    name = sys.argv[1]

    try:
        svc_class = class_by_name(name) if '.' in name else service_by_name(
            name)
    except:
        print 'Could not load service "%s".\n%s' % (name,
                                                    get_valid_service_list())
        raise

    logger.info('Running service in stand-alone mode. CTRL-C to exit.')
    # noinspection PyBroadException
    try:
        cfg = forge.get_datastore().get_service(svc_class.SERVICE_NAME).get(
            "config", {})
    except:  # pylint: disable=W0702
        cfg = {}
    service_driver = ServiceDriver(svc_class, cfg, 86400, NUM_WORKERS)
    service_driver.start()

    try:
        while True:
            send_minimal_heartbeat(svc_class.SERVICE_NAME, NUM_WORKERS)
            time.sleep(config.system.update_interval)
    except KeyboardInterrupt:
        print 'Exiting.'
    finally:
        service_driver.stop_hard()
Code Example #12
def main(shard):
    log.init_logging('dispatcher')

    ds = forge.get_datastore()

    service_proxies = ServiceProxyManager(ds.list_service_keys())
    dispatcher = Dispatcher(service_proxies, shard=shard, debug=False)
    dispatcher.start()
Code Example #13
File: vmedit.py Project: wgwjifeng/cyberweapons
    def __init__(self, vmname, cfg=None):
        self.ds = forge.get_datastore()
        self.vm_name = vmname + '.001'
        if cfg:
            self.vm_cfg = cfg.workers.virtualmachines.master_list.get(vmname, {}).get('cfg', None)
        else:
            self.vm_cfg = self.ds.get_virtualmachine(vmname)
        if not self.vm_cfg:
            raise Exception("Could not find VM %s in the seed" % vmname)
        self.vmm = libvirt.open(None)
Code Example #14
    def __init__(self, tasker_id_p):
        self.action_queue = queue.PriorityQueue('alert-actions',
                                                db=DATABASE_NUM)
        self.worker_queues_map = {
            x: queue.PriorityQueue('alert-actions-worker-%s' % x,
                                   db=DATABASE_NUM)
            for x in range(WORKER_COUNT)
        }
        self.datastore = forge.get_datastore()
        self.tasker_id = tasker_id_p
Code Example #15
    def __init__(self, logger=None):
        if not logger:
            from assemblyline.al.common import log as al_log
            al_log.init_logging('yara_importer')
            logger = logging.getLogger('assemblyline.yara_importer')
            logger.setLevel(logging.INFO)

        yara_parser_class = forge.get_yara_parser()
        self.ds = forge.get_datastore()
        self.yp = yara_parser_class()
        self.log = logger
        self._id_cache = {}
        self._name_cache = []
Code Example #16
    def __init__(self, server_url=None, datastore=None):
        if not server_url:
            server_url = config.submissions.url

        self.server_url = server_url
        self.transport = forge.get_filestore()
        self.datastore = datastore
        self.is_unix = os.name == "posix"
        if not self.is_unix:
            from assemblyline_client import Client
            self.client = Client(self.server_url, auth=SUBMISSION_AUTH)
        elif self.datastore is None:
            self.datastore = forge.get_datastore()
Code Example #17
    def __init__(self, backup_file_path):
        self.backup_file_path = backup_file_path
        self.ds = forge.get_datastore()

        # Static maps
        self.BUCKET_MAP = {
            "blob": self.ds.blobs,
            "node": self.ds.nodes,
            "profile": self.ds.profiles,
            "signature": self.ds.signatures,
            "user": self.ds.users,
        }
        self.VALID_BUCKETS = sorted(self.BUCKET_MAP.keys())
Code Example #18
def main():
    if len(sys.argv) == 1:
        print "Usage: %s <One or more prepared VM tarballs>"
        sys.exit(7)

    try:
        svc_class = service_by_name("Cuckoo")
    except:
        print 'Could not load service "%s".\n' \
              'Valid options:\n%s' % ("Cuckoo", [s['name'] for s in forge.get_datastore().list_services()])
        sys.exit(7)

    cfg = forge.get_datastore().get_service(svc_class.SERVICE_NAME).get(
        "config", {})
    config = forge.get_config()

    local_meta_root = os.path.join(config.system.root, cfg['REMOTE_DISK_ROOT'])
    vm_meta_path = os.path.join(local_meta_root, cfg['vm_meta'])

    out_config = vm_meta_path
    out_directory = os.path.dirname(out_config)
    vm_list = sys.argv[1:]

    cuckoo_config = []
    for vm in vm_list:
        for js in install_vm_meta(out_directory, vm, ['']):
            cuckoo_config.append(js)

    with open(out_config, "w") as fh:
        json.dump(cuckoo_config,
                  fh,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ': '))

    print "Wrote %i Definitions to %s!" % (len(cuckoo_config), out_config)
Code Example #19
File: vm.py Project: wgwjifeng/cyberweapons
    def start(self):
        self._state = State.STARTING
        self.log.info("VmManager starting on (%s:%s).", self.host_ip, self.host_mac)
        with self.vmm_lock:
            self.vmm = libvirt.open(None)
            self.vmrevert_scheduler = apscheduler.scheduler.Scheduler()
            self.store = forge.get_datastore()

            # clean any state that might be left from previous run.
            self._destroy_all()

            # install and start vms
            self._reconcile_config()
            self.log.info("Starting all instances.")
            self._start_all()
            self._schedule_automatic_reverts()
            self._state = State.RUNNING
Code Example #20
File: vm.py Project: wgwjifeng/cyberweapons
    def __init__(self, vmcfg=None):
        self.disk_root = config.workers.virtualmachines.disk_root
        if not os.path.exists(self.disk_root):
            os.makedirs(self.disk_root)
        self.vmm = None
        self.cfg = vmcfg
        if vmcfg is None:
            self.cfg = get_vmcfg_for_localhost()
        self.vmrevert_scheduler = None
        self.host_ip = net.get_hostip()
        self.host_mac = net.get_mac_for_ip(self.host_ip)
        self.log = logging.getLogger('assemblyline.vmm')
        self.vm_profiles = {}
        self.vmm_lock = threading.Lock()
        self._state = State.INIT
        self._hostagent_client = ServiceAgentClient(async=True)
        self.store = forge.get_datastore()
        self.vm_configs = {}
Code Example #21
def reregister_services(store_config=True, config_overrides=None):
    failed = []
    passed = []
    services = forge.get_datastore().list_services()
    for svc in services:
        class_name = svc.get('classpath', "al_services.%s.%s" % (svc['repo'], svc['class_name']))

        pprint(class_name)
        try:
            store_service_config(class_name, store_config, config_overrides)
            passed.append(class_name)
        except ImportError as ie:
            failed.append((class_name, ie))
            logging.error('Existing service cannot be imported for inspection: %s. Skipping.', class_name)
    logging.info("Succeeded: \n\t%s", "\n\t".join(passed))
    logging.info("Failed:")
    for (c, e) in failed:
        logging.info("\t%s - %s", c, e)
Code Example #22
File: controller.py Project: wgwjifeng/cyberweapons
    def __init__(self):
        self.mac = net.get_mac_address()
        self.store = forge.get_datastore()
        self.log = logging.getLogger('assemblyline.control')
        self.log.info('Starting Controller: MAC[%s] STORE[%s]' % (self.mac, self.store))

        # This hosts registration from riak (Hosts tab in UI).
        self.jobs = LocalQueue()
        self.last_heartbeat = 0
        self.rpc_handlers = {
            ControllerRequest.HEARTBEAT: self.controller_heartbeat,
            ControllerRequest.START: self.hostagent_start,
            ControllerRequest.STOP: self.hostagent_stop,
            ControllerRequest.RESTART: self.hostagent_restart,
            ControllerRequest.STATUS: self.hostagent_status,
        }
        self._should_run = True
        self.executor_thread = None
        self.heartbeat_thread = None
Code Example #23
def main():
    log.init_logging('test')
    logger = logging.getLogger('assemblyline.test')

    store = forge.get_datastore()

    sids = []
    for x in store.stream_search(
            'submission',
            'times.completed:[2015-01-30T00:00:00.000Z TO 2015-01-30T00:59:59.999Z]'
    ):
        sid = x['submission.sid']
        sids.append(sid)

    count = 0
    submissions = store.get_submissions(sids)
    for submission in submissions:
        if submission.get('state', '') != 'completed':
            continue
        if len(submission['files']) != 1:
            continue
        _, srl = submission['files'][0]
        fileinfo = store.get_file(srl)
        if not fileinfo:
            continue
        submission = submission.get('submission', {})
        if not submission:
            continue
        metadata = submission.get('metadata', {})
        if not metadata:
            continue
        metadata['ignore_submission_cache'] = True
        metadata['ignore_cache'] = False
        metadata['md5'] = fileinfo['md5']
        metadata['sha1'] = fileinfo['sha1']
        metadata['sha256'] = fileinfo['sha256']
        metadata['size'] = fileinfo['size']
        resubmit(metadata)
        count += 1
        if count >= 1000:
            break

    logger.info('Resubmitted %d submissions for testing', count)
Code Example #24
    def __init__(self):
        self.ip = net.get_hostip()
        self.mac = net.get_mac_for_ip(self.ip)
        self.store = forge.get_datastore()
        self.log = logging.getLogger('assemblyline.agent')
        self.log.info('Starting HostAgent: MAC[%s] STORE[%s]' %
                      (self.mac, self.store))

        # This hosts registration from riak (Hosts tab in UI).
        self.registration = None
        self.service_manager = None
        self.vm_manager = None
        self.flex_manager = None
        self.lock = None
        self.consumer_thread = None
        self._should_run = False
        self.host_profile = {}
        self.executor_thread = None

        # Chores are actions that we run periodically and which we coalesce
        # when the same chore is requested multiple times in the same tick.
        # Jobs are executed as they are received.
        self.jobs = LocalQueue()
        self.last_heartbeat = 0
        self.rpc_handlers = {
            AgentRequest.PING: self.ping,
            AgentRequest.DRAIN: self.drain,
            AgentRequest.UNDRAIN: self.undrain,
            AgentRequest.SHUTDOWN: self.shutdown,
            AgentRequest.VM_LIST: self.list_vms,
            AgentRequest.VM_START: self.start_vm,
            AgentRequest.VM_STOP: self.stop_vm,
            AgentRequest.VM_STOP_ALL: self.stop_all_vms,
            AgentRequest.VM_RESTART: self.restart_vm,
            AgentRequest.VM_REFRESH_ALL: self.refresh_vm_all,
            AgentRequest.VM_REFRESH_FLEET: self.refresh_vm_fleet,
            AgentRequest.VM_GET_REVERT_TIMES: self.vm_get_revert_times,
            AgentRequest.START_SERVICES: self.start_services,
            AgentRequest.STOP_SERVICES: self.stop_services,
        }

        self._should_run = True
Code Example #25
    def __init__(self, working_dir, worker_count=50, spawn_workers=True):
        self.working_dir = working_dir
        self.ds = forge.get_datastore()
        self.plist = []
        self.instance_id = str(uuid.uuid4())
        self.follow_queue = queue.NamedQueue("r-follow_%s" % self.instance_id,
                                             db=DATABASE_NUM,
                                             ttl=1800)
        self.hash_queue = remote_datatypes.Hash("r-hash_%s" % self.instance_id,
                                                db=DATABASE_NUM)
        self.backup_queue = queue.NamedQueue('r-backup_%s' % self.instance_id,
                                             db=DATABASE_NUM,
                                             ttl=1800)
        self.backup_done_queue = queue.NamedQueue("r-backup-done_%s" %
                                                  self.instance_id,
                                                  db=DATABASE_NUM,
                                                  ttl=1800)
        self.restore_done_queue = queue.NamedQueue("r-restore-done_%s" %
                                                   self.instance_id,
                                                   db=DATABASE_NUM,
                                                   ttl=1800)
        self.bucket_error = []

        self.BUCKET_MAP = {
            "alert": self.ds.alerts,
            "blob": self.ds.blobs,
            "emptyresult": self.ds.emptyresults,
            "error": self.ds.errors,
            "file": self.ds.files,
            "filescore": self.ds.filescores,
            "node": self.ds.nodes,
            "profile": self.ds.profiles,
            "result": self.ds.results,
            "signature": self.ds.signatures,
            "submission": self.ds.submissions,
            "user": self.ds.users,
        }
        self.VALID_BUCKETS = sorted(self.BUCKET_MAP.keys())
        self.worker_count = worker_count
        self.spawn_workers = spawn_workers
        self.current_type = None
Code Example #26
File: vm.py Project: wgwjifeng/cyberweapons
def get_vmcfg_for_localhost():
    ip = net.get_hostip()
    mac = net.get_mac_for_ip(ip)
    store = forge.get_datastore()
    host_registration = store.get_node(mac)
    if not host_registration:
        raise ConfigException('Could not find host registration for %s' % mac)

    profile_name = host_registration.get('profile', None)
    if not profile_name:
        raise ConfigException('Could not find profile for host: %s' % mac)

    host_profile = store.get_profile(profile_name)
    if not host_profile:
        raise ConfigException('Could not fetch host profile %s' % profile_name)

    vm_config = host_profile.get('virtual_machines', None)
    if not vm_config:
        raise ConfigException('Could not find virtual machine section in %s' % profile_name)
    store.client.close()
    return vm_config
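Callers generally need to handle the ConfigException raised when the host is unregistered; a minimal caller sketch:

# Minimal caller sketch: fall back gracefully when this host has no
# registration or profile in the datastore.
try:
    vmcfg = get_vmcfg_for_localhost()
except ConfigException as ce:
    vmcfg = None
    print "No VM config for this host: %s" % ce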
Code Example #27
def dropper():  # df node def
    datastore = forge.get_datastore()

    while running:
        raw = dropq.pop(timeout=1)  # df pull pop
        if not raw:
            continue

        notice = Notice(raw)

        send_notification(notice)

        c12n = notice.get('classification',
                          config.core.middleman.classification)
        expiry = now_as_iso(86400)
        sha256 = notice.get('sha256')

        datastore.save_or_freshen_file(sha256, {'sha256': sha256}, expiry,
                                       c12n)

    datastore.close()
Code Example #28
    def __init__(self):
        # Delay these imports so most nodes don't import them.
        global Scheduler
        from apscheduler.scheduler import Scheduler

        self.bottleneck_queue_sizes = {}
        self.cores = None
        self.datastore = forge.get_datastore()
        self.flex_profile = None
        self.flex_scheduler = None
        self.log = logging.getLogger('assemblyline.flex')
        self.mac = net.get_mac_for_ip(net.get_hostip())
        self.main_bottleneck = ''
        self.needs_cleanup = True
        self.previous_queue_sizes = {}
        self.safe_start_dict = {}
        self.safeq = NamedQueue('safe-start-%s' % self.mac)
        self.service_manager = None
        self.ram_mb = None
        self.tick_count = 0
        self.vm_manager = None
Code Example #29
def get_service_queue_lengths():
    global ds  # pylint: disable=W0603
    if not ds:
        ds = forge.get_datastore()

    # Default is to return all services in a dict of class_name: queue_size.
    queue_lengths = {}
    services = ds.list_services()
    for svc in services:
        # noinspection PyBroadException
        try:
            if not svc:
                continue
            classpath = svc.get(
                'classpath',
                "al_services.%s.%s" % (svc['repo'], svc['class_name']))
            queue_lengths[svc['name']] = get_service_queue_length(classpath)
        except Exception:  # pylint: disable=W0703
            log.exception('while getting queue length for %s', svc['name'])

    return queue_lengths
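A small sketch of how the result might be consumed, for example to spot a backlog; the threshold is arbitrary:

# Illustrative consumer: flag services whose queues exceed an arbitrary
# threshold of 100 pending tasks.
for name, size in get_service_queue_lengths().iteritems():
    if size > 100:
        print "Service %s is backed up: %d tasks queued" % (name, size)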
Code Example #30
def main(shard):
    log.init_logging('dispatcher')
    logger = logging.getLogger('assemblyline.dispatch')

    r = redis.StrictRedis(config.core.redis.nonpersistent.host,
                          config.core.redis.nonpersistent.port,
                          config.core.redis.nonpersistent.db)

    r.delete('ingest-queue-' + shard)

    store = forge.get_datastore()
    store.commit_index('submission')

    query = 'state:submitted AND times.submitted:[NOW-1DAY TO *]'
    sids = []
    for x in store.stream_search('submission', query):
        sid = x['submission.sid']
        if str(forge.determine_dispatcher(sid)) == shard:
            sids.append(sid)

    count = 0
    submissions = store.get_submissions(sids)
    for submission in submissions:
        if submission.get('state', '') != 'submitted':
            sid = submission.get('sid', '')
            if sid:
                store.save_submission(sid, submission)
            continue
        submission['request'] = {}
        for path, srl in submission['files']:
            submission['fileinfo'] = store.get_file(srl)
            submission['request']['path'] = path
            submission['request']['srl'] = srl
            resubmit(submission)
        count += 1

    logger.info('Resubmitted %d submissions to dispatcher %s.', count, shard)