Example #1
    def try_run(self):
        while self.running:
            task = self.dispatch_client.request_work('worker',
                                                     self.service_name,
                                                     '0',
                                                     timeout=1)
            if not task:
                continue
            print(self.service_name, 'has received a job', task.sid)

            file = self.filestore.get(task.fileinfo.sha256)

            instructions = json.loads(file)
            instructions = instructions.get(self.service_name, {})
            print(self.service_name, 'following instruction:', instructions)
            hits = self.hits[task.fileinfo.sha256] = self.hits.get(
                task.fileinfo.sha256, 0) + 1

            if instructions.get('semaphore', False):
                _global_semaphore.acquire(blocking=True,
                                          timeout=instructions['semaphore'])
                continue

            if 'drop' in instructions:
                if instructions['drop'] >= hits:
                    self.drops[task.fileinfo.sha256] = self.drops.get(
                        task.fileinfo.sha256, 0) + 1
                    continue

            if instructions.get('failure', False):
                error = Error(instructions['error'])
                error.sha256 = task.fileinfo.sha256
                self.dispatch_client.service_failed(task.sid,
                                                    error=error,
                                                    error_key=get_random_id())
                continue

            result_data = {
                'archive_ts': time.time() + 300,
                'classification': 'U',
                'response': {
                    'service_version': '0',
                    'service_tool_version': '0',
                    'service_name': self.service_name,
                },
                'result': {},
                'sha256': task.fileinfo.sha256,
                'expiry_ts': time.time() + 600
            }

            result_data.update(instructions.get('result', {}))
            result_data['response'].update(instructions.get('response', {}))

            result = Result(result_data)
            result_key = instructions.get('result_key', get_random_id())
            self.dispatch_client.service_finished(task.sid, result_key, result)
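The loop above treats the file body itself as a JSON blob of per-service instructions. A hedged sketch of what such an instruction file could contain (the keys mirror the ones the loop checks; service names and values are invented for illustration):

# Illustrative instruction file for the mock service loop above.
# Keys match what try_run() reads; service names and values are made up.
import json

instructions_file = json.dumps({
    "MockServiceA": {
        "drop": 2,                       # ignore the first 2 dispatches of this file
        "result": {"classification": "U"}
    },
    "MockServiceB": {
        "failure": True,                 # report a failure instead of a result
        "error": {}                      # dict handed straight to Error(...)
    }
})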
Example #2
def test_ingest_sha(datastore, client):
    file_id = random_id_from_collection(datastore, 'file')
    res = client.ingest(alert=True,
                        sha256=file_id,
                        metadata={
                            "file_id": get_random_id(),
                            "comment": "test"
                        },
                        nq=get_random_id(),
                        nt=100)
    assert res.get('ingest_id', None) is not None
Example #3
def test_live_namespace(datastore, sio):
    wq_data = {'wq_id': get_random_id()}
    wq = NamedQueue(wq_data['wq_id'], private=True)

    start_msg = {'status_code': 200, 'msg': "Start listening..."}
    stop_msg = {
        'status_code': 200,
        'msg': "All messages received, closing queue..."
    }
    cachekey_msg = {'status_code': 200, 'msg': get_random_id()}
    cachekeyerr_msg = {'status_code': 200, 'msg': get_random_id()}

    test_res_array = []

    @sio.on('start', namespace='/live_submission')
    def on_start(data):
        test_res_array.append(('on_start', data == start_msg))

    @sio.on('stop', namespace='/live_submission')
    def on_stop(data):
        test_res_array.append(('on_stop', data == stop_msg))

    @sio.on('cachekey', namespace='/live_submission')
    def on_cachekey(data):
        test_res_array.append(('on_cachekey', data == cachekey_msg))

    @sio.on('cachekeyerr', namespace='/live_submission')
    def on_cachekeyerr(data):
        test_res_array.append(('on_cachekeyerr', data == cachekeyerr_msg))

    try:
        sio.emit('listen', wq_data, namespace='/live_submission')
        sio.sleep(1)

        wq.push({"status": "START"})
        wq.push({"status": "OK", "cache_key": cachekey_msg['msg']})
        wq.push({"status": "FAIL", "cache_key": cachekeyerr_msg['msg']})
        wq.push({"status": "STOP"})

        start_time = time.time()

        while len(test_res_array) < 4 and time.time() - start_time < 5:
            sio.sleep(0.1)

        assert len(test_res_array) == 4

        for test, result in test_res_array:
            if not result:
                pytest.fail(f"{test} failed.")

    finally:
        sio.disconnect()
Example #4
def test_submission_namespace(datastore, sio):
    submission_queue = CommsQueue('submissions', private=True)
    monitoring = get_random_id()

    ingested = random_model_obj(SubmissionMessage).as_primitives()
    ingested['msg_type'] = "SubmissionIngested"
    received = random_model_obj(SubmissionMessage).as_primitives()
    received['msg_type'] = "SubmissionReceived"
    queued = random_model_obj(SubmissionMessage).as_primitives()
    queued['msg_type'] = "SubmissionQueued"
    started = random_model_obj(SubmissionMessage).as_primitives()
    started['msg_type'] = "SubmissionStarted"

    test_res_array = []

    @sio.on('monitoring', namespace='/submissions')
    def on_monitoring(data):
        # Confirmation that we are waiting for status messages
        test_res_array.append(('on_monitoring', data == monitoring))

    @sio.on('SubmissionIngested', namespace='/submissions')
    def on_submission_ingested(data):
        test_res_array.append(
            ('on_submission_ingested', data == ingested['msg']))

    @sio.on('SubmissionReceived', namespace='/submissions')
    def on_submission_received(data):
        test_res_array.append(
            ('on_submission_received', data == received['msg']))

    @sio.on('SubmissionQueued', namespace='/submissions')
    def on_submission_queued(data):
        test_res_array.append(('on_submission_queued', data == queued['msg']))

    @sio.on('SubmissionStarted', namespace='/submissions')
    def on_submission_started(data):
        test_res_array.append(
            ('on_submission_started', data == started['msg']))

    try:
        sio.emit('monitor', monitoring, namespace='/submissions')
        sio.sleep(1)

        submission_queue.publish(ingested)
        submission_queue.publish(received)
        submission_queue.publish(queued)
        submission_queue.publish(started)

        start_time = time.time()

        while len(test_res_array) < 5 and time.time() - start_time < 5:
            sio.sleep(0.1)

        assert len(test_res_array) == 5

        for test, result in test_res_array:
            if not result:
                pytest.fail(f"{test} failed.")
    finally:
        sio.disconnect()
Example #5
    def __init__(self,
                 name,
                 host=None,
                 export_interval_secs=None,
                 counter_type=None,
                 config=None,
                 redis=None,
                 counter_names=None,
                 timer_names=None,
                 export_zero=True):
        config = config or forge.get_config()
        self.channel = forge.get_metrics_sink(redis)
        self.export_interval = export_interval_secs or config.core.metrics.export_interval
        self.name = name
        self.host = host or get_random_id()
        self.type = counter_type or name
        self.export_zero = export_zero

        self.counter_schema = set(counter_names or [])
        self.timer_schema = set(timer_names or [])

        self.counts = Counters({key: 0 for key in self.counter_schema})
        self.values = {}
        self.lock = threading.Lock()
        self.scheduler = None
        self.reset()

        assert self.channel
        assert (self.export_interval > 0)
Example #6
def generate_conf_key(service_tool_version=None, task=None):
    ignore_salt = None
    service_config = None
    submission_params_str = None

    if task is not None:
        service_config = json.dumps(sorted(task.service_config.items()))
        submission_params = {
            "deep_scan": task.deep_scan,
            "max_files": task.max_files,
            "min_classification": task.min_classification.value
        }
        submission_params_str = json.dumps(sorted(submission_params.items()))

        if task.ignore_cache:
            ignore_salt = get_random_id()

    if service_tool_version is None and \
            service_config is None and \
            submission_params_str is None and \
            ignore_salt is None:
        return "0"

    total_str = f"{service_tool_version}_{service_config}_{submission_params_str}_{ignore_salt}".encode(
        'utf-8')
    partial_md5 = hashlib.md5(
        (str(total_str).encode('utf-8'))).hexdigest()[:16]
    return baseconv.base62.encode(int(partial_md5, 16))
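A quick usage sketch for generate_conf_key above, assuming the helper is importable from this module. With nothing to fold into the key it short-circuits to "0"; otherwise the same inputs always yield the same base62 key:

# With no inputs at all, the function returns the literal "0".
assert generate_conf_key() == "0"

# Same inputs, same key; the exact value depends on the hash, so only
# stability and non-triviality are checked here.
key_a = generate_conf_key(service_tool_version="1.0")
key_b = generate_conf_key(service_tool_version="1.0")
assert key_a == key_b and key_a != "0"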
Example #7
 def __init__(self,
              working_dir,
              worker_count=50,
              spawn_workers=True,
              use_threading=False,
              logger=None):
     self.working_dir = working_dir
     self.datastore = forge.get_datastore(archive_access=True)
     self.logger = logger
     self.plist = []
     self.use_threading = use_threading
     self.instance_id = get_random_id()
     self.worker_queue = NamedQueue(f"r-worker-{self.instance_id}",
                                    ttl=1800)
     self.done_queue = NamedQueue(f"r-done-{self.instance_id}", ttl=1800)
     self.hash_queue = Hash(f"r-hash-{self.instance_id}")
     self.bucket_error = []
     self.VALID_BUCKETS = sorted(list(
         self.datastore.ds.get_models().keys()))
     self.worker_count = worker_count
     self.spawn_workers = spawn_workers
     self.total_count = 0
     self.error_map_count = {}
     self.missing_map_count = {}
     self.map_count = {}
     self.last_time = 0
     self.last_count = 0
     self.error_count = 0
Example #8
def test_user_quota_tracker(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.user_quota_tracker import UserQuotaTracker

        max_quota = 3
        timeout = 2
        name = get_random_id()
        uqt = UserQuotaTracker('test-quota', timeout=timeout)

        # The first max_quota items should succeed
        for _ in range(max_quota):
            assert uqt.begin(name, max_quota) is True

        # All further items should fail until the existing ones time out
        for _ in range(max_quota):
            assert uqt.begin(name, max_quota) is False

        # If you remove an item, only one new item should be able to go in
        uqt.end(name)
        assert uqt.begin(name, max_quota) is True
        assert uqt.begin(name, max_quota) is False

        # If you wait out the timeout, all items can go in again
        time.sleep(timeout + 1)
        for _ in range(max_quota):
            assert uqt.begin(name, max_quota) is True
Example #9
 def __init__(self,
              working_dir: str,
              worker_count: int = 50,
              spawn_workers: bool = True,
              use_threading: bool = False,
              logger: logging.Logger = None):
     self.working_dir = working_dir
     self.datastore = forge.get_datastore(archive_access=True)
     self.logger = logger
     self.plist: list[Process] = []
     self.use_threading = use_threading
     self.instance_id = get_random_id()
     self.worker_queue: NamedQueue[dict[str, Any]] = NamedQueue(
         f"r-worker-{self.instance_id}", ttl=1800)
     self.done_queue: NamedQueue[dict[str, Any]] = NamedQueue(
         f"r-done-{self.instance_id}", ttl=1800)
     self.hash_queue: Hash[str] = Hash(f"r-hash-{self.instance_id}")
     self.bucket_error: list[str] = []
     self.valid_buckets: list[str] = sorted(
         list(self.datastore.ds.get_models().keys()))
     self.worker_count = worker_count
     self.spawn_workers = spawn_workers
     self.total_count = 0
     self.error_map_count: dict[str, int] = {}
     self.missing_map_count: dict[str, int] = {}
     self.map_count: dict[str, int] = {}
     self.last_time: float = 0
     self.last_count = 0
     self.error_count = 0
Example #10
    def put(self, path, content):
        path = self.normalize(path)

        dirname = os.path.dirname(path)
        filename = os.path.basename(path)

        tempname = get_random_id()
        temppath = _join(dirname, tempname)

        finalpath = _join(dirname, filename)
        assert (finalpath == path)

        self.makedirs(dirname)
        fh = None
        try:
            fh = open(temppath, "wb")
            return fh.write(content)
        finally:
            if fh:
                fh.close()

            try:
                shutil.move(temppath, finalpath)
            except shutil.Error:
                pass
            assert (self.exists(path))
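put() above relies on the write-to-a-temporary-name-then-rename pattern so that a reader never observes a half-written file. A standalone sketch of the same idea using only the standard library (atomic_write is an illustrative name, not part of the filestore API):

import os
import tempfile

def atomic_write(path, content):
    # Write to a temp file in the destination directory, then atomically
    # swap it into place; os.replace() is atomic within one filesystem.
    dirname = os.path.dirname(path) or "."
    fd, temppath = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, "wb") as fh:
            fh.write(content)
        os.replace(temppath, path)
    except BaseException:
        os.unlink(temppath)
        raise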
Example #11
def test_submit_content(datastore, client):
    content = get_random_phrase(wmin=15, wmax=50).encode()
    fname = "{}.txt".format(get_random_id())
    res = client.submit(content=content, fname=fname)
    assert res['sid'] is not None
    assert res['files'][0]['name'] == fname
    assert res == datastore.submission.get(res['sid'], as_obj=False)
Example #12
 def __init__(self, name, timeout, host=None, port=None):
     self.uuid = get_random_id()
     self.c = get_client(host, port, False)
     self.lock_release = '-'.join(('lock', str(timeout), name, 'released'))
     self.lock_holder = '-'.join(('lock', str(timeout), name, 'holder'))
     self.timeout = timeout
     self._acquire = self.c.register_script(lock_acquire_script)
     self._release = self.c.register_script(lock_release_script)
Example #13
def test_submit_sha(datastore, client):
    file_id = random_id_from_collection(datastore, 'file')
    metadata = {"file_id": get_random_id(), "comment": "test"}
    res = client.submit(sha256=file_id, metadata=metadata)
    assert res['sid'] is not None
    assert res['files'][0]['sha256'] == file_id
    assert res['metadata'] == metadata
    assert res == datastore.submission.get(res['sid'], as_obj=False)
Example #14
def create_workflows(ds, log=None):
    for _ in range(20):
        w_id = get_random_id()
        ds.workflow.save(w_id, random_model_obj(Workflow))
        if log:
            log.info(f'\t{w_id}')

    ds.workflow.commit()
Example #15
def test_watcher(redis_connection):
    redis_connection.time = RedisTime()
    rds = redis_connection
    queue_name = get_random_id()
    out_queue = NamedQueue(queue_name, rds)
    try:
        # Create a server and hijack its running flag and the current time in 'redis'
        client = WatcherClient(rds)
        server = WatcherServer(rds, rds)
        server.running = ToggleTrue()
        rds.time.current = 0
        assert out_queue.length() == 0

        # Send a simple event to occur soon
        client.touch(10, 'one-second', queue_name, {'first': 'one'})
        server.try_run()
        assert out_queue.length() == 0  # Nothing yet
        rds.time.current = 12  # Jump forward 12 seconds
        server.try_run()
        assert out_queue.length() == 1
        assert out_queue.pop() == {'first': 'one'}

        # Send a simple event to occur soon, then change our mind
        client.touch(10, 'one-second', queue_name, {'first': 'one'})
        client.touch(20, 'one-second', queue_name, {'first': 'one'})
        server.try_run()
        assert out_queue.length() == 0  # Nothing yet

        # Set events to occur, in inverse order, reuse a key, overwrite content and timeout
        client.touch(200, 'one-second', queue_name, {'first': 'last'})
        client.touch(100, '100-second', queue_name, {'first': '100'})
        client.touch(50, '50-second', queue_name, {'first': '50'})
        server.try_run()
        assert out_queue.length() == 0  # Nothing yet

        for _ in range(15):
            rds.time.current += 20
            server.try_run()

        assert out_queue.length() == 3
        assert out_queue.pop() == {'first': '50'}
        assert out_queue.pop() == {'first': '100'}
        assert out_queue.pop() == {'first': 'last'}

        # Send a simple event to occur soon, then stop it
        rds.time.current = 0
        client.touch(10, 'one-second', queue_name, {'first': 'one'})
        server.try_run()
        assert out_queue.length() == 0  # Nothing yet
        client.clear('one-second')
        rds.time.current = 12  # Jump forward 12 seconds
        server.try_run()
        assert out_queue.length() == 0  # still nothing because it was cleared

    finally:
        out_queue.delete()
Example #16
def add_workflow(**kwargs):
    """
    Add a workflow to the system

    Variables:
    None

    Arguments:
    None

    Data Block:
    {
     "name": "Workflow name",    # Name of the workflow
     "classification": "",       # Max classification for workflow
     "label": ['label1'],        # Labels for the workflow
     "priority": "LOW",          # Priority of the workflow
     "status": "MALICIOUS",      # Status of the workflow
     "query": "*:*"              # Query to match the data
    }

    Result example:
    {
     "success": true             # Saving the user info succeded
    }
    """

    data = request.json

    name = data.get('name', None)
    query = data.get('query', None)

    if not name:
        return make_api_response({"success": False}, err="Name field is required", status_code=400)

    if not query:
        return make_api_response({"success": False}, err="Query field is required", status_code=400)

    if not verify_query(query):
        return make_api_response({"success": False}, err="Query contains an error", status_code=400)

    data.update({
        "workflow_id": get_random_id(),
        "creator": kwargs['user']['uname'],
        "edited_by": kwargs['user']['uname'],
        "priority": data['priority'] or None,
        "status": data['status'] or None
    })
    try:
        workflow_data = Workflow(data)
    except ValueError as e:
        return make_api_response({'success': False}, err=str(e), status_code=400)

    return make_api_response({"success": STORAGE.workflow.save(workflow_data.workflow_id, workflow_data),
                              "workflow_id": workflow_data.workflow_id})
Example #17
def datastore(datastore_connection):
    ds = datastore_connection
    try:
        create_users(ds)
        signatures.extend(create_signatures(ds))
        ds.signature.commit()

        for _ in range(TEST_SIZE):
            f = random_model_obj(File)
            ds.file.save(f.sha256, f)
            file_list.append(f.sha256)
        ds.file.commit()

        for x in range(TEST_SIZE):
            a = random_model_obj(Alert)
            a.file.sha256 = file_list[x]
            ds.alert.save(a.alert_id, a)
        ds.alert.commit()

        for x in range(TEST_SIZE):
            r = random_model_obj(Result)
            r.sha256 = file_list[x]
            ds.result.save(r.build_key(), r)
        ds.result.commit()

        for x in range(TEST_SIZE):
            s = random_model_obj(Submission)
            for f in s.files:
                f.sha256 = file_list[x]
            ds.submission.save(s.sid, s)
        ds.submission.commit()

        for x in range(TEST_SIZE):
            h = random_model_obj(Heuristic)
            h.heur_id = f"AL_TEST_{x}"
            ds.heuristic.save(h.heur_id, h)
        ds.heuristic.commit()

        for _ in range(TEST_SIZE):
            w_id = get_random_id()
            w = random_model_obj(Workflow)
            ds.workflow.save(w_id, w)
        ds.workflow.commit()

        yield ds
    finally:
        ds.alert.wipe()
        ds.file.wipe()
        ds.result.wipe()
        ds.signature.wipe()
        ds.submission.wipe()
        ds.heuristic.wipe()
        ds.workflow.wipe()
        wipe_users(ds)
Example #18
 def upload(self, src_path, dst_path):
     dst_path = self.normalize(dst_path)
     dirname = posixpath.dirname(dst_path)
     filename = posixpath.basename(dst_path)
     tempname = get_random_id()
     temppath = posixpath.join(dirname, tempname)
     finalpath = posixpath.join(dirname, filename)
     assert (finalpath == dst_path)
     self.makedirs(dirname)
     self.sftp.put(src_path, temppath)
     self.sftp.rename(temppath, finalpath)
     assert (self.exists(dst_path))
Example #19
def test_submit_path(datastore, client):
    content = get_random_phrase(wmin=15, wmax=50).encode()
    test_path = "/tmp/test_submit_{}.txt".format(get_random_id())
    with open(test_path, 'wb') as test_file:
        test_file.write(content + b"PATH")

    params = {'service_spec': {"extract": {"password": "******"}}}
    res = client.submit(path=test_path, params=params)
    assert res.get('sid', None) is not None
    assert res['files'][0]['name'] == os.path.basename(test_path)
    assert res['params']['service_spec'] == params['service_spec']
    assert res == datastore.submission.get(res['sid'], as_obj=False)
Example #20
def reply_queue_name(prefix=None, suffix=None):
    if prefix:
        components = [prefix]
    else:
        components = []

    components.append(get_random_id())

    if suffix:
        components.append(str(suffix))

    return '-'.join(components)
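A usage sketch for reply_queue_name above; the middle component comes from get_random_id(), so the outputs shown are only illustrative:

reply_queue_name()                          # e.g. '3nHtANJh2zrIgcUPbG0VsI'
reply_queue_name(prefix='reply')            # e.g. 'reply-3nHtANJh2zrIgcUPbG0VsI'
reply_queue_name(prefix='reply', suffix=7)  # e.g. 'reply-3nHtANJh2zrIgcUPbG0VsI-7'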
Example #21
    def client(datastore):
        user = datastore.user.get('admin')
        random_pass = get_random_password(length=48)
        key_name = "key_%s" % get_random_id().lower()
        user.apikeys[key_name] = {
            "password": bcrypt.hash(random_pass),
            "acl": ["R", "W"]
        }
        datastore.user.save('admin', user)
        api_key = "%s:%s" % (key_name, random_pass)

        c = get_client(UI_HOST, apikey=('admin', api_key), verify=False)
        return c
Example #22
def test_get_message(datastore, client):
    notification_queue = get_random_id()
    queue = NamedQueue("nq-%s" % notification_queue,
                       host=config.core.redis.persistent.host,
                       port=config.core.redis.persistent.port)
    queue.delete()
    msg = random_model_obj(Submission).as_primitives()
    queue.push(msg)

    res = client.ingest.get_message(notification_queue)
    assert isinstance(res, dict)
    assert 'sid' in res
    assert 'results' in res
    assert res == msg
Example #23
    def upload(self, src_path, dst_path):
        dst_path = self.normalize(dst_path)
        if src_path == dst_path:
            return

        dirname = os.path.dirname(dst_path)
        filename = os.path.basename(dst_path)
        tempname = get_random_id()
        temppath = _join(dirname, tempname)
        finalpath = _join(dirname, filename)
        assert (finalpath == dst_path)
        self.makedirs(dirname)
        shutil.copy(src_path, temppath)
        shutil.move(temppath, finalpath)
        assert (self.exists(dst_path))
Example #24
 def upload(self, src_path, dst_path):
     dst_path = self.normalize(dst_path)
     dirname = posixpath.dirname(dst_path)
     filename = posixpath.basename(dst_path)
     tempname = get_random_id()
     temppath = posixpath.join(dirname, tempname)
     finalpath = posixpath.join(dirname, filename)
     assert (finalpath == dst_path)
     self.makedirs(dirname)
     with open(src_path, 'rb') as localfile:
         self.log.debug("Storing: %s", temppath)
         self.ftp.storbinary('STOR ' + temppath, localfile)
     self.log.debug("Rename: %s -> %s", temppath, finalpath)
     self.ftp.rename(temppath, finalpath)
     assert (self.exists(dst_path))
Example #25
def test_ingest_path(datastore, client):
    content = get_random_phrase(wmin=15, wmax=50).encode()
    test_path = "/tmp/test_ingest_{}".format(get_random_id())
    with open(test_path, 'wb') as test_file:
        test_file.write(content + b"PATH")

    res = client.ingest(
        alert=True,
        path=test_path,
        params={'service_spec': {
            "extract": {
                "password": "******"
            }
        }})
    assert res.get('ingest_id', None) is not None
Example #26
def test_get_message_list(datastore, client):
    notification_queue = get_random_id()
    queue = NamedQueue("nq-%s" % notification_queue,
                       host=config.core.redis.persistent.host,
                       port=config.core.redis.persistent.port)
    queue.delete()
    msg_0 = random_model_obj(Submission).as_primitives()
    queue.push(msg_0)
    msg_1 = random_model_obj(Submission).as_primitives()
    queue.push(msg_1)

    res = client.ingest.get_message_list(notification_queue)
    assert len(res) == 2
    assert res[0] == msg_0
    assert res[1] == msg_1
Example #27
def test_alert_namespace(datastore, sio):
    alert_queue = CommsQueue('alerts', private=True)
    test_id = get_random_id()

    created = random_model_obj(AlertMessage)
    created.msg_type = "AlertCreated"

    updated = random_model_obj(AlertMessage)
    updated.msg_type = "AlertUpdated"

    test_res_array = []

    @sio.on('monitoring', namespace='/alerts')
    def on_monitoring(data):
        # Confirmation that we are waiting for alerts
        test_res_array.append(('on_monitoring', data == test_id))

    @sio.on('AlertCreated', namespace='/alerts')
    def on_alert_created(data):
        test_res_array.append(
            ('on_alert_created', data == created.as_primitives()['msg']))

    @sio.on('AlertUpdated', namespace='/alerts')
    def on_alert_updated(data):
        test_res_array.append(
            ('on_alert_updated', data == updated.as_primitives()['msg']))

    try:
        sio.emit('alert', test_id, namespace='/alerts')
        sio.sleep(1)

        alert_queue.publish(created.as_primitives())
        alert_queue.publish(updated.as_primitives())

        start_time = time.time()

        while len(test_res_array) < 3 and time.time() - start_time < 5:
            sio.sleep(0.1)

        assert len(test_res_array) == 3

        for test, result in test_res_array:
            if not result:
                pytest.fail(f"{test} failed.")

    finally:
        sio.disconnect()
Example #28
def test_download_file_handle(datastore, client):
    signature_id = random_id_from_collection(datastore,
                                             'signature',
                                             q="type:yara")
    query = "id:{}".format(signature_id)
    output = "/tmp/sigs_{}_obj".format(get_random_id())
    res = client.signature.download(query=query, output=open(output, 'wb'))
    assert res

    found = False

    with open(output, 'rb') as fh:
        if b"yara/sample_rules.yar" in fh.read():
            found = True

    if not found:
        pytest.fail("This is not the signature file that we were expecting.")
Example #29
def test_uid():
    test_data = "test" * 1000
    rid = get_random_id()
    id_test = get_id_from_data(test_data)
    id_test_l = get_id_from_data(test_data, length=LONG)
    id_test_m = get_id_from_data(test_data, length=MEDIUM)
    id_test_s = get_id_from_data(test_data, length=SHORT)
    id_test_t = get_id_from_data(test_data, length=TINY)
    assert 23 > len(rid) >= 20
    assert 23 > len(id_test) >= 20
    assert 44 > len(id_test_l) >= 41
    assert 23 > len(id_test_m) >= 20
    assert 13 > len(id_test_s) >= 10
    assert 8 > len(id_test_t) >= 5
    assert id_test == id_test_m
    for c_id in [rid, id_test, id_test_l, id_test_m, id_test_s, id_test_t]:
        for x in c_id:
            assert x in BASE62_ALPHABET
Example #30
def test_single(clean_redis):
    disp = dispatch_hash.DispatchHash('test-dispatch-hash', clean_redis)
    try:
        file_hash = get_random_id()
        service = 'service_name'
        result_key = 'some-result'

        # An empty dispatch hash isn't finished
        assert not disp.all_finished()

        # If we call dispatch, the time should be set
        now = time.time()
        disp.dispatch(file_hash, service)
        assert abs(now - disp.dispatch_time(file_hash, service)) < 1
        assert not disp.finished(file_hash, service)
        assert disp.dispatch_count() == 1
        assert not disp.all_finished()

        # After failing, the time should be reset
        disp.fail_recoverable(file_hash, service)
        assert disp.dispatch_time(file_hash, service) == 0
        assert disp.dispatch_count() == 0
        assert not disp.finished(file_hash, service)
        assert not disp.all_finished()

        # Try dispatching again
        now = time.time()
        disp.dispatch(file_hash, service)
        assert abs(now - disp.dispatch_time(file_hash, service)) < 1
        assert not disp.finished(file_hash, service)
        assert disp.dispatch_count() == 1
        assert not disp.all_finished()

        # Success rather than failure
        disp.finish(file_hash, service, result_key, 0, "U")
        assert disp.dispatch_time(file_hash, service) == 0
        assert disp.dispatch_count() == 0
        assert disp.finished_count() == 1
        assert disp.all_finished()
        assert disp.finished(file_hash, service) == dispatch_hash.DispatchRow('result', result_key, 0, False, 'U')
        assert disp.all_finished()

    finally:
        disp.delete()