def test_scheduler_driver_callbacks(mocker):
    """Every proxy method must delegate to exactly one native driver call."""
    mock_driver = mocker.Mock()
    proxy = SchedulerDriverProxy(mock_driver)

    # Exercise the full proxy surface once.
    proxy.abort()
    proxy.join()
    proxy.start()
    proxy.stop()
    proxy.acknowledge(TaskStatus())
    proxy.decline(OfferID())
    proxy.kill(OfferID())
    proxy.launch(OfferID(resources=[Cpus(1), Mem(128)]),
                 [TaskInfo(resources=[Cpus(1), Mem(128)])])
    proxy.message(ExecutorID(), SlaveID(), 'message')
    proxy.reconcile([TaskStatus()])
    proxy.request([Request()])
    proxy.revive()
    proxy.suppress()
    proxy.accept(OfferID(), [Operation()])

    # Each proxy call above must have reached its native counterpart.
    mock_driver.abort.assert_called_once()
    mock_driver.join.assert_called_once()
    mock_driver.start.assert_called_once()
    mock_driver.stop.assert_called_once()
    mock_driver.acknowledgeStatusUpdate.assert_called_once()
    mock_driver.declineOffer.assert_called_once()
    mock_driver.killTask.assert_called_once()
    mock_driver.launchTasks.assert_called_once()
    mock_driver.sendFrameworkMessage.assert_called_once()
    mock_driver.reconcileTasks.assert_called_once()
    mock_driver.requestResources.assert_called_once()
    mock_driver.reviveOffers.assert_called_once()
    mock_driver.suppressOffers.assert_called_once()
    mock_driver.acceptOffers.assert_called_once()
def offers():
    """Two offers on the same slave with differing resource capacities."""
    return [
        Offer(id=OfferID(value='first-offer'),
              slave_id=SlaveID(value='test-slave'),
              resources=[Cpus(2), Mem(256), Disk(1024)]),
        Offer(id=OfferID(value='second-offer'),
              slave_id=SlaveID(value='test-slave'),
              resources=[Cpus(1), Mem(1024), Disk(2048)]),
    ]
def test_resources_mixin_addition():
    """Adding two resource holders sums cpus/mem/disk field-wise."""
    offer = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
    task = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])

    total = offer + task
    assert isinstance(total, ResourcesMixin)
    # Resource wrappers compare equal both to wrappers and to raw numbers.
    assert total.cpus == Cpus(1.5)
    assert total.cpus == 1.5
    assert total.mem == Mem(256)
    assert total.mem == 256
    assert total.disk == Disk(0)
    assert total.disk == 0
def test_resources_mixin_subtraction():
    """Subtracting two resource holders deducts cpus/mem/disk field-wise."""
    offer = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
    task = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])

    remaining = offer - task
    assert isinstance(remaining, ResourcesMixin)
    # Resource wrappers compare equal both to wrappers and to raw numbers.
    assert remaining.cpus == Cpus(0.5)
    assert remaining.cpus == 0.5
    assert remaining.mem == Mem(0)
    assert remaining.mem == 0
    assert remaining.disk == Disk(0)
    assert remaining.disk == 0
def test_resources_mixin_inplace_addition():
    """In-place addition accumulates resources and keeps the Offer type."""
    offer = Offer(resources=[Cpus(1), Mem(128), Disk(64)])
    task = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])

    offer += task
    assert isinstance(offer, Offer)
    assert offer.cpus == Cpus(1.5)
    assert offer.cpus == 1.5
    assert offer.mem == Mem(256)
    assert offer.mem == 256
    assert offer.disk == Disk(64)
    assert offer.disk == 64
def test_resources_mixin_inplace_subtraction():
    """In-place subtraction deducts resources and keeps the Offer type."""
    offer = Offer(resources=[Cpus(1), Mem(128), Disk(64)])
    task = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])

    offer -= task
    assert isinstance(offer, Offer)
    assert offer.cpus == Cpus(0.5)
    assert offer.cpus == 0.5
    assert offer.mem == Mem(0)
    assert offer.mem == 0
    assert offer.disk == Disk(64)
    assert offer.disk == 64
def test_resources_mixin_sum():
    """The built-in sum() works over a sequence of resource holders."""
    first = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
    second = Offer(resources=[Cpus(2), Mem(128), Disk(100)])
    third = Offer(resources=[Cpus(0.5), Mem(256), Disk(200)])

    total = sum([first, second, third])
    assert isinstance(total, ResourcesMixin)
    assert total.cpus == Cpus(3.5)
    assert total.cpus == 3.5
    assert total.mem == Mem(512)
    assert total.mem == 512
    assert total.disk == Disk(300)
    assert total.disk == 300
def mesos(obj, name=None, pure=True, cpus=1, mem=64, disk=0,
          docker='lensa/dask.mesos', force_pull=False, envs=None, uris=None,
          **kwargs):
    """Wrap *obj* into a Mesos-aware delayed object.

    Parameters
    ----------
    obj : object
        The value or callable to wrap; returned unchanged if it is already
        a MesosDelayed.
    name : str, optional
        Key for the wrapped task; derived from the type name and a token
        when omitted.
    pure : bool
        Passed through to tokenization/leaf construction.
    cpus, mem, disk : number
        Resource demands attached to the task.
    docker : str
        Docker image the task runs in.
    force_pull : bool
        Whether the image should always be re-pulled.
    envs : dict, optional
        Environment variables for the task (fresh empty dict by default).
    uris : list, optional
        URIs to fetch into the sandbox (fresh empty list by default).

    Returns
    -------
    MesosDelayed or MesosDelayedLeaf
    """
    # BUGFIX: the original signature used mutable defaults (envs={}, uris=[]),
    # which are shared across calls and can leak state between tasks. Using
    # None sentinels is behaviorally identical for every caller.
    kwargs['resources'] = [Cpus(cpus), Mem(mem), Disk(disk)]
    kwargs['docker'] = docker
    kwargs['force_pull'] = force_pull
    kwargs['envs'] = {} if envs is None else envs
    kwargs['uris'] = [] if uris is None else uris

    if isinstance(obj, MesosDelayed):
        return obj

    task, dasks, params = to_task_dasks_params(obj)
    if not dasks:
        return MesosDelayedLeaf(obj, pure=pure, name=name, **kwargs)

    if not name:
        # Derive a stable key from the type name and the task's token.
        name = '%s-%s' % (type(obj).__name__, tokenize(task, pure=pure))
    dasks.append({name: task})
    params.append({name: kwargs})
    return MesosDelayed(name, dasks, params)
def tasks():
    """Seven task infos covering a spread of cpu/mem demands."""
    specs = [(0.1, 128), (1.0, 256), (3.0, 4096), (0.2, 64),
             (1.4, 1024), (0.5, 128), (0.1, 128)]
    return [TaskInfo(resources=[Cpus(c), Mem(m)]) for c, m in specs]
def python_task():
    """A minimal PythonTask fixture that sums range(5)."""
    return PythonTask(id=TaskID(value='test-task-id'),
                      fn=sum, args=[range(5)],
                      resources=[Cpus(0.1), Mem(128), Disk(0)])
def docker_command():
    """A shell-command task fixture that runs inside a Docker container."""
    return TaskInfo(
        name='test-docker-task',
        id=TaskID(value='test-docker-task-id'),
        resources=[Cpus(0.1), Mem(64)],
        command=CommandInfo(value='echo 100'),
        container=ContainerInfo(type='DOCKER',
                                docker=DockerInfo(image='lensa/satyr')))
def docker_python():
    """A named PythonTask fixture that sums range(5)."""
    return PythonTask(id=TaskID(value='test-python-task-id'),
                      fn=sum, args=[range(5)],
                      name='test-python-task-name',
                      resources=[Cpus(0.1), Mem(64), Disk(0)])
def test_get_both(executor):
    """get() handles a single key and a list of keys alike."""
    overrides = {'z': {'resources': [Cpus(0.17), Mem(64), Disk(0)]}}

    # Single key: one mesos submit, one threadpool submit.
    assert executor.get(dsk, 'w', params=overrides) == 4
    assert executor.submit.call_count == 1
    assert executor.threadpool.submit.call_count == 1

    # Key list: counters advance by one more of each.
    assert executor.get(dsk, ['w', 'z'], params=overrides) == (4, 2)
    assert executor.submit.call_count == 2
    assert executor.threadpool.submit.call_count == 2
def test_sequential_submit_get(mocker, docker_python):
    """Each submitted task's result is fetched before the next submission."""
    scheduler = QueueScheduler()
    mocker.spy(scheduler, 'on_update')

    with Running(scheduler, name='test-scheduler'):
        for idx in range(3):
            task = PythonTask(
                id=TaskID(value='test-python-task-{}'.format(idx)),
                fn=sum, args=[[1, 10, idx]],
                name='test-python-task-name',
                resources=[Cpus(0.1), Mem(64), Disk(0)])
            result = scheduler.submit(task)
            assert result.get(timeout=10) == 11 + idx
def test_non_strict_encode_task_info():
    """Attributes unknown to the protobuf schema are dropped when encoding."""
    task = TaskInfo(name='test-task',
                    id=TaskID(value='test-task-id'),
                    resources=[Cpus(0.1), Mem(16)],
                    command=CommandInfo(value='echo 100'))
    # Attach fields that have no protobuf counterpart.
    task.result = 'some binary data'
    task.status = TaskStatus()

    proto = encode(task)
    assert isinstance(proto, mesos_pb2.TaskInfo)
    assert proto.command.value == 'echo 100'
    # The extra attribute must not survive the round-trip.
    with pytest.raises(AttributeError):
        proto.status
def test_encode_task_info():
    """Encoding a TaskInfo preserves name, id, command and resources."""
    task = TaskInfo(name='test-task',
                    id=TaskID(value='test-task-id'),
                    resources=[Cpus(0.1), Mem(16)],
                    command=CommandInfo(value='echo 100'))

    proto = encode(task)
    assert isinstance(proto, mesos_pb2.TaskInfo)
    assert proto.command.value == 'echo 100'
    assert proto.name == 'test-task'
    assert proto.resources[0].name == 'cpus'
    assert proto.resources[0].scalar.value == 0.1
    assert proto.task_id.value == 'test-task-id'
def test_status_in_task_info():
    """A fresh TaskInfo carries a staging status that encoding strips."""
    task = TaskInfo(name='test-task',
                    id=TaskID(value='test-task-id'),
                    resources=[Cpus(0.1), Mem(16)],
                    command=CommandInfo(value='echo 100'))

    assert isinstance(task.status, TaskStatus)
    assert task.status.state == 'TASK_STAGING'

    proto = encode(task)
    assert isinstance(proto, mesos_pb2.TaskInfo)
    # status is a satyr-side convenience, not part of the protobuf message.
    with pytest.raises(AttributeError):
        proto.status
def test_encode_task_info_resources():
    """Resources keep their order and scalar values through encoding."""
    task = TaskInfo(name='test-task',
                    id=TaskID(value='test-task-id'),
                    resources=[Cpus(0.1), Mem(16)],
                    command=CommandInfo(value='testcmd'))

    proto = encode(task)
    assert proto.name == 'test-task'
    assert proto.task_id.value == 'test-task-id'
    assert proto.resources[0].name == 'cpus'
    assert proto.resources[0].scalar.value == 0.1
    assert proto.resources[1].name == 'mem'
    assert proto.resources[1].scalar.value == 16
    assert proto.command.value == 'testcmd'
def test_resources_mixin_comparison():
    """Resource holders order by their aggregate resource demands."""
    offer_small = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
    offer_big = Offer(resources=[Cpus(2), Mem(256), Disk(1024)])
    task_small = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])
    task_big = TaskInfo(resources=[Cpus(1), Mem(256), Disk(512)])
    task_mixed = TaskInfo(resources=[Cpus(0.5), Mem(256), Disk(512)])

    # Plain numeric accessors.
    assert offer_small.cpus == 1
    assert offer_small.mem == 128
    assert offer_big.cpus == 2
    assert offer_big.disk == 1024
    assert task_small.cpus == 0.5
    assert task_small.mem == 128
    assert task_big.cpus == 1
    assert task_big.disk == 512

    # Ordering among offers.
    assert offer_small == offer_small
    assert offer_small < offer_big
    assert offer_small <= offer_big
    assert offer_big > offer_small
    assert offer_big >= offer_small

    # Ordering among tasks.
    assert task_small == task_small
    assert task_small < task_big
    assert task_small <= task_big
    assert task_big > task_small
    assert task_big >= task_small

    # Cross-type comparisons (offer vs task).
    assert offer_small >= task_small
    assert offer_big >= task_small
    assert offer_big >= task_big
    assert task_big >= offer_small
    assert task_mixed > offer_small
    assert task_mixed <= task_big
    assert task_mixed > task_small
def test_multiple_submissions(mocker, docker_python):
    """Several queued tasks can all be collected after a single wait()."""
    scheduler = QueueScheduler()
    mocker.spy(scheduler, 'on_update')

    with Running(scheduler, name='test-scheduler'):
        futures = []
        for idx in range(3):
            task = PythonTask(
                id=TaskID(value='test-python-task-{}'.format(idx)),
                fn=sum, args=[[1, 10, idx]],
                name='test-python-task-name',
                resources=[Cpus(0.1), Mem(64), Disk(0)])
            futures.append(scheduler.submit(task))
        scheduler.wait()  # block until every task has finished
        assert [f.get() for f in futures] == [11, 12, 13]
def test_encode_resources():
    """Cpus/Mem/Disk each encode into a scalar protobuf Resource."""
    cpus_pb = encode(Cpus(0.1))
    assert cpus_pb.scalar.value == 0.1
    assert cpus_pb.name == 'cpus'
    assert cpus_pb.type == mesos_pb2.Value.SCALAR

    mem_pb = encode(Mem(16))
    assert mem_pb.scalar.value == 16
    assert mem_pb.name == 'mem'
    assert mem_pb.type == mesos_pb2.Value.SCALAR

    disk_pb = encode(Disk(256))
    assert disk_pb.scalar.value == 256
    assert disk_pb.name == 'disk'
    assert disk_pb.type == mesos_pb2.Value.SCALAR
def test_docker_python_exception():
    """An exception raised on the executor side surfaces via result.get()."""
    sched = QueueScheduler()

    def error():
        raise Exception('Dummy exception on executor side!')

    task = PythonTask(id=TaskID(value='test-python-task-id'),
                      fn=error, name='test-python-task-name',
                      resources=[Cpus(0.1), Mem(64), Disk(0)])
    with Running(sched, name='test-scheduler'):
        result = sched.submit(task)
        with pytest.raises(Exception) as e:
            result.get()
        # BUGFIX: `e.value.message` is Python-2-only — BaseException.message
        # no longer exists in Python 3. args[0] is the portable way to read
        # the message passed to the exception constructor.
        assert e.value.args[0] == 'Dummy exception on executor side!'
def test_sequential_execution(mocker, docker_python):
    """Tasks run one at a time: wait() and verify after each submission."""
    scheduler = QueueScheduler()
    mocker.spy(scheduler, 'on_update')

    with Running(scheduler, name='test-scheduler'):
        submitted = []
        for idx in range(3):
            # NOTE(review): reconstructed from a collapsed one-line source;
            # wait/assert are assumed to sit inside the loop, matching the
            # "sequential" naming — confirm against the original layout.
            task = PythonTask(
                id=TaskID(value='test-python-task-{}'.format(idx)),
                fn=sum, args=[[1, 10, idx]],
                name='test-python-task-name',
                resources=[Cpus(0.1), Mem(64), Disk(0)])
            scheduler.submit(task)
            submitted.append(task)
            scheduler.wait()
            assert task.status.data == 11 + idx
def test_parallel_execution(mocker, docker_python):
    """All tasks are submitted up front, then verified after one wait()."""
    scheduler = QueueScheduler()
    mocker.spy(scheduler, 'on_update')

    with Running(scheduler, name='test-scheduler'):
        submitted = []
        for idx in range(3):
            task = PythonTask(
                id=TaskID(value='test-python-task-{}'.format(idx)),
                fn=sum, args=[[1, 10, idx]],
                name='test-python-task-name',
                resources=[Cpus(0.1), Mem(64), Disk(0)])
            scheduler.submit(task)
            submitted.append(task)
        scheduler.wait()  # block until every task has finished
        assert [t.status.data for t in submitted] == [11, 12, 13]
def test_docker_python_exception():
    """Executor-side errors land on the task status as a RemoteException."""
    scheduler = QueueScheduler()

    def error():
        raise TypeError('Dummy exception on executor side!')

    task = PythonTask(id=TaskID(value='test-python-task-id'),
                      fn=error, name='test-python-task-name',
                      resources=[Cpus(0.1), Mem(64), Disk(0)])

    with Running(scheduler, name='test-scheduler'):
        scheduler.submit(task)
        scheduler.wait()
        assert task.status.has_failed()
        # The remote exception proxies the original type as well.
        assert isinstance(task.status.exception, RemoteException)
        assert isinstance(task.status.exception, TypeError)
def test_scheduler_retries(mocker):
    """A repeatedly failing task is retried; each attempt reports a status."""
    task = PythonTask(id=TaskID(value='non-existing-docker-image'),
                      name='test', fn=lambda: range(int(10e10)),
                      docker='pina/sen',
                      resources=[Cpus(0.1), Mem(128), Disk(0)])
    scheduler = QueueScheduler(name='test-executor-lost', retries=3)
    mocker.spy(scheduler, 'on_update')

    with Running(scheduler, name='test-scheduler'):
        scheduler.submit(task)
        scheduler.wait()
        assert scheduler.on_update.call_count == 3
        expected = ['TASK_STARTING', 'TASK_STARTING', 'TASK_FAILED']
        # on_update(driver, status): the status is the second positional arg.
        for (args, kwargs), state in zip(
                scheduler.on_update.call_args_list, expected):
            assert args[1].state == state
def command():
    """A plain shell-command task fixture (no container)."""
    return TaskInfo(name='test-task',
                    id=TaskID(value='test-task-id'),
                    resources=[Cpus(0.1), Mem(64)],
                    command=CommandInfo(value='echo 100'))
def offers():
    """Four offers with varying cpu/mem capacities."""
    capacities = [(1.1, 2048), (2.0, 512), (0.8, 1024), (1.6, 2048)]
    return [Offer(resources=[Cpus(c), Mem(m)]) for c, m in capacities]
from satyr.proxies.messages import Cpus, Disk, Mem # tests are not modules, so these are not picklable @mesos(cpus=0.1, mem=256, docker='test1') def add(x, y): return x + y @mesos(cpus=0.2, mem=128, docker='test2') def mul(x, y): return x * y add_params = {'docker': 'test1', 'force_pull': False, 'resources': [Cpus(0.1), Mem(256), Disk(0)], 'envs': {}, 'uris': []} mul_params = {'docker': 'test2', 'force_pull': False, 'resources': [Cpus(0.2), Mem(128), Disk(0)], 'envs': {}, 'uris': []} def test_mesos_is_delayed(): def add(x, y): return x + y add1 = delayed(add) add2 = mesos(add)
def resources():
    """The default cpu/mem/disk trio used throughout the tests."""
    return [Cpus(0.1), Mem(128), Disk(0)]