def test_assign_workload():

    fpath = os.path.dirname(os.path.abspath(__file__))
    engine = Engine(cfg_path='%s/../config_test.yml' % fpath)

    # Assigning anything other than a Resource should be rejected
    with pytest.raises(CalcTypeError):
        engine.assign_resource([])

    wl1 = Workload(num_tasks=4, ops_dist='uniform', dist_mean=5, dist_var=0)
    engine.assign_workload(wl1, submit_at=10)

    with open('%s/../config_test.yml' % fpath) as fp:
        cfg = yaml.safe_load(fp)

    conn, chan = func_setup_mqs(cfg)

    # Poll the workload queue until the submitted workload shows up
    response = None
    while not response:
        method_frame, header_frame, response = chan.basic_get(
            queue=cfg['rmq']['wlms']['queues']['workload'], no_ack=True)
    conn.close()

    response = json.loads(response)
    assert response.pop('submit_time') == 10

    # Rebuild a workload from the message and compare it task by task
    wl2 = Workload(no_uid=False)
    wl2.from_dict(response)

    assert len(wl1.task_list) == len(wl2.task_list)
    for ind, task in enumerate(wl1.task_list):
        assert task.ops == wl2.task_list[ind].ops
def test_fastest_first():

    wl = Workload(num_tasks=10, ops_dist='uniform', dist_mean=10, dist_var=0)
    schedule = fastest_first(wl.task_list)

    assert schedule == sorted(wl.task_list, key=lambda task: task.ops,
                              reverse=True)
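# Illustrative sketch only: the real fastest_first lives in the project
# sources. This version is reconstructed from the assertion in
# test_fastest_first (tasks ordered by descending ops) and is not the
# authoritative implementation.
def fastest_first_sketch(task_list):
    # Largest tasks first, mirroring the sorted(..., reverse=True) check above
    return sorted(task_list, key=lambda task: task.ops, reverse=True)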
def get_workload():

    # Create a workload with a specific number of tasks, with the number of
    # operations per task drawn from a distribution
    wl = Workload(
        num_tasks=128,       # no. of tasks
        ops_dist='uniform',  # distribution to draw samples from
        dist_mean=1024,      # mean of distribution
        dist_var=4           # variance of distribution
    )

    return wl
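# Hypothetical companion helper, added for symmetry with get_workload. The
# constructor arguments and create_core_list() call are taken from the tests
# in this suite; the inline comments on the variance parameters are
# assumptions based on their names.
def get_resource():

    rs = Resource(
        num_cores=4,          # no. of cores
        perf_dist='uniform',  # distribution to draw per-core performance from
        dist_mean=5,          # mean of distribution
        temporal_var=0,       # variance over time (name as used in the tests)
        spatial_var=0         # variance across cores (name as used in the tests)
    )
    rs.create_core_list()

    return rs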
def test_round_robin():

    wl = Workload(num_tasks=10, ops_dist='uniform', dist_mean=10, dist_var=0)
    rs = Resource(num_cores=4, perf_dist='uniform', dist_mean=5,
                  temporal_var=0, spatial_var=0)
    rs.create_core_list()

    schedule = round_robin(wl.task_list, rs.core_list)

    for ind, task in enumerate(wl.task_list):
        assert schedule[ind] == {'task': task, 'core': rs.core_list[ind % 4]}
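# Illustrative sketch of round_robin, reconstructed from the assertion in
# test_round_robin: task i is paired with core i % len(core_list). The
# project's own implementation may differ in detail.
def round_robin_sketch(task_list, core_list):
    return [{'task': task, 'core': core_list[ind % len(core_list)]}
            for ind, task in enumerate(task_list)]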
def test_wlms_run():

    fpath = os.path.dirname(os.path.abspath(__file__))
    with open('%s/../config_test.yml' % fpath) as fp:
        cfg = yaml.safe_load(fp)

    # Run the WLMS in a background thread so the test can drive it over RMQ
    wlms = WLMS(cfg_path='%s/../config_test.yml' % fpath)
    t = threading.Thread(target=func_for_test_wlms_run, args=(wlms, ))
    t.daemon = True
    t.start()

    conn = pika.BlockingConnection(
        pika.ConnectionParameters(host=cfg['rmq']['host'],
                                  port=cfg['rmq']['port']))
    chan = conn.channel()

    # Publish a resource description, then a workload with a submit time
    res = Resource(num_cores=10)
    res_as_dict = res.to_dict()
    chan.basic_publish(body=json.dumps(res_as_dict),
                       exchange=cfg['rmq']['wlms']['exchange'],
                       routing_key='res')

    wl = Workload(num_tasks=10)
    wl_as_dict = wl.to_dict()
    wl_as_dict['submit_time'] = 5
    chan.basic_publish(body=json.dumps(wl_as_dict),
                       exchange=cfg['rmq']['wlms']['exchange'],
                       routing_key='wl')

    # Poll the executor's schedule queue until the WLMS produces a schedule
    schedule = None
    while not schedule:
        method_frame, header_frame, schedule = chan.basic_get(
            queue=cfg['rmq']['executor']['queues']['schedule'], no_ack=True)

    schedule = json.loads(schedule)
    assert len(schedule) == 10

    t.join(timeout=5)
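# Reference only: the RMQ-related keys that test_assign_workload and
# test_wlms_run read from config_test.yml, collected as a plain dict. Only
# the key layout comes from the tests; every concrete value below is an
# assumed placeholder, not the real test configuration.
EXPECTED_CFG_LAYOUT = {
    'rmq': {
        'host': 'localhost',   # assumed
        'port': 5672,          # assumed
        'wlms': {
            'exchange': '...',              # published to with routing keys 'res' and 'wl'
            'queues': {'workload': '...'},  # polled in test_assign_workload
        },
        'executor': {
            'queues': {'schedule': '...'},  # polled in test_wlms_run
        },
    },
}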
def test_random_placer():

    wl = Workload(num_tasks=10, ops_dist='uniform', dist_mean=10, dist_var=0)
    rs = Resource(num_cores=4, perf_dist='uniform', dist_mean=5,
                  temporal_var=0, spatial_var=0)
    rs.create_core_list()

    schedule = random_placer(wl.task_list, rs.core_list)

    assert len(schedule) == wl.num_tasks
    for x in schedule:
        assert set(x.keys()) == set(['task', 'core'])
        assert None not in x.values()
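# Illustrative sketch of random_placer, consistent with what the test checks:
# every task is paired with some core. Using random.choice here is an
# assumption about the implementation, not taken from the source.
import random

def random_placer_sketch(task_list, core_list):
    return [{'task': task, 'core': random.choice(core_list)}
            for task in task_list]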
def test_s2f():

    wl = Workload(num_tasks=10, ops_dist='uniform', dist_mean=10, dist_var=0)
    rs = Resource(num_cores=4, perf_dist='uniform', dist_mean=5,
                  temporal_var=0, spatial_var=0)
    rs.create_core_list()

    schedule = smallest_to_fastest(wl.task_list, rs.core_list)

    tasks = sorted(wl.task_list, key=lambda task: task.ops)
    cores = sorted(rs.core_list, key=lambda unit: unit.perf, reverse=True)

    for ind, x in enumerate(tasks):
        assert schedule[ind] == {'task': x, 'core': cores[ind % len(cores)]}
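# Illustrative sketch of smallest_to_fastest, reconstructed from the
# assertions in test_s2f: tasks sorted by ascending ops are dealt round-robin
# onto cores sorted by descending perf. Not the authoritative implementation.
def smallest_to_fastest_sketch(task_list, core_list):
    tasks = sorted(task_list, key=lambda task: task.ops)
    cores = sorted(core_list, key=lambda unit: unit.perf, reverse=True)
    return [{'task': task, 'core': cores[ind % len(cores)]}
            for ind, task in enumerate(tasks)]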
def test_select_all():

    wl = Workload(num_tasks=10, ops_dist='uniform', dist_mean=10, dist_var=0)
    assert len(select_all(wl.task_list, 5)) == wl.num_tasks
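# Illustrative sketch of select_all: the test passes a second argument (here 5)
# yet expects every task back, so this sketch returns the full list and
# ignores that parameter, whose meaning is assumed rather than documented here.
def select_all_sketch(task_list, count):
    return list(task_list)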
def test_random_order():

    wl = Workload(num_tasks=10, ops_dist='uniform', dist_mean=10, dist_var=0)
    schedule = random_order(workload=wl.task_list)
    assert len(schedule) == wl.num_tasks
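# Illustrative sketch of random_order: the test only checks the length of the
# returned schedule, so this sketch simply shuffles a copy of the task list,
# which is an assumption about what the real function does.
import random

def random_order_sketch(workload):
    schedule = list(workload)
    random.shuffle(schedule)
    return schedule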