def __init__(self, **kwargs):
    """Initialize the task-set generator.

    Keyword Args:
        tot_util (float): Target total utilization for generated task
            sets (default 1.0).
        util_over (bool): Whether tasks with utilization > 1.0 are
            allowed (default True).
        deadline_scale (float): Scale factor applied to generated
            deadlines (default 1.0).
    """
    # Zero-argument super() avoids the infinite-recursion bug that
    # super(type(self), self) triggers when this class is subclassed:
    # type(self) is then the subclass, so dispatch never advances up
    # the MRO.
    super().__init__(**kwargs)
    self.tot_util = kwargs.get('tot_util', 1.0)
    self.ts = TaskSet()
    # -1 means "no task has been assigned an id yet".
    self.last_id = -1
    self.utilization_overflow = kwargs.get('util_over', True)
    self.deadline_scale = kwargs.get('deadline_scale', 1.0)
def test_task_is_counted(self):
    """len() of a TaskSet reflects the number of appended tasks."""
    ts = TaskSet()
    for task in (Task(), Task()):
        ts.append(task)
    self.assertEqual(len(ts), 2)
def next_task_set(self):
    """Generate up to num_task tasks, stopping before tot_util is reached."""
    task_set = TaskSet()
    for task_id in range(self.num_task):
        candidate = self.next_task()
        candidate.id = task_id
        # Stop as soon as adding the candidate would hit the budget.
        if task_set.tot_util() + candidate.utilization() >= self.tot_util:
            break
        task_set.append(candidate)
    return task_set
def test_task_setter(self):
    """Assigning to an index replaces the stored task."""
    ts = TaskSet()
    ts.append(Task(id=2))
    replacement = Task(id=3)
    ts[0] = replacement
    self.assertEqual(ts[0].id, 3)
def next_task_set(self):
    """Build a task set whose per-task utilizations follow a UUniFast split."""
    utilizations = self.unifast_divide(self.num_task, self.tot_util)
    task_set = TaskSet()
    for task_id in range(self.num_task):
        task = self.next_task(cand_util=utilizations[task_id])
        task.id = task_id
        task_set.append(task)
    return task_set
def parallelize_pts_custom(pt_list, popt_list):
    """Merge each parallelizable task at its chosen parallel option.

    Args:
        pt_list: Sequence of parallelizable tasks.
        popt_list: Parallel option to use for each task; must have the
            same length as pt_list.

    Returns:
        TaskSet: Union of the per-task thread sets.

    Raises:
        ValueError: If the two lists differ in length.
    """
    if len(pt_list) != len(popt_list):
        # ValueError is more precise than a bare Exception and remains
        # backward compatible with callers that catch Exception.
        raise ValueError(
            'pt_list or popt_list malformed. Length does not match.')
    ts = TaskSet()
    # zip pairs each task with its option, avoiding manual indexing.
    for pt, popt in zip(pt_list, popt_list):
        ts.merge_ts(pt[popt])
    return ts
def test_utilization_and_density_difference(self):
    """Density exceeds utilization when deadline is shorter than period."""
    ts = TaskSet()
    ts.append(Task(exec_time=6, deadline=8, period=10))
    # utilization = 6/10 = 0.6, density = 6/8 = 0.75, diff = -0.15
    diff = tsutil.sum_utilization(ts) - tsutil.sum_density(ts)
    assert_almost_equals(diff, -0.15)
def test_sum_density(self):
    """sum_density adds exec_time/deadline across every task in the set."""
    ts = TaskSet()
    ts.append(Task(exec_time=8, deadline=10))
    ts.append(Task(exec_time=9, deadline=10))
    # 8/10 + 9/10 = 1.7
    assert_almost_equals(tsutil.sum_density(ts), 1.7)
def test_ts_merge(self):
    """merge_ts appends the other set's tasks, duplicates included."""
    shared = Task(id=2)
    extra = Task(id=3)
    ts1 = TaskSet()
    ts1.append(shared)
    ts2 = TaskSet()
    ts2.append(shared)
    ts2.append(extra)
    ts1.merge_ts(ts2)
    self.assertEqual(len(ts1), 3)
    self.assertEqual(ts1[2], extra)
def __init__(self, **kwargs):
    """Initialize a parallelizable task set (ParaTaskSet).

    Keyword Args:
        id (int): Identifier; defaults to the class-level instance counter.
        max_option (int): Maximum parallelization option (default 1).
        overhead (float): Execution-time growth rate per parallelization
            step (default 0.0).
        variance (float): Execution-time spread between threads on
            parallelization (default 0.0).
        base_ts (TaskSet): Base task set to parallelize; defaults to a
            single task with exec_time=1, deadline=2, period=3.
        custom (str): When the string 'True', pt_list is taken from
            kwargs instead of being populated via populate_pt_list().
        pt_list (list): Pre-built parallelizable tasks (custom mode only).
        popt_strategy (str): Parallel-option selection strategy
            (default 'single').
        popt_list (list): Per-task parallel option; defaults to 1 for
            every entry of pt_list.
    """
    # The class-level counter doubles as the default id.
    type(self).cnt += 1
    self.id = kwargs.get('id', type(self).cnt)
    self.max_opt = kwargs.get('max_option', 1)
    # parallelizer info
    self.overhead = kwargs.get('overhead', 0.0)
    # NOTE(review): clamping overhead > 0.5 UP to 3.0 looks suspicious
    # (a cap would normally lower the value) -- confirm intent.
    if self.overhead > 0.5:
        self.overhead = 3.0
    self.variance = kwargs.get('variance', 0.0)
    # base task set info
    tmp_ts = TaskSet()
    tmp_ts.append(Task(**{'exec_time': 1, 'deadline': 2, 'period': 3}))
    self.base_ts = kwargs.get('base_ts', tmp_ts)
    self.pt_list = []
    # 'custom' is a string flag ('True'/'False'), not a bool.
    if kwargs.get('custom', 'False') == 'True':
        self.pt_list = kwargs.get('pt_list', [[]])
    else:
        self.populate_pt_list()
    # pts serialized according to selected option;
    # defaults to a single thread for each pt in pts.
    self.popt_strategy = kwargs.get('popt_strategy', 'single')
    self.popt_list = kwargs.get('popt_list',
                                [1 for i in range(len(self.pt_list))])
    self.pts_serialized = TaskSet()
    self.serialize_pts()
    # Expose the serialized threads as this set's task list.
    self.task_list = self.pts_serialized.task_list
    return
def test_two_task(self):
    """Two tasks totalling utilization 1.0 are GFB-schedulable on 2 cores."""
    ts = TaskSet()
    ts.append(Task(exec_time=4, deadline=10))
    ts.append(Task(exec_time=6, deadline=10))
    self.assertTrue(gfb.is_schedulable(ts, num_core=2))
def custom_init(self):
    """Fill ts_table from user-supplied per-option execution times.

    For each option 1..max_opt, builds a TaskSet of `opt` threads whose
    execution times come from self.exec_times[opt - 1].
    """
    if self.max_opt >= 2:
        para.parallelize_pt_non_dec(self)
    for opt in range(1, self.max_opt + 1):
        threads = TaskSet()
        for idx in range(opt):
            thread = Thread(
                id=self.base_task.id,
                exec_time=self.exec_times[opt - 1][idx],
                deadline=self.base_task.deadline,
                period=self.base_task.period,
            )
            threads.append(thread)
        self.ts_table[str(opt)] = threads
    return
def test_single_and_max_popt(self):
    """The pts length stays 2 for both requested option settings.

    NOTE(review): these params pass the key 'popt', but ParaTaskSet
    appears to read 'popt_strategy', so both constructions may silently
    fall back to the default strategy -- confirm the intended key.
    """
    base = TaskSet()
    base.append(Task(exec_time=40, deadline=100, period=100))
    base.append(Task(exec_time=100, deadline=200, period=200))
    for strategy in ('single', 'max'):
        pts = ParaTaskSet(base_ts=base, max_option=4, overhead=0.0,
                          variance=0.3, popt=strategy)
        self.assertEqual(len(pts), 2)
def __init__(self, **kwargs):
    """Initialize a parallelizable task (ParaTask).

    Keyword Args:
        id (int): Identifier; defaults to the class-level instance counter.
        max_option (int): Maximum parallelization option (default 1).
        overhead (float): Execution-time growth rate per parallelization
            step (default 0.0).
        variance (float): Execution-time spread between threads on
            parallelization (default 1.0).
            NOTE(review): ParaTaskSet defaults variance to 0.0 -- confirm
            the asymmetry is intended.
        base_task (Task): Task to parallelize; defaults to exec_time=1,
            deadline=2, period=3.
        custom (str): When the string 'True', exec_times is taken from
            kwargs and custom_init() builds ts_table.
        exec_times (list[list]): Per-option thread execution times
            (custom mode only).
    """
    # The class-level counter doubles as the default id.
    type(self).cnt += 1
    self.id = kwargs.get('id', type(self).cnt)
    self.max_opt = kwargs.get('max_option', 1)
    # parallelizer info
    self.overhead = kwargs.get('overhead', 0.0)
    self.variance = kwargs.get('variance', 1.0)
    # base task info
    self.base_task = kwargs.get(
        'base_task', Task(**{
            'exec_time': 1,
            'deadline': 2,
            'period': 3
        }))
    # ts_table maps str(option) -> TaskSet of threads for that option;
    # option '1' always holds the unparallelized base task.
    ts = TaskSet()
    ts.append(self.base_task)
    self.ts_table = {'1': ts}
    # 'custom' is a string flag ('True'/'False'), not a bool.
    if kwargs.get('custom', 'False') == 'True':
        self.exec_times = kwargs.get('exec_times', [[]])
        self.custom_init()
    else:
        self.populate_ts_table()
} t_objdetect = Task(**task_param) para_task_param = { 'base_task': t_objdetect, 'max_option': 4, 'custom': 'True', 'exec_times': [[30], [17, 15], [12, 12, 10], [9, 8, 8, 7]], } pt_objdetect = ParaTask(**para_task_param) print(pt_objdetect) # create pts print('----------------') print('pts') ts = TaskSet() ts.append(t_lanetrack) ts.append(t_objdetect) pts_param_single = { 'base_ts': ts, 'max_option': 4, 'popt_strategy': 'single', 'custom': 'True', 'pt_list': [pt_lanetrack, pt_objdetect], } pts = ParaTaskSet(**pts_param_single) pts_util = pts.tot_util() print(pts_util) print(pts)
def next_task_set(self):
    """Return a fresh TaskSet containing num_task generated tasks."""
    generated = TaskSet()
    for _ in range(self.num_task):
        generated.append(self.next_task())
    return generated
def test_id_does_not_overlap(self):
    """Each newly constructed TaskSet receives a distinct id."""
    first, second = TaskSet(), TaskSet()
    self.assertNotEqual(first.id, second.id)
def test_taskset_is_cleared(self):
    """clear() leaves the set empty."""
    ts = TaskSet()
    ts.append(Task())
    ts.clear()
    self.assertEqual(len(ts), 0)
def parallelize_pts_max(pt_list, **kwargs):
    """Merge every parallelizable task at the maximum parallel option.

    Keyword Args:
        max_option (int): Option index applied to each task (default 1).
    """
    option = kwargs.get('max_option', 1)
    merged = TaskSet()
    for pt in pt_list:
        merged.merge_ts(pt[option])
    return merged
def parallelize_pts_random(pt_list, **kwargs):
    """Merge every task at a uniformly random option in [1, max_option].

    Keyword Args:
        max_option (int): Upper bound of the random option (default 1).
    """
    upper = kwargs.get('max_option', 1)
    merged = TaskSet()
    for pt in pt_list:
        merged.merge_ts(pt[random.randint(1, upper)])
    return merged
if __name__ == '__main__':
    # Demo: two-task base set (utilizations 35/60 and 72/80) serialized
    # with a custom parallel-option list of one thread per task.
    base_ts = TaskSet()
    base_ts.append(Task(exec_time=35, deadline=60, period=60))
    base_ts.append(Task(exec_time=72, deadline=80, period=80))
    pts = ParaTaskSet(
        base_ts=base_ts,
        max_option=4,
        overhead=0.0,
        variance=0.8,
        popt_strategy='custom',
        popt_list=[1, 1],
    )
    print(pts)
def test_task_getter(self):
    """Indexing returns the task that was appended."""
    ts = TaskSet()
    ts.append(Task(id=2))
    self.assertEqual(ts[0].id, 2)
def parallelize_pt_non_dec_alpha(pt):
    """Populate pt.ts_table for options 2..max_opt via randomized splits.

    Parallelizes the task while keeping the total execution time
    non-decreasing and the largest thread execution time non-increasing
    from one option to the next.

    Requires pt.base_task.exec_time > 3 * pt.max_opt.

    Args:
        pt: Parallelizable task; its ts_table is filled in place.

    Raises:
        Exception: If the base execution time is too small to split.
    """
    # total execution time
    e_tot = pt.base_task.exec_time
    if e_tot <= pt.max_opt * 3:
        raise Exception('Execution time too small')
    # largest execution time
    e_max = pt.base_task.exec_time
    e_tot_prev = e_tot
    e_max_prev = e_max
    for opt in range(2, pt.max_opt + 1):
        # print('----------------')
        # print('opt: ' + str(opt))
        e_mean = e_tot_prev / opt
        # print('e_mean: ' + str(e_mean))
        # Randomly split a fixed budget s_tot into `opt` shares (scaled
        # later, which keeps the ratios intact). Retry up to max_effort
        # times, discarding any draw whose largest thread would exceed
        # the previous option's maximum.
        max_effort = 10
        effort = 0
        while True:
            if effort >= max_effort:
                break
            s_tot = 1000
            s_list = unifast_divide(opt, s_tot,
                                    (s_tot / opt) * (1.0 + pt.variance))
            s_list_norm = normalize_list(s_list)
            # print(s_list_norm)
            e_list = [round(e_mean * (1.0 + s)) for s in s_list_norm]
            # print('e_list: ' + str(e_list))
            e_max = max(e_list)
            # print('e_max: ' + str(e_max))
            if e_max >= e_max_prev:
                effort += 1
                continue
            break
        # Scale e_tot by the overhead applied to the drop in e_max.
        e_tot = pt.overhead * (e_max_prev - e_max) + e_tot_prev
        # print('e_tot: ' + str(e_tot))
        # Rebuild the thread times, this time with the max pinned to e_max.
        e_list = [e_max] + unifast_divide(opt - 1, e_tot - e_max, e_max)
        e_list.sort(reverse=True)
        # print('e_list_new: ' + str(e_list))
        pt.ts_table[str(opt)] = TaskSet()
        # alpha
        # alpha = (e_tot - e_tot_prev) / (e_max_prev - e_max)
        # print('alpha: ' + str(alpha))
        # print('----------------')
        # Carry e_tot / e_max into the next option's iteration.
        e_max_prev = e_max
        e_tot_prev = e_tot
        # create threads and append to task set
        ts = TaskSet()
        for i in range(len(e_list)):
            # set minimum e to 1.0
            if e_list[i] < 0.1:
                e_list[i] = 1.0
            thr_param = {
                'id': pt.base_task.id,
                'exec_time': e_list[i],
                'deadline': pt.base_task.deadline,
                'period': pt.base_task.period,
            }
            thr = Thread(**thr_param)
            ts.append(thr)
        # append to pt
        pt.ts_table[str(opt)] = ts
    return
def parallelize_pts_single(pt_list):
    """Merge every parallelizable task at option 1 (single thread)."""
    merged = TaskSet()
    for pt in pt_list:
        merged.merge_ts(pt[1])
    return merged
class Egen(Gen):
    """Incremental task-set generator.

    Keeps appending random tasks to one growing TaskSet until the target
    total utilization would be exceeded, then starts a new set.
    """

    def __init__(self, **kwargs):
        """Initialize the generator.

        Keyword Args:
            tot_util (float): Target total utilization (default 1.0).
            util_over (bool): Allow tasks with utilization > 1.0
                (default True).
            deadline_scale (float): Scale factor applied to deadlines
                (default 1.0).
        """
        # Zero-argument super() avoids the infinite-recursion bug that
        # super(type(self), self) triggers when Egen is subclassed.
        super().__init__(**kwargs)
        self.tot_util = kwargs.get('tot_util', 1.0)
        self.ts = TaskSet()
        # -1 means "no task has been assigned an id yet".
        self.last_id = -1
        self.utilization_overflow = kwargs.get('util_over', True)
        self.deadline_scale = kwargs.get('deadline_scale', 1.0)

    def next_task(self, **kwargs):
        """Draw one random task from the configured parameter ranges."""
        period = random.randint(self.min_period, self.max_period)
        exec_time = random.randint(self.min_exec_time, self.max_exec_time)
        # Redraw until exec_time <= period, preventing utilization > 1.0
        # (the +0.1 makes the integer comparison strict).
        if not self.utilization_overflow:
            while exec_time > period + 0.1:
                period = random.randint(self.min_period, self.max_period)
                exec_time = random.randint(self.min_exec_time,
                                           self.max_exec_time)
        if self.implicit_deadline:
            deadline = period
        else:
            if self.constrained_deadline:
                deadline = random.randint(self.min_deadline, period)
            else:
                deadline = random.randint(self.min_deadline,
                                          self.max_deadline)
        task_param = {
            'period': period,
            'exec_time': exec_time,
            'deadline': deadline * self.deadline_scale,
        }
        return Task(**task_param)

    def __str__(self):
        info = 'Generator - egen\n' + \
            super().__str__() + '\n' + \
            'tot_util = ' + str(self.tot_util) + '\n' + \
            'util_over = ' + str(self.utilization_overflow)
        return info

    def create_new_task_set(self, t):
        """Reset self.ts and seed it with task t.

        Raises:
            Exception: If t alone already exceeds tot_util.
        """
        self.ts.clear()
        if t.utilization() <= self.tot_util:
            self.last_id = 0
            t.id = self.last_id
            self.ts.append(t)
            return self.ts
        # str() fixes the original str + float concatenation, which
        # raised TypeError instead of the intended message.
        raise Exception('Cannot create new task set, tried utilization: ' +
                        str(t.utilization()))

    def next_task_set(self):
        """Append one task to the running set, or roll over to a new set."""
        t = self.next_task()
        # Adding t would hit the utilization budget: start over with t.
        if self.ts.tot_util() + t.utilization() >= self.tot_util:
            return self.create_new_task_set(t)
        # Otherwise append t to the existing task set.
        self.last_id += 1
        t.id = self.last_id
        self.ts.append(t)
        return self.ts
def parallelize_alpha(pt):
    """Populate pt.ts_table for options 2..max_opt.

    Parallelizes the task while keeping the total execution time
    non-decreasing and the largest thread execution time non-increasing
    from one option to the next.

    Requires pt.base_task.exec_time > 3 * pt.max_opt.

    Args:
        pt: Parallelizable task; its ts_table is filled in place.

    Raises:
        Exception: If the base execution time is too small to split.
    """
    # total execution time
    e_tot = pt.base_task.exec_time
    if e_tot <= pt.max_opt * 3:
        raise Exception('Execution time too small')
    e_tot_prev = e_tot
    # largest execution time
    e_max = pt.base_task.exec_time
    e_max_prev = e_max
    for opt in range(2, pt.max_opt + 1):
        pt.ts_table[str(opt)] = TaskSet()
        # increase total execution time
        e_tot = e_tot_prev * (1.0 + pt.variance)
        # decrease first thread execution time accordingly
        # NOTE(review): divides by pt.overhead -- raises ZeroDivisionError
        # when overhead is 0.0 (a common default elsewhere in this file);
        # confirm callers always pass a non-zero overhead here.
        e_max = e_max_prev - ((e_tot - e_tot_prev) / pt.overhead)
        # ideal separation execution time
        e_ideal = e_tot / opt
        """
        normalize variance
        variance = 0 --> e_max = e_ideal
        variance = 1 --> e_max = e_max (prev)
        e_max_limit = e_ideal + (e_max(prev) - e_ideal) * variance
        unifast split into pcs, while only accepting when
        largest generated e < e_max_limit
        """
        # execution times
        e_max_limit = e_ideal + (e_max - e_ideal) * pt.variance
        e_list = unifast_divide(opt, e_tot, e_max_limit)
        e_max = e_list[0]
        # create threads and append to task set
        ts = TaskSet()
        for i in range(len(e_list)):
            # set minimum e to 1.0
            if e_list[i] < 0.1:
                e_list[i] = 1.0
            thr_param = {
                'id': pt.base_task.id,
                'exec_time': e_list[i],
                'deadline': pt.base_task.deadline,
                'period': pt.base_task.period,
            }
            thr = Thread(**thr_param)
            ts.append(thr)
        # append to pt
        pt.ts_table[str(opt)] = ts
        # Carry e_tot / e_max into the next option's iteration.
        e_tot_prev = e_tot
        e_max_prev = e_max
    return