def gen_taskset(periods, period_distribution, tasks_n, utilization,
                period_granularity=None, scale=ms2us, want_integral=True):
    """Generate a random sporadic task set.

    periods: either a key into NAMED_PERIODS or an explicit
        (period_min, period_max) pair.
    period_distribution: distribution name forwarded to gen_periods().
    tasks_n: number of tasks to generate.
    utilization: total utilization distributed via StaffordRandFixedSum.
    period_granularity: period rounding granularity; defaults to the
        minimum period when not given.
    scale: conversion applied to costs and periods (default ms2us).
    want_integral: if true, quantize task parameters to integers.

    Returns a TaskSystem of SporadicTask objects.
    """
    if periods in NAMED_PERIODS:
        # Named period range: resolve to its (min, max) bounds.
        (period_min, period_max) = NAMED_PERIODS[periods]
    else:
        # Otherwise the caller supplied the (min, max) range directly.
        (period_min, period_max) = periods

    utils = StaffordRandFixedSum(tasks_n, utilization, 1)

    if period_granularity is None:
        period_granularity = period_min

    periods = gen_periods(tasks_n, 1, period_min, period_max,
                          period_granularity, period_distribution)

    ts = TaskSystem()
    # Clamp periods from below so none falls under the minimum/granularity.
    periods = numpy.maximum(periods[0], max(period_min, period_granularity))
    C = scale(utils[0] * periods)
    # Columns: utilization, scaled utilization, period, cost.
    taskset = numpy.c_[utils[0], C / periods, periods, C]

    for row in taskset:
        ts.append(SporadicTask(row[3], scale(row[2])))

    if want_integral:
        quantize_params(ts)

    return ts
def gen_tasksets(options):
    """Generate a random sporadic task set driven by an options object.

    Expects options to provide: n (task count), util (total utilization),
    permin/permax/pergran/perdist (period range, granularity, and
    distribution), and the flags round_C / floor_C that control how costs
    are rounded to integral values.

    Returns a TaskSystem of SporadicTask objects.
    """
    x = StaffordRandFixedSum(options.n, options.util, 1)
    periods = gen_periods(options.n, 1, options.permin, options.permax,
                          options.pergran, options.perdist)
    ts = TaskSystem()

    C = x[0] * periods[0]
    if options.round_C:
        # Round costs to the nearest integer.
        C = numpy.round(C, decimals=0)
    elif options.floor_C:
        # Truncate costs downward instead.
        C = numpy.floor(C)

    # Columns: utilization, effective utilization, period, cost.
    taskset = numpy.c_[x[0], C / periods[0], periods[0], C]

    # Iterate rows directly rather than by index (was range(numpy.size(...))).
    for row in taskset:
        ts.append(SporadicTask(row[3], row[2]))

    return ts
def find_independent_tasksubsets(taskset):
    """Split taskset into independent subsets of connected tasks.

    Uses find_connected_components() to group tasks; each connected
    component becomes its own TaskSystem.  Returns a list of TaskSystem
    objects covering every task exactly once.
    """
    # Only the per-task component map is needed here; the per-resource
    # map is discarded.
    by_task, _ = find_connected_components(taskset)
    done = set()
    subsets = []
    for t in by_task:
        if t not in done:
            # First task of an unseen component: emit the whole component.
            subsets.append(TaskSystem(by_task[t]))
            done.update(by_task[t])
    return subsets
def partition_tasks(cluster_size, clusters, dedicated_irq, taskset):
    """Partition taskset onto clusters using worst-fit bin packing.

    cluster_size: number of cores per cluster (each core contributes one
        unit of utilization capacity).
    clusters: number of clusters (bins).
    dedicated_irq: if true, reserve one core of the first cluster for
        interrupt handling, reducing its capacity by one.
    taskset: iterable of tasks with a utilization attribute.

    Returns a list of non-empty TaskSystem objects (one per used cluster,
    with .cpus and per-task .partition set), or False if some task did
    not fit.
    """
    first_cap = cluster_size - 1 if dedicated_irq else cluster_size
    first_bin = Bin(size=SporadicTask.utilization, capacity=first_cap)
    # NOTE: was xrange(); Python 2-only and a NameError under Python 3.
    other_bins = [Bin(size=SporadicTask.utilization, capacity=cluster_size)
                  for _ in range(1, clusters)]

    heuristic = WorstFit(initial_bins=[first_bin] + other_bins)
    heuristic.binpack(taskset)

    if heuristic.misfits:
        # At least one task could not be placed in any bin.
        return False

    clusts = [TaskSystem(b.items) for b in heuristic.bins]
    for i, c in enumerate(clusts):
        if i == 0 and dedicated_irq:
            # First cluster lost a core to interrupt handling.
            c.cpus = cluster_size - 1
        else:
            c.cpus = cluster_size
        for task in c:
            task.partition = i
    # Drop clusters that received no tasks.
    return [c for c in clusts if len(c) > 0]
def example():
    """Demonstrate LP construction for blocking-bound analysis.

    Builds a three-task example system sharing one resource, prints the
    blocking-objective LP step by step, then builds and (if CPLEX is
    available) solves the DFLP and DPCP LPs for the first task.
    """
    from schedcat.util.linprog import LinearProgram
    from schedcat.model.tasks import SporadicTask, TaskSystem
    from schedcat.model.resources import initialize_resource_model
    import schedcat.util.linprog

    def report_solution(lp):
        # Solve the LP and print objective values and variable assignment
        # (skipped when no CPLEX backend is available).
        if schedcat.util.linprog.cplex_available:
            sol = lp.solve()
            print('Solution: local=%d remote=%d' %
                  (sol.evaluate(lp.local_objective),
                   sol.evaluate(lp.remote_objective)))
            for x in sol:
                print((x, '->', sol[x]))

    t1 = SporadicTask(10, 100)
    t2 = SporadicTask(25, 200)
    t3 = SporadicTask(33, 33)
    ts = TaskSystem([t1, t2, t3])
    ts.assign_ids()
    initialize_resource_model(ts)
    t1.resmodel[0].add_request(1)
    t2.resmodel[0].add_request(2)
    t3.resmodel[0].add_request(3)
    for t in ts:
        t.response_time = t.period
        t.partition = t.id % 2
    # only one resource, assigned to the first processor
    resource_locality = {0: 0}

    lp = LinearProgram()
    # Configure blocking objective.
    set_blocking_objective(resource_locality, ts, t1, lp)
    print(lp)
    print('*' * 80)
    print('Adding mutex constraints:')
    add_var_mutex_constraints(ts, t1, lp)
    print(lp)
    print('*' * 80)
    print('Adding topology constraints:')  # fixed typo: was 'toplogy'
    add_topology_constraints(resource_locality, ts, t1, lp)
    print(lp)

    from .dflp import get_lp_for_task as get_dflp_lp
    from .dpcp import get_lp_for_task as get_dpcp_lp

    print('*' * 80)
    print('DFLP LP:')
    lp = get_dflp_lp(resource_locality, ts, t1)
    print(lp)
    lp.kill_non_positive_vars()
    print('DFLP LP (simplified)')
    print(lp)
    report_solution(lp)

    print('*' * 80)
    print('DPCP LP:')
    lp = get_dpcp_lp(resource_locality, ts, t1)
    print(lp)
    lp.kill_non_positive_vars()
    print('DPCP LP (simplified)')
    print(lp)
    report_solution(lp)