def test_case5(self):
    labels = [
        MockLabel('l1', ['m3']),
        MockLabel('l2', ['m3']),
        MockLabel('l3', ['m1'])
    ]
    duts = self.gen_duts_by_name('m1', 'm2', 'm3')
    mim = MachineImageManager(labels, duts)
    self.assertTrue(mim.compute_initial_allocation())
    self.assertEqual(mim.matrix_, [['X', 'X', 'Y'],
                                   ['X', 'X', 'Y'],
                                   ['Y', 'X', 'X']])
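# For reference, minimal sketches of the mock classes these tests exercise.
# These are hypothetical reconstructions inferred from how the tests use them
# (the real MockLabel/MockDut definitions live elsewhere in this file):
#
#     class MockLabel(object):
#         def __init__(self, name, remote=None):
#             self.name = name
#             # The tests treat a missing remote list as "compatible with
#             # every dut" (see test_single_label and test_10x10_general).
#             self.remote = remote if remote is not None else []
#
#     class MockDut(object):
#         def __init__(self, name, label=None):
#             self.name = name
#             self.label = label  # label currently imaged on this dut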
def test_case4(self):
    labels = [
        MockLabel('l1', ['m1', 'm2']),
        MockLabel('l2', ['m2', 'm3']),
        MockLabel('l3', ['m1'])
    ]
    # 'm2' comes preloaded with labels[0], so the initial allocation can keep
    # l1 on m2 ('Y') without a reimage.
    duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
    mim = MachineImageManager(labels, duts)
    mim.compute_initial_allocation()
    self.assertEqual(mim.matrix_, [[' ', 'Y', 'X'],
                                   ['X', ' ', 'Y'],
                                   ['Y', 'X', 'X']])
def test_random_generated(self):
    n = 10
    labels = []
    duts = []
    for i in range(n):
        # Generate 4 (possibly duplicate) machine names compatible with this
        # label, derived from successive digits of a random number.
        l = MockLabel('l{}'.format(i), [])
        r = random.random()
        for _ in range(4):
            t = int(r * 10) % n
            r *= 10
            l.remote.append('m{}'.format(t))
        labels.append(l)
        duts.append(MockDut('m{}'.format(i)))
    mim = MachineImageManager(labels, duts)
    self.assertTrue(mim.compute_initial_allocation())
def test_10x10_general(self):
    """Generate a 10x10 matrix."""
    n = 10
    labels = []
    duts = []
    for i in range(n):
        labels.append(MockLabel('l{}'.format(i)))
        duts.append(MockDut('m{}'.format(i)))
    mim = MachineImageManager(labels, duts)
    self.assertTrue(mim.compute_initial_allocation())
    # With every label compatible with every dut, the initial allocation
    # should land on the diagonal.
    for i in range(n):
        for j in range(n):
            if i == j:
                self.assertEqual(mim.matrix_[i][j], 'Y')
            else:
                self.assertEqual(mim.matrix_[i][j], ' ')
    self.assertEqual(mim.allocate(duts[3]).name, 'l3')
def test_2x2_with_allocation(self):
    labels = [MockLabel('l0'), MockLabel('l1')]
    duts = [MockDut('m0'), MockDut('m1')]
    mim = MachineImageManager(labels, duts)
    self.assertTrue(mim.compute_initial_allocation())
    self.assertEqual(mim.allocate(duts[0]), labels[0])
    self.assertEqual(mim.allocate(duts[0]), labels[1])
    self.assertIsNone(mim.allocate(duts[0]))
    self.assertEqual(mim.matrix_[0][0], '_')
    self.assertEqual(mim.matrix_[1][0], '_')
    self.assertEqual(mim.allocate(duts[1]), labels[1])
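# A note on matrix_ cell values, inferred from the expected matrices in these
# tests (not from MachineImageManager's own documentation):
#   'Y'  label initially allocated to this dut (or already imaged on it)
#   'X'  dut is not in the label's remote list, i.e. incompatible
#   ' '  compatible, but not chosen by the initial allocation
#   '_'  pair already handed out by a later allocate() call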
def test_single_label(self):
    labels = [MockLabel('l1')]
    duts = self.gen_duts_by_name('m1', 'm2', 'm3')
    mim = MachineImageManager(labels, duts)
    mim.compute_initial_allocation()
    self.assertEqual(mim.matrix_, [['Y', 'Y', 'Y']])
def test_single_dut(self):
    labels = [MockLabel('l1'), MockLabel('l2'), MockLabel('l3')]
    dut = MockDut('m1')
    mim = MachineImageManager(labels, [dut])
    mim.compute_initial_allocation()
    self.assertEqual(mim.matrix_, [['Y'], ['Y'], ['Y']])
def pattern_based_test(self, inp, output):
    labels, duts = self.create_labels_and_duts_from_pattern(inp)
    mim = MachineImageManager(labels, duts)
    self.assertTrue(mim.compute_initial_allocation())
    self.check_matrix_against_pattern(mim.matrix_, output)
    return mim
class Schedv2(object):
    """New scheduler for crosperf."""

    def __init__(self, experiment):
        self._experiment = experiment
        self._logger = logger.GetLogger(experiment.log_dir)

        # Create shortcuts to nested data structures. "_duts" points to a
        # list of locked machines; "_labels" points to a list of all labels.
        self._duts = self._experiment.machine_manager.GetMachines()
        self._labels = self._experiment.labels

        # Bookkeeping for synchronization.
        self._workers_lock = Lock()
        # pylint: disable=unnecessary-lambda
        self._lock_map = defaultdict(lambda: Lock())

        # Test mode flag.
        self._in_test_mode = test_flag.GetTestMode()

        # Read benchmarkrun cache.
        self._read_br_cache()

        # Mapping from label to a list of benchmark_runs.
        self._label_brl_map = dict((l, []) for l in self._labels)
        for br in self._experiment.benchmark_runs:
            assert br.label in self._label_brl_map
            # Only put a br without a cache hit into the map.
            if br not in self._cached_br_list:
                self._label_brl_map[br.label].append(br)

        # Use the machine image manager to calculate the initial label
        # allocation.
        self._mim = MachineImageManager(self._labels, self._duts)
        self._mim.compute_initial_allocation()

        # Create worker threads, 1 per dut.
        self._active_workers = [DutWorker(dut, self) for dut in self._duts]
        self._finished_workers = []

        # Termination flag.
        self._terminated = False

    def run_sched(self):
        """Start all dut worker threads and return immediately."""
        for w in self._active_workers:
            w.start()

    def _read_br_cache(self):
        """Use multi-threading to read the cache for all benchmarkruns.

        We do this by first creating a few threads, then assigning each
        thread a segment of all brs. Each thread checks the cache status of
        each of its brs and puts those with a cache hit into
        '_cached_br_list'.
        """
        self._cached_br_list = []
        n_benchmarkruns = len(self._experiment.benchmark_runs)
        if n_benchmarkruns <= 4:
            # Use a single thread to read the cache.
            self._logger.LogOutput(('Starting to read cache status for '
                                    '{} benchmark runs ...').format(
                                        n_benchmarkruns))
            BenchmarkRunCacheReader(self,
                                    self._experiment.benchmark_runs).run()
            return

        # Split the benchmarkrun set into segments. Each segment is handled
        # by a thread. Note, we use (x + 3) // 4 to mimic math.ceil(x / 4).
        n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))
        self._logger.LogOutput(('Starting {} threads to read cache status '
                                'for {} benchmark runs ...').format(
                                    n_threads, n_benchmarkruns))
        benchmarkruns_per_thread = (n_benchmarkruns + n_threads -
                                    1) // n_threads
        # For example (hypothetical numbers): with n_benchmarkruns = 17,
        # n_threads = max(2, min(20, 20 // 4)) = 5 and
        # benchmarkruns_per_thread = (17 + 4) // 5 = 4, which yields
        # segments of sizes [4, 4, 4, 4, 1].
        benchmarkrun_segments = []
        for i in range(n_threads - 1):
            start = i * benchmarkruns_per_thread
            end = (i + 1) * benchmarkruns_per_thread
            benchmarkrun_segments.append(
                self._experiment.benchmark_runs[start:end])
        benchmarkrun_segments.append(self._experiment.benchmark_runs[
            (n_threads - 1) * benchmarkruns_per_thread:])

        # Assert: the segments together cover exactly benchmark_runs.
        assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns

        # Create and start all readers.
        cache_readers = [
            BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments
        ]
        for x in cache_readers:
            x.start()

        # Wait till all readers finish.
        for x in cache_readers:
            x.join()

        # Summarize.
        self._logger.LogOutput(
            'Total {} cache hits out of {} benchmark_runs.'.format(
                len(self._cached_br_list), n_benchmarkruns))

    def get_cached_run_list(self):
        return self._cached_br_list

    def get_label_map(self):
        return self._label_brl_map

    def get_experiment(self):
        return self._experiment

    def get_labels(self, i=None):
        if i is None:
            return self._labels
        return self._labels[i]

    def get_logger(self):
        return self._logger

    def get_cached_benchmark_run(self):
        """Get a benchmark_run with a cache hit.

        Returns:
            The benchmark_run that has a cache hit, if any; otherwise None.
        """
        with self.lock_on('_cached_br_list'):
            if self._cached_br_list:
                return self._cached_br_list.pop()
            return None

    def get_benchmark_run(self, dut):
        """Get a benchmark_run (br) object for a certain dut.

        Args:
            dut: the dut for which a br is returned.

        Returns:
            A br with its label matching that of the dut. If no such br could
            be found, return None (this usually means a reimage is required
            for the dut).
        """
        # If terminated, stop providing any br.
        if self._terminated:
            return None

        # If the dut bears an unrecognized label, return None.
        if dut.label is None:
            return None

        # If the br list for the dut's label is empty (that means all brs for
        # this label have been done), return None.
        with self.lock_on(dut.label):
            brl = self._label_brl_map[dut.label]
            if not brl:
                return None
            # Return the first br.
            return brl.pop(0)

    def allocate_label(self, dut):
        """Allocate a label to a dut.

        The work is delegated to MachineImageManager. The dut_worker calling
        this method is responsible for reimaging the dut to this label.

        Args:
            dut: the dut onto which a newly allocated label is to be reimaged.

        Returns:
            The label, or None.
        """
        if self._terminated:
            return None
        return self._mim.allocate(dut, self)

    def dut_worker_finished(self, dut_worker):
        """Notify schedv2 that the dut_worker thread finished.

        Args:
            dut_worker: the thread that is about to end.
        """
        self._logger.LogOutput('{} finished.'.format(dut_worker))
        with self._workers_lock:
            self._active_workers.remove(dut_worker)
            self._finished_workers.append(dut_worker)

    def is_complete(self):
        return len(self._active_workers) == 0

    def lock_on(self, my_object):
        return self._lock_map[my_object]

    def terminate(self):
        """Mark the flag so we stop providing brs/reimages.

        Also terminate each DutWorker, so they stop executing brs or
        reimaging.
        """
        self._terminated = True
        for dut_worker in self._active_workers:
            dut_worker.terminate()

    def threads_status_as_string(self):
        """Report the status of the dut worker threads."""
        status = '{} active threads, {} finished threads.\n'.format(
            len(self._active_workers), len(self._finished_workers))
        status += '  Active threads:'
        for dw in self._active_workers:
            status += '\n    ' + dw.status_str()
        if self._finished_workers:
            status += '\n  Finished threads:'
            for dw in self._finished_workers:
                status += '\n    ' + dw.status_str()
        return status
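# A minimal usage sketch (hypothetical driver code; `experiment` is assumed
# to be a fully constructed crosperf Experiment whose machine_manager, labels
# and benchmark_runs are populated):
#
#     sched = Schedv2(experiment)
#     sched.run_sched()                # starts one DutWorker thread per dut
#     while not sched.is_complete():   # workers remove themselves via
#         time.sleep(1)                # dut_worker_finished() when done
#     sched.get_logger().LogOutput(sched.threads_status_as_string())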