def test_eq(self):
    """Failure equality is determined solely by uid, never by message."""
    # Two Failures with auto-assigned uids are distinct.
    anon_a = Failure()
    anon_b = Failure()
    assert anon_a != anon_b  # different id
    # An explicit uid makes Failures compare equal regardless of message.
    shared_a = Failure(uid=1)
    shared_b = Failure(uid=1, message='different message')
    assert anon_b != shared_a  # different id
    assert shared_a == shared_b  # same id
def compute_batch_parallel(fxn, keys):
    """Execute a function in parallel on the entire batch of keys, using a
    multi-threaded executor.

    This is a helper function which subclasses of LazyDict can use to
    implement `compute_batch`. Note that speedups will only be obtained if
    compute is IO bound, due to Python's GIL.

    Args:
        fxn (Callable): function to be called in parallel
        keys (list): a list of keys

    Returns:
        list: result is equivalent to [fxn(key) for key in keys]

    Raises:
        RuntimeError: if the executor failed to return a result for some key.
    """
    no_result_failure = Failure.silent(
        'No result returned by SimpleExecutor.')
    results = [no_result_failure] * len(keys)
    with SimpleExecutor(fxn) as ex:
        for i, key in enumerate(keys):
            ex.submit(i, key)
        for i, val in ex.results():
            results[i] = val
    # Verify every slot was overwritten by a real result. An explicit raise
    # is used instead of `assert`, which is silently stripped under -O.
    # Identity check: the sentinel is a specific object, not a value class.
    for i, result in enumerate(results):
        if result is no_result_failure:
            raise RuntimeError(
                'SimpleExecutor returned no result for key {!r}'.format(
                    keys[i]))
    return results
def summarize(self, fmt=None, verbose=False): if fmt is None: fmt = self.default_format for path in self.paths(): ws = TrainingRunWorkspace(path) try: print fmt(ws) except BaseException: msg = 'Failed to render experiment: {}.'.format(ws.root) f = Failure.silent(msg) if verbose: print msg print f.traceback
def get_batch(self, keys):
    # Fetch the values for `keys` with a single SELECT, returning them in
    # the same order as `keys`. Keys with no matching row are filled with a
    # shared silent Failure sentinel instead of raising.
    if len(keys) == 0:
        return []
    # Map each key to its position so rows (returned in arbitrary order)
    # can be written back into the right output slot.
    key_to_index = {k: i for i, k in enumerate(keys)}
    condition = self._key_conditions(keys)
    cmd = select([self.table]).where(condition)
    with self._transaction() as conn:
        results = conn.execute(cmd)
        # NOTE(review): the result is consumed while the transaction is
        # still open — presumably the cursor is not valid after the
        # connection is released; confirm against the DB driver.
        no_result_failure = Failure.silent('No result returned from TableDict.')
        vals = [no_result_failure] * len(keys)
        for row in results:
            key = self._key_orm.from_row(row)
            val = self._val_orm.from_row(row)
            index = key_to_index[key]
            vals[index] = val
        return vals
def get_batch(self, keys):
    """Fetch values for `keys` with one SELECT, preserving key order.

    Keys that have no matching row map to a shared silent Failure
    sentinel rather than raising.
    """
    if len(keys) == 0:
        return []
    # Remember each key's slot so rows can arrive in any order.
    positions = {key: slot for slot, key in enumerate(keys)}
    query = select([self.table]).where(self._key_conditions(keys))
    with self._transaction() as conn:
        rows = conn.execute(query)
        missing = Failure.silent(
            'No result returned from TableDict.')
        batch = [missing] * len(keys)
        for row in rows:
            slot = positions[self._key_orm.from_row(row)]
            batch[slot] = self._val_orm.from_row(row)
        return batch
def compute_batch_parallel(fxn, keys):
    """Run `fxn` over all of `keys` concurrently via a SimpleExecutor.

    Helper for LazyDict subclasses implementing `compute_batch`. Threads
    only give a speedup when `fxn` is IO bound, because of Python's GIL.

    Args:
        fxn (Callable): function applied to each key
        keys (list): keys to process

    Returns:
        list: equivalent to [fxn(key) for key in keys]
    """
    placeholder = Failure.silent('No result returned by SimpleExecutor.')
    out = [placeholder] * len(keys)
    with SimpleExecutor(fxn) as executor:
        for slot, key in enumerate(keys):
            executor.submit(slot, key)
        for slot, value in executor.results():
            out[slot] = value
    # Sanity check: every slot must have been overwritten by a real result.
    for value in out:
        assert value != placeholder
    return out
def get_batch(self, keys):
    """Look up each key in order; absent keys yield a shared silent Failure."""
    missing = Failure.silent("Could not get key.")
    batch = []
    for key in keys:
        batch.append(self._d.get(key, missing))
    return batch
def get_batch(self, keys):
    # Return one value per key, in order. Keys absent from the backing dict
    # self._d map to a single shared silent Failure sentinel instead of
    # raising KeyError.
    f = Failure.silent("Could not get key.")
    return [self._d.get(k, f) for k in keys]