def ls(root=".", abspaths=False, recursive=False):
    """
    Return a list of files in directory.

    Directory listings are sorted alphabetically. If the named
    directory is a file, return its path.

    Examples:
        >>> fs.ls("foo")
        ["a", "b", "c"]

        >>> fs.ls("foo/a")
        ["foo/a"]

        >>> fs.ls("foo", abspaths=True)
        ["/home/test/foo/a", "/home/test/foo/b", "/home/test/foo/c"]

        >>> fs.ls("foo", recursive=True)
        ["a", "b", "b/d", "b/d/e", "c"]

    Arguments:
        root (str): Path to directory. Can be relative or absolute.
        abspaths (bool, optional): Return absolute paths if true.
        recursive (bool, optional): Recursively list subdirectories if
          true.

    Returns:
        list of str: A list of paths.

    Raises:
        OSError: If root directory does not exist.
    """
    def _expand(name):
        # A directory entry is replaced by itself followed by its own
        # recursive listing, each child prefixed with the entry name.
        full = path(root, name)
        if not isdir(full):
            return [name]
        return [name] + [path(name, sub) for sub in ls(full, recursive=True)]

    if isfile(root):
        # A file argument simply yields its own path.
        return [abspath(root)] if abspaths else [basename(root)]

    if abspaths:
        # List relative names first, then anchor each at the absolute root.
        base = abspath(root)
        return [path(base, rel)
                for rel in ls(root, recursive=recursive, abspaths=False)]

    if recursive:
        # Expand each top-level entry into itself plus any nested contents.
        entries = ls(root, abspaths=False, recursive=False)
        return lab.flatten([_expand(name) for name in entries])

    # Plain non-recursive listing, sorted alphabetically.
    return list(sorted(os.listdir(root)))
def xval_benchmarks(clf, data, **benchmark_name_opts):
    """Cross-validate across benchmarks."""
    names = get_benchmark_names(data, **benchmark_name_opts)
    # One leave-one-out evaluation per benchmark; the per-benchmark
    # result lists are flattened into a single row set.
    rows = [leave_one_benchmark_out(clf, cgo13_features, data, name)
            for name in names]
    return pd.DataFrame(flatten(rows))
def kernel_performance(db, output=None, **kwargs):
    """Plot workgroup-size performance for synthetic and real kernels."""
    # All synthetic kernels are pooled into one flattened series under
    # the single "synthetic" label.
    synthetic = lab.flatten(
        [db.performance_of_kernels_with_name(name)
         for name in db.synthetic_kernel_names])
    values = [synthetic]
    # Each real kernel contributes its own series and label.
    for name in db.real_kernel_names:
        values.append(db.performance_of_kernels_with_name(name))
    labels = ["synthetic"] + db.real_kernel_names
    title = kwargs.pop("title", "Workgroup size performance across kernels")
    _performance_plot(output, labels, values, title,
                      color=sns.color_palette("Greens"), **kwargs)
def run_synthetic_benchmarks(iterations=250):
    """
    Sample the space of synthetic benchmarks.

    Arguments:
        iterations (int, optional): Number of sample iterations passed
          to each benchmark invocation.
    """
    allargs = list(experiment.SIMPLEBIG_ARGS)
    # Shuffle so the argument space is sampled in a random order.
    random.shuffle(allargs)
    for devargs in experiment.DEVARGS:
        for simplebigargs in allargs:
            args = labm8.flatten(simplebigargs + (devargs,))
            # Fix: the original joined the args twice and left the
            # second result in an unused "cmd_str" local; join once.
            io.debug(" ".join(args))
            sample_simplebig(args, iterations=iterations)
def kernel_performance(db, output=None, **kwargs):
    """Plot workgroup-size performance across kernels."""
    real_names = db.real_kernel_names
    labels = ["synthetic"] + real_names
    # First series: every synthetic kernel's performance, pooled and
    # flattened into a single list.
    pooled = [db.performance_of_kernels_with_name(n)
              for n in db.synthetic_kernel_names]
    values = [lab.flatten(pooled)]
    # One additional series per real kernel, in label order.
    values.extend(db.performance_of_kernels_with_name(n)
                  for n in real_names)
    title = kwargs.pop("title", "Workgroup size performance across kernels")
    _performance_plot(output, labels, values, title,
                      color=sns.color_palette("Greens"), **kwargs)
def test_flatten(self):
    # Flattening nested lists concatenates the inner elements in order.
    expected = [1, 2, 3]
    self._test(expected, lab.flatten([[1], [2, 3]]))