Example 1
def complete_benchmarks(ctx, param, incomplete):
    "Auto-completion for benchmarks."
    skip_import()

    # Check the current incomplete path. If it does not exist, use its parent
    # as a starting point for the lookup.
    incomplete_path = Path(incomplete)
    if not incomplete_path.exists():
        incomplete_path = incomplete_path.parent

    # List all subdirectories of the lookup path.
    all_dirs = [d for d in incomplete_path.glob('*') if d.is_dir()]
    all_benchmarks = [
        b for b in all_dirs if (b / "objective.py").exists()
    ]

    # First try to list benchmarks that match the incomplete pattern.
    proposed_benchmarks = propose_from_list(all_benchmarks, incomplete)
    if len(proposed_benchmarks) > 0:
        return proposed_benchmarks

    # Else do completion with sub-directories.
    matching_dirs = propose_from_list(all_dirs, incomplete)
    if len(matching_dirs) == 1:
        # If only one matches, complete the folder name and continue completion
        # from here.
        return complete_benchmarks(ctx, param, matching_dirs[0])
    return matching_dirs
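
Example 1 (and most of those below) delegates the actual matching to propose_from_list, which is never shown on this page. A minimal sketch consistent with how it is called (candidates may be strings or Path objects, and matching is plain substring matching, as Examples 11 and 12 do inline) could look like this:

def propose_from_list(candidates, incomplete):
    "Hypothetical sketch of the helper used throughout these examples."
    # Candidates can be Path objects (Examples 1 and 7-9), so cast to str
    # before the substring test so the comparison always works.
    return [str(c) for c in candidates if incomplete in str(c)]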
Example 2
def complete_datasets(ctx, param, incomplete):
    "Auto-completion for datasets."
    skip_import()
    benchmark = find_benchmark_in_args(ctx.args)
    if benchmark is None:
        return []
    datasets = [d.lower() for d in benchmark.get_dataset_names()]
    return propose_from_list(datasets, incomplete.lower())
Example 3
def complete_solvers(ctx, param, incomplete):
    "Auto-completion for solvers."
    skip_import()
    benchmark = find_benchmark_in_args(ctx.args)
    if benchmark is None:
        return []
    solvers = [s.lower() for s in benchmark.get_solver_names()]
    return propose_from_list(solvers, incomplete.lower())
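
None of the snippets show how these callbacks are attached to a command. Given the (ctx, param, incomplete) signature used in Examples 1-3, a plausible wiring is Click >= 8's shell_complete parameter; this is an assumption, and the command and option names below are purely illustrative:

import click

@click.command()
@click.argument('benchmark', shell_complete=complete_benchmarks)
@click.option('--dataset', '-d', multiple=True,
              shell_complete=complete_datasets)
@click.option('--solver', '-s', multiple=True,
              shell_complete=complete_solvers)
def run(benchmark, dataset, solver):
    # Illustrative stub: the real command body is not part of these examples.
    click.echo(f"Running {benchmark}")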
Example 4
def get_output_files(ctx, args, incomplete):
    "Auto-completion for datasets."
    skip_import()
    benchmark = find_benchmark_in_args(args)
    if benchmark is None:
        return [("", 'Benchmark has not been provided before')]
    output_folder = benchmark.get_output_folder()
    candidates = list(output_folder.glob('*.csv'))
    return propose_from_list(candidates, incomplete)
Example 5
def get_datasets(ctx, args, incomplete):
    "Auto-completion for datasets."
    skip_import()
    benchmark = find_benchmark_in_args(args)
    if benchmark is None:
        return [("", 'Benchmark has not been provided before')]
    datasets = benchmark.list_benchmark_dataset_names()
    datasets = [d.lower() for d in datasets]
    return propose_from_list(datasets, incomplete.lower())
Example 6
def get_solvers(ctx, args, incomplete):
    "Auto-completion for solvers."
    skip_import()
    benchmark = find_benchmark_in_args(args)
    if benchmark is None:
        return [("", 'Benchmark has not been provided before')]
    solvers = benchmark.list_benchmark_solver_names()
    solvers = [s.lower() for s in solvers]
    return propose_from_list(solvers, incomplete.lower())
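
Examples 4-6 (and 10-12) instead take (ctx, args, incomplete) and may return (value, help) tuples, which matches the older autocompletion parameter from Click 7.x rather than Click 8's shell_complete. A sketch of that wiring, again with illustrative names only:

import click

@click.command()
@click.option('--dataset', '-d', autocompletion=get_datasets)
@click.option('--solver', '-s', autocompletion=get_solvers)
def plot(dataset, solver):
    # Illustrative stub only.
    click.echo(f"{dataset} / {solver}")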
Example 7
def complete_output_files(ctx, param, incomplete):
    "Auto-completion for output files."
    skip_import()
    benchmark = find_benchmark_in_args(ctx.args)
    if benchmark is None:
        return []
    output_folder = benchmark.get_output_folder()
    candidates = list(output_folder.glob('*.csv'))
    return propose_from_list(candidates, incomplete)
Example 8
def complete_config_files(ctx, param, incomplete):
    "Auto-completion for configuration files."
    skip_import()
    benchmark = find_benchmark_in_args(ctx.args)
    if benchmark is None:
        return []
    benchmark_folder = benchmark.benchmark_dir

    # Resolve to absolute paths first so that relative_to works and the
    # completion proposes paths relative to the current directory.
    cwd = Path().resolve()
    candidates = [
        p.resolve().relative_to(cwd) for p in benchmark_folder.glob('*.yml')
    ]
    return propose_from_list(candidates, incomplete)
Example 9
def complete_output_files(ctx, param, incomplete):
    "Auto-completion for output files."
    skip_import()
    benchmark = find_benchmark_in_args(ctx.args)
    if benchmark is None:
        return []
    output_folder = benchmark.get_output_folder()

    # Resolve to absolute paths first so that relative_to works and the
    # completion proposes paths relative to the current directory.
    cwd = Path().resolve()
    candidates = [
        p.resolve().relative_to(cwd) for ext in ['csv', 'parquet']
        for p in output_folder.glob(f"*.{ext}")
    ]
    return propose_from_list(candidates, incomplete)
Example 10
def get_benchmark(ctx, args, incomplete):
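    "Auto-completion for benchmarks."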
    skip_import()
    benchmarks = [os.curdir] + [b.path for b in os.scandir('.') if b.is_dir()]
    benchmarks = [b for b in benchmarks if (Path(b) / "objective.py").exists()]
    return [b for b in benchmarks if incomplete in b]
Example 11
def get_datasets(ctx, args, incomplete):
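    "Auto-completion for datasets."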
    skip_import()
    benchmark = Benchmark(args[1])
    datasets = benchmark.list_benchmark_dataset_names()
    datasets = [d.lower() for d in datasets]
    return [d for d in datasets if incomplete.lower() in d]
Example 12
def get_solvers(ctx, args, incomplete):
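    "Auto-completion for solvers."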
    skip_import()
    benchmark = Benchmark(args[1])
    solvers = benchmark.list_benchmark_solver_names()
    solvers = [s.lower() for s in solvers]
    return [s for s in solvers if incomplete.lower() in s]