def createFromDir(dir): """Scan the directory containing a dataset and create a BenchDataset object from it.""" # Identify the paths where files may be found input_dir = dir.getChildByName('input') output_dir = dir.getChildByName('output') #benchmark_path = path.join(globals.root, 'benchmarks', name) def check_default_input_files(): # This function is called to see if the input file set # guessed by scanning the input directory can be used if invalid_default_input_files: raise ValueError, "Cannot infer command line when there are multiple input files in a data set\n(Fix by adding an input DESCRIPTION file)" if input_dir.exists(): input_descr = process.read_description_file(input_dir) input_files = input_dir.scanAndReturnNames() # If more than one input file was found, cannot use the default # input file list produced by scanning the directory invalid_default_input_files = len(input_files) > 1 else: # If there's no input directory, assume the benchmark # takes no input input_descr = None input_files = [] invalid_default_input_files = False # Read the text of the input description file if input_descr is not None: (parameters, input_files1, input_descr) = \ unpack_dataset_description(input_descr, input_files=None) if input_files1 is None: # No override value given; use the default check_default_input_files() else: input_files = input_files1 else: check_default_input_files() parameters = [] # Look for output files output_descr = process.read_description_file(output_dir) output_files = output_dir.scanAndReturnNames() if len(output_files) > 1: raise ValueError, "Multiple output files not supported" # Concatenate input and output descriptions if input_descr and output_descr: descr = input_descr + "\n\n" + output_descr else: descr = input_descr or output_descr return BenchDataset(dir, input_files, output_files, parameters, descr)
def createFromDir(dir):
    """Scan the directory containing a benchmark implementation and create
    a BenchImpl object from it."""
    # An optional DESCRIPTION file in the directory supplies the
    # human-readable text for this implementation
    return BenchImpl(dir, process.read_description_file(dir))
def createFromName(name, impl):
    """Scan the directory containing a benchmark implementation and create
    a BenchImpl object from it."""
    # Implementations live at <root>/benchmarks/<name>/src/<impl>
    src_dir = path.join(globals.root, "benchmarks", name, "src", impl)

    # Pick up the optional DESCRIPTION text, if one is provided
    descr = process.read_description_file(src_dir)
    return BenchImpl(impl, descr)
def createFromName(name, impl):
    """Scan the directory containing a benchmark implementation and create
    a BenchImpl object from it."""
    # Build the path to this implementation's source directory and read
    # its optional DESCRIPTION file in one step
    return BenchImpl(
        impl,
        process.read_description_file(
            path.join(globals.root, 'benchmarks', name, 'src', impl)))
def createFromName(name): """Scan the benchmark directory for the benchmark named 'name' and create a benchmark object for it.""" bmkpath = path.join(globals.root, "benchmarks", name) descr = process.read_description_file(bmkpath) try: # Scan implementations of the benchmark impls = [BenchImpl.createFromName(name, impl) for impl in process.scan_for_benchmark_versions(bmkpath)] # Scan data sets of the benchmark datas = [BenchDataset.createFromName(name, data) for data in process.scan_for_benchmark_datasets(bmkpath)] # If no exception occurred, the benchmark is valid return Benchmark(name, bmkpath, impls, datas, descr) except Exception, e: return Benchmark(name, invalid=e)
def createFromName(name): """Scan the benchmark directory for the benchmark named 'name' and create a benchmark object for it.""" bmkdir = globals.benchdir.getChildByName(name) datadir = globals.datadir.getChildByName(name) descr = process.read_description_file(bmkdir) try: # Scan implementations of the benchmark impls = [BenchImpl.createFromDir(impl) for impl in process.scan_for_benchmark_versions(bmkdir)] # Scan data sets of the benchmark datas = [BenchDataset.createFromDir(data) for data in process.scan_for_benchmark_datasets(datadir)] # If no exception occurred, the benchmark is valid return Benchmark(name, bmkdir.getPath(), impls, datas, descr) finally: pass
def createFromName(name): """Scan the benchmark directory for the benchmark named 'name' and create a benchmark object for it.""" bmkpath = path.join(globals.root, 'benchmarks', name) descr = process.read_description_file(bmkpath) try: # Scan implementations of the benchmark impls = [ BenchImpl.createFromName(name, impl) for impl in process.scan_for_benchmark_versions(bmkpath) ] # Scan data sets of the benchmark datas = [ BenchDataset.createFromName(name, data) for data in process.scan_for_benchmark_datasets(bmkpath) ] # If no exception occurred, the benchmark is valid return Benchmark(name, bmkpath, impls, datas, descr) except Exception, e: return Benchmark(name, invalid=e)
def createFromName(name, dset): """Scan the directory containing a dataset and create a BenchDataset object from it.""" # Identify the paths where files may be found benchmark_path = path.join(globals.root, "benchmarks", name) if path.exists(path.join(benchmark_path, "input")): input_path = path.join(benchmark_path, "input", dset) else: input_path = None output_path = path.join(benchmark_path, "output", dset) # Look for input files def check_default_input_files(): # This function is called to see if the input file set # guessed by scanning the input directory can be used if invalid_default_input_files: raise ValueError, "Cannot infer command line when there are multiple input files in a data set\n(Fix by adding an input DESCRIPTION file)" if input_path: input_descr = process.read_description_file(input_path) input_files = list(process.scan_for_files(input_path, boring=["DESCRIPTION", ".svn"])) # If more than one input file was found, cannot use the default # input file list produced by scanning the directory invalid_default_input_files = len(input_files) > 1 else: # If there's no input directory, assume the benchmark # takes no input input_descr = None input_files = [] invalid_default_input_files = False # Read the text of the input description file if input_descr is not None: (parameters, input_files1, input_descr) = unpack_dataset_description(input_descr, input_files=None) if input_files1 is None: # No override vaule given; use the default check_default_input_files() else: input_files = input_files1 else: check_default_input_files() parameters = [] # Look for output files output_descr = process.read_description_file(output_path) output_files = list(process.scan_for_files(output_path, boring=["DESCRIPTION", ".svn"])) if len(output_files) > 1: raise ValueError, "Multiple output files not supported" # Concatenate input and output descriptions if input_descr and output_descr: descr = input_descr + "\n\n" + output_descr else: descr = input_descr or output_descr return 
BenchDataset(dset, input_files, output_files, parameters, descr)