def parse_complexity(run_paths, complexities, run_as_key=False):
    """Parse the complexity datalib file for every run in run_paths.

    Returns the mapping produced by datalib.parse_all, keyed by the
    complexity file path — or re-keyed by run directory when
    run_as_key is True.
    """
    complexity_files = map(path_complexity, run_paths)
    tables = datalib.parse_all(complexity_files,
                               complexities,
                               datalib.REQUIRED,
                               keycolname='EPOCH-START')
    if run_as_key:
        # switch keys from complexity-file path to the owning run directory
        tables = {path_run_from_complexity(path): table
                  for path, table in tables.items()}
    return tables
def parse_avrs(run_paths, classification, recent_type, complexities, run_as_key=False):
    """Parse the Avr datalib file for every run in run_paths.

    Returns the mapping produced by datalib.parse_all, keyed by the Avr
    file path — or re-keyed by run directory when run_as_key is True.
    """
    avr_files = [path_avr(run, classification, recent_type) for run in run_paths]
    avrs = datalib.parse_all(avr_files,
                             complexities,
                             datalib.REQUIRED,
                             keycolname="Timestep")
    if run_as_key:
        # switch keys from Avr-file path to the owning run directory
        avrs = {path_run_from_avr(path, classification, recent_type): table
                for path, table in avrs.items()}
    return avrs
def parse_complexity(run_paths, complexities, run_as_key=False):
    """Parse the complexity datalib file for each run path.

    NOTE(review): this appears to be a duplicate of a parse_complexity
    defined earlier in this file; if both live in the same module this
    later definition shadows the earlier one — consider removing one copy.
    """
    files = [path_complexity(run) for run in run_paths]
    tables = datalib.parse_all(files, complexities, datalib.REQUIRED,
                               keycolname='EPOCH-START')
    if not run_as_key:
        return tables
    # caller asked for run directories as keys rather than file paths
    return dict((path_run_from_complexity(f), tbl)
                for f, tbl in tables.items())
def parse_stats(run_paths, types, run_as_key=False):
    """Parse the stats datalib file for every run in run_paths.

    Calls __get_stats on each run first so the datalib file exists, then
    returns the mapping from datalib.parse_all — re-keyed by run
    directory when run_as_key is True.
    """
    # make sure each run's stats datalib file has been generated
    for run in run_paths:
        __get_stats(run)
    tables = datalib.parse_all(map(path_stats, run_paths),
                               types,
                               datalib.REQUIRED,
                               keycolname='step')
    if run_as_key:
        # switch keys from stats-file path to the owning run directory
        tables = {path_run_from_stats(path): table
                  for path, table in tables.items()}
    return tables
def parseValues(mode, run_paths, classification, dataset, values, run_as_key=False):
    """Parse the default-values table for every run in run_paths.

    Parameters:
        mode           -- provides relpath(), defaultValues, colName_timestep,
                          and pathRunFromValue() (project-declared; semantics
                          assumed from usage here)
        run_paths      -- run directories to read from
        classification -- passed through to mode.relpath / pathRunFromValue
        dataset        -- passed through to mode.relpath / pathRunFromValue
        values         -- unused here; kept for caller compatibility
        run_as_key     -- when true, re-key the result by run directory
                          (now defaults to False, matching the sibling
                          parse_* functions; existing callers that pass it
                          positionally are unaffected)

    Returns the mapping produced by datalib.parse_all.

    BUG FIX: the original ended with `return avrs`, but `avrs` was only
    assigned inside the `if run_as_key:` branch, so any call with
    run_as_key falsy raised UnboundLocalError. The parsed tables are now
    returned on both paths.
    """
    relpath = mode.relpath(classification, dataset)
    tablename = mode.defaultValues
    keycolname = mode.colName_timestep
    # parse the values table for all the runs
    tables = datalib.parse_all([os.path.join(run, relpath) for run in run_paths],
                               [tablename],
                               datalib.REQUIRED,
                               keycolname=keycolname)
    if run_as_key:
        # switch keys from values-file path to the owning run directory
        tables = {mode.pathRunFromValue(path, classification, dataset): table
                  for path, table in tables.items()}
    return tables
def parse_avrs(run_paths, classification, recent_type, complexities, run_as_key=False):
    """Parse the Avr datalib file for each run path.

    NOTE(review): this appears to be a duplicate of a parse_avrs defined
    earlier in this file; if both live in the same module this later
    definition shadows the earlier one — consider removing one copy.
    """
    def avr_path(run):
        return path_avr(run, classification, recent_type)

    avrs = datalib.parse_all(map(avr_path, run_paths),
                             complexities,
                             datalib.REQUIRED,
                             keycolname='Timestep')
    if not run_as_key:
        return avrs
    # caller asked for run directories as keys rather than Avr file paths
    return dict((path_run_from_avr(avr_file, classification, recent_type), table)
                for avr_file, table in avrs.items())
def parse_stats(run_paths, types, run_as_key=False):
    """Parse the stats datalib file for each run path.

    NOTE(review): this appears to be a duplicate of a parse_stats defined
    earlier in this file; if both live in the same module this later
    definition shadows the earlier one — consider removing one copy.
    """
    # generate any missing stats datalib files before parsing
    for run_dir in run_paths:
        __get_stats(run_dir)
    stats_files = [path_stats(run_dir) for run_dir in run_paths]
    tables = datalib.parse_all(stats_files, types, datalib.REQUIRED,
                               keycolname='step')
    if not run_as_key:
        return tables
    # caller asked for run directories as keys rather than stats file paths
    return {path_run_from_stats(f): tbl for f, tbl in tables.items()}