def load(self, fList):
    """Read a collection of tuned-data files and merge them into one container.

    Parameters
    ----------
    fList : str or list
        A CSV string or list of file/folder paths; folders are expanded
        into the individual files they contain.

    Returns
    -------
    TunedData_v1
        The merged tuned-data container (also stored on ``self._obj``).

    Notes
    -----
    Calls ``self._logger.fatal`` when a file carries an unsupported
    ``__version`` tag (only version 1 is readable here).
    """
    from Gaugi import load
    from Gaugi import csvStr2List, expandFolders, progressbar
    # Normalize the input: CSV string -> list, then expand any folders.
    fList = csvStr2List(fList)
    fList = expandFolders(fList)
    from saphyra import TunedData_v1
    self._obj = TunedData_v1()
    for inputFile in progressbar(fList, len(fList),
                                 prefix="Reading tuned data collection...",
                                 logger=self._logger):
        raw = load(inputFile)
        # get the file version
        version = raw['__version']
        if version == 1:
            # the current (and only supported) file version
            obj = TunedData_v1.fromRawObj(raw)
            self._obj.merge(obj)
        else:
            # abort: the file's schema version is not supported by this reader
            # (original comment wrongly said "file does not exist")
            self._logger.fatal('File version (%d) not supported in (%s)', version, inputFile)
    # return the merged tuned-data container
    return self._obj
def __init__(self, fList):
    """Build the job manager state from a CSV string / list of input paths.

    Parameters
    ----------
    fList : str or list
        A CSV string or list of file/folder paths; folders are expanded
        into the individual files they contain.
    """
    Logger.__init__(self)
    from Gaugi import csvStr2List
    from Gaugi import expandFolders
    self.fList = csvStr2List(fList)
    # FIX: expand the *parsed* list, not the raw argument. The original
    # called expandFolders(fList), silently discarding the csvStr2List
    # result just assigned above; the sibling constructor in this file
    # chains csvStr2List -> expandFolders, which is the intended pipeline.
    self.fList = expandFolders(self.fList)
    self.process_pipe = []
    self.output_stack = []
    import random
    import time
    # Seed with wall-clock time so each session gets a distinct base id.
    random.seed(time.time())
    # Random tag used to identify jobs launched by this session.
    self._base_id = random.randrange(100000)
def __init__(self, fList, reader, nFilesPerJob, nthreads):
    """Prepare a multi-threaded reader over the input files.

    Parameters
    ----------
    fList : str or list
        A CSV string or list of file/folder paths; folders are expanded
        into the individual files they contain.
    reader : object
        Reader used to process each group of files.
    nFilesPerJob : int
        Maximum number of files assigned to a single job.
    nthreads : int
        Number of worker threads to use.
    """
    Logger.__init__(self)
    from Gaugi import csvStr2List
    from Gaugi import expandFolders
    # Normalize the input paths, then partition them into successive
    # groups of at most nFilesPerJob files each.
    allFiles = expandFolders(csvStr2List(fList))
    self._fList = [allFiles[start:start + nFilesPerJob]
                   for start in range(0, len(allFiles), nFilesPerJob)]
    self.process_pipe = []
    self._outputs = []
    self._nthreads = nthreads
    self._reader = reader