def commit(self):
    if self.dsPath is not None:
        if self.pickle:
            utils.save(self.dsPath, self.predictions)
        else:
            np.save(self.dsPath, self.predictions)
    self.predictions = np.array(self.predictions)
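# Usage sketch (hypothetical driver code, not part of this module): callers
# accumulate into `predictions` and flush once at the end; with pickle=False
# the target path must be .npy-compatible, since np.save is used.
#
#   ds.predictions = [model.predict_on_batch(b) for b in batches]  # hypothetical names
#   ds.commit()  # writes dsPath, then self.predictions becomes an ndarray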
def createNet(self):
    inp, output = utils.load_yaml(self.path + ".shapes")
    if not hasattr(context.context, "net_cx"):
        context.context.net_cx = []
    # Per-input contributions are optional; init_shapes writes them when the
    # dataset exposes them.
    contributions = None
    if os.path.exists(self.path + ".contribution"):
        contributions = utils.load(self.path + ".contribution")
    if isinstance(inp, list):
        inputs = [self.create_input(x) for x in inp]
        if contributions is not None:
            if isinstance(contributions, list):
                # One contribution per input.
                for i in range(len(inputs)):
                    self.set_contribution(inputs[i], contributions[i])
            else:
                # A single contribution shared by all inputs.
                for i in range(len(inputs)):
                    self.set_contribution(inputs[i], contributions)
    else:
        i = keras.Input(inp)
        i.contribution = contributions
        inputs = [i]
    m = net.create_model_from_config(self.declarations, inputs, self.architecture, self.imports)
    if context.isTrainMode() and hasattr(context.context, "net_cx"):
        utils.save(self.path + ".ncx", context.context.net_cx)
        context.context.net_cx = []
    return m
def init_shapes(self, dataset):
    if dataset is None:
        dataset = self.get_dataset()
    self._dataset = dataset
    if self.preprocessing is not None:
        dataset = net.create_preprocessor_from_config(self.declarations, dataset, self.preprocessing, self.imports)
    predItem = dataset[0]
    # Persist per-input contributions, whichever attribute name the dataset uses.
    if hasattr(dataset, "contribution"):
        utils.save(self.path + ".contribution", getattr(dataset, "contribution"))
    elif hasattr(dataset, "contributions"):
        utils.save(self.path + ".contribution", getattr(dataset, "contributions"))
    utils.save_yaml(self.path + ".shapes", (_shape(predItem.x), _shape(predItem.y)))
    return dataset
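# Sketch of the on-disk handshake between init_shapes and createNet
# (illustrative shapes, not taken from the source): init_shapes writes
# `<path>.shapes` as an (x_shape, y_shape) pair, e.g.
#
#   ((224, 224, 3), (10,))          # single input -> keras.Input((224, 224, 3))
#   ([(224, 224, 3), (8,)], (10,))  # list of inputs -> one create_input(...) per entry
#
# and createNet above reads it back with utils.load_yaml to build the model's
# input layers; `<path>.contribution`, when present, is attached the same way.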
def all(self, workers=None):
    if workers is not None:
        # Split the index range into one chunk per worker thread.
        ws = np.array_split(np.arange(0, len(self)), workers)
        threads = []
        for w in ws:
            # Bind the chunk as a default argument: a plain closure would
            # capture `w` late, and every thread would process the last chunk.
            def process(w=w):
                for i in tqdm(range(len(w))):
                    v = self[w[i]]  # touch each item so it lands in the cache
            t1 = Thread(target=process)
            t1.daemon = True
            t1.start()
            threads.append(t1)
        for t in threads:
            t.join()
    else:
        for i in tqdm(range(len(self))):
            v = self[i]
    save(self._indexPath(), self._cache)
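# Minimal standalone illustration of the late-binding fix used in `all`
# (generic Python behavior, nothing module-specific):
def _closure_binding_demo():
    late = [lambda: w for w in range(3)]
    bound = [lambda w=w: w for w in range(3)]
    assert [f() for f in late] == [2, 2, 2]    # all closures see the final w
    assert [f() for f in bound] == [0, 1, 2]   # default arg captures w per iteration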
def ccc1(input):
    name = cache_name(input)
    name = get_cache_dir() + name
    # Memoize: a dataset that was already wrapped is returned as-is.
    if name in storage:
        return storage[name]
    # Composite datasets are cached component-wise.
    if isinstance(input, CompositeDataSet):
        components = [ccc1(x) for x in input.components]
        compositeDS = CompositeDataSet(components)
        inherit_dataset_params(input, compositeDS)
        if hasattr(input, "name"):
            compositeDS.origName = input.name
        return compositeDS
    data = None
    xStructPath = f"{name}/x.struct"
    yStructPath = f"{name}/y.struct"
    blocksCountPath = f"{name}/blocks_count.int"
    if os.path.exists(xStructPath) and os.path.exists(yStructPath) and os.path.exists(blocksCountPath):
        # Fast path: a complete cache is already on disk, load it block by block.
        blocksCount = load(blocksCountPath)
        xStruct = load(xStructPath)
        yStruct = load(yStructPath)
        xIsListOrTuple = xStruct[2] in ["list", "tuple"]
        yIsListOrTuple = yStruct[2] in ["list", "tuple"]
        xData, yData = init_buffers(xStruct, yStruct)
        for blockInd in tqdm.tqdm(range(blocksCount), "loading disk cache for: " + name):
            if not xIsListOrTuple:
                blockPath = f"{name}/x_{blockInd}.dscache"
                if os.path.exists(blockPath):
                    for x in load(blockPath):
                        xData.append(x)
                else:
                    raise Exception(f"Cache block is missing: {blockPath}")
            else:
                for c in range(len(xStruct[0])):
                    blockPath = f"{name}/x_{blockInd}_{c}.dscache"
                    if os.path.exists(blockPath):
                        for x in load(blockPath):
                            xData[c].append(x)
                    else:
                        raise Exception(f"Cache block is missing: {blockPath}")
            if not yIsListOrTuple:
                blockPath = f"{name}/y_{blockInd}.dscache"
                if os.path.exists(blockPath):
                    for y in load(blockPath):
                        yData.append(y)
                else:
                    raise Exception(f"Cache block is missing: {blockPath}")
            else:
                for c in range(len(yStruct[0])):
                    blockPath = f"{name}/y_{blockInd}_{c}.dscache"
                    if os.path.exists(blockPath):
                        for y in load(blockPath):
                            yData[c].append(y)
                    else:
                        raise Exception(f"Cache block is missing: {blockPath}")
        data = (xData, yData)
    if data is None:
        # Slow path: build the cache by scanning the dataset once, flushing
        # a block to disk whenever the in-memory buffers exceed the barrier.
        if not os.path.isdir(name):
            os.mkdir(name)
        i0 = input[0]
        l = len(input)
        xStruct = inspect_structure(i0.x)
        yStruct = inspect_structure(i0.y)
        xIsListOrTuple = xStruct[2] in ["list", "tuple"]
        yIsListOrTuple = yStruct[2] in ["list", "tuple"]
        xData, yData = init_buffers(xStruct, yStruct)
        buffSize = 0
        barrier = 64 * 1024 * 1024  # flush buffers roughly every 64 MB
        blockInd = 0
        for i in tqdm.tqdm(range(l), "building disk cache for: " + name):
            item = input[i]
            if not xIsListOrTuple:
                xData.append(item.x)
            else:
                for c in range(len(xStruct[0])):
                    xData[c].append(item.x[c])
            if not yIsListOrTuple:
                yData.append(item.y)
            else:
                for c in range(len(yStruct[0])):
                    yData[c].append(item.y[c])
            buffSize += get_size(item.x)
            buffSize += get_size(item.y)
            if buffSize > barrier or i == l - 1:
                # Numeric buffers are densified to ndarrays before saving.
                if not xIsListOrTuple:
                    arr = xData
                    if xStruct[0][0].startswith("int") or xStruct[0][0].startswith("float"):
                        arr = np.array(arr)
                    save(f"{name}/x_{blockInd}.dscache", arr)
                else:
                    for c in range(len(xStruct[0])):
                        arr = xData[c]
                        if xStruct[0][c].startswith("int") or xStruct[0][c].startswith("float"):
                            arr = np.array(arr)
                        save(f"{name}/x_{blockInd}_{c}.dscache", arr)
                if not yIsListOrTuple:
                    arr = yData
                    if yStruct[0][0].startswith("int") or yStruct[0][0].startswith("float"):
                        arr = np.array(arr)
                    save(f"{name}/y_{blockInd}.dscache", arr)
                else:
                    for c in range(len(yStruct[0])):
                        arr = yData[c]
                        if yStruct[0][c].startswith("int") or yStruct[0][c].startswith("float"):
                            arr = np.array(arr)
                        save(f"{name}/y_{blockInd}_{c}.dscache", arr)
                buffSize = 0
                blockInd += 1
                xData, yData = init_buffers(xStruct, yStruct)
        save(xStructPath, xStruct)
        save(yStructPath, yStruct)
        save(blocksCountPath, blockInd)
        # Recurse once: the fast path above will now load the fresh cache.
        return ccc1(input)
    result = DiskCache1(input, data, xIsListOrTuple, yIsListOrTuple)
    storage[name] = result
    return result
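# Usage sketch for the disk cache (hypothetical dataset variable; file names
# match the paths used above): the first call scans the dataset and writes
# `x_<block>.dscache` / `y_<block>.dscache` blocks plus the x.struct, y.struct
# and blocks_count.int descriptors, then recurses once to reload them; later
# calls are served from the in-process `storage` dict.
#
#   cached = ccc1(my_dataset)           # first call: builds blocks, returns DiskCache1
#   item = cached[0]                    # reads come from the preloaded buffers
#   assert ccc1(my_dataset) is cached   # memoized per cache name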
def provideArgsOneHotClass(ds: image_datasets.BinaryClassificationDataSet, cfg, resultPath):
    clazzMapPath = os.path.join(resultPath, "assets", ds.clazzColumn.replace('|', "_") + ".cm")
    utils.save(clazzMapPath, ds.class2Num)
    return ("return inference.BasicEngine(os.path.join(os.path.dirname(__file__),'config.yaml'),"
            f"['{ds.imColumn}'],['{ds.clazzColumn}'],"
            f"{{'{ds.imColumn}':'as_is','{ds.clazzColumn}':'categorical_one_hot'}})")
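# For illustration (hypothetical column names): with imColumn == "image" and
# clazzColumn == "label", the returned source string is the single line
#
#   return inference.BasicEngine(os.path.join(os.path.dirname(__file__),'config.yaml'),['image'],['label'],{'image':'as_is','label':'categorical_one_hot'})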