def layer_extract(self, func, silent=True, **kwargs):
    """
    Calls the method 'func' block by block on an existing layer, with **kwargs
    (the current data block is passed through the keyword 'args'). No output
    layer is generated; the name of the input layer is returned.
    """
    if 'layer' in kwargs:
        self.input = kwargs['layer']
    else:
        self.input = pyrat.data.active

    query = pyrat.data.queryLayer(self.input)
    if isinstance(query, list):
        dshape = query[0]['shape']
    else:
        dshape = query['shape']

    if self.vblock:                                              # init block processing
        self.initBP(dshape[-1])
    else:
        self.initBP(dshape[-2])

    if len(self.blocks) > 1 and self.nthreads > 1:               # group chunks of blocks
        idx = [self.blocks[i:i + self.nthreads]
               for i in range(0, len(self.blocks), self.nthreads)]
    else:
        idx = [[block] for block in self.blocks]

    out = []
    nb = 0
    metain = pyrat.data.getAnnotation(layer=self.input)
    if silent is False:
        P = pyrat.tools.ProgressBar(' ' + self.name, len(self.blocks))
        P.update(0)

    for bidx in idx:                                             # loop over chunks of blocks
        meta = copy.deepcopy(metain)
        inputs = []
        for ix in bidx:                                          # loop over blocks in chunk
            data = self.read_block(nb)
            kwargs_copy = copy.deepcopy(kwargs)
            kwargs_copy["args"] = data
            kwargs_copy["meta"] = meta
            if self.vblock:
                kwargs_copy['block'] = (0, dshape[-2]) + tuple(self.blocks[nb])
            else:
                kwargs_copy['block'] = tuple(self.blocks[nb]) + (0, dshape[-1])
            kwargs_copy['valid'] = tuple(self.valid[nb])
            inputs.append((self, func.__name__, kwargs_copy))    # accumulate inputs
            nb += 1
            if silent is False:
                P.update(nb)
        if self.nthreads > 1:
            result = multimap(inputs)                            # do the multiprocessing
        else:
            result = map(exec_out, inputs)                       # ... or avoid it
        for res in result:
            out.append(res[0])

    if silent is False:
        del P
    return self.input
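# Hedged usage sketch (not part of the original source): layer_extract() is
# meant to be driven from a worker-style class, with the current data block
# delivered through the 'args' keyword prepared above (the unpacking is done
# by exec_out()/multimap()). The class and method names are illustrative only.
#
#     class MinMax(Worker):                      # 'Worker': class providing layer_extract()
#         def run(self, *args, **kwargs):
#             return self.layer_extract(self.extract, **kwargs)
#
#         def extract(self, data, *args, **kwargs):
#             # 'data' is one block of the input layer; the return value
#             # becomes res[0] in the loop above
#             return data.min(), data.max()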
def layer_fromfunc(self, func, size=(1, 1), silent=True, **kwargs):
    """
    Generates a new layer from the return of its method 'func', called with
    **kwargs (possible positional args can be stored in the keyword 'args'
    as a tuple). The size of the produced layer must be passed in the 'size'
    keyword. Returns the name of the new layer(s).
    """
    if self.vblock:                                              # init block processing
        self.initBP(size[-1])
        kwargs["size"] = (size[-2], self.blocksize)
    else:
        self.initBP(size[-2])
        kwargs["size"] = (self.blocksize, size[-1])

    if len(self.blocks) > 1 and self.nthreads > 1:               # group chunks of blocks
        idx = [self.blocks[i:i + self.nthreads]
               for i in range(0, len(self.blocks), self.nthreads)]
    else:
        idx = [[block] for block in self.blocks]

    kwargs["meta"] = {}
    nb1 = 0                                                      # input block number
    nb2 = 0                                                      # output block number
    if silent is False:
        P = pyrat.tools.ProgressBar(' ' + self.name, len(self.blocks))
        P.update(0)

    for bidx in idx:                                             # loop over chunks of blocks
        inputs = []
        for ix in bidx:                                          # loop over blocks in chunk
            kwargs_copy = copy.deepcopy(kwargs)
            if self.vblock:
                kwargs_copy['block'] = (0, size[-2]) + tuple(self.blocks[nb1])
            else:
                kwargs_copy['block'] = tuple(self.blocks[nb1]) + (0, size[-1])
            kwargs_copy['valid'] = tuple(self.valid[nb1])
            inputs.append((self, func.__name__, kwargs_copy))    # accumulate inputs
            nb1 += 1
        if self.nthreads > 1:
            result = multimap(inputs)                            # do the multiprocessing
        else:
            result = map(exec_out, inputs)                       # ... or avoid it
        for res in result:                                       # loop over output blocks (in chunk)
            if nb2 == 0:                                         # first block -> generate new layer(s)
                if isinstance(res[0], (list, tuple)):
                    self.output = []
                    for n, re in enumerate(res[0]):
                        self.output.append(pyrat.data.addLayer(dtype=re.dtype, shape=size))
                else:
                    self.output = pyrat.data.addLayer(dtype=res[0].dtype, shape=size)
            self.save_block(res[0], nb2)
            nb2 += 1
            if silent is False:
                P.update(nb2)

    if silent is False:
        del P
    pyrat.data.setAnnotation(res[1], layer=self.output)          # add meta data to output layer
    return self.output
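# Hedged usage sketch (illustrative only): layer_fromfunc() suits methods that
# generate data of a known size rather than read an existing layer. The block
# shape arrives through the 'size' keyword set up above; metadata can be filled
# into the dict passed as kwargs['meta']. The names below are assumptions, and
# "np" stands for an assumed "import numpy as np".
#
#     class Noise(Worker):                       # 'Worker': class providing layer_fromfunc()
#         def run(self, *args, **kwargs):
#             return self.layer_fromfunc(self.generate, size=(2000, 3000))
#
#         def generate(self, size=(1, 1), **kwargs):
#             return np.random.rand(*size).astype('float32')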
def run(self, *args, **kwargs):
    # print("pyramid level", self.scale)
    if self.scale == 0:                                          # level 0: even-sized copy of the data
        ilay = 'D'
        olay = 'P/0'
        ishp = [dim // 2 * 2 for dim in self.dshape]
        oshp = [dim // 2 * 2 for dim in self.dshape]
    else:                                                        # higher levels: subsample by 2
        ilay = 'P/' + str(self.scale - 1)
        olay = 'P/' + str(self.scale)
        ishp = [dim // 2 * 2 for dim in self.dshape]
        oshp = [dim // 2 for dim in self.dshape]

    idat = self.hdfgroup[ilay]
    if olay in self.hdfgroup and self.force is False:            # level already present -> reuse it
        self.dset.append(self.hdfgroup[olay])
        self.scale += 1
        self.dshape = oshp
        self.progress.update(self.scale * 100)
        self.run()
    else:
        odat = self.hdfgroup.require_dataset(olay, self.lshape + tuple(oshp), 'float32')
        self.dset.append(odat)
        if min(oshp) > 1:                                        # stop recursion at 1-pixel extent
            idx, ivalid, ibs = self.calc_blocks(ishp[-2], pack=True, blocksize=128)
            odx, ovalid, obs = self.calc_blocks(oshp[-2], pack=False,
                                                blocksize=ibs // (ishp[-2] // oshp[-2]))
            nb = 0
            for bidx in idx:                                     # loop over chunks of input blocks
                inputs = []
                for ix in bidx:
                    data = idat[..., ix[0]:ix[1], 0:ishp[-1]]
                    inputs.append((subsample, (data, self.lshape + (obs, oshp[1]), self.mode)))
                result = multimap(inputs, mode='method')
                # result = map(absrebin, inputs)
                for res in result:                               # write valid part of each output block
                    odat[..., odx[nb][0] + ovalid[nb][0]:odx[nb][1], :] = \
                        res[..., ovalid[nb][0]:ovalid[nb][1], :]
                    nb += 1
                    self.nblock += 1
                    self.progress.update(self.nblock)
            self.scale += 1
            self.dshape = oshp
            self.run()
    return self.dset
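# Hedged sketch (assumption, not the original implementation): 'subsample' is
# expected to reduce one input block to the requested output shape, e.g. by
# mean pooling of the amplitude. Ragged edge blocks are ignored here, and
# 'mode' (presumably selecting the pooling method) is not evaluated.
#
#     def subsample(data, oshape, mode):
#         fy = data.shape[-2] // oshape[-2]          # decimation factors
#         fx = data.shape[-1] // oshape[-1]          # (1 at level 0, 2 above)
#         blk = np.abs(data[..., :oshape[-2] * fy, :oshape[-1] * fx])
#         blk = blk.reshape(oshape[:-2] + (oshape[-2], fy, oshape[-1], fx))
#         return blk.mean(axis=(-3, -1)).astype('float32')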
def layer_process(self, func, silent=True, **kwargs):
    """
    Generates a new layer from the return of its method 'func', called block
    by block with **kwargs (possible positional args can be stored in the
    keyword 'args' as a tuple). The shape of the output layer is derived from
    the input layer and the shape of the per-block results.
    Returns the name of the new layer(s).
    """
    if 'layer' in kwargs:
        self.input = kwargs['layer']
    else:
        self.input = pyrat.data.active

    if any([isinstance(foo, list) for foo in self.input]):       # nested list of input layers
        layshp = self.input
        self.input = flattenlist(self.input)
        nested = True
    else:
        nested = False

    query = pyrat.data.queryLayer(self.input)
    if isinstance(query, list):
        dshape = query[0]['shape']
    else:
        dshape = query['shape']

    if self.vblock:                                              # init block processing
        self.initBP(dshape[-1])
    else:
        self.initBP(dshape[-2])

    if len(self.blocks) > 1 and self.nthreads > 1:               # group chunks of blocks
        idx = [self.blocks[i:i + self.nthreads]
               for i in range(0, len(self.blocks), self.nthreads)]
    else:
        idx = [[block] for block in self.blocks]

    metain = pyrat.data.getAnnotation(layer=self.input)
    nb1 = 0                                                      # input block number
    nb2 = 0                                                      # output block number
    if silent is False:
        P = pyrat.tools.ProgressBar(' ' + self.name, len(self.blocks))
        P.update(0)

    for bidx in idx:                                             # loop over chunks of blocks
        meta = copy.deepcopy(metain)
        inputs = []
        for ix in bidx:                                          # loop over blocks in chunk
            data = self.read_block(nb1)
            if nested is True:
                data = unflattenlist(data, layshp)
            kwargs_copy = copy.deepcopy(kwargs)
            kwargs_copy["args"] = data
            kwargs_copy["meta"] = meta
            if self.vblock:
                kwargs_copy['block'] = (0, dshape[-2]) + tuple(self.blocks[nb1])
            else:
                kwargs_copy['block'] = tuple(self.blocks[nb1]) + (0, dshape[-1])
            kwargs_copy['valid'] = tuple(self.valid[nb1])
            inputs.append((self, func.__name__, kwargs_copy))    # accumulate inputs
            nb1 += 1
        if self.nthreads > 1:
            result = multimap(inputs)                            # do the multiprocessing
        else:
            result = map(exec_out, inputs)                       # ... or avoid it
        for res in result:                                       # loop over output blocks (in chunk)
            metaout = res[1]                                     # meta data (possibly modified)
            if nb2 == 0:                                         # first block -> generate new layer(s)
                if isinstance(res[0], (list, tuple)):
                    self.output = []
                    for n, re in enumerate(res[0]):
                        lshape = re.shape[0:-2]                  # layer geometry
                        if self.vblock:
                            dshape = (re.shape[-2], dshape[-1])
                        else:
                            dshape = (dshape[-2], re.shape[-1])
                        if self.blockprocess is False:           # no blockprocessing
                            lshape = ()                          # -> entire image
                            dshape = re.shape
                        self.output.append(pyrat.data.addLayer(dtype=re.dtype,
                                                               shape=lshape + dshape))
                else:
                    lshape = res[0].shape[0:-2]                  # layer geometry
                    if self.vblock:
                        dshape = (res[0].shape[-2], dshape[-1])
                    else:
                        dshape = (dshape[-2], res[0].shape[-1])
                    if self.blockprocess is False:               # no blockprocessing
                        lshape = ()                              # -> entire image
                        dshape = res[0].shape
                    self.output = pyrat.data.addLayer(dtype=res[0].dtype, shape=lshape + dshape)
            self.save_block(res[0], nb2)
            nb2 += 1
            if silent is False:
                P.update(nb2)

    if silent is False:
        del P
    if 'path' in metaout:                                        # don't propagate 'path' to the new layer
        del metaout['path']
    pyrat.data.setAnnotation(metaout, layer=self.output)         # add meta data to output layer
    return self.output                                           # return output layer
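# Hedged usage sketch (illustrative, not from the original source): the typical
# caller is a filter-style worker whose run() dispatches its block method
# through layer_process(). The names are assumptions; "ndimage" stands for an
# assumed "from scipy import ndimage", "np" for "import numpy as np", and a
# 2-D amplitude layer is assumed.
#
#     class Boxcar(Worker):                      # 'Worker': class providing layer_process()
#         def run(self, *args, **kwargs):
#             return self.layer_process(self.filter, **kwargs)
#
#         def filter(self, array, *args, **kwargs):
#             # smooth one image block; its return shape defines the output layer
#             return ndimage.uniform_filter(np.abs(array), size=7)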