def fillDataDict(radlist, anglelist, freqlist):
    """ Get min/max statistics on data lists """
    d = {}
    freqnumpy = numpy.asarray(freqlist, dtype=numpy.int32)
    d['minf'] = float(ndimage.minimum(freqnumpy))
    d['maxf'] = float(ndimage.maximum(freqnumpy))
    if ndimage.sum(freqnumpy) < 10:
        apDisplay.printWarning("not enough eulers to draw a map")
        return None
    d['rangef'] = d['maxf'] - d['minf'] + 1
    angnumpy = numpy.asarray(anglelist, dtype=numpy.float32)
    d['mina'] = float(ndimage.minimum(angnumpy))
    d['maxa'] = float(ndimage.maximum(angnumpy))
    if d['maxa'] > 330.0 * math.pi / 180.0:
        d['maxa'] = 2.0 * math.pi
    d['rangea'] = d['maxa'] - d['mina']
    radnumpy = numpy.asarray(radlist, dtype=numpy.float32)
    d['minr'] = float(ndimage.minimum(radnumpy))
    d['maxr'] = float(ndimage.maximum(radnumpy))
    d['ranger'] = d['maxr'] - d['minr']
    xnumpy = radnumpy * numpy.cos(angnumpy - d['mina'])
    ynumpy = radnumpy * numpy.sin(angnumpy - d['mina'])
    d['minx'] = float(ndimage.minimum(xnumpy))
    d['maxx'] = float(ndimage.maximum(xnumpy))
    d['miny'] = float(ndimage.minimum(ynumpy))
    d['maxy'] = float(ndimage.maximum(ynumpy))
    d['rangex'] = d['maxx'] - d['minx']
    d['rangey'] = d['maxy'] - d['miny']
    return d
def rawFrameToImageFile(image, filename):
    """Writes a single raw image frame to an image file.

    The file type must be given in the filename extension, e.g. png or jpg.
    The image need not be scaled beforehand; it is scaled prior to writing.
    The type could be one of BMP, JPG, JPEG, PNG, PPM, TIFF, XBM, or XPM,
    but the file types available depend on the imsave plugin in use.

    Args:
        | image (np.ndarray): two-dimensional array representing an image
        | filename (string): name of file to be written to, with extension

    Returns:
        | Nothing

    Raises:
        | No exception is raised.
    """
    # normalise input image data to between 0 and 1
    from scipy import ndimage
    image = (image - ndimage.minimum(image)) / \
        (ndimage.maximum(image) - ndimage.minimum(image))

    # http://scikit-image.org/docs/dev/api/skimage.io.html#imsave
    import skimage.io as io
    io.imsave(filename, image)
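# Usage sketch for rawFrameToImageFile (an assumption, not from the original
# source): the frame values below are arbitrary and unscaled; the function
# rescales them to [0, 1] before writing. Assumes numpy and scikit-image.
import numpy as np

frame = np.random.rand(64, 64) * 1000.0  # unscaled raw frame
rawFrameToImageFile(frame, "frame.png")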
def get_edge(self, pos=None, rep=1):
    if not np.iterable(pos):
        pos = self.get_pos()
    from scipy import ndimage
    xlst = np.unique(pos[0])
    ylst = np.unique(pos[1])
    out = np.array([xlst, ndimage.minimum(pos[1], pos[0], xlst)]).T
    out = np.r_[out, np.array([xlst, ndimage.maximum(pos[1], pos[0], xlst)]).T]
    out2 = np.array([ndimage.minimum(pos[0], pos[1], ylst), ylst]).T
    out2 = np.r_[out2, np.array([ndimage.maximum(pos[0], pos[1], ylst), ylst]).T]
    #l.scatter(*pos)
    cent = pos.mean(1)
    allang = np.r_[np.arctan2(out[:, 0] - cent[0], out[:, 1] - cent[1]),
                   np.arctan2(out2[:, 0] - cent[0], out2[:, 1] - cent[1])]
    sorang = allang.argsort()
    dif = allang[sorang][1:] - allang[sorang][:-1]
    okang = [sorang[0]] + list(sorang[1:][dif > 0])
    edgpos = np.r_[out, out2][okang]
    if rep == 0:
        klist = ["pos_" + str(list(p))[1:-1].replace(",", "_").replace(" ", "")
                 for p in edgpos]
        return [self.samps[self.names[k]] for k in klist if k in self.names]
    return edgpos
def run(self, ips, snap, img, para=None):
    intenimg = ImageManager.get(para['inten']).img
    strc = ndimage.generate_binary_structure(2, 1 if para['con'] == '4-connect' else 2)
    buf, n = ndimage.label(snap, strc, output=np.uint32)
    index = range(1, n + 1)
    idx = (np.ones(n + 1) * para['front']).astype(np.uint8)
    msk = np.ones(n, dtype=bool)
    if para['mean'] > 0: msk *= ndimage.mean(intenimg, buf, index) >= para['mean']
    if para['mean'] < 0: msk *= ndimage.mean(intenimg, buf, index) < -para['mean']
    if para['max'] > 0: msk *= ndimage.maximum(intenimg, buf, index) >= para['max']
    if para['max'] < 0: msk *= ndimage.maximum(intenimg, buf, index) < -para['max']
    if para['min'] > 0: msk *= ndimage.minimum(intenimg, buf, index) >= para['min']
    if para['min'] < 0: msk *= ndimage.minimum(intenimg, buf, index) < -para['min']
    if para['sum'] > 0: msk *= ndimage.sum(intenimg, buf, index) >= para['sum']
    if para['sum'] < 0: msk *= ndimage.sum(intenimg, buf, index) < -para['sum']
    if para['std'] > 0: msk *= ndimage.standard_deviation(intenimg, buf, index) >= para['std']
    if para['std'] < 0: msk *= ndimage.standard_deviation(intenimg, buf, index) < -para['std']
    xy = ndimage.center_of_mass(intenimg, buf, index)
    xy = np.array(xy).round(2).T
    idx[1:][~msk] = para['back']
    idx[0] = 0
    img[:] = idx[buf]
    ImageManager.get(para['inten']).mark = RGMark((xy.T, msk))
    ImageManager.get(para['inten']).update = True
def analyzeList(self, mylist, myrange=(0, 1, 1), filename=None):
    """
    histogram2(a, bins) -- Compute histogram of a using divisions in bins

    Description:
        Count the number of times values from array a fall into
        numerical ranges defined by bins. Range x is given by
        bins[x] <= range_x < bins[x+1] where x = 0,N and N is the
        length of the bins array. The last range is given by
        bins[N] <= range_N < infinity. Values less than bins[0] are
        not included in the histogram.

    Arguments:
        a -- 1D array. The array of values to be divided into bins.
        bins -- 1D array. Defines the ranges of values to use during
            histogramming.

    Returns:
        1D array. Each value represents the occurrences for a given
        bin (range) of values.
    """
    #hist,bmin,minw,err = stats.histogram(mynumpy, numbins=36)
    #print hist,bmin,minw,err,"\n"
    if len(mylist) < 2:
        apDisplay.printWarning("Did not write file, not enough rows (" + str(filename) + ")")
        return
    if myrange[0] is None:
        mymin = float(math.floor(ndimage.minimum(mylist)))
    else:
        mymin = float(myrange[0])
    if myrange[1] is None:
        mymax = float(math.ceil(ndimage.maximum(mylist)))
    else:
        mymax = float(myrange[1])
    mystep = float(myrange[2])
    mynumpy = numpy.asarray(mylist, dtype=numpy.float32)
    print("range=", round(ndimage.minimum(mynumpy), 2), " <> ",
          round(ndimage.maximum(mynumpy), 2))
    print(" mean=", round(ndimage.mean(mynumpy), 2), " +- ",
          round(ndimage.standard_deviation(mynumpy), 2))

    # histogram
    bins = []
    mybin = mymin
    while mybin <= mymax:
        bins.append(mybin)
        mybin += mystep
    bins = numpy.asarray(bins, dtype=numpy.float32)
    apDisplay.printMsg("Creating histogram with " + str(len(bins)) + " bins")
    hist = stats.histogram2(mynumpy, bins=bins)
    #print bins
    #print hist
    if filename is not None:
        f = open(filename, "w")
        for i in range(len(bins)):
            out = ("%3.4f %d\n" % (bins[i] + mystep / 2.0, hist[i]))
            f.write(out)
        f.write("&\n")
def color_label(label, order='shuffle', cmap=6, start=1, negative=0):
    """
    make a color image from label

    :Inputs:
      - order: how to choose the color order - either:
          * shuffle: shuffle label ids (> start)
          * xmin: order label mapping by the labels' minimum x coordinates
          * ymin: order label mapping by the labels' minimum y coordinates
          * an integer: use directly the label id multiplied by this number
      - cmap: the color map - either:
          * None: use the module default _colormap (8 basic colors)
          * a colormap (Nx3 array of N colors),
          * or a number (simply apply modulus, and return a grey color)
      - start: loop into cmap starting at this label. It should be less
          than the number of colors in the color map. If order is shuffle,
          labels below start are not shuffled.
      - negative: a value to replace negative labels by
    """
    label = _np.copy(label)
    label[label < 0] = negative

    if cmap is None:
        cmap = _colormap
        start = _np.minimum(start, cmap.shape[0])
    else:
        cmap = _np.asarray(cmap)
        if cmap.size == 1:
            cmap = _np.arange(cmap)

    if order == 'xmin':
        x = add_dim(_np.arange(0, label.shape[1]), axis=0, size=label.shape[0])
        ind = _np.argsort(_np.argsort(
            _nd.minimum(x, label, index=range(0, label.max() + 1))))
    elif order == 'ymin':
        y = add_dim(_np.arange(0, label.shape[0]), axis=1, size=label.shape[1])
        ind = _np.argsort(_np.argsort(
            _nd.minimum(y, label, index=range(0, label.max() + 1))))
    else:
        ind = _np.arange(label.max() + 1)
        if order == 'shuffle':
            _np.random.shuffle(ind[start:])
        else:
            factor = max(order, 1)
            ind[start:] = factor * ind[start:]

    ind[ind >= start] = start + _np.mod(ind[ind >= start], cmap.shape[0] - start)
    Lind = ind[label]
    clabel = cmap[Lind, :]

    return clabel
def run(self, ips, snap, img, para=None):
    intenimg = self.app.get_img(para['inten']).img
    strc = ndimage.generate_binary_structure(2, 1 if para['con'] == '4-connect' else 2)
    buf, n = ndimage.label(snap, strc, output=np.uint32)
    index = range(1, n + 1)
    idx = (np.ones(n + 1) * para['front']).astype(np.uint8)
    msk = np.ones(n, dtype=bool)
    if para['mean'] > 0: msk *= ndimage.mean(intenimg, buf, index) >= para['mean']
    if para['mean'] < 0: msk *= ndimage.mean(intenimg, buf, index) < -para['mean']
    if para['max'] > 0: msk *= ndimage.maximum(intenimg, buf, index) >= para['max']
    if para['max'] < 0: msk *= ndimage.maximum(intenimg, buf, index) < -para['max']
    if para['min'] > 0: msk *= ndimage.minimum(intenimg, buf, index) >= para['min']
    if para['min'] < 0: msk *= ndimage.minimum(intenimg, buf, index) < -para['min']
    if para['sum'] > 0: msk *= ndimage.sum(intenimg, buf, index) >= para['sum']
    if para['sum'] < 0: msk *= ndimage.sum(intenimg, buf, index) < -para['sum']
    if para['std'] > 0: msk *= ndimage.standard_deviation(intenimg, buf, index) >= para['std']
    if para['std'] < 0: msk *= ndimage.standard_deviation(intenimg, buf, index) < -para['std']
    xy = ndimage.center_of_mass(intenimg, buf, index)
    xy = np.array(xy).round(2).T
    idx[1:][~msk] = para['back']
    idx[0] = 0
    img[:] = idx[buf]
    red_pts = {'type': 'points', 'body': xy[::-1].T[~msk], 'color': (255, 0, 0)}
    green_pts = {'type': 'points', 'body': xy[::-1].T[msk], 'color': (0, 255, 0)}
    self.app.get_img(para['inten']).mark = mark2shp({
        'type': 'layer', 'body': [red_pts, green_pts]})
    self.app.get_img(para['inten']).update()
def fourierlaplaceinhom(siz, sigma):
    """ The Laplacian operator in the Fourier domain is very simple:
          D^2 g(x,y,z) => -(4pi^2) (u^2 + v^2 + w^2) G(u,v,w)
    """
    (uu, vv, ww) = fouriercoords(siz)
    laplace = -(uu * uu / (sigma[0] * sigma[0]) +
                vv * vv / (sigma[1] * sigma[1]) +
                ww * ww / (sigma[2] * sigma[2]))
    laplace = (laplace - ndimage.minimum(laplace)) / \
        (ndimage.maximum(laplace) - ndimage.minimum(laplace))
    return laplace
def normalizeImage(a):
    """
    Normalizes numarray to fit into an image format
    that is values between 0 and 255.
    """
    # Minimum image value, i.e. how black the image can get
    minlevel = 0.0
    # Maximum image value, i.e. how white the image can get
    maxlevel = 235.0
    # Maximum standard deviations to include, i.e. pixel > N*stdev --> white
    devlimit = 5.0
    imrange = maxlevel - minlevel

    avg1 = ndimage.mean(a)
    stdev1 = ndimage.standard_deviation(a)

    min1 = ndimage.minimum(a)
    if min1 < avg1 - devlimit * stdev1:
        min1 = avg1 - devlimit * stdev1

    max1 = ndimage.maximum(a)
    if max1 > avg1 + devlimit * stdev1:
        max1 = avg1 + devlimit * stdev1

    a = (a - min1) / (max1 - min1) * imrange + minlevel
    a = numarray.where(a > maxlevel, 255.0, a)
    a = numarray.where(a < minlevel, 0.0, a)

    return a
def test_minimum04():
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels, index=[2, 3, 8])
        assert_array_almost_equal(output, [2.0, 4.0, 0.0])
def labelstats_str(factors, values, stat='mvnx'):
    # works also for string labels in ys, but requires 1D
    # from mailing list scipy-user 2009-02-11
    unil, unilinv = np.unique(factors, return_index=False, return_inverse=True)
    res = []
    if 'm' in stat:
        labelmeans = np.array(ndimage.mean(values, labels=unilinv,
                                           index=np.arange(len(unil))))
        res.append(labelmeans)
    if 'v' in stat:
        labelvars = np.array(ndimage.variance(values, labels=unilinv,
                                              index=np.arange(len(unil))))
        res.append(labelvars)
    if 'n' in stat:
        labelmin = np.array(ndimage.minimum(values, labels=unilinv,
                                            index=np.arange(len(unil))))
        res.append(labelmin)
    if 'x' in stat:
        labelmax = np.array(ndimage.maximum(values, labels=unilinv,
                                            index=np.arange(len(unil))))
        res.append(labelmax)
    return res
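# Illustrative call for labelstats_str (an assumption, not from the original
# source): group values by string factors and request means ('m') and
# minima ('n') only.
import numpy as np
from scipy import ndimage

factors = np.array(['a', 'b', 'a', 'b', 'c'])
values = np.array([1.0, 5.0, 3.0, 7.0, 2.0])
means, mins = labelstats_str(factors, values, stat='mn')
print(means)  # [2. 6. 2.] -- per-label means for 'a', 'b', 'c'
print(mins)   # [1. 5. 2.] -- per-label minima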
def scale_array_min_max(img, new_min, new_max):
    old_min = ndimage.minimum(img)
    old_max = ndimage.maximum(img)
    if old_min == old_max:
        return img - old_min + new_min
    return new_min + (img - old_min) * ((new_max - new_min) / (old_max - old_min))
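# Minimal usage sketch for scale_array_min_max (assumed, not from the
# original source): rescale an image into the 0-255 display range.
import numpy as np
from scipy import ndimage

img = np.array([[0.2, 0.4], [0.6, 0.8]])
scaled = scale_array_min_max(img, 0.0, 255.0)
print(scaled.min(), scaled.max())  # 0.0 255.0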
def blackEdges(img, rad=None, black=None):
    shape = img.shape
    if rad is None:
        rad = min(shape) / 64.0
    if black is None:
        black = ndimage.minimum(img[int(rad / 2.0):int(shape[0] - rad / 2.0),
                                    int(rad / 2.0):int(shape[1] - rad / 2.0)])
    img2 = img
    edgesize = 2
    # left edge
    img2[0:edgesize, 0:shape[1]] = black
    # right edge
    img2[int(shape[0] - edgesize):shape[0], 0:shape[1]] = black
    # top edge
    img2[0:shape[0], 0:edgesize] = black
    # bottom edge
    img2[0:shape[0], int(shape[1] - edgesize):shape[1]] = black
    # top-left corner
    img2[0:int(rad / 2.0), 0:int(rad / 2.0)] = black
    # bottom-left corner
    img2[int(shape[0] - rad / 2.0):shape[0], 0:int(rad / 2.0)] = black
    # top-right corner
    img2[0:int(rad / 2.0), int(shape[1] - rad / 2.0):shape[1]] = black
    # bottom-right corner
    img2[int(shape[0] - rad / 2.0):shape[0], int(shape[1] - rad / 2.0):shape[1]] = black
    # vertical bar
    img2[int(shape[0] / 2.0 - rad):int(shape[0] / 2.0 + rad), 0:shape[1]] = black
    # horizontal bar
    img2[0:shape[0], int(shape[1] / 2.0 - rad):int(shape[1] / 2.0 + rad)] = black
    return img2
def test_minimum03():
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels, index=2)
        assert_almost_equal(output, 2.0)
def blackNormalizeImage(a):
    """
    Normalizes numarray to fit into an image format
    that is values between 0 and 255.
    """
    # Minimum image value, i.e. how black the image can get
    minlevel = 0.0
    # Maximum image value, i.e. how white the image can get
    maxlevel = 200.0
    # Maximum standard deviations to include, i.e. pixel > N*stdev --> white
    devlimit = 5.0
    imrange = maxlevel - minlevel

    avg1 = ndimage.mean(a)
    stdev1 = ndimage.standard_deviation(a)

    min1 = ndimage.minimum(a)
    if min1 < avg1 - devlimit * stdev1:
        min1 = avg1 - devlimit * stdev1

    max1 = ndimage.maximum(a)
    if max1 > avg1 + devlimit * stdev1:
        max1 = avg1 + devlimit * stdev1

    a = (a - min1) / (max1 - min1) * imrange + minlevel
    a = numarray.where(a > maxlevel, 215.0, a)
    a = numarray.where(a < minlevel, 0.0, a)

    return a
def tworepnoiseest(img1, img2):
    if np.iscomplexobj(img1) and np.iscomplexobj(img2):
        real_STD = noiseest(img1.real, img2.real)
        imag_STD = noiseest(img1.imag, img2.imag)
        return np.sqrt((real_STD ** 2 + imag_STD ** 2) / 2.0)
    else:
        # Normalise images
        nimg1 = (img1 - ndimage.minimum(img1)) / \
            (ndimage.maximum(img1) - ndimage.minimum(img1))
        nimg2 = (img2 - ndimage.minimum(img2)) / \
            (ndimage.maximum(img2) - ndimage.minimum(img2))
        # nimg1 *= 256.0
        # nimg2 *= 256.0
        return np.sqrt(0.5) * (nimg1 - nimg2)
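# Assumed usage of tworepnoiseest for real-valued repeat acquisitions (the
# complex branch needs the module's noiseest helper, which is not shown):
# the result is a normalised difference image whose spread reflects noise.
import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
img1 = rng.normal(10.0, 1.0, (64, 64))
img2 = rng.normal(10.0, 1.0, (64, 64))
noise = tworepnoiseest(img1, img2)
print(noise.std())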
def LabelCutStr(labels3d, nstrCutMax, nstrCutMin):
    labels1d = np.unique(labels3d)[1:]
    print('---------------------------------')
    print('Max nstream threshold', nstrCutMax)
    print('Min nstream threshold', nstrCutMin)
    print('#Haloes before nstreams-cut:', labels1d.shape)
    maxnstrEachBlob = np.array(ndi.maximum(nstream, labels=labels3d, index=labels1d))
    c1 = (maxnstrEachBlob < nstrCutMax)  # entire region below the max-stream cut
    minnstrEachBlob = np.array(ndi.minimum(nstream, labels=labels3d, index=labels1d))
    c2 = (minnstrEachBlob < nstrCutMin)  # entire region below the min-stream cut
    MaskOutCondition0 = np.where(c1 | c2)
    maskingValues = labels1d[MaskOutCondition0]
    labels3d_out = MaskedRemove(labels3d, maskingValues)
    print('#Haloes after nstreams-cut nstreams:', (np.unique(labels3d_out)[1:]).shape)
    # maxnstrEachBlob = np.array(ndi.maximum(nstream, labels=labels3d_out,
    #                                        index=(np.unique(labels3d_out)[1:])))
    # minnstrEachBlob = np.array(ndi.minimum(nstream, labels=labels3d_out,
    #                                        index=(np.unique(labels3d_out)[1:])))
    # print('Streams min(min) ', minnstrEachBlob.min())
    # print('Streams min(max) ', maxnstrEachBlob.min())
    return labels3d_out
def test_minimum01():
    "minimum 1"
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels)
        assert_almost_equal(output, 1.0)
def get_morton_lut(array, no_data_value):
    """
    Return a lookup table to rearrange an array of ints in morton order.

    :param array: 2D int array with a range of integers from 0 to no_data_value
    :param no_data_value: no data value that is excluded from rearrangement.

    The no_data_value does not have to be present in the array, but if it
    is, it does not get reordered by the lookup table (lut):
    lut[no_data_value] == no_data_value
    """
    # morton variables have underscores
    _array = morton_array(array.shape)
    _no_data_value = _array.max().item() + 1

    # make lookup from node to morton number
    index = np.arange(no_data_value + 1)
    lut1 = ndimage.minimum(_array, labels=array, index=index)
    lut1[no_data_value] = _no_data_value

    # make lookup from morton number back to node numbers
    lut2 = np.empty(_no_data_value + 1, dtype='i8')
    lut2[np.sort(lut1)] = index
    lut2[_no_data_value] = no_data_value

    # return the combined lookup table
    return lut2[lut1]
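# Hedged demo of get_morton_lut (the array contents are illustrative, and the
# module's morton_array helper is assumed to be available, as the function
# requires): relabel a small node grid so ids follow Morton (Z-order) order.
import numpy as np
from scipy import ndimage

nodes = np.array([[0, 0, 1],
                  [2, 2, 1],
                  [2, 3, 3]])
lut = get_morton_lut(nodes, no_data_value=4)
print(lut[nodes])  # same regions, renumbered in Morton order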
def run(self, ips, imgs, para=None):
    lab = WindowsManager.get(para['lab']).ips.get_img()
    if lab.dtype != np.uint8 and lab.dtype != np.uint16:
        IPy.alert('Label image must be in type 8-bit or 16-bit')
        return
    index = range(1, lab.max() + 1)
    titles = ['Max', 'Min', 'Mean', 'Variance', 'Standard', 'Sum']
    key = {'Max': 'max', 'Min': 'min', 'Mean': 'mean',
           'Variance': 'var', 'Standard': 'std', 'Sum': 'sum'}
    titles = ['value'] + [i for i in titles if para[key[i]]]

    data = [index]
    img = ips.get_img()
    if img is lab:
        img = img > 0
    if para['max']: data.append(ndimage.maximum(img, lab, index))
    if para['min']: data.append(ndimage.minimum(img, lab, index))
    if para['mean']: data.append(ndimage.mean(img, lab, index).round(4))
    if para['var']: data.append(ndimage.variance(img, lab, index).round(4))
    if para['std']: data.append(ndimage.standard_deviation(img, lab, index).round(4))
    if para['sum']: data.append(ndimage.sum(img, lab, index).round(4))

    data = list(zip(*data))
    IPy.table(ips.title + '-segment', data, titles)
def run(self, ips, imgs, para=None):
    inten = ImageManager.get(para['inten'])
    if not para['slice']:
        imgs = [inten.img]
        msks = [ips.img]
    else:
        msks = ips.imgs
        imgs = inten.imgs
    if len(msks) == 1:
        msks *= len(imgs)
    buf = imgs[0].astype(np.uint16)
    strc = ndimage.generate_binary_structure(2, 1 if para['con'] == '4-connect' else 2)
    idct = ['Max', 'Min', 'Mean', 'Variance', 'Standard', 'Sum']
    key = {'Max': 'max', 'Min': 'min', 'Mean': 'mean',
           'Variance': 'var', 'Standard': 'std', 'Sum': 'sum'}
    idct = [i for i in idct if para[key[i]]]
    titles = ['Slice', 'ID'][0 if para['slice'] else 1:]
    if para['center']: titles.extend(['Center-X', 'Center-Y'])
    if para['extent']: titles.extend(['Min-Y', 'Min-X', 'Max-Y', 'Max-X'])
    titles.extend(idct)
    k = ips.unit[0]
    data, mark = [], {'type': 'layers', 'body': {}}
    # data, mark = [], []
    for i in range(len(imgs)):
        n = ndimage.label(msks[i], strc, output=buf)
        index = range(1, n + 1)
        dt = []
        if para['slice']: dt.append([i] * n)
        dt.append(range(n))

        xy = ndimage.center_of_mass(imgs[i], buf, index)
        xy = np.array(xy).round(2).T
        if para['center']: dt.extend([xy[1] * k, xy[0] * k])

        boxs = [None] * n
        if para['extent']:
            boxs = ndimage.find_objects(buf)
            boxs = [(i[1].start + (i[1].stop - i[1].start) / 2,
                     i[0].start + (i[0].stop - i[0].start) / 2,
                     i[1].stop - i[1].start, i[0].stop - i[0].start) for i in boxs]
            for j in (0, 1, 2, 3):
                dt.append([i[j] * k for i in boxs])
        if para['max']: dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
        if para['min']: dt.append(ndimage.minimum(imgs[i], buf, index).round(2))
        if para['mean']: dt.append(ndimage.mean(imgs[i], buf, index).round(2))
        if para['var']: dt.append(ndimage.variance(imgs[i], buf, index).round(2))
        if para['std']: dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
        if para['sum']: dt.append(ndimage.sum(imgs[i], buf, index).round(2))

        layer = {'type': 'layer', 'body': []}
        xy = np.int0(xy).T
        texts = [(i[1], i[0]) + ('id=%d' % n,) for i, n in zip(xy, range(len(xy)))]
        layer['body'].append({'type': 'texts', 'body': texts})
        if para['extent']:
            layer['body'].append({'type': 'rectangles', 'body': boxs})
        mark['body'][i] = layer

        data.extend(list(zip(*dt)))
    IPy.show_table(pd.DataFrame(data, columns=titles), inten.title + '-region statistic')
    inten.mark = GeometryMark(mark)
    inten.update = True
def checkArrayMinMax(self, a1, a2):
    """ Tests whether an image has a valid range for libcv """
    a1b = ndimage.median_filter(a1, size=3)
    min1 = ndimage.minimum(a1b)
    max1 = ndimage.maximum(a1b)
    if max1 - min1 < 10:
        self.logger.error("Old Image Range Error %d" % int(max1 - min1))
        return False
    a2b = ndimage.median_filter(a2, size=3)
    min2 = ndimage.minimum(a2b)
    max2 = ndimage.maximum(a2b)
    if max2 - min2 < 10:
        self.logger.error("New Image Range Error %d" % int(max2 - min2))
        return False
    return True
def normalise(data):
    """Normalise image to the range [0, 1]."""
    maxval = ndimage.maximum(data)
    minval = ndimage.minimum(data)
    print("Normalise max %f min %f" % (maxval, minval))
    # return as float32
    return ((data - minval) / (maxval - minval)).astype(np.float32)
def basicswi(cmplx_input_image, mask, order=2):
    """
    Susceptibility-like weighted image - modified to use normalised
    local phase as the weighting.
    """
    if np.iscomplexobj(cmplx_input_image):
        magn = np.abs(cmplx_input_image)
        phase = np.angle(cmplx_input_image)
        from scipy.ndimage import uniform_filter
        normphase = uniform_filter(phase, 5.0, mode='constant', origin=-2.5)
        normphase = (normphase - ndimage.minimum(normphase)) / \
            (ndimage.maximum(normphase) - ndimage.minimum(normphase))
        weight = (normphase + 1.0)
        weight = weight.clip(min=0.0, max=1.0)
        return magn * (weight ** order) * mask
    else:
        print('Error basicswi: input image not complex')
        return np.abs(cmplx_input_image)
def coadd_class(class_star, labels, index=None):
    if index is None:
        index = np.unique(labels)
    class_avg = nd.mean(class_star, labels=labels, index=index)
    class_std = nd.standard_deviation(class_star, labels=labels, index=index)
    class_min = nd.minimum(class_star, labels=labels, index=index)
    class_max = nd.maximum(class_star, labels=labels, index=index)
    return class_avg, class_std, class_min, class_max
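# Assumed example for coadd_class (not from the original source): summarise
# star/galaxy classifier values per coadd object label.
import numpy as np
from scipy import ndimage as nd

class_star = np.array([0.1, 0.9, 0.8, 0.2, 0.95])
labels = np.array([1, 1, 2, 2, 2])
avg, std, cmin, cmax = coadd_class(class_star, labels)
print(avg)        # per-label mean of class_star
print(cmin, cmax)  # per-label extremes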
def getImageInfo(im):
    """
    Returns basic image statistics; good for debugging.
    """
    avg1 = ndimage.mean(im)
    stdev1 = ndimage.standard_deviation(im)
    min1 = ndimage.minimum(im)
    max1 = ndimage.maximum(im)
    return avg1, stdev1, min1, max1
def labelstats_str(factors, values):
    # works also for string labels in ys, but requires 1D
    # from mailing list scipy-user 2009-02-11
    unil, unilinv = np.unique(factors, return_index=False, return_inverse=True)
    labelmeans = np.array(ndimage.mean(values, labels=unilinv,
                                       index=np.arange(len(unil))))
    labelvars = np.array(ndimage.variance(values, labels=unilinv,
                                          index=np.arange(len(unil))))
    labelmin = np.array(ndimage.minimum(values, labels=unilinv,
                                        index=np.arange(len(unil))))
    labelmax = np.array(ndimage.maximum(values, labels=unilinv,
                                        index=np.arange(len(unil))))
    return labelmeans, labelvars, labelmin, labelmax
def keypoints(m1, m2, m3):
    keypoint_candidate = m2.item(4)
    if keypoint_candidate == ndimage.maximum(m2):
        if keypoint_candidate > ndimage.maximum(m1):
            if keypoint_candidate > ndimage.maximum(m3):
                return 1  # max keypoint_candidate
            else:
                return -1
        else:
            return -1
    elif keypoint_candidate == ndimage.minimum(m2):
        if keypoint_candidate < ndimage.minimum(m1):
            if keypoint_candidate < ndimage.minimum(m3):
                return 2  # min keypoint_candidate
            else:
                return -1
        else:
            return -1
    return -1
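# Sketch of how keypoints() might be driven (an assumption based on its
# SIFT-style contract): m1, m2, m3 are 3x3 neighbourhoods from adjacent
# difference-of-Gaussian slices, and m2.item(4) is the centre pixel of the
# middle slice.
import numpy as np
from scipy import ndimage

m1 = np.zeros((3, 3))
m2 = np.zeros((3, 3))
m2[1, 1] = 5.0  # centre dominates all three neighbourhoods
m3 = np.zeros((3, 3))
print(keypoints(m1, m2, m3))  # 1 -> centre is a scale-space maximum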
def normalise(data):
    """Normalise ndimage to [0, 1] (must not be complex)."""
    _max = ndimage.maximum(data)
    _min = ndimage.minimum(data)
    print("Normalise max %f min %f" % (_max, _min))
    # return as float32
    data = (data - _min) / (_max - _min)
    return data.astype(np.float32)
def test_extrema02():
    "extrema 2"
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels, index=2)
        output2 = ndimage.minimum(input, labels=labels, index=2)
        output3 = ndimage.maximum(input, labels=labels, index=2)
        output4 = ndimage.minimum_position(input, labels=labels, index=2)
        output5 = ndimage.maximum_position(input, labels=labels, index=2)
        assert_equal(output1, (output2, output3, output4, output5))
def copy_labels(labels, segmented):
    '''Carry differences between orig_segmented and new_segmented into "labels"

    labels - labels matrix similarly segmented to "segmented"
    segmented - the newly numbered labels matrix (a subset of pixels are labeled)
    '''
    max_labels = np.max(segmented)
    seglabel = scind.minimum(labels, segmented, np.arange(1, max_labels + 1))
    labels_new = labels.copy()
    labels_new[segmented != 0] = seglabel[segmented[segmented != 0] - 1]
    return labels_new
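# Assumed illustration of copy_labels (not from the original source): pixels
# covered by "segmented" adopt the minimum original label under each segment.
import numpy as np
from scipy import ndimage as scind

labels = np.array([[1, 1, 0],
                   [0, 2, 2]])
segmented = np.array([[1, 1, 0],
                      [0, 1, 1]])
print(copy_labels(labels, segmented))  # segment 1 maps back to label 1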
def test_extrema01():
    "extrema 1"
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels)
        output2 = ndimage.minimum(input, labels=labels)
        output3 = ndimage.maximum(input, labels=labels)
        output4 = ndimage.minimum_position(input, labels=labels)
        output5 = ndimage.maximum_position(input, labels=labels)
        assert_equal(output1, (output2, output3, output4, output5))
def imageinfo(im):
    #print(" ... size: ", im.shape)
    #print(" ... sum:  ", im.sum())
    avg1 = ndimage.mean(im)
    stdev1 = ndimage.standard_deviation(im)
    print(" ... avg:  ", round(avg1, 6), "+-", round(stdev1, 6))
    min1 = ndimage.minimum(im)
    max1 = ndimage.maximum(im)
    print(" ... range:", round(min1, 6), "<>", round(max1, 6))
    return
def cal(self, stat='mean'):
    if stat == 'mean':
        zonalstats = ndimage.mean(self.data, labels=self.lb, index=self.labSet)
    if stat == 'minimum':
        zonalstats = ndimage.minimum(self.data, labels=self.lb, index=self.labSet)
    if stat == 'maximum':
        zonalstats = ndimage.maximum(self.data, labels=self.lb, index=self.labSet)
    if stat == 'sum':
        zonalstats = ndimage.sum(self.data, labels=self.lb, index=self.labSet)
    if stat == 'std':
        zonalstats = ndimage.standard_deviation(self.data, labels=self.lb, index=self.labSet)
    if stat == 'variance':
        zonalstats = ndimage.variance(self.data, labels=self.lb, index=self.labSet)
    return zonalstats
def test_extrema04():
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output1 = ndimage.extrema(input, labels, [1, 2])
        output2 = ndimage.minimum(input, labels, [1, 2])
        output3 = ndimage.maximum(input, labels, [1, 2])
        output4 = ndimage.minimum_position(input, labels, [1, 2])
        output5 = ndimage.maximum_position(input, labels, [1, 2])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
def test_extrema03():
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels, index=[2, 3, 8])
        output2 = ndimage.minimum(input, labels=labels, index=[2, 3, 8])
        output3 = ndimage.maximum(input, labels=labels, index=[2, 3, 8])
        output4 = ndimage.minimum_position(input, labels=labels, index=[2, 3, 8])
        output5 = ndimage.maximum_position(input, labels=labels, index=[2, 3, 8])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
def form_clusters(data, threshold, type='p-value', cluster_size_threshold=1):
    s = ndimage.generate_binary_structure(3, 3)
    if type == 'p-value':
        clusters, n_clusters = ndimage.label((data < threshold) & (data > 0), structure=s)
        stat_cl = ndimage.minimum(data, labels=clusters, index=range(1, n_clusters + 1))
    elif type == 't-stat':
        clusters, n_clusters = ndimage.label(data > threshold, structure=s)
        stat_cl = ndimage.maximum(data, labels=clusters, index=range(1, n_clusters + 1))
    else:
        raise ValueError('Wrong map type!')
    clusters_label = np.arange(1, n_clusters + 1)
    # per-cluster voxel counts
    count = ndimage.sum(np.ones_like(data), labels=clusters, index=clusters_label)
    clusters_mask = (count > cluster_size_threshold)
    if np.sum(count > 10 ** 5) != 0:
        raise ValueError('Some of the clusters are too large for analysis ({}). '
                         'Change the threshold used to form clusters or check your '
                         'input image. If everything is correct, then you probably '
                         'need to use -model correlation.'.format(np.max(count)))
    # TODO correlation
    clusters_label = clusters_label[clusters_mask]
    return clusters, clusters_label, stat_cl
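# Illustrative call for form_clusters (assumed, not from the original
# source): threshold a synthetic 3-D p-value map at p < 0.05; each cluster's
# reported statistic is its minimum (i.e. most significant) p-value.
import numpy as np
from scipy import ndimage

pmap = np.ones((8, 8, 8))
pmap[2:4, 2:4, 2:4] = 0.01  # one significant block
clusters, labels, peaks = form_clusters(pmap, 0.05, type='p-value')
print(labels, peaks)  # [1] [0.01]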
def test_stat_funcs_2d():
    a = np.array([[5, 6, 0, 0, 0],
                  [8, 9, 0, 0, 0],
                  [0, 0, 0, 3, 5]])
    lbl = np.array([[1, 1, 0, 0, 0],
                    [1, 1, 0, 0, 0],
                    [0, 0, 0, 2, 2]])

    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
    assert_array_equal(mean, [7.0, 4.0])

    var = ndimage.variance(a, labels=lbl, index=[1, 2])
    assert_array_equal(var, [2.5, 1.0])

    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))

    med = ndimage.median(a, labels=lbl, index=[1, 2])
    assert_array_equal(med, [7.0, 4.0])

    min = ndimage.minimum(a, labels=lbl, index=[1, 2])
    assert_array_equal(min, [5, 3])

    max = ndimage.maximum(a, labels=lbl, index=[1, 2])
    assert_array_equal(max, [9, 5])
def anz(self):
    self.result = {
        'id': list(self.labSet),
        'mean': [round(x, 4) for x in
                 list(ndimage.mean(self.data, labels=self.lb, index=self.labSet))],
        'min': list(ndimage.minimum(self.data, labels=self.lb, index=self.labSet)),
        'max': list(ndimage.maximum(self.data, labels=self.lb, index=self.labSet)),
        'std': list(ndimage.standard_deviation(self.data, labels=self.lb, index=self.labSet)),
    }
    #print(self.result['id'])
    #print(self.result['min'])
    #print(len(self.result['min']))
    self.df = pd.DataFrame(self.result)
    self.df = self.df[self.df['id'] > 0]
    self.df.set_index(self.df['id'])
    # save each zonal output ... TODO
    # self.outname = self._inDs[:-4] + '.csv'
    # f = open(self.outname, 'w')
    # self.df.to_csv(f, index=False)
    # f.close()
    print(self.df.iloc[0:5, ])
    return self.df
def normRange(im):
    min1 = ndimage.minimum(im)
    max1 = ndimage.maximum(im)
    return (im - min1) / (max1 - min1)
def minimum(input, labels, index):
    return fix(scind.minimum(input, labels, index))
def test_minimum02():
    labels = np.array([1, 0], bool)
    input = np.array([[2, 2], [2, 4]], bool)
    output = ndimage.minimum(input, labels=labels)
    assert_almost_equal(output, 1.0)
def filter_using_image(self, workspace, mask):
    '''Filter out connections using local intensity minima between objects

    workspace - the workspace for the image set
    mask - mask of background points within the minimum distance
    '''
    # NOTE: This is an efficient implementation and an improvement
    #       in accuracy over the Matlab version. It would be faster and
    #       more accurate to eliminate the line-connecting and instead
    #       do the following:
    #       * Distance transform to get the coordinates of the closest
    #         point in an object for points in the background that are
    #         at most 1/2 of the max distance between objects.
    #       * Take the intensity at this closest point and similarly
    #         label the background point if the background intensity
    #         is at least the minimum intensity fraction
    #       * Assume there is a connection between objects if, after this
    #         labeling, there are adjacent points in each object.
    #
    #       As it is, the algorithm duplicates the Matlab version but suffers
    #       for cells whose intensity isn't high in the centroid and clearly
    #       suffers when two cells touch at some point that's off of the line
    #       between the two.
    objects = workspace.object_set.get_objects(self.objects_name.value)
    labels = objects.segmented
    image = self.get_image(workspace)
    if self.show_window:
        # Save the image for display
        workspace.display_data.image = image
    #
    # Do a distance transform into the background to label points
    # in the background with their closest foreground object
    #
    i, j = scind.distance_transform_edt(labels == 0,
                                        return_indices=True,
                                        return_distances=False)
    confluent_labels = labels[i, j]
    confluent_labels[~mask] = 0
    if self.where_algorithm == CA_CLOSEST_POINT:
        #
        # For the closest point method, find the intensity at
        # the closest point in the object (which will be the point itself
        # for points in the object).
        #
        object_intensity = image[i, j] * self.minimum_intensity_fraction.value
        confluent_labels[object_intensity > image] = 0
    count, index, c_j = morph.find_neighbors(confluent_labels)
    if len(c_j) == 0:
        # Nobody touches - return the labels matrix
        return labels
    #
    # Make a row of i matching the touching j
    #
    c_i = np.zeros(len(c_j))
    #
    # Eliminate labels without matches
    #
    label_numbers = np.arange(1, len(count) + 1)[count > 0]
    index = index[count > 0]
    count = count[count > 0]
    #
    # Get the differences between labels so we can use a cumsum trick
    # to increment to the next label when they change
    #
    label_numbers[1:] = label_numbers[1:] - label_numbers[:-1]
    c_i[index] = label_numbers
    c_i = np.cumsum(c_i).astype(int)
    if self.where_algorithm == CA_CENTROIDS:
        #
        # Only connect points > minimum intensity fraction
        #
        center_i, center_j = morph.centers_of_labels(labels)
        indexes, counts, i, j = morph.get_line_pts(center_i[c_i - 1],
                                                   center_j[c_i - 1],
                                                   center_i[c_j - 1],
                                                   center_j[c_j - 1])
        #
        # The indexes of the centroids at pt1
        #
        last_indexes = indexes + counts - 1
        #
        # The minimum of the intensities at pt0 and pt1
        #
        centroid_intensities = np.minimum(
            image[i[indexes], j[indexes]],
            image[i[last_indexes], j[last_indexes]])
        #
        # Assign label numbers to each point so we can use
        # scipy.ndimage.minimum. The label numbers are indexes into
        # "connections" above.
        #
        pt_labels = np.zeros(len(i), int)
        pt_labels[indexes[1:]] = 1
        pt_labels = np.cumsum(pt_labels)
        minima = scind.minimum(image[i, j], pt_labels, np.arange(len(indexes)))
        minima = morph.fixup_scipy_ndimage_result(minima)
        #
        # Filter the connections using the image
        #
        mif = self.minimum_intensity_fraction.value
        i = c_i[centroid_intensities * mif <= minima]
        j = c_j[centroid_intensities * mif <= minima]
    else:
        i = c_i
        j = c_j
    #
    # Add in connections from self to self
    #
    unique_labels = np.unique(labels)
    i = np.hstack((i, unique_labels))
    j = np.hstack((j, unique_labels))
    #
    # Run "all_connected_components" to get a component number for
    # objects identified as same.
    #
    new_indexes = morph.all_connected_components(i, j)
    new_labels = np.zeros(labels.shape, int)
    new_labels[labels != 0] = new_indexes[labels[labels != 0]]
    return new_labels
def run(self, workspace):
    if self.show_window:
        workspace.display_data.col_labels = (
            "Image", "Object", "Feature", "Mean", "Median", "STD")
        workspace.display_data.statistics = statistics = []
    for image_name in [img.name for img in self.images]:
        image = workspace.image_set.get_image(image_name.value,
                                              must_be_grayscale=True)
        for object_name in [obj.name for obj in self.objects]:
            # Need to refresh image after each iteration...
            img = image.pixel_data
            if image.has_mask:
                masked_image = img.copy()
                masked_image[~image.mask] = 0
            else:
                masked_image = img
            objects = workspace.object_set.get_objects(object_name.value)
            nobjects = objects.count
            integrated_intensity = np.zeros((nobjects,))
            integrated_intensity_edge = np.zeros((nobjects,))
            mean_intensity = np.zeros((nobjects,))
            mean_intensity_edge = np.zeros((nobjects,))
            std_intensity = np.zeros((nobjects,))
            std_intensity_edge = np.zeros((nobjects,))
            min_intensity = np.zeros((nobjects,))
            min_intensity_edge = np.zeros((nobjects,))
            max_intensity = np.zeros((nobjects,))
            max_intensity_edge = np.zeros((nobjects,))
            mass_displacement = np.zeros((nobjects,))
            lower_quartile_intensity = np.zeros((nobjects,))
            median_intensity = np.zeros((nobjects,))
            mad_intensity = np.zeros((nobjects,))
            upper_quartile_intensity = np.zeros((nobjects,))
            cmi_x = np.zeros((nobjects,))
            cmi_y = np.zeros((nobjects,))
            max_x = np.zeros((nobjects,))
            max_y = np.zeros((nobjects,))
            for labels, lindexes in objects.get_labels():
                lindexes = lindexes[lindexes != 0]
                labels, img = cpo.crop_labels_and_image(labels, img)
                _, masked_image = cpo.crop_labels_and_image(labels, masked_image)
                outlines = cpmo.outline(labels)

                if image.has_mask:
                    _, mask = cpo.crop_labels_and_image(labels, image.mask)
                    masked_labels = labels.copy()
                    masked_labels[~mask] = 0
                    masked_outlines = outlines.copy()
                    masked_outlines[~mask] = 0
                else:
                    masked_labels = labels
                    masked_outlines = outlines

                lmask = (masked_labels > 0) & np.isfinite(img)  # Ignore NaNs, Infs
                has_objects = np.any(lmask)
                if has_objects:
                    limg = img[lmask]
                    llabels = labels[lmask]
                    mesh_y, mesh_x = np.mgrid[0:masked_image.shape[0],
                                              0:masked_image.shape[1]]
                    mesh_x = mesh_x[lmask]
                    mesh_y = mesh_y[lmask]
                    lcount = fix(nd.sum(np.ones(len(limg)), llabels, lindexes))
                    integrated_intensity[lindexes - 1] = \
                        fix(nd.sum(limg, llabels, lindexes))
                    mean_intensity[lindexes - 1] = \
                        integrated_intensity[lindexes - 1] / lcount
                    std_intensity[lindexes - 1] = np.sqrt(
                        fix(nd.mean((limg - mean_intensity[llabels - 1]) ** 2,
                                    llabels, lindexes)))
                    min_intensity[lindexes - 1] = \
                        fix(nd.minimum(limg, llabels, lindexes))
                    max_intensity[lindexes - 1] = \
                        fix(nd.maximum(limg, llabels, lindexes))
                    # Compute the position of the intensity maximum
                    max_position = np.array(
                        fix(nd.maximum_position(limg, llabels, lindexes)),
                        dtype=int)
                    max_position = np.reshape(max_position,
                                              (max_position.shape[0],))
                    max_x[lindexes - 1] = mesh_x[max_position]
                    max_y[lindexes - 1] = mesh_y[max_position]
                    # The mass displacement is the distance between the center
                    # of mass of the binary image and of the intensity image.
                    # The center of mass is the average X or Y for the binary
                    # image and the sum of X or Y * intensity / integrated
                    # intensity
                    cm_x = fix(nd.mean(mesh_x, llabels, lindexes))
                    cm_y = fix(nd.mean(mesh_y, llabels, lindexes))
                    i_x = fix(nd.sum(mesh_x * limg, llabels, lindexes))
                    i_y = fix(nd.sum(mesh_y * limg, llabels, lindexes))
                    cmi_x[lindexes - 1] = i_x / integrated_intensity[lindexes - 1]
                    cmi_y[lindexes - 1] = i_y / integrated_intensity[lindexes - 1]
                    diff_x = cm_x - cmi_x[lindexes - 1]
                    diff_y = cm_y - cmi_y[lindexes - 1]
                    mass_displacement[lindexes - 1] = \
                        np.sqrt(diff_x * diff_x + diff_y * diff_y)
                    #
                    # Sort the intensities by label, then intensity.
                    # For each label, find the index above and below
                    # the 25%, 50% and 75% mark and take the weighted
                    # average.
                    #
                    order = np.lexsort((limg, llabels))
                    areas = lcount.astype(int)
                    indices = np.cumsum(areas) - areas
                    for dest, fraction in ((lower_quartile_intensity, 1.0 / 4.0),
                                           (median_intensity, 1.0 / 2.0),
                                           (upper_quartile_intensity, 3.0 / 4.0)):
                        qindex = indices.astype(float) + areas * fraction
                        qfraction = qindex - np.floor(qindex)
                        qindex = qindex.astype(int)
                        qmask = qindex < indices + areas - 1
                        qi = qindex[qmask]
                        qf = qfraction[qmask]
                        dest[lindexes[qmask] - 1] = (limg[order[qi]] * (1 - qf) +
                                                     limg[order[qi + 1]] * qf)
                        #
                        # In some situations (e.g. only 3 points), there may
                        # not be an upper bound.
                        #
                        qmask = (~qmask) & (areas > 0)
                        dest[lindexes[qmask] - 1] = limg[order[qindex[qmask]]]
                    #
                    # Once again, for the MAD
                    #
                    madimg = np.abs(limg - median_intensity[llabels - 1])
                    order = np.lexsort((madimg, llabels))
                    qindex = indices.astype(float) + areas / 2.0
                    qfraction = qindex - np.floor(qindex)
                    qindex = qindex.astype(int)
                    qmask = qindex < indices + areas - 1
                    qi = qindex[qmask]
                    qf = qfraction[qmask]
                    mad_intensity[lindexes[qmask] - 1] = \
                        madimg[order[qi]] * (1 - qf) + madimg[order[qi + 1]] * qf
                    qmask = (~qmask) & (areas > 0)
                    mad_intensity[lindexes[qmask] - 1] = madimg[order[qindex[qmask]]]

                emask = masked_outlines > 0
                eimg = img[emask]
                elabels = labels[emask]
                has_edge = len(eimg) > 0
                if has_edge:
                    ecount = fix(nd.sum(np.ones(len(eimg)), elabels, lindexes))
                    integrated_intensity_edge[lindexes - 1] = \
                        fix(nd.sum(eimg, elabels, lindexes))
                    mean_intensity_edge[lindexes - 1] = \
                        integrated_intensity_edge[lindexes - 1] / ecount
                    std_intensity_edge[lindexes - 1] = np.sqrt(fix(nd.mean(
                        (eimg - mean_intensity_edge[elabels - 1]) ** 2,
                        elabels, lindexes)))
                    min_intensity_edge[lindexes - 1] = \
                        fix(nd.minimum(eimg, elabels, lindexes))
                    max_intensity_edge[lindexes - 1] = \
                        fix(nd.maximum(eimg, elabels, lindexes))
            m = workspace.measurements
            for category, feature_name, measurement in (
                    (INTENSITY, INTEGRATED_INTENSITY, integrated_intensity),
                    (INTENSITY, MEAN_INTENSITY, mean_intensity),
                    (INTENSITY, STD_INTENSITY, std_intensity),
                    (INTENSITY, MIN_INTENSITY, min_intensity),
                    (INTENSITY, MAX_INTENSITY, max_intensity),
                    (INTENSITY, INTEGRATED_INTENSITY_EDGE, integrated_intensity_edge),
                    (INTENSITY, MEAN_INTENSITY_EDGE, mean_intensity_edge),
                    (INTENSITY, STD_INTENSITY_EDGE, std_intensity_edge),
                    (INTENSITY, MIN_INTENSITY_EDGE, min_intensity_edge),
                    (INTENSITY, MAX_INTENSITY_EDGE, max_intensity_edge),
                    (INTENSITY, MASS_DISPLACEMENT, mass_displacement),
                    (INTENSITY, LOWER_QUARTILE_INTENSITY, lower_quartile_intensity),
                    (INTENSITY, MEDIAN_INTENSITY, median_intensity),
                    (INTENSITY, MAD_INTENSITY, mad_intensity),
                    (INTENSITY, UPPER_QUARTILE_INTENSITY, upper_quartile_intensity),
                    (C_LOCATION, LOC_CMI_X, cmi_x),
                    (C_LOCATION, LOC_CMI_Y, cmi_y),
                    (C_LOCATION, LOC_MAX_X, max_x),
                    (C_LOCATION, LOC_MAX_Y, max_y)):
                measurement_name = "%s_%s_%s" % (category, feature_name,
                                                 image_name.value)
                m.add_measurement(object_name.value, measurement_name,
                                  measurement)
                if self.show_window and len(measurement) > 0:
                    statistics.append(
                        (image_name.value, object_name.value, feature_name,
                         np.round(np.mean(measurement), 3),
                         np.round(np.median(measurement), 3),
                         np.round(np.std(measurement), 3)))
def calcNormConvMap(image, imagefft, tmplmask, oversized, pixrad):
    t1 = time.time()
    print(" ... computing FindEM's norm_conv_map")

    if nd_image.minimum(image) < 0.0 or nd_image.minimum(tmplmask) < 0.0:
        print(" !!! WARNING image or mask is less than zero")

    tmplsize = (tmplmask.shape)[1]
    nmask = tmplmask.sum()
    tmplshape = tmplmask.shape
    imshape = image.shape
    shift = int(-1 * tmplsize / 2.0)
    # tmplmask2 = nd_image.shift(tmplmask, shift, mode='wrap', order=0)
    err = 0.000001

    # CNV2 = convolution(image**2, mask)
    tmplmaskfft = fft.real_fft2d(tmplmask, s=oversized)
    imagesqfft = fft.real_fft2d(image * image, s=oversized)
    cnv2 = convolution_fft(imagesqfft, tmplmaskfft, oversized)
    cnv2 = cnv2 + err
    del imagesqfft
    # SHIFTING CAN BE SLOW
    # cnv2 = nd_image.shift(cnv2, shift, mode='wrap', order=0)

    # CNV1 = convolution(image, mask)
    cnv1 = convolution_fft(imagefft, tmplmaskfft, oversized)
    cnv1 = cnv1 + err
    del tmplmaskfft
    # SHIFTING CAN BE SLOW
    cnv1 = nd_image.shift(cnv1, shift, mode="wrap", order=0)

    # V2 = ((nm*cnv2) - (cnv1*cnv1)) / (nm*nm)
    a1 = nmask * cnv2
    a1 = a1[tmplshape[0] // 2 - 1: imshape[0] + tmplshape[0] // 2 - 1,
            tmplshape[1] // 2 - 1: imshape[1] + tmplshape[1] // 2 - 1]
    b1 = cnv1 * cnv1
    b1 = b1[tmplshape[0] // 2 - 1: imshape[0] + tmplshape[0] // 2 - 1,
            tmplshape[1] // 2 - 1: imshape[1] + tmplshape[1] // 2 - 1]
    del cnv2
    del cnv1

    v2 = a1 - b1
    v2 = v2 / (nmask ** 2)

    # REMOVE OUTSIDE AREA
    cshape = v2.shape
    white1 = 0.01
    v2[0:pixrad * 2, 0:cshape[1]] = white1
    v2[0:cshape[0], 0:pixrad * 2] = white1
    v2[cshape[0] - pixrad * 2:cshape[0], 0:cshape[1]] = white1
    v2[0:cshape[0], cshape[1] - pixrad * 2:cshape[1]] = white1

    xn = (v2.shape)[0] // 2

    # IMPORTANT TO CHECK FOR ERROR
    if (v2[xn - 1, xn - 1] > 1.0 or v2[xn, xn] > 1.0 or v2[xn + 1, xn + 1] > 1.0
            or nd_image.mean(v2[xn // 2:3 * xn // 2, xn // 2:3 * xn // 2]) > 1.0):
        print(" !!! MAJOR ERROR IN NORMALIZATION CALCULATION (values > 1)")
        imageinfo(v2)
        print(" ... VALUES: ", v2[xn - 1, xn - 1], v2[xn, xn],
              v2[xn + 1, xn + 1], nd_image.mean(v2))
        numeric_to_jpg(a1, "a1.jpg")
        numeric_to_jpg(b1, "b1.jpg")
        numeric_to_jpg(b1, "v2.jpg")
        sys.exit(1)
    if (v2[xn - 1, xn - 1] < 0.0 or v2[xn, xn] < 0.0 or v2[xn + 1, xn + 1] < 0.0
            or nd_image.mean(v2[xn // 2:3 * xn // 2, xn // 2:3 * xn // 2]) < 0.0):
        print(" !!! MAJOR ERROR IN NORMALIZATION CALCULATION (values < 0)")
        imageinfo(v2)
        print(" ... VALUES: ", v2[xn - 1, xn - 1], v2[xn, xn],
              v2[xn + 1, xn + 1], nd_image.mean(v2))
        numeric_to_jpg(a1, "a1.jpg")
        numeric_to_jpg(b1, "b1.jpg")
        numeric_to_jpg(b1, "v2.jpg")
        sys.exit(1)
    del a1
    del b1

    # Normconvmap = sqrt(v2)
    v2 = numarray.where(v2 < err, err, v2)
    normconvmap = numarray.sqrt(v2)
    del v2

    print(" ... ... time %.2f sec" % float(time.time() - t1))
    # RETURN CENTER
    return normconvmap
def calculate_minimum_distances(self, workspace, parent_name):
    '''Calculate the distance from child center to parent perimeter'''
    meas = workspace.measurements
    assert isinstance(meas, cpmeas.Measurements)
    sub_object_name = self.sub_object_name.value
    parents = workspace.object_set.get_objects(parent_name)
    children = workspace.object_set.get_objects(sub_object_name)
    parents_of = self.get_parents_of(workspace, parent_name)
    if len(parents_of) == 0:
        dist = np.zeros((0,))
    elif np.all(parents_of == 0):
        dist = np.array([np.NaN] * len(parents_of))
    else:
        mask = parents_of > 0
        ccenters = centers_of_labels(children.segmented).transpose()
        ccenters = ccenters[mask, :]
        parents_of_masked = parents_of[mask] - 1
        pperim = outline(parents.segmented)
        # Get a list of all points on the perimeter
        perim_loc = np.argwhere(pperim != 0)
        # Get the label # for each point
        perim_idx = pperim[perim_loc[:, 0], perim_loc[:, 1]]
        # Sort the points by label #
        idx = np.lexsort((perim_loc[:, 1], perim_loc[:, 0], perim_idx))
        perim_loc = perim_loc[idx, :]
        perim_idx = perim_idx[idx]
        # Get counts and indexes to each run of perimeter points
        counts = fix(scind.sum(np.ones(len(perim_idx)), perim_idx,
                               np.arange(1, perim_idx[-1] + 1))).astype(np.int32)
        indexes = np.cumsum(counts) - counts
        # For the children, get the index and count of the parent
        ccounts = counts[parents_of_masked]
        cindexes = indexes[parents_of_masked]
        # Now make an array that has an element for each of that child's
        # perimeter points
        clabel = np.zeros(np.sum(ccounts), int)
        # cfirst is the eventual first index of each child in the
        # clabel array
        cfirst = np.cumsum(ccounts) - ccounts
        clabel[cfirst[1:]] += 1
        clabel = np.cumsum(clabel)
        # Make an index that runs from 0 to ccounts for each
        # child label
        cp_index = np.arange(len(clabel)) - cfirst[clabel]
        # then add cindexes to get an index to the perimeter point
        cp_index += cindexes[clabel]
        # Now, calculate the distance from the centroid of each label
        # to each perimeter point in the parent
        dist = np.sqrt(np.sum((perim_loc[cp_index, :] -
                               ccenters[clabel, :]) ** 2, 1))
        # Finally, find the minimum distance per child
        min_dist = fix(scind.minimum(dist, clabel, np.arange(len(ccounts))))
        # Account for unparented children
        dist = np.array([np.NaN] * len(mask))
        dist[mask] = min_dist
    meas.add_measurement(sub_object_name, FF_MINIMUM % parent_name, dist)
def objstats(args):
    # Open and read from image and segmentation
    try:
        img_ds = gdal.Open(args.image, gdal.GA_ReadOnly)
    except:
        logger.error("Could not open image: {}".format(args.image))
        sys.exit(1)
    try:
        seg_ds = ogr.Open(args.segment, 0)
        seg_layer = seg_ds.GetLayer()
    except:
        logger.error("Could not open segmentation vector file: {}".format(args.segment))
        sys.exit(1)

    cols, rows = img_ds.RasterXSize, img_ds.RasterYSize
    bands = range(1, img_ds.RasterCount + 1)
    if args.bands is not None:
        bands = args.bands

    # Rasterize segments
    logger.debug("About to rasterize segment vector file")
    img_srs = osr.SpatialReference()
    img_srs.ImportFromWkt(img_ds.GetProjectionRef())

    mem_raster = gdal.GetDriverByName("MEM").Create(
        "", cols, rows, 1, gdal.GDT_UInt32)
    mem_raster.SetProjection(img_ds.GetProjection())
    mem_raster.SetGeoTransform(img_ds.GetGeoTransform())

    # Create artificial 'FID' field
    fid_layer = seg_ds.ExecuteSQL(
        'select FID, * from "{l}"'.format(l=seg_layer.GetName()))
    gdal.RasterizeLayer(mem_raster, [1], fid_layer, options=["ATTRIBUTE=FID"])
    logger.debug("Rasterized segment vector file")

    seg = mem_raster.GetRasterBand(1).ReadAsArray()
    logger.debug("Read segmentation image into memory")
    mem_raster = None
    seg_ds = None

    # Get list of unique segments
    useg = np.unique(seg)

    # If calc is num, do only for 1 band
    out_bands = 0
    for stat in args.stat:
        if stat == "num":
            out_bands += 1
        else:
            out_bands += len(bands)

    # Create output driver
    driver = gdal.GetDriverByName(args.format)
    out_ds = driver.Create(args.output, cols, rows, out_bands, gdal.GDT_Float32)

    # Loop through image bands
    out_b = 0
    out_2d = np.empty_like(seg, dtype=np.float32)
    for i_b, b in enumerate(bands):
        img_band = img_ds.GetRasterBand(b)
        ndv = img_band.GetNoDataValue()
        band_name = img_band.GetDescription()
        if not band_name:
            band_name = "Band {i}".format(i=b)
        logger.info('Processing input band {i}, "{b}"'.format(i=b, b=band_name))

        img = img_band.ReadAsArray().astype(
            gdal_array.GDALTypeCodeToNumericTypeCode(img_band.DataType))
        logger.debug('Read image band {i}, "{b}" into memory'.format(i=b, b=band_name))

        for stat in args.stat:
            logger.debug("    calculating {s}".format(s=stat))
            if stat == "mean":
                out = ndimage.mean(img, seg, useg)
            elif stat == "var":
                out = ndimage.variance(img, seg, useg)
            elif stat == "num":
                # Remove from list of stats so it is only calculated once
                args.stat.remove("num")
                count = np.ones_like(seg)
                out = ndimage.sum(count, seg, useg)
            elif stat == "sum":
                out = ndimage.sum(img, seg, useg)
            elif stat == "min":
                out = ndimage.minimum(img, seg, useg)
            elif stat == "max":
                out = ndimage.maximum(img, seg, useg)
            elif stat == "mode":
                out = ndimage.labeled_comprehension(img, seg, useg,
                                                    scipy_mode,
                                                    out_2d.dtype, ndv)
            else:
                logger.error("Unknown stat. Not sure how you got here")
                sys.exit(1)

            # Transform to 2D
            out_2d = out[seg - seg.min()]

            # Fill in NDV
            if ndv is not None:
                out_2d[np.where(img == ndv)] = ndv

            # Write out the data
            out_band = out_ds.GetRasterBand(out_b + 1)
            out_band.SetDescription(band_name)
            if ndv is not None:
                out_band.SetNoDataValue(ndv)
            logger.debug("    Writing object statistic for band {b}".format(b=b + 1))
            out_band.WriteArray(out_2d, 0, 0)
            out_band.FlushCache()
            logger.debug("    Wrote out object statistic for band {b}".format(b=b + 1))
            out_b += 1

    out_ds.SetGeoTransform(img_ds.GetGeoTransform())
    out_ds.SetProjection(img_ds.GetProjection())

    img_ds = None
    seg_ds = None
    out_ds = None
    logger.info("Completed object statistic calculation")
def freqListStat(freqlist):
    freqnumpy = numpy.asarray(freqlist, dtype=numpy.int32)
    print("min=", ndimage.minimum(freqnumpy))
    print("max=", ndimage.maximum(freqnumpy))
    print("mean=", ndimage.mean(freqnumpy))
    print("stdev=", ndimage.standard_deviation(freqnumpy))