Example #1
    def Get_Parameters(self):
        copy_img = self.img
        if len(self.img.shape) == 3:
            copy_img = rgb2gray(self.img)

        edge_laplace = laplace(copy_img, ksize=3)  # compute the Laplacian once and reuse it
        self.Variance = variance(edge_laplace)
        self.MaxVariance = np.amax(edge_laplace)
        self.Noise = estimate_sigma(copy_img)
        self.Scharr = variance(scharr(copy_img))

        print(self.Variance, self.MaxVariance, self.Scharr, self.Noise)
Example #2
    def __init__(self,
                 image,
                 adaptive_filter_name,
                 filter_h,
                 filter_w,
                 sizeMax=0,
                 a=0,
                 b=0):
        """initializes the variables frequency filtering on an input image
        takes as input:
        image: the input image
        filter_name: the name of the mask to use
        cutoff: the cutoff frequency of the filter
        sizeMax: the maximum allowed size of the filter (only for median)
        returns"""
        self.image = image
        self.variance = ndimage.variance(image)

        if adaptive_filter_name == 'reduction':
            self.filter = self.get_reduction_filter

        elif adaptive_filter_name == 'median':
            self.filter = self.get_median_filter
        elif adaptive_filter_name == 'wallis':
            self.filter = self.get_wallis_filter

        self.filter_h = filter_h
        self.filter_w = filter_w
        self.sizeMax = sizeMax
        self.aWallis = a
        self.bWallis = b
Example #3
def variance_feature_oud(img):
    print('[INFO] Computing variance feature.')
    img_array = np.array(img, dtype='float64')
    print(img_array.shape)
    lbl, nlbl = ndimage.label(img_array)
    # pass the computed labels; labels=None together with an index raises in scipy
    var = ndimage.variance(img_array, labels=lbl, index=np.arange(1, nlbl + 1))
    return var
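For reference, a small self-contained sketch of how the labels/index pair behaves (toy array, illustrative values only):

import numpy as np
from scipy import ndimage

a = np.array([[1, 2, 0, 0],
              [3, 4, 0, 5],
              [0, 0, 0, 6]], dtype='float64')
lbl, nlbl = ndimage.label(a)  # two connected components of nonzero pixels
per_region_var = ndimage.variance(a, labels=lbl, index=np.arange(1, nlbl + 1))
print(per_region_var)  # [1.25 0.25] -- one variance per labeled region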
Example #4
def variance_normal_sharpness(image):
    """
    Variance-based method with normalization
    """
    H, W = image.shape
    int_mean = ndimage.mean(image)
    return (ndimage.variance(image) / (H * W * int_mean))
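A minimal usage sketch (hypothetical data; skimage's sample image and a Gaussian blur are used purely for illustration):

import numpy as np
from scipy import ndimage
from skimage import data
from skimage.filters import gaussian

img = data.camera().astype(float)
blurred = gaussian(img, sigma=3)

# The sharp original should score higher than its blurred copy.
print(variance_normal_sharpness(img), variance_normal_sharpness(blurred))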
Example #5
def Rspectra(image):
    imNorm = image - np.mean(np.mean(image))
    [length, width] = imNorm.shape
    Ipq = length * width * (np.absolute(np.fft.fftshift(np.fft.fft2(imNorm)))**
                            2)

    CentralPoint = [Ipq.shape[0] / 2, Ipq.shape[1] / 2]
    r = []
    sigma = ndimage.variance(imNorm)
    for i in range(int(np.around(Ipq.shape[0] / 2 * np.sqrt(2))) + 1):
        r.append([0, 0])
    for i in range(Ipq.shape[0]):
        for j in range(Ipq.shape[1] // 2 + 1):  # floor division keeps range() valid on Python 3
            di = i - CentralPoint[0]
            dj = CentralPoint[1] - j
            dist = int(np.sqrt(di**2 + dj**2))
            r[dist] = [r[dist][0] + 1, r[dist][1] + (Ipq[i, j])]

    radialSpectrum = []
    for i in range(len(r)):
        if r[i][0] != 0:
            radialSpectrum.append(r[i][1] / r[i][0])
        else:
            radialSpectrum.append(0)

    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # ax.plot(range(len(radialSpectrum)), (radialSpectrum))
    # plt.xlabel("r")
    # plt.ylabel("rSpectrum")
    # plt.show()
    return radialSpectrum
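An illustrative call (assumes a square grayscale array and the snippet's own numpy/scipy imports; random data stands in for a real image):

import numpy as np

spectrum = Rspectra(np.random.rand(64, 64))
print(len(spectrum), spectrum[:5])  # averaged power per integer radius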
Example #6
def test_variance05():
    "variance 5"
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.variance(input, labels, 2)
        assert_almost_equal(output, 1.0)
Example #7
def laplace_image(input_folder):
    sub_folders = os.listdir(input_folder)
    variances = []
    maximumes = []

    for folder in sub_folders:
        sub_folder = os.path.join(input_folder, folder)
        if not os.path.isdir(sub_folder):
            continue
        list_file = os.listdir(sub_folder)

        for file in list_file:
            if file.endswith(('.png', '.jpg', '.JPEG')):
                input_file = os.path.join(sub_folder, file)

                #preprocessing
                img = io.imread(input_file)
                img = resize(img, (400, 600))
                img = rgb2gray(img)

                #Edge Detection use Laplace
                edge_laplace = laplace(img, ksize=3)

                #print(f"Variance: {variance(edge_laplace)}")
                variances.append(variance(edge_laplace))

                #print(f'Maximum: {np.amax(edge_laplace)}')
                maximumes.append(np.amax(edge_laplace))
    return variances, maximumes
Example #8
def labelstats_str(factors, values, stat='mvnx'):
    # works also for string labels in ys, but requires 1D
    # from mailing list scipy-user 2009-02-11
    # np.unique1d was removed from NumPy; np.unique is its replacement
    unil, unilinv = np.unique(factors,
                              return_index=False,
                              return_inverse=True)
    res = []
    if 'm' in stat:
        labelmeans = np.array(
            ndimage.mean(values, labels=unilinv, index=np.arange(len(unil))))
        res.append(labelmeans)
    if 'v' in stat:
        labelvars = np.array(
            ndimage.variance(values,
                             labels=unilinv,
                             index=np.arange(len(unil))))
        res.append(labelvars)
    if 'n' in stat:
        labelmin = np.array(
            ndimage.minimum(values, labels=unilinv,
                            index=np.arange(len(unil))))
        res.append(labelmin)
    if 'x' in stat:
        labelmax = np.array(
            ndimage.maximum(values, labels=unilinv,
                            index=np.arange(len(unil))))
        res.append(labelmax)
    return res
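A quick illustrative call with string labels (toy data; result order follows np.unique's sorted labels):

import numpy as np
from scipy import ndimage

factors = np.array(['a', 'a', 'b', 'b', 'b'])
values = np.array([1.0, 3.0, 2.0, 4.0, 6.0])
means, variances = labelstats_str(factors, values, stat='mv')
print(means)      # [2. 4.]
print(variances)  # [1.         2.66666667]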
Example #9
def getVarianceAndMaxi(img_type):
    obtained_laplaces = []
    if (img_type == 'blur'):
        path = '/media/blur'
    elif (img_type == 'sharp'):
        path = '/media/sharp'

    for image_path in os.listdir(path):
        print(f"img_path: {image_path}")
        img = io.imread(path + '/' + image_path)

        # Resizing the image.
        img = resize(img, (400, 600))

        # Convert the image to greyscale
        img = rgb2gray(img)

        # Detecting the Edge of the image
        edge_laplace = laplace(img, ksize=3)

        print(f"Variance: {variance(edge_laplace)}")
        print(f"Maximum : {np.amax(edge_laplace)}")

        # Append the (variance, maximum) pair for this image
        obtained_laplaces.append((variance(edge_laplace),
                                  np.amax(edge_laplace)))
        if (img_type == 'blur'):
            print(f"blur_obtained_laplaces : {obtained_laplaces}")
        elif (img_type == 'sharp'):
            print(f"sharp_laplaces : {obtained_laplaces}")
    return obtained_laplaces
Example #10
def test_variance06():
    "variance 6"
    labels = [2, 2, 3, 3, 4]
    for type in types:
        input = np.array([1, 3, 8, 10, 8], type)
        output = ndimage.variance(input, labels, [2, 3, 4])
        assert_array_almost_equal(output, [1.0, 1.0, 0.0])
Example #11
def test_variance05():
    "variance 5"
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.variance(input, labels, 2)
        assert_almost_equal(output, 1.0)
Example #12
    def run(self, ips, imgs, para=None):
        lab = WindowsManager.get(para['lab']).ips.get_img()
        if lab.dtype != np.uint8 and lab.dtype != np.uint16:
            IPy.alert('Label image must be in type 8-bit or 16-bit')
            return
        index = range(1, lab.max() + 1)
        titles = ['Max', 'Min', 'Mean', 'Variance', 'Standard', 'Sum']
        key = {
            'Max': 'max',
            'Min': 'min',
            'Mean': 'mean',
            'Variance': 'var',
            'Standard': 'std',
            'Sum': 'sum'
        }
        titles = ['value'] + [i for i in titles if para[key[i]]]

        data = [index]
        img = ips.get_img()
        if img is lab: img = img > 0
        if para['max']: data.append(ndimage.maximum(img, lab, index))
        if para['min']: data.append(ndimage.minimum(img, lab, index))
        if para['mean']: data.append(ndimage.mean(img, lab, index).round(4))
        if para['var']: data.append(ndimage.variance(img, lab, index).round(4))
        if para['std']:
            data.append(ndimage.standard_deviation(img, lab, index).round(4))
        if para['sum']: data.append(ndimage.sum(img, lab, index).round(4))
        data = zip(*data)
        IPy.table(ips.title + '-segment', data, titles)
Example #13
def test_variance06():
    labels = [2, 2, 3, 3, 4]
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.variance(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
Example #14
def getVariance(img, colorDim):
  '''Uses ndimage to calculate the variance of an array of pixels. If the image
     is rgb, the colorDim determines which color the variance will be 
     calculated for '''
  if img.ndim == 3:
    img = img[:,:,colorDim]
  return ndimage.variance(img)
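For instance (hypothetical RGB array; colorDim=1 selects the green plane):

import numpy as np
from scipy import ndimage

rgb = np.random.rand(64, 64, 3)
green_var = getVariance(rgb, 1)  # variance of the green channel only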
Example #15
def Rspectra(image):
	imNorm = image-np.mean(np.mean(image))
	[length, width]=imNorm.shape
	Ipq=length*width*(np.absolute(np.fft.fftshift(np.fft.fft2(imNorm)))**2)

	CentralPoint=[Ipq.shape[0]/2,Ipq.shape[1]/2]
	r=[]
	sigma=ndimage.variance(imNorm)
	for i in range(int(np.around(Ipq.shape[0]/2*np.sqrt(2)))+1):
		r.append([0,0])
	for i in range(Ipq.shape[0]):
		for j in range(Ipq.shape[1]//2+1):
			di=i-CentralPoint[0]
			dj=CentralPoint[1]-j
			dist=int(np.sqrt(di**2+dj**2))
			r[dist]=[r[dist][0]+1, r[dist][1]+(Ipq[i,j])]

	radialSpectrum=[]
	for i in range(len(r)):
		if r[i][0]!=0 :
			radialSpectrum.append(r[i][1]/r[i][0])
		else:
			radialSpectrum.append(0)

	# fig = plt.figure()
	# ax = fig.add_subplot(111)
	# ax.plot(range(len(radialSpectrum)), (radialSpectrum))
	# plt.xlabel("r")
	# plt.ylabel("rSpectrum")
	# plt.show()
	return radialSpectrum
Example #16
    def run(self, ips, imgs, para = None):
        inten = ImageManager.get(para['inten'])
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else: 
            msks = ips.imgs
            imgs = inten.imgs
            if len(msks)==1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint16)
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        idct = ['Max','Min','Mean','Variance','Standard','Sum']
        key = {'Max':'max','Min':'min','Mean':'mean',
               'Variance':'var','Standard':'std','Sum':'sum'}
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:] 
        if para['center']: titles.extend(['Center-X','Center-Y'])
        if para['extent']: titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [],{'type':'layers', 'body':{}}
        # data,mark=[],[]
        for i in range(len(imgs)):
            n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n+1)
            dt = []
            if para['slice']:dt.append([i]*n)
            dt.append(range(n))
            
            xy = ndimage.center_of_mass(imgs[i], buf, index)
            xy = np.array(xy).round(2).T
            if para['center']:dt.extend([xy[1]*k, xy[0]*k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [( i[1].start+(i[1].stop-i[1].start)/2, i[0].start+(i[0].stop-i[0].start)/2, i[1].stop-i[1].start,i[0].stop-i[0].start) for i in boxs]
                for j in (0,1,2,3):
                    dt.append([i[j]*k for i in boxs])
            if para['max']:dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:dt.append(ndimage.minimum(imgs[i], buf, index).round(2))        
            if para['mean']:dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:dt.append(ndimage.variance(imgs[i], buf, index).round(2)) 
            if para['std']:dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:dt.append(ndimage.sum(imgs[i], buf, index).round(2))      
 
            layer = {'type':'layer', 'body':[]}
            xy=np.int0(xy).T

            texts = [(i[1],i[0])+('id=%d'%n,) for i,n in zip(xy,range(len(xy)))]
            layer['body'].append({'type':'texts', 'body':texts})
            if para['extent']: layer['body'].append({'type':'rectangles', 'body':boxs})
            mark['body'][i] = layer

            data.extend(list(zip(*dt)))
        IPy.show_table(pd.DataFrame(data, columns=titles), inten.title+'-region statistic')
        inten.mark = GeometryMark(mark)
        inten.update = True
Example #17
def test_variance01():
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.variance(input)
            assert_(np.isnan(output))
Example #18
def get_info_from_image(image):

    img = image  # fall back to the input when it is already grayscale
    if len(image.shape) == 3:  # convert image to gray scale
        img = rgb2gray(image)

    x1 = variance(laplace(img, ksize=3))  # returns variance of the Laplacian
    x2 = estimate_sigma(img)  # returns Noise

    return x1, x2
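A hedged usage sketch, assuming the snippet's imports (skimage.color.rgb2gray, skimage.filters.laplace, skimage.restoration.estimate_sigma, and variance from scipy.ndimage):

from skimage import data

sharpness, noise = get_info_from_image(data.astronaut())
print(sharpness, noise)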
Example #19
def labelstats_str(factors, values):
    # works also for string labels in ys, but requires 1D
    # from mailing list scipy-user 2009-02-11
    unil, unilinv = np.unique(factors, return_index=False, return_inverse=True)
    labelmeans = np.array(ndimage.mean(values, labels=unilinv, index=np.arange(len(unil))))
    labelvars = np.array(ndimage.variance(values, labels=unilinv, index=np.arange(len(unil))))
    labelmin = np.array(ndimage.minimum(values, labels=unilinv, index=np.arange(len(unil))))
    labelmax = np.array(ndimage.maximum(values, labels=unilinv, index=np.arange(len(unil))))
    return labelmeans, labelvars, labelmin, labelmax
Example #20
def randomPatchExtraction(img):
    points = list(range(125 * 125))  # list() so random.shuffle works on Python 3
    random.shuffle(points)
    
    VR = 0.1 #variance ratio 
    '''
    To filter out frequently occurring constant color regions,
    we reject sample patches with variance less than 10%
    of the maximum pixel value. 	
    '''

    # PS is the patch size, a module-level constant in the source
    patchlocs = list(filter(lambda y: y.shape[0]*y.shape[1] == PS*PS,
                     [img[x//125:x//125+PS, x%125:x%125+PS] for x in points[0:400]
                      if ndimage.variance(img[x//125:x//125+PS, x%125:x%125+PS]) > VR*ndimage.variance(img)]))
    
    random.shuffle(patchlocs)
    if len(patchlocs)<=100:
        return patchlocs
    else:
        return patchlocs[0:100] 
Example #21
def getVarAndMean(img, win_row, win_cols):
    '''Returns the mean over a win_row x win_cols window and the global
    standard deviation of the image (high variance indicates focus)'''
    total = 0  # avoid shadowing the built-in sum
    for i in range(win_row):
        for j in range(win_cols):
            total += img[i][j]
    win_mean = total / (win_row * win_cols)
    win_var = math.sqrt(ndimage.variance(img))

    return win_mean, win_var
Example #22
def test_variance06():
    labels = [2, 2, 3, 3, 4]
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.variance(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
    finally:
        np.seterr(**olderr)
Example #23
def test_variance01():
    "variance 1"
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([], type)
            output = ndimage.variance(input)
            assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #24
def test_variance01():
    "variance 1"
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([], type)
            output = ndimage.variance(input)
            assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #25
def getVarAndMean(img,win_row,win_cols):
  sum=0
  for i in range(win_row) :
    for j in range(win_cols) :
        sum+=img[i][j]
  #win_mean=sum/(win_row*win_cols)
  win_mean=sum/3800
  win_var=math.sqrt(ndimage.variance(img))

  return win_mean,win_var
Example #26
def test_variance06():
    labels = [2, 2, 3, 3, 4]
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.variance(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
    finally:
        np.seterr(**olderr)
Example #27
def laplace_folder(input_folder, sub_folder_b=False):
    sub_folders = []
    if sub_folder_b:
        sub_folders = os.listdir(input_folder)
    else:
        sub_folders.append(input_folder)
    variances = []
    maximumes = []

    variance_sobel = []
    maximumes_sobel = []

    for sub_folder in sub_folders:
        if sub_folder_b:
            folder = os.path.join(input_folder, sub_folder)
        else:
            folder = sub_folder
        if not os.path.isdir(folder):
            continue
        list_file = os.listdir(folder)

        for file in list_file:
            if file.endswith(('.png', '.jpg', '.JPEG')):
                input_file = os.path.join(folder, file)

                #preprocessing
                image = io.imread(input_file)
                img = resize(image, (112, 112))
                img = rgb2gray(img)

                #edge detection use laplace
                edge_laplace = laplace(img, ksize=3)

                #edge detection with Sobel filter
                edge_sobel = sobel(img)

                variances.append(variance(edge_laplace))
                maximumes.append(np.amax(edge_laplace))

                # test_blurry = '/home/duong/Documents/researching/GAN/common/image_enhance/image_cmt/test_blurry'
                # blurry_folder = os.path.join(test_blurry,file)

                # test_good = '/home/duong/Documents/researching/GAN/common/image_enhance/image_cmt/test_good'
                # good_folder = os.path.join(test_good,file)

                # if variance(edge_laplace) < 0.0015 and np.amax(edge_laplace) < 0.3:
                #     io.imsave(blurry_folder,image)
                # else:
                #     io.imsave(good_folder,image)

                #variance_sobel.append(variance(edge_sobel))
                #maximumes_sobel.append(np.amax(edge_sobel))
    #return variances, maximumes, variance_sobel, maximumes_sobel
    return variances, maximumes
Example #28
def test_variance01():
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.variance(input)
            assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #29
def test_variance01():
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.variance(input)
            assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #30
 def calculate_laplacian(self):
     for image_name, image_path in utils.folder_reader(self.frames_raw):
         image_read = io.imread(os.path.join(image_path, image_name))
         gray = rgb2gray(image_read)
         gauss = gaussian(gray)
         fm = laplace(gauss, ksize=3)
         # Output
         self.fm_list.append(fm)
         self.variance_list.append(variance(fm) * 1000)
         self.max_list.append(np.amax(fm))
         os.makedirs(self.frames_blur, exist_ok=True)
Example #31
def test_variance01():
    olderr = np.seterr(all='ignore')
    try:
        with warnings.catch_warnings():
            # Numpy 1.9 gives warnings for mean([])
            warnings.filterwarnings('ignore', message="Mean of empty slice.")
            for type in types:
                input = np.array([], type)
                output = ndimage.variance(input)
                assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #32
def test_variance01():
    olderr = np.seterr(all='ignore')
    try:
        with warnings.catch_warnings():
            # Numpy 1.9 gives warnings for mean([])
            warnings.filterwarnings('ignore', message="Mean of empty slice.")
            for type in types:
                input = np.array([], type)
                output = ndimage.variance(input)
                assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #33
    def run(self, ips, imgs, para = None):
        inten = WindowsManager.get(para['inten']).ips
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else: 
            msks = ips.imgs
            if len(msks)==1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint16)
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        idct = ['Max','Min','Mean','Variance','Standard','Sum']
        key = {'Max':'max','Min':'min','Mean':'mean',
               'Variance':'var','Standard':'std','Sum':'sum'}
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:] 
        if para['center']: titles.extend(['Center-X','Center-Y'])
        if para['extent']: titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [], []
        for i in range(len(imgs)):
            n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n+1)
            dt = []
            if para['slice']:dt.append([i]*n)
            dt.append(range(n))
            
            xy = ndimage.center_of_mass(imgs[i], buf, index)
            xy = np.array(xy).round(2).T
            if para['center']:dt.extend([xy[1]*k, xy[0]*k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [(i[0].start, i[1].start, i[0].stop, i[1].stop) for i in boxs]
                for j in (0,1,2,3):
                    dt.append([i[j]*k for i in boxs])
            if para['max']:dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:dt.append(ndimage.minimum(imgs[i], buf, index).round(2))        
            if para['mean']:dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:dt.append(ndimage.variance(imgs[i], buf, index).round(2)) 
            if para['std']:dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:dt.append(ndimage.sum(imgs[i], buf, index).round(2))      

            mark.append([(center, cov) for center,cov in zip(xy.T, boxs)]) 
            data.extend(list(zip(*dt)))

        IPy.table(inten.title+'-region statistic', data, titles)
        inten.mark = Mark(mark)
        inten.update = True
Example #34
def procSEG(rawdata, a_tp, d_args):
    i_res = d_args['s']
    i_slice = d_args['t']
    i_mode = d_args['m']
    shape = (i_slice, i_res * i_res)
    ndata = npy.zeros(i_res * i_res * i_slice, dtype=npy.float64).reshape(shape)  # npy.float was removed from NumPy
    # find cell boundary use stdev and average projections for K-means thresholding on normalized image stacks
    nave = clt.vq.whiten(npy.average(rawdata, axis=0))
    tabck, tacel = npy.sort(clt.vq.kmeans(nave, 2)[0])
    th1 = 1.75 * tabck + tacel * 0.25
    th2 = 1.25 * tabck + tacel * 0.75
    # Whole cell Thresholding
    obck = npy.where(nave < th1, 1, 0)
    ocel = npy.where(nave < th1, 0, 1)
    ncel = len(ocel)
    # At this point it would be possible to segment the images further with scipy's watershed and labeling
    # functions to generate separate regions of interest for each cell within the image. However this would have a
    # knock-on effect on the distribution of pixels in MPI. Also watershedding might need manual tuning to prevent
    # shattering cells
    # Calculate average focal pixel intensities and variance
    zabck = npy.zeros(i_slice, dtype=npy.float64)
    s1bck = npy.zeros(i_slice, dtype=npy.float64)
    for i in range(i_slice):
        s1bck[i] = ndi.variance(rawdata[i], obck)
        ndata[i] = rawdata[i] - ndi.mean(rawdata[i], obck)
    #
    # Cell
    #
    zacel = npy.zeros(i_slice, dtype=npy.float64)
    s1cel = npy.zeros(i_slice, dtype=npy.float64)
    for i in range(i_slice):
        zacel[i] = ndi.mean(ndata[i], ocel)
        s1cel[i] = npy.sqrt(ndi.variance(ndata[i], ocel) + s1bck[i])
    # initialize signal, error and time points
    # Fit cell average to get ball park figures
    initfit, tp, cf1, sig, se, fpar = procFitIt(zacel, s1cel, a_tp, i_mode,
                                                d_args, None)
    return [ocel.reshape((i_res, i_res)), initfit, tp, cf1, sig, se, fpar]
Example #35
    def variance(self, objectPath):

        theObject = self.app.FindDisplayObject(objectPath).QueryInterface(
            ESVision.IImage)
        #var = self.prosys.Variance(theObject.Data).Real

        arr = np.array(theObject.Data.array)

        noiseReduced = ndimage.uniform_filter(arr, 5)

        var = ndimage.variance(noiseReduced)
        logging.info(var)

        return float(var)
Example #36
 def cal(self, stat='mean'):
     if stat == 'mean':
         zonalstats = ndimage.mean(self.data, labels=self.lb, index=self.labSet)
     if stat == 'minimum':
         zonalstats = ndimage.minimum(self.data, labels=self.lb, index=self.labSet)
     if stat == 'maximum':
         zonalstats = ndimage.maximum(self.data, labels=self.lb, index=self.labSet)
     if stat == 'sum':
         zonalstats = ndimage.sum(self.data, labels=self.lb, index=self.labSet)
     if stat == 'std':
         zonalstats = ndimage.standard_deviation(self.data, labels=self.lb, index=self.labSet)
     if stat == 'variance':
         zonalstats = ndimage.variance(self.data, labels=self.lb, index=self.labSet)
     return zonalstats
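The if-chain above can also be written as a dict dispatch; an equivalent sketch (same statistics, same signature):

 def cal(self, stat='mean'):
     funcs = {'mean': ndimage.mean, 'minimum': ndimage.minimum,
              'maximum': ndimage.maximum, 'sum': ndimage.sum,
              'std': ndimage.standard_deviation, 'variance': ndimage.variance}
     # look up the requested statistic and apply it to the labeled data
     return funcs[stat](self.data, labels=self.lb, index=self.labSet)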
Example #37
def procSEG(rawdata,a_tp,d_args):
    i_res=d_args['s']
    i_slice=d_args['t']
    i_mode=d_args['m']
    shape=(i_slice,i_res*i_res)
    ndata=npy.zeros(i_res*i_res*i_slice,dtype=npy.float64).reshape(shape)
    # find cell boundary use stdev and average projections for K-means thresholding on normalized image stacks
    nave=clt.vq.whiten(npy.average(rawdata,axis=0))
    tabck,tacel=npy.sort(clt.vq.kmeans(nave,2)[0])
    th1=1.75*tabck+tacel*0.25
    th2=1.25*tabck+tacel*0.75
    # Whole cell Thresholding 
    obck=npy.where(nave<th1,1,0)
    ocel=npy.where(nave<th1,0,1)
    ncel=len(ocel)
    # At this point it would be possible to segment the images further with scipy's watershed and labeling
    # functions to generate separate regions of interest for each cell within the image. However this would have a
    # knock-on effect on the distribution of pixels in MPI. Also watershedding might need manual tuning to prevent
    # shattering cells
    # Calculate average focal pixel intensities and variance
    zabck=npy.zeros(i_slice,dtype=npy.float64)
    s1bck=npy.zeros(i_slice,dtype=npy.float64)
    for i in range(i_slice):
        s1bck[i]=ndi.variance(rawdata[i],obck)
        ndata[i]=rawdata[i]-ndi.mean(rawdata[i],obck)
    #
    # Cell 
    #
    zacel=npy.zeros(i_slice,dtype=npy.float64)
    s1cel=npy.zeros(i_slice,dtype=npy.float64)
    for i in range(i_slice):
        zacel[i]=ndi.mean(ndata[i],ocel)
        s1cel[i]=npy.sqrt(ndi.variance(ndata[i],ocel)+s1bck[i])
    # initialize signal, error and time points
    # Fit cell average to get ball park figures
    initfit,tp,cf1,sig,se,fpar=procFitIt(zacel,s1cel,a_tp,i_mode,d_args,None)
    return [ocel.reshape((i_res,i_res)),initfit,tp,cf1,sig,se,fpar]
Example #38
def rolling_window_var(imarr, kernel):
    '''
    imarr: numpy array with shape (n, m) 
        one frame of video with n rows and m cols
    kernel: integer n
        kernel size, assumes kernel rows = cols

    var: numpy array with shape (n, m)
        local variance at each pixel within kernel neighborhood (mean or constant padding)
    '''
    imrow, imcol = imarr.shape
    imarr = np.pad(imarr, kernel // 2, pad_with, padder=np.mean(imarr))
    patches = image.extract_patches_2d(imarr, (kernel, kernel))
    var = np.array([ndimage.variance(patch) for patch in patches]).reshape(
        (imrow, imcol))
    return var
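The snippet relies on a pad_with callback (the function-mode example from the numpy.pad documentation) and on sklearn.feature_extraction.image for extract_patches_2d. For reference, a minimal pad_with matching that convention:

def pad_with(vector, pad_width, iaxis, kwargs):
    # numpy.pad function-mode callback: fill the border of each axis slice
    # in place with a constant taken from kwargs
    pad_value = kwargs.get('padder', 0)
    vector[:pad_width[0]] = pad_value
    if pad_width[1] > 0:
        vector[-pad_width[1]:] = pad_value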
Example #39
 def get_adaptive_reduction_filter(self, image):
     filter_h = 3
     img_var = int(ndimage.variance(image))
     filter_w = 3
     pad_h = int((filter_h - 1) / 2)
     pad_w = int((filter_w - 1) / 2)
     pad_img = np.pad(image, ((pad_h, pad_h), (pad_w, pad_w)),
                      'constant',
                      constant_values=0)
     height, width = image.shape
     filtered_image = np.zeros((height, width))
     for u in range(height):
         for v in range(width):
             temp_image = pad_img[u:(u + 3), v:(v + 3)]
             filtered_image[u][v] = self.reduction_filter(
                 temp_image, pad_img[u][v], filter_h, filter_w, img_var)
     return filtered_image
Example #40
 def reduction_filter(self, image_slice, value, filter_h, filter_w,
                      variance):
     v = variance
     if v == 0:
         return value
     else:
         local_var = int(ndimage.variance(image_slice))
         m = image_slice.mean()
         if variance == local_var:
             self.local_count = self.local_count + 1
             return np.uint8(m)
         elif int(local_var) == 0:
             self.local_zero = self.local_zero + 1
             return value
         else:
             self.local_reduced_intensity = self.local_reduced_intensity + 1
             return (value - (v / local_var) * (value - m))
Example #41
    def adaptive_reduction(self, filter_type):
        self.filter_type = filter_type
        self.get_filter_size() #this will get height and width of filter

        filtered_image = np.zeros(shape=(self.height, self.width))
        pad_h = int(1 / 2 * (self.filter_h - 1))
        pad_w = int(1 / 2 * (self.filter_w - 1))
        image_pad = np.pad(self.PIL_image, ((pad_h, pad_h), (pad_w, pad_w)), 'constant', constant_values=0)
        variance_cal = np.zeros(shape=(self.height, self.width)) #create a 2-D array to calculate noise variance
        #compute noise variance
        for h in range(self.height):
            for w in range(self.width):
                vert_start = h
                vert_end = h + self.filter_h
                horiz_start = w
                horiz_end = w + self.filter_w
                image_slice = image_pad[vert_start:vert_end, horiz_start:horiz_end]
                variance_cal[h,w] = ndimage.variance(image_slice)

        noise_variance = np.mean(variance_cal)
        for h in range(self.height):
            for w in range(self.width):  # check each pixel
                vert_start = h
                vert_end = h + self.filter_h
                horiz_start = w
                horiz_end = w + self.filter_w
                image_slice = image_pad[vert_start:vert_end, horiz_start:horiz_end]

                if (noise_variance == 0):
                    filtered_image[h, w] = self.array_image[h, w]
                else:
                    local_variance = variance_cal[h,w]  # the local variance of image slice
                    mean = np.mean(image_slice)   # the mean or average

                    if (noise_variance == local_variance):
                        filtered_image[h,w] =  np.uint8(mean)
                    elif (int(local_variance) == 0):
                        filtered_image[h, w] = self.array_image[h, w]
                    else:
                        filtered_image[h, w] = np.uint8(self.array_image[h, w] - (noise_variance) / (local_variance) * (self.array_image[h, w] - mean))

        self.filtered_image_result = self.full_contrast_stretch(filtered_image)
        self.save_image()
        self.display_image()
Example #42
def ndstats(data,sigma=0,bias=0,xbin=None,errs=None,bin=100,frac_min=0.75,step=None,loud=1):
    '''regroups values by "bin" bins using ndimage library
    also the x-bins and errors if provided
    other parameters as in extra.stats
    '''
    from scipy import ndimage
    global avgs,xpos
    if sigma is not None: sele=mask_data(data,bias,sigma)
    else: sele=ones(len(data),dtype=bool)
    global labs,cnts
    if step is not None and xbin is not None:  # '!=' on an array is ambiguous
        if step<0:
            from numpy import median
            step=bin*median(xbin[1:]-xbin[:-1])
        labs=((xbin-xbin[0])/step).astype(int32)
        if sum(labs<0)>0:
            print('x-axis not rising')
            return [],[],[]
        bin=int(len(xbin)/labs[-1])
        if loud: print('using step %.3f, grouping in average by %i bins'%(step,bin))
    else: labs=(arange(len(data))/bin).astype(int32)
    if sigma is not None: labs[sele==False]=-1
    cnts=zeros((max(labs)+1,))
    if loud>1: print("labels [%i-%i], length %i"%(min(labs),max(labs),len(cnts)))
    for l in labs[sele]:
        cnts[l]+=1
    idx=arange(len(cnts))[cnts>=bin*frac_min]
    if len(idx)<1:
        print("all bins empty")
        return None
    else:
        if loud>0: print("%i bin(s) empty"%(len(cnts)-len(idx)))
    #print 'max. index %i'%(labs[-1])
    avgs=ndimage.mean(data,labs,idx)
    if errs is not None:
        errs=sqrt(array(ndimage.mean(errs**2,labs,idx))/bin)
    else: errs=sqrt(array(ndimage.variance(data,labs,idx)))
    if xbin is None: xbin=arange(len(data))
    xpos=ndimage.mean(xbin,labs,idx)
    #print 'check: data %i -> avgs %i labs %i idx %i'%(len(data),len(avgs),len(labs),len(xpos))
    return array(avgs),errs,array(xpos)
Example #43
def test_stat_funcs_2d():
    a = np.array([[5,6,0,0,0], [8,9,0,0,0], [0,0,0,3,5]])
    lbl = np.array([[1,1,0,0,0], [1,1,0,0,0], [0,0,0,2,2]])

    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
    assert_array_equal(mean, [7.0, 4.0])

    var = ndimage.variance(a, labels=lbl, index=[1, 2])
    assert_array_equal(var, [2.5, 1.0])

    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))

    med = ndimage.median(a, labels=lbl, index=[1, 2])
    assert_array_equal(med, [7.0, 4.0])

    min = ndimage.minimum(a, labels=lbl, index=[1, 2])
    assert_array_equal(min, [5, 3])

    max = ndimage.maximum(a, labels=lbl, index=[1, 2])
    assert_array_equal(max, [9, 5])
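Note that ndimage.variance computes the population variance (ddof=0), matching np.var's default; a one-line check:

import numpy as np
from scipy import ndimage

a = np.array([5.0, 6.0, 8.0, 9.0])
assert ndimage.variance(a) == np.var(a)  # both 2.5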
Example #44
        def anz(self):
                self.result = {'id': list(self.labSet),
                               'mean': [round(x, 4) for x in list(ndimage.mean(self.data, labels=self.lb, index=self.labSet))],
                               'min': list(ndimage.minimum(self.data, labels=self.lb, index=self.labSet)),
                               'max': list(ndimage.maximum(self.data, labels=self.lb, index=self.labSet)),
                               'std': list(ndimage.standard_deviation(self.data, labels=self.lb, index=self.labSet))
                               }
                #print self.result['id']
                #print self.result['min']
                #print len(self.result['min'])
                self.df = pd.DataFrame(self.result)
                self.df = self.df[self.df['id'] > 0]
                self.df = self.df.set_index(self.df['id'])

                # save each zonal output ...TODO
                # self.outname = self._inDs[:-4]+'.csv'
                # f = open(self.outname, 'w')
                # self.df.to_csv( f, index=False )
                # f.close()
                print(self.df.iloc[0:5, ])

                return self.df
Example #45
def TetaSpectra(image):
	imNorm = image-np.mean(np.mean(image))
	[length, width]=imNorm.shape
	Ipq=length*width*(np.absolute(np.fft.fftshift(np.fft.fft2(imNorm)))**2)

	CentralPoint=[Ipq.shape[0]/2,Ipq.shape[1]/2]
	a=[]
	sigma=ndimage.variance(imNorm)
	for i in range(0,180,1):
		a.append([0,0])

	for i in range(Ipq.shape[0]):
		for j in range(Ipq.shape[1]//2+1):
			di=i-CentralPoint[0]
			dj=CentralPoint[1]-j
			if dj==0 :
				teta=0
			else :
				tmp=2*np.arctan(dj/(di+np.sqrt(di**2+dj**2)))
				teta=int(np.degrees(tmp))
			a[teta]=[a[teta][0]+1, a[teta][1]+Ipq[i,j]]

	angularSpectrum=[]
	for i in range(len(a)):
		if a[i][0]!=0 :
			angularSpectrum.append(a[i][1]/a[i][0])
		else:
			angularSpectrum.append(0)

	# fig = plt.figure()
	# ax = fig.add_subplot(111)
	# ax.plot(range(len(angularSpectrum)), (angularSpectrum))
	# plt.xlabel("Teta")
	# plt.ylabel("Angular Spectrum")
	# plt.show()

	return np.array(angularSpectrum)/sigma**2  # lists do not broadcast; convert first
Example #46
def toPolarCoordinates(Ipq, imNorm):
	CentralPoint=[Ipq.shape[0]/2,Ipq.shape[1]/2]
	r=[]
	a=[]
	sigma=ndimage.variance(imNorm)
	for i in range(int(np.around(Ipq.shape[0]/2*np.sqrt(2)))+1):
		r.append([0,0])
	for i in range(0,180,1):
		a.append([0,0])

	for i in range(Ipq.shape[0]):
		for j in range(Ipq.shape[1]//2+1):
			di=i-CentralPoint[0]
			dj=CentralPoint[1]-j
			
			dist=int(np.sqrt(di**2+dj**2))
			r[dist]=[r[dist][0]+1, r[dist][1]+Ipq[i,j]]
			if dj==0 :
				teta=0
			else :
				tmp=2*np.arctan(dj/(di+np.sqrt(di**2+dj**2)))
				teta=int(np.degrees(tmp))
			a[teta]=[a[teta][0]+1, a[teta][1]+Ipq[i,j]]

	radialSpectrum=[]
	angularSpectrum=[]
	for i in range(len(r)):
		if r[i][0]!=0 :
			radialSpectrum.append(r[i][1]/r[i][0])
		else:
			radialSpectrum.append(0)
	for i in range(len(a)):
		if a[i][0]!=0 :
			angularSpectrum.append(a[i][1]/a[i][0])
		else:
			angularSpectrum.append(0)
	return np.array(radialSpectrum)/sigma**2, np.array(angularSpectrum)/sigma**2
Example #47
def extractPatch1(img):
    # PS and VR are module-level constants in the source (patch size, variance ratio)
    return list(filter(lambda y: y.shape[0]*y.shape[1] == PS*PS,
                       [standardizePatch(img[x//64:x//64+PS, x%64:x%64+PS]) for x in range(64*64)
                        if ndimage.variance(img[x//64:x//64+PS, x%64:x%64+PS]) > VR*ndimage.variance(img)]))
Example #48
def objstats(args):
    # Open and read from image and segmentation
    try:
        img_ds = gdal.Open(args.image, gdal.GA_ReadOnly)
    except:
        logger.error("Could not open image: {}".format(i=args.image))
        sys.exit(1)

    try:
        seg_ds = ogr.Open(args.segment, 0)
        seg_layer = seg_ds.GetLayer()
    except:
        logger.error("Could not open segmentation vector file: {}".format(args.segment))
        sys.exit(1)

    cols, rows = img_ds.RasterXSize, img_ds.RasterYSize
    bands = range(1, img_ds.RasterCount + 1)
    if args.bands is not None:
        bands = args.bands

    # Rasterize segments
    logger.debug("About to rasterize segment vector file")
    img_srs = osr.SpatialReference()
    img_srs.ImportFromWkt(img_ds.GetProjectionRef())

    mem_raster = gdal.GetDriverByName("MEM").Create("", cols, rows, 1, gdal.GDT_UInt32)
    mem_raster.SetProjection(img_ds.GetProjection())
    mem_raster.SetGeoTransform(img_ds.GetGeoTransform())

    # Create artificial 'FID' field
    fid_layer = seg_ds.ExecuteSQL('select FID, * from "{l}"'.format(l=seg_layer.GetName()))
    gdal.RasterizeLayer(mem_raster, [1], fid_layer, options=["ATTRIBUTE=FID"])
    logger.debug("Rasterized segment vector file")

    seg = mem_raster.GetRasterBand(1).ReadAsArray()
    logger.debug("Read segmentation image into memory")
    mem_raster = None
    seg_ds = None

    # Get list of unique segments
    useg = np.unique(seg)

    # If calc is num, do only for 1 band
    out_bands = 0
    for stat in args.stat:
        if stat == "num":
            out_bands += 1
        else:
            out_bands += len(bands)

    # Create output driver
    driver = gdal.GetDriverByName(args.format)
    out_ds = driver.Create(args.output, cols, rows, out_bands, gdal.GDT_Float32)

    # Loop through image bands
    out_b = 0
    out_2d = np.empty_like(seg, dtype=np.float32)
    for i_b, b in enumerate(bands):
        img_band = img_ds.GetRasterBand(b)
        ndv = img_band.GetNoDataValue()
        band_name = img_band.GetDescription()
        if not band_name:
            band_name = "Band {i}".format(i=b)
        logger.info('Processing input band {i}, "{b}"'.format(i=b, b=band_name))

        img = img_band.ReadAsArray().astype(gdal_array.GDALTypeCodeToNumericTypeCode(img_band.DataType))
        logger.debug('Read image band {i}, "{b}" into memory'.format(i=b, b=band_name))

        for stat in list(args.stat):  # iterate over a copy; 'num' may be removed below
            logger.debug("    calculating {s}".format(s=stat))
            if stat == "mean":
                out = ndimage.mean(img, seg, useg)
            elif stat == "var":
                out = ndimage.variance(img, seg, useg)
            elif stat == "num":
                # Remove from list of stats so it is only calculated once
                args.stat.remove("num")
                count = np.ones_like(seg)
                out = ndimage.sum(count, seg, useg)
            elif stat == "sum":
                out = ndimage.sum(img, seg, useg)
            elif stat == "min":
                out = ndimage.minimum(img, seg, useg)
            elif stat == "max":
                out = ndimage.maximum(img, seg, useg)
            elif stat == "mode":
                out = ndimage.labeled_comprehension(img, seg, useg, scipy_mode, out_2d.dtype, ndv)
            else:
                logger.error("Unknown stat. Not sure how you got here")
                sys.exit(1)

            # Transform to 2D
            out_2d = out[seg - seg.min()]

            # Fill in NDV
            if ndv is not None:
                out_2d[np.where(img == ndv)] = ndv

            # Write out the data
            out_band = out_ds.GetRasterBand(out_b + 1)
            out_band.SetDescription(band_name)
            if ndv is not None:
                out_band.SetNoDataValue(ndv)
            logger.debug("    Writing object statistic for band {b}".format(b=b + 1))
            out_band.WriteArray(out_2d, 0, 0)
            out_band.FlushCache()
            logger.debug("    Wrote out object statistic for band {b}".format(b=b + 1))
            out_b += 1

    out_ds.SetGeoTransform(img_ds.GetGeoTransform())
    out_ds.SetProjection(img_ds.GetProjection())

    img_ds = None
    seg_ds = None
    out_ds = None
    logger.info("Completed object statistic calculation")
Example #49
def test_variance04():
    input = np.array([1, 0], bool)
    output = ndimage.variance(input)
    assert_almost_equal(output, 0.25)
Example #50
def test_variance03():
    for type in types:
        input = np.array([1, 3], type)
        output = ndimage.variance(input)
        assert_almost_equal(output, 1.0)
Example #51
def standardizePatch(im):
    s1 = [(x - ndimage.mean(im)) / (ndimage.variance(im) + 0.01) for x in im]
    return np.reshape(np.asarray([item for x in s1 for item in x]), (im.shape[0], im.shape[1]))
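The row loop above is equivalent to one vectorized expression; a sketch that preserves the original's choice of dividing by the variance (not the standard deviation):

import numpy as np
from scipy import ndimage

def standardize_patch_vec(im):
    # subtract the patch mean and scale by (variance + 0.01), elementwise
    return (im - ndimage.mean(im)) / (ndimage.variance(im) + 0.01)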
Example #52
def test_variance02():
    "variance 2"
    for type in types:
        input = np.array([1], type)
        output = ndimage.variance(input)
        assert_almost_equal(output, 0.0)
Example #53
def test_variance01():
    "variance 1"
    for type in types:
        input = np.array([], type)
        output = ndimage.variance(input)
        assert_(np.isnan(output))
Example #54
def calc_region_attributes(org_img, label_img, dsm=None, dtm=None):
    """
    CALCULATES OBJECT ATTRIBUTES FOR EACH REGION AND RETURNS A RAT

    org_img = original RGB image
    label_img = relabeled image (output from the replace function)
    dsm = DSM of area (optional)
    dtm = DTM of area (optional)

    output = raster attribute table as numpy array
    """
    # Define index numbers
    index = np.unique(label_img)

    # Calculate GRVI and NDVI
    if args.att == 'rgb':
        print("Calculating Greennes..")
        grvi = 200*(org_img[:,:,1]*100.0) / (org_img[:,:,1]*100.0 + org_img[:,:,0]*100.0 + org_img[:,:,2]*100.0)
        mean_grvi = ndimage.mean(grvi, labels = label_img , index = index)
        
    if args.att == 'rgbnir':
        print("Calculating NDVI and Greennes..")
        grvi = 200*(org_img[:,:,1]*100.0) / (org_img[:,:,1]*100.0 + org_img[:,:,0]*100.0 + org_img[:,:,2]*100.0)
        mean_grvi = ndimage.mean(grvi, labels = label_img , index = index)
    
        ndvi = 100*(org_img[:,:,3]*100.0 - org_img[:,:,0]*100.0) / (org_img[:,:,3]*100.0 + org_img[:,:,0]*100.0) + 100
        mean_ndvi = ndimage.mean(ndvi, labels = label_img , index = index)

    # Calculate mean for all bands and heights and append to array
    if args.att == 'rgb':    
        w, h = 3, len(index)
    if args.att == 'rgbnir':
        w, h = 4, len(index)
    meanbands = np.zeros((h, w))
    varbands = np.zeros((h, w))
        
    if args.att == 'rgb':
        print("Averaging spectral RGB bands:")
        for i in tqdm.tqdm(range(0, 3)):  # xrange is Python 2 only
            meanbands[:,i] = ndimage.mean(org_img[:,:,i].astype(float), labels = label_img, index = index)
            varbands[:,i] = ndimage.variance(org_img[:,:,i].astype(float), labels = label_img, index = index)
    else:
        print("Averaging spectral RGBNIR bands:")
        for i in tqdm.tqdm(range(0, 4)):
            meanbands[:,i] = ndimage.mean(org_img[:,:,i].astype(float), labels = label_img, index = index)
            varbands[:,i] = ndimage.variance(org_img[:,:,i].astype(float), labels = label_img, index = index)
            
    print("Calculating ratios:")
    #r/g
    redgreen = meanbands[:,0] / meanbands[:,1]
    #g/r
    greenred = meanbands[:,1] / meanbands[:,0]
    #Blue/Nir
    bluenir = meanbands[:,2] / meanbands[:,3]
    #Green/Nir
    greennir = meanbands[:,1] / meanbands[:,3]

    ratios = np.vstack((redgreen, greenred, bluenir, greennir)).T
            
    print("Calculating roughness..")
    if os.path.exists(args.output+'/'+imagenamenoext[0]+'_texture.tif') == True:
        texture = tiff.imread(args.output+'/'+imagenamenoext[0]+'_texture.tif')
    else:
        call('gdaldem roughness '+args.image+' '+args.output+'/'+imagenamenoext[0]+'_texture.tif -of GTiff -b 1')
        texture = tiff.imread(args.output+'/'+imagenamenoext[0]+'_texture.tif')
    blurred = gaussian_filter(texture, sigma=7) # apply gaussian blur for smoothing
    meantexture = np.zeros((len(index), 1))
    meantexture[:,0] = ndimage.mean(blurred.astype(float), labels = label_img, index = index)
    
    print("Calculating vissible brightness..")
    vissiblebright = (org_img[:,:,0] + org_img[:,:,1] + org_img[:,:,2]) / 3
    meanbrightness = np.zeros((len(index), 1))
    meanbrightness[:,0] = ndimage.mean(vissiblebright.astype(float), labels = label_img, index = index)
    
    print("Calculating region props:")
    # Define the region props and calculate them
    regionprops_per = []
    regionprops_ar = []
    regionprops_ex = []
    regionprops_ecc = []
    
    for region in tqdm.tqdm(regionprops(label_img)):
        regionprops_per.append(region.perimeter)
        regionprops_ar.append(region.area)
        regionprops_ex.append(region.extent)
        regionprops_ecc.append(region.eccentricity)

    # Convert regionprops results to numpy array
    regionprops_calc = np.vstack((regionprops_per, regionprops_ar, regionprops_ex, regionprops_ecc)).T

    # Calculate coordinate attributes of regions
    regionprops_cen = []
    regionprops_coor = []
    
    print("Calculating region coordinates:")
    for region in tqdm.tqdm(regionprops(label_img)):
        regionprops_cen.append(region.centroid)
        regionprops_coor.append(region.coords)
        
    # Calculate distance between points
    xcoords = []
    ycoords = []
    print("Calculating distance between points:")
    for i in tqdm.tqdm(range(len(regionprops_cen))):
        xcoords.append(regionprops_cen[i][0])
        ycoords.append(regionprops_cen[i][1])
        
    xcoords = np.asarray(xcoords)
    ycoords = np.asarray(ycoords)
    
    # Collect all data in 1 np array
    if dsm is not None:  # '!=' on an array is ambiguous
        # Calculate crop surface model
        print("Calculating crop surface model..")
        csm = dsm.astype(float) - dtm.astype(float)     
        heights = np.dstack((dsm, dtm, csm))
    
        # Resample heights
        print("Resampling heights..")
        heights_res = ndimage.zoom(heights, (2, 2, 1))
        meanheight = np.zeros((len(index), 3))
        for i in range(3):  # one mean per height band (dsm, dtm, csm)
            meanheight[:,i] = ndimage.mean(heights_res[:,:,i].astype(float), labels = label_img, index = index)
        
        data = np.concatenate((meanbands, varbands, ratios, meanheight, regionprops_calc, meantexture, meanbrightness), axis=1)
        if np.shape(org_img)[2] == 3:
            data2 = np.column_stack(((index.astype(int), regionslist, mean_grvi, data)))
        else:
            data2 = np.column_stack(((index.astype(int), regionslist, mean_ndvi, mean_grvi, data)))
        print("Data collected, attributes calculated..all done!")

    else:
        data = np.concatenate((meanbands, varbands, ratios, regionprops_calc, meantexture, meanbrightness), axis=1)
        if args.att == 'rgb':
            print("grvi, data")
            data2 = np.column_stack(((index.astype(int), regionslist, mean_grvi, data)))
        else:
            print("ndvi, grvi, data")
            data2 = np.column_stack(((index.astype(int), regionslist, mean_ndvi, mean_grvi, data)))
        print("Data collected, attributes calculated..all done!")
        
    return(data2)