Example #1
def diff_data(inarray):
    """ Function calculates the Differential Uniformity in a
        horizontal and vertical range of five pixels with maximum contrast.

        Returns contrast percentages and upper-left pixel of the locations
        with highest contrast.
    """
    outputx = []
    outputy = []

    # Convenient way to create groups of five pixels in the vertical and
    # horizontal directions: shift the array five times along each axis and
    # concatenate the flattened shifted arrays. To correctly identify the
    # first pixel of the group of five with the highest nonuniformity, the
    # range runs from -4..0.
    for i in range(-4, 1):
        tmpx = np.roll(inarray, i, 0)
        tmpy = np.roll(inarray, i, 1)

        outputx.append(tmpx.ravel())
        outputy.append(tmpy.ravel())

    difflistx = []
    difflisty = []

    # calculate the uniformity inside all possible groups of five consecutive
    # pixels in the vertical direction:
    for vector in np.ma.array(outputx).T:
        # ignore groups of five pixels (partly) outside the mask
        if np.ma.count_masked(vector) == 0:
            difflistx.append(100 * unifcalc(vector))
            #difflistx.append(100*(ma.max(vector) - ma.min(vector))/(ma.max(vector) + ma.min(vector)))
        else:
            difflistx.append(0.0)

    img = np.reshape(np.array(difflistx), np.shape(inarray))
    vertmax = ndimage.maximum_position(img)

    # calculate the uniformity inside all possible groups of five consecutive
    # pixels in the horizontal direction:
    for vector in np.ma.array(outputy).T:
        # ignore groups of five pixels (partly) outside the mask
        if np.ma.count_masked(vector) == 0:
            difflisty.append(100 * unifcalc(vector))
        else:
            difflisty.append(0.0)

    img = np.reshape(np.array(difflisty), np.shape(inarray))
    hormax = ndimage.maximum_position(img)

    highx = np.max(difflistx)
    # lowest DU in vert direction, larger than zero
    lowx = np.min(np.array(difflistx)[np.nonzero(difflistx)])

    highy = np.max(difflisty)
    # lowest DU in horiz direction, larger than zero
    lowy = np.min(np.array(difflisty)[np.nonzero(difflisty)])

    #return highx, highy, difflistx,difflisty
    return highx, highy, vertmax, hormax
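The helper `unifcalc` is not defined in this snippet; judging from the commented-out line above, it computes the NEMA contrast (max - min)/(max + min) of a masked vector. A minimal sketch under that assumption:

import numpy.ma as ma

def unifcalc(arr):
    # Hypothetical stand-in, inferred from the commented-out formula above:
    # contrast = (max - min) / (max + min) over the unmasked values.
    return (ma.max(arr) - ma.min(arr)) / (ma.max(arr) + ma.min(arr))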
Example #2
def kspaceshift(ksp):
    """kspaceshift
    Shift k-space data to centre maximum
    """
    print "K-space shift ", ksp.shape
    if len(ksp.shape) == 3:
        kmax = np.array(ndimage.maximum_position(np.abs(ksp)))
        siz = np.array(ksp.shape[0:3])
        sub = (siz / 2.).astype(int) - kmax
        print "Shifting kspace ", sub
        for x in xrange(0, 3):
            if sub[x] != 0:
                ksp = np.roll(ksp, sub[x], axis=x)
            print ""
    else:
        kmax = np.array(
            ndimage.maximum_position(np.squeeze(np.abs(ksp[:, :, :, 0, 0]))))
        siz = np.array(ksp.shape[0:3])
        sub = (siz / 2.).astype(int) - kmax
        for echo in xrange(0, ksp.shape[4]):
            for nchannel in xrange(0, ksp.shape[3]):
                print "Shifting kspace ", sub
                for x in xrange(0, 3):
                    if sub[x] != 0:
                        ksp[:, :, :, nchannel, echo] = np.roll(
                            ksp[:, :, :, nchannel, echo], sub[x], axis=x)
                    # print ""
    return ksp
Example #3
def cut_psf_to_match(filename, outname=None, cutsize=79):
    """
        make a cutout from some PSF (presumably from Tiny Tim?)
        to use with MCFOST
    """
    input = pyfits.open(filename)

    if outname is None:
        outname="mcfost_"+filename

    # find the location of the PSF peak
    mx, my = ndimage.maximum_position(input[0].data)

    cut = input[0].data[mx-cutsize/2:mx-cutsize/2+cutsize, my-cutsize/2:my-cutsize/2+cutsize]
    input[0].data=cut

    mx2, my2 = ndimage.maximum_position(cut)
    input[0].header.update('CENTER_X', mx2, comment="PSF Center")
    input[0].header.update('CENTER_Y', my2, comment="PSF Center")
    input[0].header.add_history("  Created by mcfost.cut_psf_to_match (Python) ")
    input[0].header.add_history("  Created from "+filename)

    input.writeto(outname)

    print "Cutout "+str(cutsize)+" pixels across writen to "+outname
Example #4
def imageshift(image1, image2):
    """imageshift
    Shift image2 into same arrangement as image 1 using the maximum
    value position. Return the shifted image2
    """
    print "Image shift ", image.shape, image2.shape
    i1max = np.array(ndimage.maximum_position(np.abs(image1)))
    i2max = np.array(ndimage.maximum_position(np.abs(image2)))
    siz = np.array(image1.shape[0:3])
    sub = i1max - i2max
    print "Shifting image ", sub, "size ", siz
    for x in xrange(0, 3):
        image2 = np.roll(image2, sub[x], axis=x)
    print ""
    return image2
Example #5
    def run(self, ips, imgs, para=None):
        lab = WindowsManager.get(para['lab']).ips.get_img()
        if lab.dtype != np.uint8 and lab.dtype != np.uint16:
            IPy.alert('Label image must be in type 8-bit or 16-bit')
            return
        index = range(1, lab.max() + 1)
        titles = ['Center-X', 'Center-Y', 'Max-X', 'Max-Y', 'Min-X', 'Min-Y']
        key = {
            'Max-X': 'max',
            'Max-Y': 'max',
            'Min-X': 'min',
            'Min-Y': 'min',
            'Center-X': 'center',
            'Center-Y': 'center'
        }
        titles = ['value'] + [i for i in titles if para[key[i]]]

        data = [index]
        img = ips.get_img()
        if img is lab: img = img > 0
        if para['center']:
            pos = np.round(ndimage.center_of_mass(img, lab, index), 2)
            data.append(pos[:, 0])
            data.append(pos[:, 1])
        if para['max']:
            pos = np.round(ndimage.maximum_position(img, lab, index), 2)
            data.append(pos[:, 0])
            data.append(pos[:, 1])
        if para['min']:
            pos = np.round(ndimage.minimum_position(img, lab, index), 2)
            data.append(pos[:, 0])
            data.append(pos[:, 1])
        data = zip(*data)
        IPy.table(ips.title + '-position', data, titles)
Example #6
def test_maximum_position03():
    "maximum position 3"
    input = np.array([[5, 4, 2, 5],
                            [3, 7, 8, 2],
                            [1, 5, 1, 1]], bool)
    output = ndimage.maximum_position(input)
    assert_equal(output, (0, 0))
Example #7
def test_maximum_position05():
    "maximum position 5"
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels, 1)
        assert_equal(output, (0, 0))
Example #8
def test_maximum_position06():
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels, [1, 2])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (1, 1))
Example #9
def test_maximum_position03():
    "maximum position 3"
    input = np.array([[5, 4, 2, 5],
                            [3, 7, 8, 2],
                            [1, 5, 1, 1]], bool)
    output = ndimage.maximum_position(input)
    assert_equal(output, (0, 0))
Example #10
def test_maximum_position01():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum_position(input,
                                                    labels=labels)
        assert_equal(output, (1, 0))
Example #11
def test_maximum_position02():
    for type in types:
        input = np.array([[5, 4, 2, 5],
                                [3, 7, 8, 2],
                                [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input)
        assert_equal(output, (1, 2))
Example #12
def estimate_pk_parms_2d(x, y, f, pktype):
    """
    Calculate initial parameter values for 2-dimensional peak fitting.

    Parameters
    ----------
    x : array_like
        (n, ) ndarray of coordinate positions for dimension 1
        (numpy.meshgrid formatting).
    y : array_like
        (n, ) ndarray of coordinate positions for dimension 2
        (numpy.meshgrid formatting).
    f : array_like
        (n, ) ndarray of intensity measurements at coordinate
        positions x and y.
    pktype : str
        type of analytic function that will be used to fit the data; current
        options are "gaussian", "gaussian_rot" (gaussian with arbitrary axes)
        and "split_pvoigt_rot" (split psuedo voigt with arbitrary axes).

    Returns
    -------
    p -- (m) ndarray containing initial guesses for parameters for the input
    peak type (see peakfunction help for more information).
    """

    bg0 = np.mean([f[0, 0], f[-1, 0], f[-1, -1], f[0, -1]])
    bg1x = (np.mean([f[-1, -1], f[0, -1]]) - np.mean([f[0, 0], f[-1, 0]])) \
        / (x[0, -1] - x[0, 0])
    bg1y = (np.mean([f[-1, -1], f[-1, 0]]) - np.mean([f[0, 0], f[0, -1]])) \
        / (y[-1, 0] - y[0, 0])

    fnobg = f - (bg0 + bg1x * x + bg1y * y)

    labels, numlabels = imgproc.label(fnobg > 0.5*np.max(fnobg))

    # looks for the largest peak
    areas = np.zeros(numlabels)
    for ii in np.arange(1, numlabels + 1, 1):
        areas[ii - 1] = np.sum(labels == ii)

    peakIndex = np.argmax(areas) + 1

    FWHMx = np.max(x[labels == peakIndex]) - np.min(x[labels == peakIndex])
    FWHMy = np.max(y[labels == peakIndex]) - np.min(y[labels == peakIndex])

    coords = imgproc.maximum_position(fnobg, labels=labels, index=peakIndex)
    A = imgproc.maximum(fnobg, labels=labels, index=peakIndex)
    x0 = x[coords]
    y0 = y[coords]

    if pktype == 'gaussian':
        p = [A, x0, y0, FWHMx, FWHMy, bg0, bg1x, bg1y]
    elif pktype == 'gaussian_rot':
        p = [A, x0, y0, FWHMx, FWHMy, 0., bg0, bg1x, bg1y]
    elif pktype == 'split_pvoigt_rot':
        p = [A, x0, y0, FWHMx, FWHMx, FWHMy, FWHMy,
             0.5, 0.5, 0.5, 0.5, 0., bg0, bg1x, bg1y]
    p = np.array(p)
    return p
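The peak-location pattern used above (threshold at half maximum, label the blobs, keep the largest one, then take `maximum_position` inside it) can be tried on a synthetic peak. A self-contained sketch, not taken from the original source:

import numpy as np
from scipy import ndimage

# Synthetic 2D Gaussian peak on a weak linear background.
y, x = np.mgrid[0:64, 0:64]
f = 10.0 * np.exp(-((x - 40) ** 2 + (y - 24) ** 2) / (2 * 3.0 ** 2)) + 0.01 * x

labels, numlabels = ndimage.label(f > 0.5 * np.max(f))
areas = ndimage.sum(np.ones_like(f), labels, index=np.arange(1, numlabels + 1))
peak_label = np.argmax(areas) + 1
coords = ndimage.maximum_position(f, labels=labels, index=peak_label)
print(coords)  # (24, 40): (row, column) of the peak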
Example #13
def test_maximum_position01():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum_position(input,
                                                    labels=labels)
        assert_equal(output, (1, 0))
Example #14
def test_maximum_position02():
    for type in types:
        input = np.array([[5, 4, 2, 5],
                                [3, 7, 8, 2],
                                [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input)
        assert_equal(output, (1, 2))
Example #15
 def run(self, ips, imgs, para=None):
     lab = WindowsManager.get(para['lab']).ips.get_img()
     if lab.dtype != np.uint8 and lab.dtype != np.uint16:
         IPy.alert('Label image must be in type 8-bit or 16-bit')
         return
     index = range(1, lab.max() + 1)
     data = [index]
     img = ips.get_img()
     if img is lab: img = img > 0
     if para['mode'] == 'Center':
         pos = np.round(ndimage.center_of_mass(img, lab, index), 2)[:, ::-1]
         data.append(pos[:, 0])
         data.append(pos[:, 1])
     if para['mode'] == 'Max':
         pos = np.round(ndimage.maximum_position(img, lab, index),
                        2)[:, ::-1]
         data.append(pos[:, 0])
         data.append(pos[:, 1])
     if para['mode'] == 'Min':
         pos = np.round(ndimage.minimum_position(img, lab, index),
                        2)[:, ::-1]
         data.append(pos[:, 0])
         data.append(pos[:, 1])
     body = [tuple(i) for i in pos]
     ips.roi = PointRoi(body)
Example #16
def VegetationClassify(Elev_arr, River_arr): 

  from rpy2.robjects.packages import importr
  rpart = importr("rpart")

  # Read the dictionary from the pickle file
  pkl_file = open('decision_tree.pkl','rb')
  rpy2.set_default_mode(rpy2.NO_CONVERSION)
  traing_data = pickle.load(pkl_file)
  pkl_file.close()

  # Create Decision tree for predicting landcover class
  # create the decision tree using rpart 
  fit = rpart(formula='Class ~ Elevation + RiverDistance + Slope \
      + Aspect_x + Aspect_y',data = traing_data, method = "class")

  # calculate River distance using River_arr
  River_dist_arr = dist.CityBlock(River_arr)  
  # calculate slope and aspect
  (Slope_arr, Aspect_arr) = Slope_aspect.Slope_aspect(Elev_arr)

  (x_len, y_len) = Elev_arr.shape
  # Allocate vegetation array for holding predicted landcover values
  Veg_arr = numpy.zeros((x_len, y_len), dtype = "uint8")

  # Normalize the elevation data
  minimum_elev = numpy.min(Elev_arr)
  factor = numpy.max(Elev_arr) - minimum_elev
  Elev_arr = (Elev_arr[:,:] - minimum_elev)*100/factor

  # Create various list to hold test data
  Elevation = []
  Slope = []
  RiverDistance = []
  Aspect_x = []
  Aspect_y = []

  # Append the data into respective list
  for i in range(0,x_len):
    for j in range(0,y_len):
      Elevation.append(int(Elev_arr[i][j]))
      Slope.append(int(Slope_arr[i][j]))
      RiverDistance.append(int(River_dist_arr[i][j]))
      Aspect_x.append(int(Aspect_arr[i][j][0]))
      Aspect_y.append(int(Aspect_arr[i][j][1]))

  # Create dictionary so as to apply R's predict command on it 
  Test_data ={'Elevation':Elevation ,'Slope':Slope ,'RiverDistance':RiverDistance,\
             'Aspect_x':Aspect_x,'Aspect_y':Aspect_y}

  rpy2.set_default_mode(rpy2.BASIC_CONVERSION)
  # values contain probability values of the predicted landcover classes
  values = rpy2.r.predict(fit,newdata=Test_data,method="class")
  for i in range(0,x_len):
    for j in range(0,y_len):
      # Get the class having max probability for each test data point
      a = ndimage.maximum_position(values[i*x_len + j])
      Veg_arr[i,j] = (a[0]*25) # Assign them some value to facilitate visualization
  return Veg_arr
Example #17
def measure_labeled_regions(data,
                            labels,
                            tag='IMAGE',
                            measure_positions=True,
                            measure_values=True,
                            fits_offset=True,
                            bbox_offset=True):
    """Measure source properties in image.

    Sources are defined by a label image.

    Parameters
    ----------
    TODO

    Returns
    -------
    TODO
    """
    import scipy.ndimage as nd
    from astropy.table import Table, Column
    # Measure all segments
    nsegments = labels.max()
    index = np.arange(1, nsegments + 1)  # Measure all sources
    # Measure stuff
    sum = nd.sum(data, labels, index)
    max = nd.maximum(data, labels, index)
    mean = nd.mean(data, labels, index)
    x, y = _split_xys(nd.center_of_mass(data, labels, index))
    xpeak, ypeak = _split_xys(nd.maximum_position(data, labels, index))
    xmin, xmax, ymin, ymax = _split_slices(nd.find_objects(labels))
    area = _measure_area(labels)
    # Use FITS convention, i.e. start counting at 1
    FITS_OFFSET = 1 if fits_offset else 0
    # Use SExtractor convention, i.e. slice max is inside
    BBOX_OFFSET = -1 if bbox_offset else 0
    # Create a table
    table = Table()
    table.add_column(Column(data=index, name='NUMBER'))

    if measure_positions:
        table.add_column(Column(data=x + FITS_OFFSET, name='X_IMAGE'))
        table.add_column(Column(data=y + FITS_OFFSET, name='Y_IMAGE'))
        table.add_column(Column(data=xpeak + FITS_OFFSET, name='XPEAK_IMAGE'))
        table.add_column(Column(data=ypeak + FITS_OFFSET, name='YPEAK_IMAGE'))
        table.add_column(Column(data=xmin + FITS_OFFSET, name='XMIN_IMAGE'))
        table.add_column(
            Column(data=xmax + FITS_OFFSET + BBOX_OFFSET, name='XMAX_IMAGE'))
        table.add_column(Column(data=ymin + FITS_OFFSET, name='YMIN_IMAGE'))
        table.add_column(
            Column(data=ymax + FITS_OFFSET + BBOX_OFFSET, name='YMAX_IMAGE'))
        table.add_column(Column(data=area, name='AREA'))

    if measure_values:
        table.add_column(Column(data=max, name=tag + '_MAX'))
        table.add_column(Column(data=sum, name=tag + '_SUM'))
        table.add_column(Column(data=mean, name=tag + '_MEAN'))

    return table
Example #18
def test_maximum_position05():
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                                [3, 7, 8, 2],
                                [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels, 1)
        assert_equal(output, (0, 0))
Example #19
def test_maximum_position07():
    # Test float labels
    labels = np.array([1.0, 2.5, 0.0, 4.5])
    for type in types:
        input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels, [1.0, 4.5])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (0, 3))
Example #20
def interpolate_image(img, frac, verbose):

    x = np.arange(0 + 0.5, img.shape[0] + 0.5,
                  1)  # put the coordinates in the center of pixels.
    y = np.arange(0 + 0.5, img.shape[1] + 0.5, 1)
    #fimg = interpolate.interp2d(x, y, img, kind='cubic') # this is 2d function of image
    fimg = interpolate.RectBivariateSpline(x, y, img, kx=3, ky=3)

    if verbose:
        print("original: ", img.shape)
        print(" expected center original: ", np.asarray(img.shape) // 2)
        print(" center original: ", ndimage.maximum_position(img))
        print(" sum original: ", np.nansum(img))

    center = np.asarray(img.shape) // 2 + 0.5
    xy = np.asarray(
        [np.arange(0 + 0.5, img.shape[ii] + 0.5, 1) for ii in range(2)])
    nsteps = np.floor((np.asarray(img.shape) // 2) / frac).astype("int")
    delta = np.asarray(
        [np.arange(frac, (nsteps[ii] + 1) * frac, frac) for ii in range(2)])
    xynew1 = np.asarray([center[ii] + delta[ii] for ii in range(2)])
    xynew2 = np.asarray([center[ii] - delta[ii] for ii in range(2)])
    xynew = np.asarray([
        np.concatenate(
            (np.flip(xynew2[ii]), np.asarray([center[ii]]), xynew1[ii]))
        for ii in range(2)
    ])

    img_interp = fimg(xynew[0], xynew[1]) * frac**2  # adjust sum

    if verbose:
        print("interpolated: ", img_interp.shape)
        print(" expected center interpolated: ",
              np.asarray(img_interp.shape) // 2)
        print(" center interpolated: ", ndimage.maximum_position(img_interp))
        print(" sum interpolated: ", np.nansum(img_interp))

    if not list(ndimage.maximum_position(img_interp)) == list(
            np.asarray(img_interp.shape) // 2):
        print("Warning: shift in center position possible")
        print("  expected center interpolated: ",
              np.asarray(img_interp.shape) // 2)
        print("  center interpolated: ", ndimage.maximum_position(img_interp))

    return (img_interp)
Example #21
    def calculate_centroids_properties(self, shot, x, y, tof, tot, labels):
        """
        Calculates the properties of the centroids from labeled data points.

        ATTENTION! The order of the points can have an impact on the result due to
        errors in floating-point arithmetic.

        Very simple example:
        arr = np.random.random(100)
        arr.sum() - np.sort(arr).sum()
        This example shows that there is a very small difference between the two sums.
        The inaccuracy of floating-point arithmetic can depend on the order of the
        values. Strongly simplified, (3.2 + 3.4) + 2.7 and 3.2 + (3.4 + 2.7) can be
        unequal for floating-point numbers.

        Therefore there is no guarantee of strictly equal results, even after sorting.
        The error we observed can be about 10^-22 nanoseconds.

        Currently this issue exists only for the TOF column, as the other columns are
        integer-based values.
        """
        label_index, cluster_size = np.unique(labels, return_counts=True)
        tot_max = np.array(
            nd.maximum_position(tot, labels=labels,
                                index=label_index)).flatten()

        tot_sum = nd.sum(tot, labels=labels, index=label_index)
        tot_mean = nd.mean(tot, labels=labels, index=label_index)
        cluster_x = np.array(
            nd.sum(x * tot, labels=labels, index=label_index) /
            tot_sum).flatten()
        cluster_y = np.array(
            nd.sum(y * tot, labels=labels, index=label_index) /
            tot_sum).flatten()
        cluster_tof = np.array(
            nd.sum(tof * tot, labels=labels, index=label_index) /
            tot_sum).flatten()
        cluster_totMax = tot[tot_max]
        cluster_totAvg = tot_mean
        cluster_shot = shot[tot_max]

        if self._cent_timewalk_lut is not None:
            # cluster_tof -= self._timewalk_lut[(cluster_tot / 25).astype(np.int) - 1]
            # cluster_tof *= 1e6
            cluster_tof -= (
                self._cent_timewalk_lut[np.int(cluster_totMax // 25) - 1] *
                1e3)
            # TODO: should totAvg not also be timewalk corrected?!
            # cluster_tof *= 1e-6

        return (
            cluster_shot,
            cluster_x,
            cluster_y,
            cluster_tof,
            cluster_totAvg,
            cluster_totMax,
            cluster_size,
        )
Example #22
def getbiggestcc(mask):
    """GETBIGGESTCC get the biggest connected component in binary image
    """
    # Find connected components
    label_im, nb_labels = ndimage.label(mask)
    sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))
    max_pos = ndimage.maximum_position(sizes)
    masked_sizes = sizes > (ndimage.maximum(sizes) - 1)  # ==max_pos#
    return masked_sizes[label_im]
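A quick check of the idea on a toy mask (label the mask, compare each component's size against the maximum, and map the per-label booleans back through the label image); this demo is an illustration, not part of the original source:

import numpy as np
from scipy import ndimage

mask = np.array([[1, 1, 0, 0, 1],
                 [1, 0, 0, 0, 1],
                 [0, 0, 0, 1, 1]], dtype=bool)
label_im, nb_labels = ndimage.label(mask)
sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))
biggest = (sizes > ndimage.maximum(sizes) - 1)[label_im]
print(biggest.astype(int))  # keeps only the 4-pixel component on the right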
Example #23
def maximum_position(power_masked, kx, ky):
    # estimate the maximum by parabolic->gaussian interpolation of pixels
    dkx, dky = kx[0,1]-kx[0,0], ky[1,0]-ky[0,0]
    iy_max, ix_max = ndimage.maximum_position(power_masked)
    data_roi = power_masked.data[iy_max-1:iy_max+2, ix_max-1:ix_max+2]
    x0, y0, z0 = utils.parab_interpolation(np.log(data_roi), 1, 1)
    k_vec = np.array([(x0+ix_max-1)*dkx + kx[0, 0], (y0+iy_max-1)*dky + ky[0, 0]])
    amp = np.exp(z0)
    return k_vec, amp
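`utils.parab_interpolation` is not shown here. It appears to refine the integer peak position to sub-pixel accuracy by fitting a parabola to the 3x3 neighbourhood of log power (a parabola in log space corresponds to a Gaussian in linear space, hence "parabolic->gaussian"). A hypothetical stand-in using independent three-point parabolic refinement along each axis might look as follows; the real helper may differ:

import numpy as np

def parab_interpolation(z, iy, ix):
    # Hypothetical stand-in for utils.parab_interpolation: three-point
    # parabolic refinement of the peak around (iy, ix) in a 2D array z,
    # done independently along each axis. Returns (x0, y0, z0).
    def offset(prev, cent, nxt):
        denom = prev - 2.0 * cent + nxt
        return 0.0 if denom == 0 else 0.5 * (prev - nxt) / denom
    dx = offset(z[iy, ix - 1], z[iy, ix], z[iy, ix + 1])
    dy = offset(z[iy - 1, ix], z[iy, ix], z[iy + 1, ix])
    z0 = z[iy, ix] - 0.25 * ((z[iy, ix - 1] - z[iy, ix + 1]) * dx +
                             (z[iy - 1, ix] - z[iy + 1, ix]) * dy)
    return ix + dx, iy + dy, z0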
Example #24
def test_extrema02():
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels, index=2)
        output2 = ndimage.minimum(input, labels=labels, index=2)
        output3 = ndimage.maximum(input, labels=labels, index=2)
        output4 = ndimage.minimum_position(input, labels=labels, index=2)
        output5 = ndimage.maximum_position(input, labels=labels, index=2)
        assert_equal(output1, (output2, output3, output4, output5))
Example #25
def calculate_nema_uniformity(imagearray,
                              resamplesize,
                              results,
                              domecorrection=False):
    """ Wrapper function for flood calculation according to NEMA recommendations.
        Input:
          imagearray     : NxN numpy input array
          resamplesize   : downsample size (MxM), typically (64,64)
          results        : instance of PluginData-class (container for generated results)
          domecorrection : Perform dome correction? [True, False]

        Dome correction can be used for intrinsic uniformity measurements (e.g. with
        Siemens camera's) where the distance between point-source and detector is
        smaller than 5 times the maximum FOV dimension.
    """

    if domecorrection == True:
        print 'Performing dome-correction...'
        imagearray = dome_correction(imagearray)

    IUufov = 0
    IUcfov = 0
    DUxufov = 0
    DUyufov = 0
    DUxcfov = 0
    DUycfov = 0

    imshape = np.shape(imagearray)

    try:
        ufov, cfov = nema_data_preprocess(imagearray, resamplesize)
    except:
        print "warning: could not preprocess ufov, cfov"
        ufov, cfov = np.ones((resamplesize))

    ufov.fill_value = 0
    cfov.fill_value = 0

    #unifcalc = lambda arr: 100*(ma.max(arr) - ma.min(arr))/(ma.max(arr) + ma.min(arr))
    unifxy_min = lambda arr: ndimage.minimum_position(arr)
    unifxy_max = lambda arr: ndimage.maximum_position(arr)

    IUufov = 100 * unifcalc(ufov)
    IUufov_min = unifxy_min(ufov)
    IUufov_max = unifxy_max(ufov)
    IUcfov = 100 * unifcalc(cfov)
    IUcfov_min = unifxy_min(cfov)
    IUcfov_max = unifxy_max(cfov)

    DUxufov_val, DUyufov_val, DUxufov_coord, DUyufov_coord = diff_data(ufov)
    DUxcfov_val, DUycfov_val, DUxcfov_coord, DUycfov_coord = diff_data(cfov)

    output = DUxufov_val, DUyufov_val, DUxufov_coord, DUyufov_coord, DUxcfov_val, DUycfov_val, DUxcfov_coord, DUycfov_coord, IUufov, IUcfov, ufov, cfov

    return output
Example #26
def test_extrema01():
    "extrema 1"
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels)
        output2 = ndimage.minimum(input, labels=labels)
        output3 = ndimage.maximum(input, labels=labels)
        output4 = ndimage.minimum_position(input, labels=labels)
        output5 = ndimage.maximum_position(input, labels=labels)
        assert_equal(output1, (output2, output3, output4, output5))
Example #27
def test_maximum_position06():
    "maximum position 6"
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                                [3, 7, 8, 2],
                                [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels,
                                                    [1, 2])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (1, 1))
Example #28
def test_maximum_position07():
    # Test float labels
    labels = np.array([1.0, 2.5, 0.0, 4.5])
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels,
                                          [1.0, 4.5])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (0, 3))
Example #29
def find_max(image):
    """image = kapteyn.maputils.FITSimage"""
    from scipy.ndimage import maximum_position
    if isinstance(image, str):
        image = fits.open(image)[0]
    proj = wcs.WCS(image.header)
    data = image.data
    data[np.isnan(data)] = -np.inf
    y, x = maximum_position(data)
    GLON, GLAT = proj.wcs_pix2world(x, y, 0)
    val = data[int(y), int(x)]
    return GLON, GLAT, val
Example #30
def measure_labeled_regions(data, labels, tag='IMAGE',
                            measure_positions=True, measure_values=True,
                            fits_offset=True, bbox_offset=True):
    """Measure source properties in image.

    Sources are defined by a label image.

    Parameters
    ----------
    TODO

    Returns
    -------
    TODO
    """
    import scipy.ndimage as nd
    from astropy.table import Table, Column
    # Measure all segments
    nsegments = labels.max()
    index = np.arange(1, nsegments + 1)  # Measure all sources
    # Measure stuff
    sum = nd.sum(data, labels, index)
    max = nd.maximum(data, labels, index)
    mean = nd.mean(data, labels, index)
    x, y = _split_xys(nd.center_of_mass(data, labels, index))
    xpeak, ypeak = _split_xys(nd.maximum_position(data, labels, index))
    xmin, xmax, ymin, ymax = _split_slices(nd.find_objects(labels))
    area = _measure_area(labels)
    # Use FITS convention, i.e. start counting at 1
    FITS_OFFSET = 1 if fits_offset else 0
    # Use SExtractor convention, i.e. slice max is inside
    BBOX_OFFSET = -1 if bbox_offset else 0
    # Create a table
    table = Table()
    table.add_column(Column(data=index, name='NUMBER'))

    if measure_positions:
        table.add_column(Column(data=x + FITS_OFFSET, name='X_IMAGE'))
        table.add_column(Column(data=y + FITS_OFFSET, name='Y_IMAGE'))
        table.add_column(Column(data=xpeak + FITS_OFFSET, name='XPEAK_IMAGE'))
        table.add_column(Column(data=ypeak + FITS_OFFSET, name='YPEAK_IMAGE'))
        table.add_column(Column(data=xmin + FITS_OFFSET, name='XMIN_IMAGE'))
        table.add_column(Column(data=xmax + FITS_OFFSET + BBOX_OFFSET, name='XMAX_IMAGE'))
        table.add_column(Column(data=ymin + FITS_OFFSET, name='YMIN_IMAGE'))
        table.add_column(Column(data=ymax + FITS_OFFSET + BBOX_OFFSET, name='YMAX_IMAGE'))
        table.add_column(Column(data=area, name='AREA'))

    if measure_values:
        table.add_column(Column(data=max, name=tag + '_MAX'))
        table.add_column(Column(data=sum, name=tag + '_SUM'))
        table.add_column(Column(data=mean, name=tag + '_MEAN'))

    return table
Example #31
def calculate_nema_uniformity (imagearray, resamplesize, results, domecorrection=False):
    """ Wrapper function for flood calculation according to NEMA recommendations.
        Input:
          imagearray     : NxN numpy input array
          resamplesize   : downsample size (MxM), typically (64,64)
          results        : instance of PluginData-class (container for generated results)
          domecorrection : Perform dome correction? [True, False]

        Dome correction can be used for intrinsic uniformity measurements (e.g. with
        Siemens camera's) where the distance between point-source and detector is
        smaller than 5 times the maximum FOV dimension.
    """

    if domecorrection == True:
        print 'Performing dome-correction...'
        imagearray = dome_correction(imagearray)

    IUufov = 0
    IUcfov = 0
    DUxufov = 0
    DUyufov = 0 
    DUxcfov = 0
    DUycfov = 0
    
    imshape = np.shape(imagearray)
    
    try:
         ufov, cfov = nema_data_preprocess(imagearray,resamplesize)
    except:
         print "warning: could not preprocess ufov, cfov"
         ufov, cfov = np.ones((resamplesize))

    ufov.fill_value=0
    cfov.fill_value=0

    #unifcalc = lambda arr: 100*(ma.max(arr) - ma.min(arr))/(ma.max(arr) + ma.min(arr))
    unifxy_min = lambda arr: ndimage.minimum_position(arr) 
    unifxy_max = lambda arr: ndimage.maximum_position(arr) 

    IUufov = 100*unifcalc(ufov)
    IUufov_min = unifxy_min(ufov)
    IUufov_max = unifxy_max(ufov)
    IUcfov = 100*unifcalc(cfov)
    IUcfov_min = unifxy_min(cfov)
    IUcfov_max = unifxy_max(cfov) 


    DUxufov_val,DUyufov_val, DUxufov_coord, DUyufov_coord = diff_data(ufov)
    DUxcfov_val,DUycfov_val, DUxcfov_coord, DUycfov_coord = diff_data(cfov)

    output = DUxufov_val, DUyufov_val, DUxufov_coord, DUyufov_coord, DUxcfov_val, DUycfov_val, DUxcfov_coord, DUycfov_coord, IUufov, IUcfov, ufov, cfov

    return output
Example #32
def test_extrema01():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels)
        output2 = ndimage.minimum(input, labels=labels)
        output3 = ndimage.maximum(input, labels=labels)
        output4 = ndimage.minimum_position(input,
                                                     labels=labels)
        output5 = ndimage.maximum_position(input,
                                                     labels=labels)
        assert_equal(output1, (output2, output3, output4, output5))
Example #33
def test_extrema04():
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type)
        output1 = ndimage.extrema(input, labels, [1, 2])
        output2 = ndimage.minimum(input, labels, [1, 2])
        output3 = ndimage.maximum(input, labels, [1, 2])
        output4 = ndimage.minimum_position(input, labels, [1, 2])
        output5 = ndimage.maximum_position(input, labels, [1, 2])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
Example #34
 def derive_shift(self, **kwargs):
     """ """
     from scipy import ndimage
     corr2d = self.get_correlate2d(**{**{"mode":"full"},**kwargs})
     centroid_x = corr2d.shape[0]/2
     centroid_y = corr2d.shape[1]/2
     self._offset_info = {"corr2d_centroid":np.asarray(corr2d.shape)/2-0.5,
                          "corr2d_maximum":np.asarray(ndimage.maximum_position(corr2d)),
                          "corr2d":corr2d
                          }
     self._offset = (self._offset_info["corr2d_centroid"] - self._offset_info["corr2d_maximum"]
                         )[::-1]
     return self._offset
Example #35
def process_evt(row):
    #global crop_size

    ### Get channel max ###
    arr_ref = np.array(row.EB_adc6, dtype=np.float32).reshape(n_rows, n_cols)
    r, c = maximum_position(arr_ref)

    ### Row object can be cast as python dict ###
    ### Note down out of range maxima ###
    row_dict = row.asDict()
    if c < w or c >= n_cols - w or r < w or r >= n_rows - w:
        evt_out = {
            k: np.full(crop_size, -999, dtype=np.float32).tolist()
            for k, arr in row_dict.iteritems()
        }
        evt_out['keep'] = False
        return Row(**evt_out)

    ### Initialize output dict as cropped input Row dict ###
    evt_out = {
        k: crop_around_max(arr, r, c)
        for k, arr in row_dict.iteritems()
    }
    #evt_out = {k:np.array(arr, dtype=np.float32).flatten() for k,arr in row_dict.iteritems()}
    '''
    ### Process Energy ###
    dict_en = ['EBenergy', 'EBenergyRed']
    for k in dict_en:
        evt_out[k] = process_en(evt_out[k])
    
    ### Process Time ###
    dict_t = ['EBtime', 'EBtimeRed']
    for k in dict_t:
        evt_out[k] = process_t(evt_out[k])
    '''
    ### Process Digis ###
    presample = np.mean(
        [evt_out['EB_adc0'], evt_out['EB_adc1'], evt_out['EB_adc2']], axis=0)
    #presample = log_noise(presample)
    dict_adc = ['EB_adc%d' % sample for sample in range(10)]
    for k in dict_adc:
        evt_out[k] = process_digi(evt_out[k], presample)

    ### Keep event ###
    ### Pyspark only accepts list types ###
    evt_out = {k: arr.tolist() for k, arr in evt_out.iteritems()}
    evt_out['keep'] = True
    return Row(**evt_out)
Example #36
def extract_harmonics(frequencies, amplitudes, threshold=None):
    """
    Extract amplitudes and frequencies of (probably) harmonic components
    :param frequencies: array of frequencies
    :param amplitudes: array of amplitudes
    :param threshold: Threshold of <valuable> component
    :return: arrays of valuable frequencies and amplitudes
    """
    th = threshold if threshold is not None else CONFIG['threshold']
    labels, num_labels = ndimage.label(amplitudes > th)
    unique_labels = np.unique(labels)
    idx = np.array(
        ndimage.maximum_position(amplitudes, labels,
                                 unique_labels[1:])).reshape((num_labels, ))

    return frequencies[idx], amplitudes[idx]
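A minimal usage sketch with a synthetic spectrum, assuming the `extract_harmonics` function above (and its `ndimage` import) is in scope; passing an explicit threshold avoids the module-level CONFIG lookup:

import numpy as np

frequencies = np.linspace(0, 500, 1001)
amplitudes = np.full_like(frequencies, 0.01)
amplitudes[[100, 200, 300]] = [1.0, 0.6, 0.3]  # spikes at 50, 100 and 150 Hz

freqs, amps = extract_harmonics(frequencies, amplitudes, threshold=0.1)
print(freqs, amps)  # -> [ 50. 100. 150.] [1.  0.6 0.3]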
Example #37
def test_extrema02():
    "extrema 2"
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                            index=2)
        output2 = ndimage.minimum(input, labels=labels,
                                            index=2)
        output3 = ndimage.maximum(input, labels=labels,
                                            index=2)
        output4 = ndimage.minimum_position(input,
                                            labels=labels, index=2)
        output5 = ndimage.maximum_position(input,
                                            labels=labels, index=2)
        assert_equal(output1, (output2, output3, output4, output5))
Example #38
 def _watershed(self):
     """Get positions of high correlation values using watershed
     algorithm
     """
     max_cc = self._corr.max()
     min_cc = 0.5 * max_cc
     stepsize = (max_cc - min_cc) / self._steps
     cutoff = max_cc
     positions = []
     mask = zeros(self._corr.shape, dtype=bool)
     for n in xrange(self._steps):
         cutoff -= stepsize
         greater_equal(self._corr, cutoff, mask)
         labels, nfeatures = label(mask)
         positions += list(maximum_position(self._corr, labels, range(1, nfeatures + 1)))
     self._positions = set(positions)
Example #39
def translate_back(outputs, threshold=0.7, pos=0):
    """Translate back. Thresholds on class 0, then assigns the maximum class to
    each region. ``pos`` determines the depth of character information returned:
        * `pos=0`: Return list of recognized characters
        * `pos=1`: Return list of position-character tuples
        * `pos=2`: Return list of character-probability tuples
     """
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask,
                                           np.arange(1,
                                                     np.amax(mask) + 1))
    if pos == 1: return maxima  # include character position
    if pos == 2:
        return [(c, outputs[r, c])
                for (r, c) in maxima]  # include character probabilities
    return [c for (r, c) in maxima]  # only recognized characters
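The decoding step boils down to labelling the frames where the blank (class 0) probability drops below the threshold and then taking the per-region maximum position over the whole probability matrix (`scipy.ndimage.measurements.maximum_position` is the same function exposed as `scipy.ndimage.maximum_position`). A small self-contained illustration with synthetic probabilities, not from the original source:

import numpy as np
from scipy import ndimage

# 6 frames x 3 classes; class 0 is the "blank" class.
outputs = np.array([[0.9, 0.05, 0.05],
                    [0.1, 0.80, 0.10],
                    [0.2, 0.70, 0.10],
                    [0.9, 0.05, 0.05],
                    [0.1, 0.10, 0.80],
                    [0.9, 0.05, 0.05]])
labels, n = ndimage.label(outputs[:, 0] < 0.7)
mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
maxima = ndimage.maximum_position(outputs, mask, np.arange(1, np.amax(mask) + 1))
print([c for (r, c) in maxima])  # -> [1, 2]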
Example #40
def test_extrema03():
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels, index=[2, 3, 8])
        output2 = ndimage.minimum(input, labels=labels, index=[2, 3, 8])
        output3 = ndimage.maximum(input, labels=labels, index=[2, 3, 8])
        output4 = ndimage.minimum_position(input,
                                           labels=labels,
                                           index=[2, 3, 8])
        output5 = ndimage.maximum_position(input,
                                           labels=labels,
                                           index=[2, 3, 8])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
Example #41
 def _watershed(self):
     """Get positions of high correlation values using watershed
     algorithm
     """
     max_cc = self._corr.max()
     min_cc = 0.5 * max_cc
     stepsize = (max_cc - min_cc) / self._steps
     cutoff = max_cc
     positions = []
     mask = zeros(self._corr.shape, dtype=bool)
     for n in xrange(self._steps):
         cutoff -= stepsize
         greater_equal(self._corr, cutoff, mask)
         labels, nfeatures = label(mask)
         positions += maximum_position(self._corr, labels,
                                       range(1, nfeatures + 1))
     self._positions = set(positions)
Example #42
def test_extrema04():
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                                [3, 7, 8, 2],
                                [1, 5, 1, 1]], type)
        output1 = ndimage.extrema(input, labels, [1, 2])
        output2 = ndimage.minimum(input, labels, [1, 2])
        output3 = ndimage.maximum(input, labels, [1, 2])
        output4 = ndimage.minimum_position(input, labels,
                                                     [1, 2])
        output5 = ndimage.maximum_position(input, labels,
                                                     [1, 2])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
Example #43
def test_extrema03():
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                            index=[2, 3, 8])
        output2 = ndimage.minimum(input, labels=labels,
                                            index=[2, 3, 8])
        output3 = ndimage.maximum(input, labels=labels,
                                            index=[2, 3, 8])
        output4 = ndimage.minimum_position(input,
                                    labels=labels, index=[2, 3, 8])
        output5 = ndimage.maximum_position(input,
                                    labels=labels, index=[2, 3, 8])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
Example #44
def ctc_decode(probs, sigma=1.0, threshold=0.7, kind=None, full=False):
    """A simple decoder for CTC-trained OCR recognizers.

    :probs: d x l sequence classification output
    """
    probs = asnp(probs.T)
    assert (abs(probs.sum(1)-1) < 1e-4).all(), \
        "input not normalized; did you apply .softmax()?"
    probs = ndi.gaussian_filter(probs, (sigma, 0))
    probs /= probs.sum(1)[:,newaxis]
    labels, n = ndi.label(probs[:,0]<threshold)
    mask = tile(labels[:,newaxis], (1, probs.shape[1]))
    mask[:,0] = 0
    maxima = ndi.maximum_position(probs, mask, arange(1, amax(mask)+1))
    if not full:
        return [c for r, c in sorted(maxima)]
    else:
        return [(r, c, probs[r, c]) for r, c in sorted(maxima)]
Example #45
 def backward_cpu(self, inputs, grad_outputs):
     img, labels = inputs
     grad_in = np.zeros_like(img)
     for batch in xrange(img.shape[0]):
         for classIx in xrange(img.shape[1]):
             indices = np.array(
                 ndimage.maximum_position(
                     img[batch, classIx, :, :, :],
                     labels=labels[batch, :, :, :],
                     index=range(labels[batch, :, :, :].max() + 1)))
             grad_in[
                 batch, classIx, indices[:, 0], indices[:, 1],
                 indices[:, 2]] = grad_outputs[0][
                     batch,
                     classIx, :]  # grad_outputs should have the same length as the list of selected indices.
     return grad_in, np.zeros_like(
         labels
     )  # Second argument needs to be returned to match shapes of arguments in forward and backward passes.
Example #46
def calculate_field_centers(rate_map, labels, center_method='maxima'):
    """Finds center of fields at labels.
    :Authors:
        Halvard Sutterud <*****@*****.**>
    """

    from scipy import ndimage
    indices = np.arange(1, np.max(labels) + 1)
    if center_method == 'maxima':
        bc = ndimage.maximum_position(rate_map, labels=labels, index=indices)
    elif center_method == 'center_of_mass':
        bc = ndimage.center_of_mass(rate_map, labels=labels, index=indices)
    else:
        raise ValueError(
            "invalid center_method flag '{}'".format(center_method))
    bc = np.array(bc)
    bc[:, [0, 1]] = bc[:, [1, 0]]  # y, x -> x, y
    return bc
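A short usage sketch with a synthetic rate map containing two labelled fields, assuming the `calculate_field_centers` function above is in scope (not from the original source):

import numpy as np

rate_map = np.zeros((5, 6))
rate_map[1, 1] = 3.0  # peak of field 1
rate_map[3, 4] = 5.0  # peak of field 2
labels = np.zeros_like(rate_map, dtype=int)
labels[0:3, 0:3] = 1
labels[2:5, 3:6] = 2

print(calculate_field_centers(rate_map, labels))
# [[1 1]
#  [4 3]]   -> one (x, y) row per field, i.e. columns swapped from (row, col)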
Example #47
def find_all_downward(data, thres, find_segs=False, diag=False):
    """
    Find all downward connected segments in data

    Parameters
    ----------
    data : ndarray
        Data to perform segmentation on.
    thres : float
        Threshold, below this nodes are considered noise.
    find_segs : bool, optional
        True to return a list of slices for the segments.
    diag : bool, optional
        True to include diagonal neighbors in connection.

    Returns
    -------
    locations : list
        List of indices of the local maximum in each segment.
    seg_slices : list, optional
        List of slices which extract a given segment from the data. Only
        returned when find_segs is True.

    """
    # build structure array for defining feature connections
    ndim = data.ndim
    if diag:
        structure = ndimage.generate_binary_structure(ndim, ndim)
    else:
        structure = ndimage.generate_binary_structure(ndim, 1)

    # determine labeled array of segments
    labels, num_features = label_downward(data, thres, structure)

    # determine locations of segment maxima
    locations = ndimage.maximum_position(data, labels, range(1,
                                         num_features + 1))

    # find segment slices if requested and return
    if find_segs is True:
        seg_slices = ndimage.find_objects(labels)
        return locations, seg_slices
    else:
        return locations
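`label_downward` is defined elsewhere in the same module and is not shown in this snippet. The location step itself only needs a label array, so the pattern can be illustrated with a plain `ndimage.label` segmentation as a stand-in (an assumption; the real downward segmentation differs):

import numpy as np
from scipy import ndimage

data = np.array([0.0, 0.2, 1.5, 0.3, 0.0, 0.1, 2.5, 2.0, 0.0])
labels, num_features = ndimage.label(data > 1.0)  # stand-in for label_downward
locations = ndimage.maximum_position(data, labels, range(1, num_features + 1))
print(locations)  # -> [(2,), (6,)]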
Example #48
def find_all_downward(data, thres, find_segs=False, diag=False):
    """
    Find all downward connected segments in data

    Parameters
    ----------
    data : ndarray
        Data to perform segmentation on.
    thres : float
        Threshold, below this nodes are considered noise.
    find_segs : bool, optional
        True to return a list of slices for the segments.
    diag : bool, optional
        True to include diagonal neighbors in connection.

    Returns
    -------
    locations : list
        List of indices of the local maximum in each segment.
    seg_slices : list, optional
        List of slices which extract a given segment from the data. Only
        returned when find_segs is True.

    """
    # build structure array for defining feature connections
    ndim = data.ndim
    if diag:
        structure = ndimage.generate_binary_structure(ndim, ndim)
    else:
        structure = ndimage.generate_binary_structure(ndim, 1)

    # determine labeled array of segments
    labels, num_features = label_downward(data, thres, structure)

    # determine locations of segment maxima
    locations = ndimage.maximum_position(data, labels,
                                         range(1, num_features + 1))

    # find segment slices if requested and return
    if find_segs is True:
        seg_slices = ndimage.find_objects(labels)
        return locations, seg_slices
    else:
        return locations
Example #49
def fitFirstCTFNode(pow, rpixelsize, defocus, ht):
	filter = ndimage.gaussian_filter(pow,3)
	grad = ndimage.gaussian_gradient_magnitude(filter,3)
	thr = imagefun.threshold(grad,grad.mean()+3*grad.std())
	if defocus:
		z = abs(defocus)
		s = calculateFirstNode(ht,z)
		dmean = max(0.8*s/rpixelsize, 30)
	else:
		shape = pow.shape
		r = 20
		center = ( shape[0] / 2, shape[1] / 2 ) 
		grad[center[0]-r: center[0]+r, center[1]-r: center[1]+r] = 0
		peak = ndimage.maximum_position(grad)
		dmean = math.hypot(peak[0] - center[0], peak[1] - center[1])
	drange = max(dmean / 4, 10)
	eparams = find_ast_ellipse(grad,thr,dmean,drange)
	if eparams:
		z0, zast, ast_ratio, alpha = getAstigmaticDefocii(eparams,rpixelsize, ht)
		return z0,zast,ast_ratio, alpha, eparams
Example #50
def find_max(image):
    """Find position of maximum in an image.

    Parameters
    ----------
    image : `astropy.io.fits.ImageHDU`
        Input image

    Returns
    -------
    lon, lat, value : float
        Maximum value and its position
    """
    from scipy.ndimage import maximum_position
    from astropy.wcs import WCS
    proj = WCS(image.header)
    data = image.data
    data[np.isnan(data)] = -np.inf
    y, x = maximum_position(data)
    GLON, GLAT = proj.wcs_pix2world(x, y, 0)
    val = data[int(y), int(x)]
    return GLON, GLAT, val
Example #51
# Threshold
#threshold = (stats.norm.isf(0.5*threshold_p_value)
#                                 /np.sqrt(components_masked.shape[0]))
threshold = .9
components_masked[np.abs(components_masked) < threshold] = 0

# Now invert the masking operation, to go back to a full 3D
# representation
components_img = masker.inverse_transform(components_masked)
components = components_img.get_data()

# Using a masked array is important to have transparency in the figures
components = np.ma.masked_equal(components, 0, copy=False)

### Visualize the results #####################################################
# Show some interesting components
import pylab as pl
from scipy import ndimage

for i in range(n_components):
    pl.figure()
    pl.axis('off')
    cut_coord = ndimage.maximum_position(np.abs(components[..., i]))[2]
    vmax = np.max(np.abs(components[:, :, cut_coord, i]))
    pl.imshow(np.rot90(mean_epi[:, :, cut_coord]), interpolation='nearest',
              cmap=pl.cm.gray)
    pl.imshow(np.rot90(components[:, :, cut_coord, i]),
              interpolation='nearest', cmap=pl.cm.jet, vmax=vmax, vmin=-vmax)

pl.show()
Example #52
def main(parameter_file):
    """
    It performs the following actions:
    1. Gets the parameters, required for simulation, from parameter.yaml file. 
    2. calls DEM_creator() --> for generating DEM grid
    3. Erosion modelling 
    4. Flow modelling
    5. Landcover class allocation using decision tree
    6. Geometric feature development
    7. road mapping
    """
    time1 = time.time()
    #*****************parameter handling *************************************
    # Get the parameters from parameter.yaml file
    yaml_file  = open(parameter_file, 'r')
    stream     = yaml.load(yaml_file)
    resolution = stream['resolution']
    H          = stream['H']
    H_wt       =  stream['H_wt']
    seed       = stream['seed']
    sigma      = stream['sigma']
    elev_range = stream['elev_range']
    max_level  = stream['max_level']
    DEMcreator_option = stream['DEMcreator_option']
    output_dir = stream['output_dir']
    river_drop = stream['river_drop']
    Erosion_permission = stream['Erosion_permission']
    decision_tree = stream['decision_tree']
    counter    = stream['counter']
    elev_filename      = stream['training_elev_filename']
    landcover_filename = stream['training_landcover_filename']
    river_filename     = stream['training_river_filename']
    no_of_veg_class    = stream['no_of_veg_class']
    min_area     = stream['min_area']
    max_area     = stream['max_area']
    aspect_ratio = stream['aspect_ratio']
    agri_area_limit = stream['agri_area_limit']
    yaml_file.close() 

    #**************************print statistics***********************************
    print ("Running simulation with follwing parameters")
    print ("H: %s" % H)
    print ("H_wt: %s" % H_wt)
    print ("seed: %s" % seed)
    print ("sigma: %f" % sigma) 
    print ("elev_range: %s" % elev_range)
    print ("max_level: %s" % max_level)
    print ("DEMcreator_option: %s" % DEMcreator_option)
    print ("output_dir: %s" % output_dir)
    print ("River drop: %d" % river_drop)
    print ("counter: %d" % counter)
    print ("no of vegetation class %d" % no_of_veg_class)
    print ("min area: %f" % min_area)
    print ("max area: %f" % max_area)
    print ("aspect ratio: %f" % aspect_ratio)
    print ("agricultural area limit: %f" % agri_area_limit)
    gradient = 0   #fixed for now TODO incorporate gradient in next version
    #*****************************DEM generation************************************
    # Generate DEM using FM2D/SS algorithm by calling DEM_creator(args...) function
    DEM_Result = DEM_generator.DEM_creator(H, H_wt, seed, elev_range,sigma,gradient,max_level, DEMcreator_option)
    pathname = os.path.dirname(sys.argv[0])
    fullpath = os.path.abspath(pathname)
    filename = fullpath + "/" + output_dir
    if not os.path.exists(filename):
        os.makedirs(filename)          # create output directory if it doesn't exist 
    DEM_arr = DEM_Result[0]
    DEM_Result = 0 #free space
    #****************************region adjustment***********************************
    # We create a temporary region that is only valid in this python session
    g.use_temp_region()
    rows = DEM_arr.shape[0]
    cols = DEM_arr.shape[1]
    n = 4928050 #some arbitrary value
    s = n - resolution*rows
    e = 609000  #some arbitrary value
    w = e - resolution*cols
    g.run_command('g.region', flags = 'ap', n = n ,s = s, e = e, w = w,res = resolution, rows = rows ,cols = cols)   
    #*************************Flow accumulation with Erosion modelling****************************
    filename = fullpath + "/ascii_files"
    if not os.path.exists(filename):
        os.makedirs(filename)
    if not Erosion_permission:
        counter = 0
        DEM_arr_to_ascii(DEM_arr,resolution)
        g.run_command('r.in.ascii', overwrite = True, flags='i', input = fullpath +'/'+'ascii_files' +'/DEM.asc', output='test_DEM')
        #Flow computation for massive grids (float version) 
        g.run_command('r.terraflow', overwrite = True, elevation = 'test_DEM@user1', filled = 'flooded_DEM',\
          direction = 'DEM_flow_direction',swatershed = 'DEM_sink_watershed', accumulation = 'DEM_flow_accum', tci = 'DEM_tci')
        g.run_command('r.out.ascii',flags='h',input='DEM_flow_accum@user1',output=fullpath +'/ascii_files'+ '/DEM_flow_accum',null='0')
        f = open(fullpath +'/ascii_files'+ '/DEM_flow_accum', 'r')
        Flow_accum_arr = numpy.loadtxt(f)
        f.close()
    for iteration in range(0,counter):
        DEM_arr_to_ascii(DEM_arr,resolution)
        #Input the DEM ascii file into grass
        g.run_command('r.in.ascii', overwrite = True, flags='i', input = fullpath +'/'+'ascii_files' +'/DEM.asc', output='test_DEM')
        #Flow computation for massive grids (float version) 
        g.run_command('r.terraflow', overwrite = True, elevation = 'test_DEM@user1', filled = 'flooded_DEM',\
          direction = 'DEM_flow_direction',swatershed = 'DEM_sink_watershed', accumulation = 'DEM_flow_accum', tci = 'DEM_tci')
        g.run_command('r.out.ascii',flags='h',input='DEM_flow_accum@user1',output=fullpath +'/ascii_files'+ '/DEM_flow_accum',null='0')
        f = open(fullpath +'/ascii_files'+ '/DEM_flow_accum', 'r')
        Flow_accum_arr = numpy.loadtxt(f)
        f.close()
        #call erosion modelling function
        DEM_arr = Erosion(Flow_accum_arr, DEM_arr, river_drop)
    output=fullpath +'/'+output_dir+ '/DEM.asc'
    arr_to_ascii(DEM_arr,output)
    output=fullpath +'/'+output_dir+ '/flow_accum.asc'
    arr_to_ascii(Flow_accum_arr,output)
    #****************************landcover allocation using decision tree********************************
    # Get slope and Aspect using grass functions
    g.run_command('r.slope.aspect',overwrite=True,elevation='test_DEM@user1',slope='DEM_Slope',aspect='DEM_Aspect')
    g.run_command('r.out.ascii',flags='h',input='DEM_Slope@user1',output=fullpath + '/ascii_files'+'/DEM_Slope',null='0')
    f = open('ascii_files/DEM_Slope', 'r')
    DEM_Slope_arr = numpy.loadtxt(f)
    f.close()
    g.run_command('r.out.ascii',flags='h',input='DEM_Aspect@user1',output=fullpath +'/ascii_files'+'/DEM_Aspect',null='0')
    f = open('ascii_files/DEM_Aspect', 'r')
    DEM_Aspect_arr = numpy.loadtxt(f)
    f.close()
    Distance_arr = dist.CityBlock(Flow_accum_arr,flag = 0)
    # Normalize the elevation values to use decision tree
    minimum_elev = numpy.min(DEM_arr)
    factor = numpy.max(DEM_arr) - minimum_elev
    Elev_arr = (DEM_arr[:,:] - minimum_elev)*100/factor
    # Create various list to hold test data
    Elevation = []
    Slope = []
    RiverDistance = []
    Aspect = []
    # Append the data into respective list
    x_len = DEM_arr.shape[0]
    y_len = DEM_arr.shape[1]
    for i in range(0,x_len):
        for j in range(0,y_len):
            Elevation.append(int(Elev_arr[i][j]))
            Slope.append(int(DEM_Slope_arr[i][j]))
            RiverDistance.append(int(Distance_arr[i][j]))
            Aspect.append(int(DEM_Aspect_arr[i][j]))
    Elev_arr = 0 #free space
    DEM_slope_arr = 0 #free space
    DEM_Aspect_arr = 0 #free space
    Distance_arr = 0 #free space
    # Create dictionary to apply R's predict command on it 
    Test_data = {'Elevation':Elevation ,'Slope':Slope ,'RiverDistance':RiverDistance,'Aspect':Aspect}
    #free spaces
    Elevation = []
    Slope = []
    RiverDistance = []
    Aspect = []
    # create decision tree from training data
    fit = DecisionTree(no_of_veg_class,elev_filename, landcover_filename, river_filename,decision_tree)
    g.run_command('g.region', flags = 'ap', n = n ,s = s, e = e, w = w,res = resolution, rows = rows ,cols = cols)
    # Allocate vegetation array for holding predicted landcover values
    Veg_arr = numpy.zeros(DEM_arr.shape, dtype = "uint8")
    rpy.r.library("rpart")
    rpy.set_default_mode(rpy.BASIC_CONVERSION)
    # values contain probability values of the predicted landcover classes
    values = rpy.r.predict(fit,newdata=Test_data,method="class")
    Test_data = 0 #free space
    x_len = Veg_arr.shape[0]
    y_len = Veg_arr.shape[1]
    for i in range(0,x_len):
        for j in range(0,y_len):
        # Get the class having max probability for each test data point
            a = ndimage.maximum_position(values[i*y_len + j])
            Veg_arr[i,j] = (a[0]) # Assign them some value to facilitate visualization
    values = 0 #free space
    filename=fullpath +'/'+output_dir+ "/landcover.asc"
    arr_to_ascii(Veg_arr,filename)
    # Allocate and initialize Suitability map
    Suitability = numpy.zeros( DEM_arr.shape, dtype = "uint8")
    for i in range(0,DEM_arr.shape[0]):
        for j in range(0,DEM_arr.shape[1]):
            #TODO can use mask here, needs to be generalised
            if Veg_arr[i][j] == 0: # Ignore
                Suitability[i][j] = 0 
            elif Veg_arr[i][j] == 25: # Deciduous woodland
                Suitability[i][j] = 60 
            elif Veg_arr[i][j] == 50: # Coniferous woodland
                Suitability[i][j] = 55 
            elif Veg_arr[i][j] == 75: # Agriculture including pasture
                Suitability[i][j] = 98 
            elif Veg_arr[i][j] == 100: # Semi-natural grassland
                Suitability[i][j] = 90 
            elif Veg_arr[i][j] == 125: # Bog and swamp
                Suitability[i][j] = 50
            elif Veg_arr[i][j] == 150: # Heath
                Suitability[i][j] = 75 
            elif Veg_arr[i][j] == 175: # Montane habitat
                Suitability[i][j] = 20 
            elif Veg_arr[i][j] == 200: # Rock and quarry
                Suitability[i][j] = 30 
            elif Veg_arr[i][j] == 225: # Urban
                Suitability[i][j] = 80
    Display_fields = Geometry.GeometricFeature(Suitability, min_area,max_area ,aspect_ratio ,agri_area_limit)
    f = open('fields_arr', 'w')
    numpy.save(f,Display_fields)
    f.close()
    pylab.imsave(output_dir+"/fields.png",Display_fields)
    time2 = time.time()
    print "time taken", time2-time1
    shutil.rmtree(fullpath+'/ascii_files')
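The class-selection loop above relies on a small property of ndimage.maximum_position: on a plain 1-D array it returns a one-element tuple holding the index of the first maximum, i.e. it acts as an argmax over the predicted class probabilities. A minimal, self-contained sketch (the probability values below are made up, not taken from the model):

import numpy as np
from scipy import ndimage

# Hypothetical per-pixel class probabilities, like one row of the predict() output above
probs = np.array([0.10, 0.05, 0.60, 0.25])
pos = ndimage.maximum_position(probs)   # one-element tuple, here (2,)
assert pos[0] == np.argmax(probs)       # same index numpy's argmax would give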
Пример #53
0
    def execute(self, runparams):
        self.logger.fine("I'm in model.py!!")

        # get parameters qualified by input and output file paths
        params = runparams['parameters']

        # This is how to run R code directly if you plan to:
        # however, in this example, R is called from python using rpy.
        # self.run_r_code("example.R", runparams)
        """
        It imports all necessary python modules required for landscape simulation.
        It then performs the following:
        1. Gets all the parameters required for simulation from parameter.yaml file. 
        2. calls DEM_creator() --> for generating DEM grid
        3. Iteratively operate on DEM grid and do the following
           3.1 Remove single cell pits by calling Single_Cell_PitRemove()
           3.2 Get flow dirn using 9x9 window by calling Get_Flow_Dirn_using_9x9_window()  
           3.3 Get flow dirn using 3x3 window by calling Flow_Dirn_3x3() for catchment extraction
           3.4 Extract the catchment and do depression filling using CatchmentExtraction()
           3.5 Again get flow direction using Get_Flow_Dirn_using_9x9_window() after depression filling
           3.6 Perform flow accumulation by calling Flow_accumulation() 
           3.7 Do the erosion by calling Erosion()
        4. Generate a Decision tree for land_cover allocation by calling DecisionTree()
        5. Assign the vegetation class to DEM by calling VegetationClassify()
        6. Generate some agricultural field by calling GeometricFeature()
        """
        time1 = time.time()
        # Get the parameters from the parameter.yaml file
        
        H = [float(x) for x in params['H'].split(',')]
        
        H_wt = [float(x) for x in params['H_wt'].split(',')]

        seed = [float(x) for x in params['seed'].split(',')]

        elev_range = [float(x) for x in params['elev_range'].split(',')] 

##        Three_DplotDEM = stream["Three_DplotDEM"]

##        elev_filename = stream["training_data_elev"]
##        landcover_filename = stream["training_data_landcover"]
##        river_filename = stream["training_data_river"]

##        next_patch_orientation_probability = stream['next_patch_orientation_probability']
        gradient_values = [params['north'], params['north_west'], params['west'], params['south_west'], params['south'], params['south_east'], params['east'], params['north_east'], params['center']]

        self.logger.fine ("Running simulation with following parameters")
        self.logger.fine ("Counter %d" % params['counter'])
        self.logger.fine ("H %s" % H)
        self.logger.fine ("H_wt %s" % H_wt)
        self.logger.fine ("seed %s" % seed)
        self.logger.fine ("elev_range %s" % elev_range)
        self.logger.fine ("river_drop %s" % params['river_drop'])
        self.logger.fine ("max_level %s" % params['max_level'])
        self.logger.fine ("DEMcreator_option %s" % params['DEMcreator_option'])
        self.logger.fine ("Gradient values %s" % gradient_values)
        self.logger.fine ("response %s" % params['response'])
        self.logger.fine ("min_area %d" % params['min_area'])
        self.logger.fine ("max_area %d" % params['max_area'])
        self.logger.fine ("aspect_ratio %s" % params['aspect_ratio'])
        self.logger.fine ("agri_area_limit %s" % params['agri_area_limit'])
     
        #Generate DEM using FM2D/SS algorithm by calling DEM_creator(args...) function")
        time0 = time.time()
        self.logger.fine("Creating DEMs")
        l_DEM_Result = Hydro_Network.DEM_creator(H, H_wt, seed, elev_range, params['max_level'], gradient_values, params['DEMcreator_option'])
        #Write result to Output file
        l_file_name = "%sOriginal_DEM" % (params['outputDir'])
        self.logger.fine(l_file_name)
        pylab.imsave(l_file_name, l_DEM_Result[0])
        for i in range(0,len(l_DEM_Result[1])):
            l_file_name = "%s%s" % (params['outputDir'],l_DEM_Result[2][i])#TODO(include parameter in filename)l_DEM_Result[3][i][0],l_DEM_Result[3][i][1])
            self.logger.fine(l_file_name)
            pylab.imsave(l_file_name, l_DEM_Result[1][i])

        l_DEM = l_DEM_Result[0]
        for iteration in range(0,params['counter']):
            #Remove sink using 3x3 window by calling Single_Cell_PitRemove(originalDEM, no_of_itr)
            l_DEM = Hydro_Network.Single_Cell_PitRemove(l_DEM, no_of_itr = 6)
            (l_x_len,l_y_len) = l_DEM.shape
            l_max_posn = ndimage.maximum_position(l_DEM)
            l_Flow_dirn_arr = numpy.zeros((l_x_len,l_y_len,2), dtype="int" )
            #l_Flow_arr will be used for the purpose of catchment extraction
            l_Flow_arr = numpy.zeros((l_x_len, l_y_len), dtype = "uint8")
            l_River_arr = numpy.ones((l_x_len, l_y_len), dtype = "int")
            l_pit_list = [] #Not required now
            ( l_pit_list, l_Flow_dirn_arr, l_DEM ) = Hydro_Network.Get_Flow_Dirn_using_9x9_window(l_DEM, l_Flow_dirn_arr, l_pit_list)
            # call Flow_Dirn_3x3(l_DEM, l_Flow_arr , l_pit_list) for the purpose of catchment extraction
            l_pit_list = [] #Required for catchment extraction
            ( l_pit_list, l_Flow_arr ) = Hydro_Network.Flow_Dirn_3x3(l_DEM, l_Flow_arr , l_pit_list) 
            #Catchment extraction, calling CatchmentExtraction(l_pit_list, l_DEM_arr, l_max_posn)
            (l_DEM, l_Found_arr, l_Catchment_boundary_arr) = Hydro_Network.CatchmentExtraction(l_pit_list, l_DEM, l_Flow_arr, l_max_posn)
            #Write result to Output file
            l_file_name = "%s/Catchment%s" % (params['outputDir'], iteration+1)
            pylab.imsave(l_file_name, l_Found_arr)
            l_file_name = "%s/Catchment_Boundary%s" % (params['outputDir'], iteration+1)
            pylab.imsave(l_file_name, l_Catchment_boundary_arr)        
            #Assigning flow direction again after catchment extraction and depression filling
            ( l_pit_list, l_Flow_dirn_arr, l_DEM ) = Hydro_Network.Get_Flow_Dirn_using_9x9_window(l_DEM, l_Flow_dirn_arr , l_pit_list)
            #Calculate flow accumulation by Calling Flow_accumulation(l_Flow_dirn_arr ,l_River_arr , l_DEM)
            l_River_arr = Hydro_Network.Flow_accumulation(l_Flow_dirn_arr ,l_River_arr, l_DEM)
            #Write result to Output file
            l_file_name = "%s/River%s" % (params['outputDir'],iteration+1)
            pylab.imsave(l_file_name, l_River_arr)
            #"Eroding the DEM based on Distance form River ...Calling Erosion(l_River_arr,l_DEM_arr,river_drop)
            (l_DEM, l_Distance_arr) = Hydro_Network.Erosion(l_River_arr, l_DEM, params['river_drop'])  
            #Write result to Output file
            l_file_name = "%s/ErodedDEM%s" % (params['outputDir'], iteration+1)
            pylab.imsave(l_file_name, l_DEM)
            l_file_name = "%s/RiverDistance%s" % (params['outputDir'], iteration+1)
            pylab.imsave(l_file_name, l_Distance_arr)
        
        if params['Three_DplotDEM'] == 'y' or params['Three_DplotDEM'] == 'Y':
            surface_plot.plot(l_DEM)
        time2 = time.time()
        self.logger.fine ("Time taken in Erosion modeling: %3f seconds" % (time2 - time1))

        if (params['response'] == 'y') or (params['response'] == 'Y'):
            DecisionTree.DecisionTree(params['outputDir'], params['elev_filename'], params['landcover_filename'], params['river_filename'])
            time3 = time.time()
            self.logger.fine ("Time taken to generate decision tree is %3f" % (time3 - time2))
Пример #54
0
    def run(self, workspace):
        if self.show_window:
            workspace.display_data.col_labels = ("Image", "Object", "Feature", "Mean", "Median", "STD")
            workspace.display_data.statistics = statistics = []
        for image_name in [img.name for img in self.images]:
            image = workspace.image_set.get_image(image_name.value, must_be_grayscale=True)
            for object_name in [obj.name for obj in self.objects]:
                # Need to refresh image after each iteration...
                img = image.pixel_data
                if image.has_mask:
                    masked_image = img.copy()
                    masked_image[~image.mask] = 0
                else:
                    masked_image = img
                objects = workspace.object_set.get_objects(object_name.value)
                nobjects = objects.count
                integrated_intensity = np.zeros((nobjects,))
                integrated_intensity_edge = np.zeros((nobjects,))
                mean_intensity = np.zeros((nobjects,))
                mean_intensity_edge = np.zeros((nobjects,))
                std_intensity = np.zeros((nobjects,))
                std_intensity_edge = np.zeros((nobjects,))
                min_intensity = np.zeros((nobjects,))
                min_intensity_edge = np.zeros((nobjects,))
                max_intensity = np.zeros((nobjects,))
                max_intensity_edge = np.zeros((nobjects,))
                mass_displacement = np.zeros((nobjects,))
                lower_quartile_intensity = np.zeros((nobjects,))
                median_intensity = np.zeros((nobjects,))
                mad_intensity = np.zeros((nobjects,))
                upper_quartile_intensity = np.zeros((nobjects,))
                cmi_x = np.zeros((nobjects,))
                cmi_y = np.zeros((nobjects,))
                max_x = np.zeros((nobjects,))
                max_y = np.zeros((nobjects,))
                for labels, lindexes in objects.get_labels():
                    lindexes = lindexes[lindexes != 0]
                    labels, img = cpo.crop_labels_and_image(labels, img)
                    _, masked_image = cpo.crop_labels_and_image(labels, masked_image)
                    outlines = cpmo.outline(labels)

                    if image.has_mask:
                        _, mask = cpo.crop_labels_and_image(labels, image.mask)
                        masked_labels = labels.copy()
                        masked_labels[~mask] = 0
                        masked_outlines = outlines.copy()
                        masked_outlines[~mask] = 0
                    else:
                        masked_labels = labels
                        masked_outlines = outlines

                    lmask = (masked_labels > 0) & np.isfinite(img)  # Ignore NaNs, Infs
                    has_objects = np.any(lmask)
                    if has_objects:
                        limg = img[lmask]
                        llabels = labels[lmask]
                        mesh_y, mesh_x = np.mgrid[0 : masked_image.shape[0], 0 : masked_image.shape[1]]
                        mesh_x = mesh_x[lmask]
                        mesh_y = mesh_y[lmask]
                        lcount = fix(nd.sum(np.ones(len(limg)), llabels, lindexes))
                        integrated_intensity[lindexes - 1] = fix(nd.sum(limg, llabels, lindexes))
                        mean_intensity[lindexes - 1] = integrated_intensity[lindexes - 1] / lcount
                        std_intensity[lindexes - 1] = np.sqrt(
                            fix(nd.mean((limg - mean_intensity[llabels - 1]) ** 2, llabels, lindexes))
                        )
                        min_intensity[lindexes - 1] = fix(nd.minimum(limg, llabels, lindexes))
                        max_intensity[lindexes - 1] = fix(nd.maximum(limg, llabels, lindexes))
                        # Compute the position of the intensity maximum
                        max_position = np.array(fix(nd.maximum_position(limg, llabels, lindexes)), dtype=int)
                        max_position = np.reshape(max_position, (max_position.shape[0],))
                        max_x[lindexes - 1] = mesh_x[max_position]
                        max_y[lindexes - 1] = mesh_y[max_position]
                        # The mass displacement is the distance between the center
                        # of mass of the binary image and of the intensity image. The
                        # center of mass is the average X or Y for the binary image
                        # and the sum of X or Y * intensity / integrated intensity
                        cm_x = fix(nd.mean(mesh_x, llabels, lindexes))
                        cm_y = fix(nd.mean(mesh_y, llabels, lindexes))

                        i_x = fix(nd.sum(mesh_x * limg, llabels, lindexes))
                        i_y = fix(nd.sum(mesh_y * limg, llabels, lindexes))
                        cmi_x[lindexes - 1] = i_x / integrated_intensity[lindexes - 1]
                        cmi_y[lindexes - 1] = i_y / integrated_intensity[lindexes - 1]
                        diff_x = cm_x - cmi_x[lindexes - 1]
                        diff_y = cm_y - cmi_y[lindexes - 1]
                        mass_displacement[lindexes - 1] = np.sqrt(diff_x * diff_x + diff_y * diff_y)
                        #
                        # Sort the intensities by label, then intensity.
                        # For each label, find the index above and below
                        # the 25%, 50% and 75% mark and take the weighted
                        # average.
                        #
                        order = np.lexsort((limg, llabels))
                        areas = lcount.astype(int)
                        indices = np.cumsum(areas) - areas
                        for dest, fraction in (
                            (lower_quartile_intensity, 1.0 / 4.0),
                            (median_intensity, 1.0 / 2.0),
                            (upper_quartile_intensity, 3.0 / 4.0),
                        ):
                            qindex = indices.astype(float) + areas * fraction
                            qfraction = qindex - np.floor(qindex)
                            qindex = qindex.astype(int)
                            qmask = qindex < indices + areas - 1
                            qi = qindex[qmask]
                            qf = qfraction[qmask]
                            dest[lindexes[qmask] - 1] = limg[order[qi]] * (1 - qf) + limg[order[qi + 1]] * qf
                            #
                            # In some situations (e.g. only 3 points), there may
                            # not be an upper bound.
                            #
                            qmask = (~qmask) & (areas > 0)
                            dest[lindexes[qmask] - 1] = limg[order[qindex[qmask]]]
                        #
                        # Once again, for the MAD
                        #
                        madimg = np.abs(limg - median_intensity[llabels - 1])
                        order = np.lexsort((madimg, llabels))
                        qindex = indices.astype(float) + areas / 2.0
                        qfraction = qindex - np.floor(qindex)
                        qindex = qindex.astype(int)
                        qmask = qindex < indices + areas - 1
                        qi = qindex[qmask]
                        qf = qfraction[qmask]
                        mad_intensity[lindexes[qmask] - 1] = madimg[order[qi]] * (1 - qf) + madimg[order[qi + 1]] * qf
                        qmask = (~qmask) & (areas > 0)
                        mad_intensity[lindexes[qmask] - 1] = madimg[order[qindex[qmask]]]

                    emask = masked_outlines > 0
                    eimg = img[emask]
                    elabels = labels[emask]
                    has_edge = len(eimg) > 0
                    if has_edge:
                        ecount = fix(nd.sum(np.ones(len(eimg)), elabels, lindexes))
                        integrated_intensity_edge[lindexes - 1] = fix(nd.sum(eimg, elabels, lindexes))
                        mean_intensity_edge[lindexes - 1] = integrated_intensity_edge[lindexes - 1] / ecount
                        std_intensity_edge[lindexes - 1] = np.sqrt(
                            fix(nd.mean((eimg - mean_intensity_edge[elabels - 1]) ** 2, elabels, lindexes))
                        )
                        min_intensity_edge[lindexes - 1] = fix(nd.minimum(eimg, elabels, lindexes))
                        max_intensity_edge[lindexes - 1] = fix(nd.maximum(eimg, elabels, lindexes))
                m = workspace.measurements
                for category, feature_name, measurement in (
                    (INTENSITY, INTEGRATED_INTENSITY, integrated_intensity),
                    (INTENSITY, MEAN_INTENSITY, mean_intensity),
                    (INTENSITY, STD_INTENSITY, std_intensity),
                    (INTENSITY, MIN_INTENSITY, min_intensity),
                    (INTENSITY, MAX_INTENSITY, max_intensity),
                    (INTENSITY, INTEGRATED_INTENSITY_EDGE, integrated_intensity_edge),
                    (INTENSITY, MEAN_INTENSITY_EDGE, mean_intensity_edge),
                    (INTENSITY, STD_INTENSITY_EDGE, std_intensity_edge),
                    (INTENSITY, MIN_INTENSITY_EDGE, min_intensity_edge),
                    (INTENSITY, MAX_INTENSITY_EDGE, max_intensity_edge),
                    (INTENSITY, MASS_DISPLACEMENT, mass_displacement),
                    (INTENSITY, LOWER_QUARTILE_INTENSITY, lower_quartile_intensity),
                    (INTENSITY, MEDIAN_INTENSITY, median_intensity),
                    (INTENSITY, MAD_INTENSITY, mad_intensity),
                    (INTENSITY, UPPER_QUARTILE_INTENSITY, upper_quartile_intensity),
                    (C_LOCATION, LOC_CMI_X, cmi_x),
                    (C_LOCATION, LOC_CMI_Y, cmi_y),
                    (C_LOCATION, LOC_MAX_X, max_x),
                    (C_LOCATION, LOC_MAX_Y, max_y),
                ):
                    measurement_name = "%s_%s_%s" % (category, feature_name, image_name.value)
                    m.add_measurement(object_name.value, measurement_name, measurement)
                    if self.show_window and len(measurement) > 0:
                        statistics.append(
                            (
                                image_name.value,
                                object_name.value,
                                feature_name,
                                np.round(np.mean(measurement), 3),
                                np.round(np.median(measurement), 3),
                                np.round(np.std(measurement), 3),
                            )
                        )
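The per-object maximum location above uses the labelled form of maximum_position: passing labels and a sequence index returns one coordinate per requested label (the module itself applies it to the flattened masked pixels). A hedged, standalone sketch of the simpler 2-D variant with a toy image, not CellProfiler data:

import numpy as np
from scipy import ndimage

img = np.array([[1, 5, 2, 0],
                [0, 3, 9, 1],
                [4, 0, 0, 7]], dtype=float)
labels = np.array([[1, 1, 2, 2],
                   [1, 1, 2, 2],
                   [3, 3, 3, 3]])
# One (row, col) per requested label, in full-image coordinates
positions = np.array(ndimage.maximum_position(img, labels, index=[1, 2, 3]))
assert (positions == np.array([[0, 1], [1, 2], [2, 3]])).all()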
Пример #55
0
    def select_proposals_within_parcels(self, nmax=2, weight_string=None, compete_among_types=False, filter_threshold=75, 
                                        MU_same_weight=False, transpose_interpcl_weight=True):
        # Allow only nmax proposals per parcel in order not to disadvantage parcels with a small number of proposals.
        # It takes proposals with the highest weights.
        #parcels_with_proposals = unique(self.proposal_set['parcel_id'])
        #parcel_set = self.dataset_pool.get_dataset('parcel')
        if weight_string is not None:
            within_parcel_weights = self.proposal_set.compute_variables([weight_string], dataset_pool=self.dataset_pool)
        else:
            within_parcel_weights = self.weight
        
        egligible = logical_and(self.weight > 0, 
                                self.proposal_set['status_id'] == self.proposal_set.id_tentative)
        wegligible = where(egligible)[0]
        if wegligible.size <=0:
            return
        #parcels_with_proposals = unique(self.proposal_set['parcel_id'][wegligible])
        #min_type = {}
        #egligible_proposals = {}
        tobechosen_ind = ones(wegligible.size).astype('bool8')
        if not compete_among_types:
            for key in self.column_names:
                utypes_all = unique(self.proposal_component_set[key])
                categories = zeros(self.proposal_set.size(), dtype='int32')
                for btype in utypes_all:
                    w = where(ndimage.sum(self.proposal_component_set[key] == btype,
                                          labels=self.proposal_component_set['proposal_id'], 
                                          index=self.proposal_set.get_id_attribute()
                                          ) == self.proposal_set["number_of_components"])[0]
                    categories[w] = btype
                # categories equal to zero mean a mixed-use type with components of different types

                utypes = unique(categories[wegligible])           
                for value in utypes:
                    type_is_value_ind = categories[wegligible]==value
                    for i in range(nmax):
                        parcels_with_proposals = (unique(self.proposal_set['parcel_id'][wegligible][where(type_is_value_ind)])).astype(int32)
                        if parcels_with_proposals.size <= 0:
                            continue
                        labels = (self.proposal_set['parcel_id'][wegligible])*type_is_value_ind               
                        chosen_prop = array(maximum_position(within_parcel_weights[wegligible], 
                                            labels=labels, 
                                            index=parcels_with_proposals)).flatten().astype(int32)               
                        egligible[wegligible[chosen_prop]] = False
                        type_is_value_ind[chosen_prop] = False
        else:
            parcels_with_proposals = unique(self.proposal_set['parcel_id'][wegligible]).astype(int32)
            max_prop = array(maximum_position(within_parcel_weights[wegligible], 
                                            labels=self.proposal_set['parcel_id'][wegligible], 
                                            index=parcels_with_proposals)).flatten().astype(int32)                                            
            max_value_by_parcel = within_parcel_weights[wegligible][max_prop]
            incompetition = ones(wegligible.size, dtype='bool8')
            incompetition[max_prop] = False
            egligible[wegligible[max_prop]] = False            
            for i in range(nmax-1):
                labels = (self.proposal_set['parcel_id'][wegligible])*incompetition 
                valid_parcels = where(in1d(parcels_with_proposals, self.proposal_set['parcel_id'][wegligible][where(incompetition)]))[0]
                if valid_parcels.size <= 0:
                    break
                chosen_prop = array(maximum_position(within_parcel_weights[wegligible], 
                                            labels=labels, 
                                            index=parcels_with_proposals[valid_parcels])).flatten().astype(int32)
                percent = within_parcel_weights[wegligible][chosen_prop]/(max_value_by_parcel[valid_parcels]/100.0)
                where_lower = where(in1d(self.proposal_set['parcel_id'][wegligible], parcels_with_proposals[valid_parcels][percent <= filter_threshold]))[0]
                egligible[wegligible[setdiff1d(chosen_prop, where_lower)]] = False   # proposals with egligible=True get eliminated, so we don't want to set it to False for the where_lower ones
                incompetition[union1d(chosen_prop, where_lower)] = False
                if incompetition.sum() <= 0:
                    break
             
            self.proposal_set['status_id'][where(egligible)] = self.proposal_set.id_eliminated_in_within_parcel_selection
            if MU_same_weight:
                # Set weights of mixed-use proposals within the same parcel to the same value
                parcels = self.dataset_pool.get_dataset('parcel')
#                parcels.compute_variables(['mu_ind = parcel.aggregate(numpy.logical_or(development_project_proposal_component.building_type_id==4, development_project_proposal_component.building_type_id==12) + numpy.logical_or(development_project_proposal_component.building_type_id==3, development_project_proposal_component.building_type_id==13), intermediates=[development_project_proposal])'], 
#                                                    dataset_pool=self.dataset_pool)
#                pcl_ids = parcels.get_id_attribute()[parcels['mu_ind'] > 1]
#                is_mu = logical_and(logical_and(self.weight > 0, 
#                                self.proposal_set['status_id'] == self.proposal_set.id_tentative),
#                                       in1d(self.proposal_set['parcel_id'], pcl_ids))
#                where_mu = where(is_mu)[0]
#                if where_mu.size <= 0:
#                    return
#                trans_weights = self.weight[where_mu]
#                if transpose_interpcl_weight:
#                    trans_weights = log(trans_weights)
#                pcl_idx = parcels.get_id_index(self.proposal_set['parcel_id'][where_mu])
#                upcl_idx = unique(pcl_idx)
#                weight_mean = array(ndimage_mean(trans_weights, labels=pcl_idx,  index=upcl_idx))
#                if transpose_interpcl_weight:
#                    weight_mean = exp(weight_mean)
#                weight_mean_tmp = zeros(upcl_idx.max()+1).astype(weight_mean.dtype)
#                weight_mean_tmp[upcl_idx]=weight_mean
#                self.weight[where_mu]=weight_mean_tmp[pcl_idx]
                self.proposal_set.compute_variables(['is_mfres = development_project_proposal.aggregate(numpy.logical_or(development_project_proposal_component.building_type_id==4, development_project_proposal_component.building_type_id==12))'],
                                                    dataset_pool=self.dataset_pool)
                parcels.compute_variables(['mu_ind = (parcel.aggregate(development_project_proposal.is_mfres)>0) * (parcel.mix_split_id > 0)'], 
                                                    dataset_pool=self.dataset_pool)
                pcl_ids = parcels.get_id_attribute()[parcels['mu_ind'] > 0]
                egligible_props = logical_and(self.weight > 0, logical_and(
                                self.proposal_set['status_id'] == self.proposal_set.id_tentative,
                                self.proposal_set['is_mfres']>0))
                where_prop_to_modify = where(logical_and(egligible_props,
                                       in1d(self.proposal_set['parcel_id'], pcl_ids)))[0]
                if where_prop_to_modify.size <= 0:
                    return
                upcl = unique(self.proposal_set['parcel_id'][where_prop_to_modify])               
                npcl_to_modify = int(upcl.size/10.0)
                if npcl_to_modify == 0:
                    return
                pcls_to_modify = sample_noreplace(upcl, npcl_to_modify)
                where_prop_to_modify_final = where(logical_and(egligible_props,
                                       in1d(self.proposal_set['parcel_id'], pcls_to_modify)))[0]
                trans_weights = self.weight[where_prop_to_modify_final]
                if transpose_interpcl_weight:
                    trans_weights = log(trans_weights)
                #trans_weights = 1.2*trans_weights
                if transpose_interpcl_weight:
                    trans_weights = exp(trans_weights)
                self.weight[where_prop_to_modify_final] = trans_weights
            return
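The proposal-selection trick above applies maximum_position to a 1-D weight vector with parcel ids as labels, so it returns, per parcel, the position of that parcel's highest-weight proposal. A rough sketch with made-up weights and two parcels (10 and 20), mirroring the array(...).flatten() pattern used above:

import numpy as np
from scipy.ndimage import maximum_position

weights = np.array([0.2, 0.9, 0.4, 0.7, 0.1, 0.8])
parcel_ids = np.array([10, 10, 10, 20, 20, 20])   # which parcel each proposal belongs to
best = np.array(maximum_position(weights, labels=parcel_ids, index=[10, 20])).flatten()
# flat index 1 is the best proposal in parcel 10, index 5 the best in parcel 20
assert best.tolist() == [1, 5]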
            
Пример #56
0
def estimate_pk_parms_2d(x,y,f,pktype):
    """
    Gives initial guess of parameters for analytic fit of two dimensional peak
    data.

    Required Arguments:
    x -- (n x 0) ndarray of coordinate positions for dimension 1 (numpy.meshgrid formatting)
    y -- (n x 0) ndarray of coordinate positions for dimension 2 (numpy.meshgrid formatting)
    f -- (n x 0) ndarray of intensity measurements at coordinate positions x and y
    pktype -- string, type of analytic function that will be used to fit the data,
    current options are "gaussian", "gaussian_rot" (gaussian with arbitrary axes) and 
    "split_pvoigt_rot" (split psuedo voigt with arbitrary axes)
    

    Outputs:
    p -- (m) ndarray containing initial guesses for parameters for the input peaktype
    (see peakfunction help for more information)
    """


    
    bg0=np.mean([f[0,0],f[-1,0],f[-1,-1],f[0,-1]])
    bg1x=(np.mean([f[-1,-1],f[0,-1]])-np.mean([f[0,0],f[-1,0]]))/(x[0,-1]-x[0,0])
    bg1y=(np.mean([f[-1,-1],f[-1,0]])-np.mean([f[0,0],f[0,-1]]))/(y[-1,0]-y[0,0])
    
    fnobg=f-(bg0+bg1x*x+bg1y*y)    
    
    labels,numlabels=imgproc.label(fnobg>np.max(fnobg)/2.)
    
    #looks for the largest peak
    areas=np.zeros(numlabels)
    for ii in np.arange(1,numlabels+1,1):
        areas[ii-1]= np.sum(labels==ii)
    
    peakIndex=np.argmax(areas)+1  
    
    
#    #currently looks for peak closest to center
#    dist=np.zeros(numlabels)
#    for ii in np.arange(1,numlabels+1,1):
#        dist[ii-1]= ######
#    
#    peakIndex=np.argmin(dist)+1
    
    FWHMx=np.max(x[labels==peakIndex])-np.min(x[labels==peakIndex])
    FWHMy=np.max(y[labels==peakIndex])-np.min(y[labels==peakIndex])
    
    coords=imgproc.maximum_position(fnobg, labels=labels, index=peakIndex)
    A=imgproc.maximum(fnobg, labels=labels, index=peakIndex)
    x0=x[coords]
    y0=y[coords]
    
    if pktype=='gaussian':
        p=[A,x0,y0,FWHMx,FWHMy,bg0,bg1x,bg1y]
    elif pktype=='gaussian_rot':
        p=[A,x0,y0,FWHMx,FWHMy,0.,bg0,bg1x,bg1y]
    elif pktype=='split_pvoigt_rot':
        p=[A,x0,y0,FWHMx,FWHMx,FWHMy,FWHMy,0.5,0.5,0.5,0.5,0.,bg0,bg1x,bg1y]
        
    p=np.array(p)
    return p
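The peak-guess routine above thresholds the background-subtracted data, labels the connected blobs, and then queries the chosen blob with a scalar index, which makes maximum_position return a single coordinate tuple and maximum the peak height. A brief sketch with toy data (one 5x5 blob), assuming imgproc is the usual scipy.ndimage import:

import numpy as np
from scipy import ndimage

f = np.zeros((5, 5))
f[2, 2] = 2.5
f[2, 3] = 4.0
labels, nlabels = ndimage.label(f > 1.0)                        # a single connected blob here
peak_pos = ndimage.maximum_position(f, labels=labels, index=1)  # single (row, col) tuple
peak_val = ndimage.maximum(f, labels=labels, index=1)
assert tuple(peak_pos) == (2, 3) and peak_val == 4.0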
Пример #57
0
    def execute(self, runparams):
        # the logging levels are (in descending order): severe, warning, info, config, fine,
        # finer, finest
        # by default tzar will log all at info and above to the console, and all logging to a logfile.
        # if the --verbose flag is specified, all logging will also go to the console.
        # self.logger.fine("I'm in model.py!!")


        params = runparams['parameters']

        
        # get parameters qualified by input and output file paths
        # This is only required if you want to read / write input / output files from python.
        #qualifiedparams = runparams.getQualifiedParams(self.inputpath, self.outputpath)
        qualifiedparams = params
        # gets the variables, with (java) decimal values converted to python decimals
        # this is useful if you want to use arithmetic operations within python.
        # variables = self.get_decimal_params(runparams)
        variables = params


        

# Line below is for testing outside the framework
# variables = dict(window_size=5, ascii_dem="Output/DEM.asc", output_features="Output/surfaceFeatures.srf", landserf_output="Output/landserf_results.txt", max_level=9, sigma=1, seed=0, normalise=True, H1=0.7, H2=0.65, H3=0.4, H1wt=0.7, H2wt=0.2, H3wt=0.1, elev_min=0, elev_max=1309, erosion_num=1, river_drop=5)

        # self.run_r_code("example.R", runparams)

        # Create a holder for the results of the erosion iterations
        erosion_runs = variables['erosion_num'] # TODO not really necessary
        erodedDEMs = []
        
        # Create lists of file names and file titles - these will be populated as we go along, then written out to the 'index.html' file for quick views of results
        erodedDEMfileNames = [None] * erosion_runs
        erodedDEMfileTitles = [None] * erosion_runs
        catchmentFileNames = [None] * erosion_runs
        catchmentFileTitles = [None] * erosion_runs

        DEMinputFileNames = [None] * 3
        DEMinputFileTitles = [None] * 3
        DEMinputFileTitles[0] = "H %0.2f, wt %0.2f" % (variables['H1'],variables['H1wt'])
        DEMinputFileTitles[1] = "H %0.2f, wt %0.2f" % (variables['H2'],variables['H2wt'])
        DEMinputFileTitles[2] = "H %0.2f, wt %0.2f" % (variables['H3'],variables['H3wt'])
        # TODO - when numbers of inputs vary, make this a loop that responds to the number of H-values

        # Create lists of H-values, weights and seeds (three of each here)
        H_values = [variables['H1'], variables['H2'], variables['H3']]
        H_weights = [variables['H1wt'], variables['H2wt'], variables['H3wt']]
        seeds = [variables['seed1'], variables['seed2'], variables['seed3']]
        elev_range = [variables['elev_min'], variables['elev_max']]

        # Call the DEM creator method which will create a composite elevation model
        generated_DEMs = Hydro_Network.DEM_creator(H_values, H_weights, seeds, elev_range, variables['max_level'], variables['DEMcreator_option'])

        for i in range(0,len(generated_DEMs[1])):
            file_name = "%s/%s" % (qualifiedparams['output_dir'],generated_DEMs[2][i])
            pylab.imsave(file_name, generated_DEMs[1][i])
            DEMinputFileNames[i] = "%s.png" % (generated_DEMs[2][i])

        # Run the hydro erosion the specified number of times.
        erodedDEMs.append(generated_DEMs[0])

        wsz = variables['window_size']
        
        # Open file to write out Landserf results
        f = open(qualifiedparams['landserf_output'], 'w')
        
        f.write("FractalDimension,VariogramGradient,VariogramIntercept,Moran,Kurtosis,Skew,")
        
        for x in range(0,variables['window_count']):
            
            # Write headers
            f.write(("Pits%d,Channels%d,Passes%d,Ridges%d,Peaks%d,Planes%d") % (wsz,wsz,wsz,wsz,wsz,wsz))
            if x<(variables['window_count']-1):
                f.write(",")
            else:
                f.write("\n")
            wsz = wsz + variables['window_step']
        # Close file
        f.close()

        for i in range(1,(erosion_runs+1)):

            newDEM = erodedDEMs[i-1]
                                                
            # Create file names for writing out to HTML
            erodedDEMfileNames[i-1] = "Combined_eroded_DEM%d" % (i)
            catchmentFileNames[i-1] = "Catchment%d" % (i)
            erodedDEMfileTitles[i-1] = "Erosion step %d" % (i)
            catchmentFileTitles[i-1] = "Catchments %d" % (i)
          
            #Remove sink using 3x3 window by calling Single_Cell_PitRemove(originalDEM, no_of_itr)
            newDEM = Hydro_Network.Single_Cell_PitRemove(newDEM, no_of_itr = 6)
            (x_len,y_len) = newDEM.shape
            max_posn = ndimage.maximum_position(newDEM)
            Flow_dirn_arr = numpy.zeros((x_len,y_len,2), dtype="int" )
            #Flow_arr will be used for the purpose of catchment extraction
            Flow_arr = numpy.zeros((x_len, y_len), dtype = "uint8")
            River_arr = numpy.ones((x_len, y_len), dtype = "int")
            pit_list = [] #Not required now
            ( pit_list, Flow_dirn_arr, newDEM ) = Hydro_Network.Get_Flow_Dirn_using_9x9_window(newDEM, Flow_dirn_arr, pit_list)
            # call Flow_Dirn_3x3(DEM, Flow_arr , pit_list) for the purpose of catchment extraction
            pit_list = [] #Required for catchment extraction
            ( pit_list, Flow_arr ) = Hydro_Network.Flow_Dirn_3x3(newDEM, Flow_arr , pit_list)
            
            #Catchment extraction, calling CatchmentExtraction(pit_list, DEM_arr, max_posn)
            (newDEM, Found_arr, Catchment_boundary_arr) = Hydro_Network.CatchmentExtraction(pit_list, newDEM, Flow_arr, max_posn)
            #Write result to Output file
            file_name = "%s/%s" % (qualifiedparams['output_dir'], catchmentFileNames[i-1])
            pylab.imsave(file_name, Found_arr)
            catchmentFileNames[i-1] += '.png'
            
            #file_name = "%s/Catchment_Boundary%s" % (qualifiedparams['output_dir'], i)
            #pylab.imsave(file_name, Catchment_boundary_arr)
            
            #Assigning flow direction again after catchment extraction and depression filling
            ( pit_list, Flow_dirn_arr, newDEM ) = Hydro_Network.Get_Flow_Dirn_using_9x9_window(newDEM, Flow_dirn_arr , pit_list)
            
            #Calculate flow accumulation by Calling Flow_accumulation(Flow_dirn_arr ,River_arr , DEM)
            River_arr = Hydro_Network.Flow_accumulation(Flow_dirn_arr ,River_arr, newDEM)
            #Write result to Output file
            #file_name = "%s/River%s" % (qualifiedparams['output_dir'],i)
            #pylab.imsave(file_name, River_arr)
            
            #"Eroding the DEM based on Distance from River ...Calling Erosion(River_arr,DEM_arr,river_drop)
            (newDEM, Distance_arr) = Hydro_Network.Erosion(River_arr, newDEM, variables['river_drop'])  
            #Write result to Output file
            file_name = "%s/%s" % (qualifiedparams['output_dir'], erodedDEMfileNames[i-1])
            pylab.imsave(file_name, newDEM)
            erodedDEMfileNames[i-1] += '.png'
            #file_name = "%s/RiverDistance%s" % (qualifiedparams['output_dir'], i)
            #pylab.imsave(file_name, Distance_arr)

            # Add this DEM to the list of eroded results
            erodedDEMs.append(newDEM)

            # Generate Landserf stats for this phase
            Morphometry.calculate_surface_features(qualifiedparams['ascii_dem'], erodedDEMs[i], qualifiedparams['output_features'], variables['window_size'], variables['window_count'], variables['window_step'], qualifiedparams['landserf_output']) 

            
        # Now we should have the whole sequence of erosions - let's save them and see how it looks
        DEM_filename = "%s/DEM_before_erosion" % (qualifiedparams['output_dir'])
        pylab.imsave(DEM_filename, erodedDEMs[0])

        #---------------------------------------------------------------------------
        # Write out details to HTML tables and hyperlinks
        index_file = "%s/index.html" % qualifiedparams['output_dir']
        indexF = SummaryFileWriter.open_file(index_file)   
        SummaryFileWriter.writeHTMLTop("Run results", indexF)

        SummaryFileWriter.writeURL("Collated Landserf output", "output.csv", indexF)
        SummaryFileWriter.writeURL("Run parameters", "parameters.yaml", indexF)
        SummaryFileWriter.writeURL("Run log", "logging.log", indexF)

        SummaryFileWriter.writeHTMLTable("Input DEMs", DEMinputFileNames, DEMinputFileTitles, 8, indexF)
        SummaryFileWriter.writeHTMLTable("Erosion steps", erodedDEMfileNames, erodedDEMfileTitles, 8, indexF)
        SummaryFileWriter.writeHTMLTable("Catchment evolution", catchmentFileNames, catchmentFileTitles, 8, indexF)

        SummaryFileWriter.writeHTMLBottom(indexF)
Пример #58
0
def RiverNetwork(H1, H1wt, H2, H2wt, H3, H3wt, elev_min, elev_max):

  print "Generating Digital Elevation Maps using FM2D algorithm"

  #Generate first DEM with gradient = 1 (i.e. TRUE) and high H value 
  DEM_arr1 = MapGeneration_pure_python.midPointFm2d(max_level = 9, sigma = 1, H = H1, addition = True,\
                        wrap = False, gradient = 1,seed = 0, normalise=True,lbound=elev_min, ubound=elev_max)
  pylab.imsave("Output/DigitalElevationModel1",DEM_arr1)

  #Generate second DEM with gradient = 0 (i.e. FALSE) and medium H value
  DEM_arr2 = MapGeneration_pure_python.midPointFm2d(max_level = 9, sigma = 1, H = H2, addition = True,\
                        wrap = False, gradient = 0,seed = 65, normalise = True,lbound=elev_min, ubound=elev_max)
  pylab.imsave("Output/DigitalElevationModel2",DEM_arr2)

  #Generate third DEM with gradient = 0 (i.e. FALSE) and medium H value
  DEM_arr3 = MapGeneration_pure_python.midPointFm2d(max_level = 9, sigma = 1, H = H3, addition = True,\
                        wrap = False, gradient = 0,seed = 6, normalise = True,lbound=elev_min, ubound=elev_max)
  pylab.imsave("Output/DigitalElevationModel3",DEM_arr3)

  DEM_arr = DEM_arr1
  (x_len,y_len) = DEM_arr.shape
  #Get the co-ordinates having highest elev , required for catchment extraction
  (max_x, max_y) = ndimage.maximum_position(DEM_arr)

  for i in range(0,x_len):
    for j in range(0,y_len):
      #Combine 3 DEM's 
      DEM_arr[i][j] = (H1wt * DEM_arr1[i][j]) + (H2wt * DEM_arr2[i][j]) + (H3wt * DEM_arr3[i][j])
      

  print "Iteratively removing sink using 3x3 window"
  for p in range(0,6):
    for i in range(1,x_len-1):
      for j in range(1,y_len-1):
        #Remove pits by 3 x 3 window
        A = min(DEM_arr[i-1][j-1],DEM_arr[i-1][j],DEM_arr[i-1][j+1],\
                DEM_arr[i][j-1],DEM_arr[i][j+1],DEM_arr[i+1][j-1],
                DEM_arr[i+1][j],DEM_arr[i+1][j+1])
        if DEM_arr[i][j] < A:
          DEM_arr[i][j] = A + 1

  #Initialize various arrays to hold flow direction, Flow accumulation,catchment info etc
  Flow_arr = numpy.zeros((x_len,y_len) , dtype = "uint8" )
  # River_arr will hold the River_accumulation matrix
  River_arr = numpy.ones((x_len,y_len) , dtype = "int" )
  # Catchment_boundary_arr will hold the Catchment boundaries
  Catchment_boundary_arr = numpy.zeros((x_len,y_len) , dtype = "uint8" )
  # Found_arr will hold the catchment with different labels
  Found_arr = numpy.zeros((x_len,y_len),dtype = "uint8")
  # Pour_point_arr keeps track of pour point of a catchment on the map
  Pour_point_arr = numpy.zeros((x_len,y_len),dtype = "uint8")
  Pour_point_list = [] #keep track of Pour_point in a list

  pit_list = [] #contains all the pits in the DEM
  print "Assigning Flow Directions"
  for i in range(1,x_len-1): 
    for j in range(1,y_len-1):
      #Assign Flow direction
      (value,dirn) =max(((DEM_arr[i][j] - DEM_arr[i-1][j-1])/1.41,3),\
                    (DEM_arr[i][j]-DEM_arr[i-1][j],2),((DEM_arr[i][j]-DEM_arr[i-1][j+1])/1.41,1),\
                    (DEM_arr[i][j] - DEM_arr[i][j-1],4),(0,8),(DEM_arr[i][j] - DEM_arr[i][j+1],0),\
                    ((DEM_arr[i][j] - DEM_arr[i+1][j-1])/1.41,5),(DEM_arr[i][j] - DEM_arr[i+1][j],6),\
                    ((DEM_arr[i][j] - DEM_arr[i+1][j+1])/1.41,7))
      Flow_arr[i][j] = dirn
      if dirn == 8:
        # If there is a pit append it to the pit_list
        pit_list.append((i,j))

  label = 0 # will be used to assign labels to different catchments

#_____________Catchment Extraction_____________________________________________
  print "Extracting Catchment and filling Depressions"
  while len(pit_list) >= 1:
  #_______________For each and every pit in the DEM do _________________________
    stack = []
    pit = pit_list.pop(0)
    stack.append(pit)
    label = label + 1 #increase the label being assigned to the catchment 
    #_______________________Identify catchment for each and every pit
    catchment_pixels = []
    catchment_pixels.append((DEM_arr[pit[0],pit[1]],pit[0],pit[1]))
    while len(stack) > 0:
      (p,q) = stack.pop(0)
      Found_arr[p][q] = label
      #Pop an element from the stack; for each adjacent pixel that exists and
      # contributes its flow to the popped (central) pixel, append it to the stack.
      # Continue until the stack is empty.
      if pixel_exist(p-1,q-1,x_len,y_len):
        if Flow_arr[p-1][q-1] == 7:
          stack.append((p-1,q-1))
          catchment_pixels.append((DEM_arr[p-1,q-1],p-1,q-1))
      if pixel_exist(p-1,q,x_len,y_len):
        if Flow_arr[p-1][q] == 6 :
          catchment_pixels.append((DEM_arr[p-1,q],p-1,q))
          stack.append((p-1,q))
      if pixel_exist(p-1,q+1,x_len,y_len):
        if Flow_arr[p-1][q+1] == 5:
          catchment_pixels.append((DEM_arr[p-1,q+1],p-1,q+1))
          stack.append((p-1,q+1))
      if pixel_exist(p,q-1,x_len,y_len):
        if Flow_arr[p][q-1] == 0 :
          catchment_pixels.append((DEM_arr[p,q-1],p,q-1))
          stack.append((p,q-1))
      if pixel_exist(p,q+1,x_len,y_len):
        if Flow_arr[p][q+1] == 4:
          catchment_pixels.append((DEM_arr[p,q+1],p,q+1))
          stack.append((p,q+1))
      if pixel_exist(p+1,q-1,x_len,y_len):
        if Flow_arr[p+1][q-1] == 1:
          catchment_pixels.append((DEM_arr[p+1,q-1],p+1,q-1))
          stack.append((p+1,q-1))
      if pixel_exist(p+1,q,x_len,y_len):
        if Flow_arr[p+1][q] == 2 :
          catchment_pixels.append((DEM_arr[p+1,q],p+1,q))
          stack.append((p+1,q))
      if pixel_exist(p+1,q+1,x_len,y_len):
        if Flow_arr[p+1][q+1] == 3 :
          catchment_pixels.append((DEM_arr[p+1,q+1],p+1,q+1))
          stack.append((p+1,q+1))
    # Find catchment Outlet
    pour_point = (max_x, max_y)
    flag = 0
    for i in range(0,len(catchment_pixels)):
      (p,q) = ( catchment_pixels[i][1],catchment_pixels[i][2] )
      label = Found_arr[p][q]
      # Catchment Outlet will be the minimum catchment boundary pixel
      if (Found_arr[p-1][q-1] != label or Found_arr[p-1][q] != label or Found_arr[p-1][q+1] != label or 
         Found_arr[p][q-1] != label or Found_arr[p][q+1] != label or Found_arr[p+1][q-1] != label or
         Found_arr[p+1][q] != label or Found_arr[p+1][q+1] != label):# if pixel lie on boundary of catchment
        Catchment_boundary_arr[p][q] = 255
        if DEM_arr[ pour_point[0] ][ pour_point[1] ] > DEM_arr[p][q]:#if the boundary height is lower, update the pour point
          pour_point = (p,q)
          flag = 1
    if flag == 1:
      Pour_point_list.append((DEM_arr[pour_point],pour_point[0],pour_point[1]))
      Pour_point_arr[pour_point] = 255
      for i in range(0,len(catchment_pixels)):
        if catchment_pixels[i][0] < DEM_arr[pour_point]:
          #fill the depression in the catchment
          DEM_arr[catchment_pixels[i][1],catchment_pixels[i][2]] = DEM_arr[pour_point]

  print "Assignnig flow dirnection again after Depression filling"
  for i in range(1,x_len-1):
    for j in range(1,y_len-1):
    # Again assign Flow direction again after filling the depressions
      (value, dirn ) = max( ((DEM_arr[i][j] - DEM_arr[i-1][j-1])/1.41,3),(DEM_arr[i][j] - DEM_arr[i-1][j],2),((DEM_arr[i][j] - DEM_arr[i-1][j+1])/1.41,1),\
                            (DEM_arr[i][j] - DEM_arr[i][j-1],4),(0,8),(DEM_arr[i][j] - DEM_arr[i][j+1],0),\
                            ((DEM_arr[i][j] - DEM_arr[i+1][j-1])/1.41,5),(DEM_arr[i][j] - DEM_arr[i+1][j],6),((DEM_arr[i][j] - DEM_arr[i+1][j+1])/1.41,7))
      Flow_arr[i][j] = dirn
      if value <= 0:
        Flow_arr[i][j] = 8

  # Calculate flow accumulation by calling Generate_River function
  print "Performing Flow accumulation"
  River_arr  = Flow_accum.Generate_River( Flow_arr,River_arr,DEM_arr)

  Distance_arr = city_block_dist_erosion.CityBlock(River_arr)
  # Create a mask for different distances used for DEM erosion
  print "Eroding DEM"
  mask4 = [ Distance_arr <= 15 ]
  mask5 = [ Distance_arr > 3 ]
  mask3 = [Distance_arr == 3]
  mask2 = [Distance_arr == 2]
  mask1 = [Distance_arr == 1]
  mask0 = [Distance_arr == 0]
  max_flow_accum = numpy.max(River_arr)

# TODO - maybe change the block below - have already combined the DEMs. Weight erosion more simply
  for i in range(0,x_len):
    for j in range(0,y_len):
      #Erode the landscape using different weighting factors for different distances from
      #the river while combining the 3 DEMs
      if mask0[0][i][j] == True:
        DEM_arr[i][j] = 0.3*DEM_arr[i][j] + 0.45*DEM_arr2[i][j] + 0.19*DEM_arr3[i][j]
      elif mask1[0][i][j] == True:
        DEM_arr[i][j] = 0.3*DEM_arr[i][j] + 0.46*DEM_arr2[i][j] + 0.20*DEM_arr3[i][j]
      elif mask2[0][i][j] == True:
        DEM_arr[i][j] = 0.3*DEM_arr[i][j] + 0.46*DEM_arr2[i][j] + 0.21*DEM_arr3[i][j]
      elif mask3[0][i][j] == True:
        DEM_arr[i][j] = 0.3*DEM_arr[i][j] + 0.46*DEM_arr2[i][j] + 0.23*DEM_arr3[i][j]
      elif mask4[0][i][j] == True and mask5[0][i][j] == True:
        DEM_arr[i][j] = 0.3*DEM_arr[i][j] + 0.47*DEM_arr2[i][j] + 0.23*DEM_arr3[i][j]
      else:     
        DEM_arr[i][j] = 0.3*DEM_arr[i][j] + 0.50*DEM_arr2[i][j] + 0.25*DEM_arr3[i][j]

#Output different statistics for display and further use
  print "printing statistics ...see the Output Folder"
  numpy.save("River.npy",River_arr) 
  numpy.save("DEM.npy",DEM_arr)
  pylab.imsave("Output/River",River_arr)
  pylab.imsave("Output/Catchment",Found_arr)
  pylab.imsave("Output/CatchmentBoundary",Catchment_boundary_arr)
  pylab.imsave("Output/Combined_eroded_DEM",DEM_arr)
  pylab.imsave("Output/RiverDistance",Distance_arr)

  return DEM_arr
Пример #59
0
  def train(self, inputData, epochs, test_proportion=0.25, verbose=True):
    # Sort the known emotions into image piles, to associate a cluster with an
    # emotion after training; store that association in emo_clusters
    cluster_identification_images = {
      Emotion.SAD: [],
      Emotion.SMILING: [],
      Emotion.CALM: [],
      Emotion.ASTONISHED: []
    }

    # Dictionary to return with performance metrics etc.
    train_perf = {}

    train_faces = []
    test_faces = []
    for entry in inputData:
      if (random() > test_proportion):
        # Build training set with roughly (1 - test_proportion) of inputData
        (emotion, img) = entry
        cluster_identification_images[emotion].append(img)
        train_faces.append(img)
      else:
        # Build test set with the remainder
        test_faces.append(entry)

    i = 0
    #while (self.som.neighbours > 0.5 or i < epochs):
    while (i < epochs):
      for img in train_faces:
        # Not sure if this can be done with som.activateOnDataset()
        self.som.activate(img)
        self.som.backward()

      i += 1

      if verbose:
        if not (i % 20):
          print "SOM neighbors: %f" % self.som.neighbours
          print "SOM winner err: %f" % self.som.winner_error

    # Finished training SOM
    train_perf['epochs'] = i
    train_perf['final_som_winner_err'] = self.som.winner_error

    # Correlate N SOM clusters with N emotions
    training_error = []
    for emotion in cluster_identification_images.keys():
      emo_count = zeros((self.som.nNeurons, self.som.mNeurons))

      for img in cluster_identification_images[emotion]:
        self.som.activate(img)
        emo_count[self.som.winner[0]][self.som.winner[1]] += 1
      dominant_node = maximum_position(emo_count)
      training_error.append( 1.0 - 
        (1.0*emo_count[dominant_node[0]][dominant_node[1]]/
        len(cluster_identification_images[emotion]))
      )
      self.emo_clusters[dominant_node[0]][dominant_node[1]] = emotion

    # Record training error
    train_perf['training_error'] = training_error
    train_perf['avg_training_error'] = mean(training_error)

    train_perf['emo_clusters'] = self.emo_clusters

    # Start the testing set
    if verbose: print "Testing:"
    error_count = 0

    for entry in test_faces:
      (expectd_emo, img) = entry

      determined_emo = self.classify(img, verbose=False)
      if (expectd_emo != determined_emo):
        error_count += 1

        if verbose: print "{>_<} Expected %s, got %s" % \
            (Emotion.to_s[expectd_emo], Emotion.to_s[determined_emo])
      else:
        if verbose: print "{^-^} Classified a %s face correctly." % \
            Emotion.to_s[determined_emo]

    train_perf['avg_testing_error'] = (1.0*error_count / len(test_faces))

    if verbose: print train_perf
    return train_perf
Пример #60
0
def main():
    """
    It imports all necessary python modules required for landscape simulation.
    It then performs the following:
    1. Gets all the parameters required for simulation from parameter.yaml file. 
    2. calls DEM_creator() --> for generating DEM grid
    3. Iteratively operate on DEM grid and do the following
       3.1 Remove single cell pits by calling Single_Cell_PitRemove()
       3.2 Get flow dirn using 9x9 window by calling Get_Flow_Dirn_using_9x9_window()  
       3.3 Get flow dirn using 3x3 window by calling Flow_Dirn_3x3() for catchment extraction
       3.4 Extract the catchment and do depression filling using CatchmentExtraction()
       3.5 Again get flow direction using Get_Flow_Dirn_using_9x9_window() after depression filling
       3.6 Perform flow accumulation by calling Flow_accumulation() 
       3.7 Do the erosion by calling Erosion()
    4. Generate a Decision tree for land_cover allocation by calling DecisionTree()
    5. Assign the vegetation class to DEM by calling VegetationClassify()
    6. Generate some agricultural field by calling GeometricFeature()
    """
    time1 = time.time()
    # Get the parameters from the parameter.yaml file
    yaml_file = open('Parameters/parameters.yaml', 'r')
    stream = yaml.load(yaml_file)
    counter = stream['counter']
    H = stream['H']
    H_wt =  stream['H_wt']
    seed = stream['seed']
    elev_range = stream['elev_range']
    river_drop = stream['river_drop']
    max_level = stream['max_level']
    DEMcreator_option = stream['DEMcreator_option']
    north = stream['north']
    north_west = stream['north_west']
    west = stream['west']
    south_west = stream['south_west']
    south = stream['south']
    south_east = stream['south_east']
    east = stream['east']
    north_east = stream['north_east']
    center = stream['center']
    Three_DplotDEM = stream["Three_DplotDEM"]
    output_dir = stream['output_dir']
    response = stream['response']
    elev_filename = stream["training_data_elev"]
    landcover_filename = stream["training_data_landcover"]
    river_filename = stream["training_data_river"]
    min_area = stream['min_area']
    max_area = stream['max_area']
    aspect_ratio = stream['aspect_ratio']
    agri_area_limit = stream['agri_area_limit']
    next_patch_orientation_probability = stream['next_patch_orientation_probability']
    gradient_values = [north, north_west, west, south_west, south, south_east, east, north_east, center]
    yaml_file.close() #close the yaml parameter file
    print ("Running simulation with follwing parameters")
    print ("Counter %d" % counter)
    print ("H %s" % H)
    print ("H_wt %s" % H_wt)
    print ("seed %s" % seed)
    print ("elev_range %s" % elev_range)
    print ("river_drop %s" % river_drop)
    print ("max_level %s" % max_level)
    print ("DEMcreator_option %s" % DEMcreator_option)
    print ("Gradient values %s" % gradient_values)
    print ("output_dir %s" % output_dir)
    print ("response %s" % response)
    print ("min_area %d" % min_area)
    print ("max_area %d" % max_area)
    print ("aspect_ratio %s" % aspect_ratio)
    print ("agri_area_limit %s" % agri_area_limit)
 
    #Generate DEM using FM2D/SS algorithm by calling DEM_creator(args...) function")
    DEM_Result = Hydro_Network.DEM_creator(H, H_wt, seed, elev_range, max_level, gradient_values, DEMcreator_option)
    #Write result to Output file
    file_name = "%s/Original_DEM" % (output_dir)
    pylab.imsave(file_name, DEM_Result[0])
    for i in range(0,len(DEM_Result[1])):
        file_name = "%s/%s" % (output_dir,DEM_Result[2][i])#TODO(include parameter in filename)DEM_Result[3][i][0],DEM_Result[3][i][1])
        pylab.imsave(file_name, DEM_Result[1][i])

    DEM = DEM_Result[0]
    for iteration in range(0,counter):
        #Remove sink using 3x3 window by calling Single_Cell_PitRemove(originalDEM, no_of_itr)
        DEM = Hydro_Network.Single_Cell_PitRemove(DEM, no_of_itr = 6)
        (x_len,y_len) = DEM.shape
        max_posn = ndimage.maximum_position(DEM)
        Flow_dirn_arr = numpy.zeros((x_len,y_len,2), dtype="int" )
        #Flow_arr will be used for the purpose of catchment extraction
        Flow_arr = numpy.zeros((x_len, y_len), dtype = "uint8")
        River_arr = numpy.ones((x_len, y_len), dtype = "int")
        pit_list = [] #Not required now
        ( pit_list, Flow_dirn_arr, DEM ) = Hydro_Network.Get_Flow_Dirn_using_9x9_window(DEM, Flow_dirn_arr, pit_list)
        # call Flow_Dirn_3x3(DEM, Flow_arr , pit_list) for the purpose of catchment extraction
        pit_list = [] #Required for catchment extraction
        ( pit_list, Flow_arr ) = Hydro_Network.Flow_Dirn_3x3(DEM, Flow_arr , pit_list) 
        #Catchment extraction, calling CatchmentExtraction(pit_list, DEM_arr, max_posn)
        (DEM, Found_arr, Catchment_boundary_arr) = Hydro_Network.CatchmentExtraction(pit_list, DEM, Flow_arr, max_posn)
        #Write result to Output file
        file_name = "%s/Catchment%s" % (output_dir, iteration+1)
        pylab.imsave(file_name, Found_arr)
        file_name = "%s/Catchment_Boundary%s" % (output_dir, iteration+1)
        pylab.imsave(file_name, Catchment_boundary_arr)        
        #Assigning flow direction again after catchment extraction and depression filling
        ( pit_list, Flow_dirn_arr, DEM ) = Hydro_Network.Get_Flow_Dirn_using_9x9_window(DEM, Flow_dirn_arr , pit_list)
        #Calculate flow accumulation by Calling Flow_accumulation(Flow_dirn_arr ,River_arr , DEM)
        River_arr = Hydro_Network.Flow_accumulation(Flow_dirn_arr ,River_arr, DEM)
        #Write result to Output file
        file_name = "%s/River%s" % (output_dir,iteration+1)
        pylab.imsave(file_name, River_arr)
        #"Eroding the DEM based on Distance form River ...Calling Erosion(River_arr,DEM_arr,river_drop)
        (DEM, Distance_arr) = Hydro_Network.Erosion(River_arr, DEM, river_drop)  
        #Write result to Output file
        file_name = "%s/ErodedDEM%s" % (output_dir, iteration+1)
        pylab.imsave(file_name, DEM)
        file_name = "%s/RiverDistance%s" % (output_dir, iteration+1)
        pylab.imsave(file_name, Distance_arr)
    
    if Three_DplotDEM == 'y' or Three_DplotDEM == 'Y':
        surface_plot.plot(DEM)
    time2 = time.time()
    print ("Time taken in Erosion modeling", time2 - time1,"seconds")

    if (response == 'y') or (response == 'Y'):
        DecisionTree.DecisionTree(output_dir, elev_filename, landcover_filename, river_filename)
        time3 = time.time()
        print "Time taken to generate decision tree is " , (time3 - time2) ,"seconds"

    time3 = time.time()
    Veg_arr = VegetationClassify.VegetationClassify(DEM, River_arr)
    file_name = "%s/Landcover" % (output_dir)
    pylab.imsave(file_name, Veg_arr)
    time4 = time.time()
    print "Time taken to assign landcover is " , (time4 - time3),"seconds"
    (agri, labelled_fields) =Geometry.GeometricFeature(Veg_arr,Distance_arr, min_area, max_area, aspect_ratio, agri_area_limit, next_patch_orientation_probability)
    file_name = "%s/labelled_fields_display" % (output_dir)
    pylab.imsave(file_name, labelled_fields)
    file_name = "%s/Agriculture" % (output_dir)
    pylab.imsave(file_name, agri)
    time5 = time.time()
    print "Time taken to generate Geometric Features is " ,(time5 - time4) ,"seconds"