Example #1
def major_axis(x, y, ndist=10, mask=1):
    dist = scipy.array([scipy.hypot(x-i,y-j) \
                        for i, j in itertools.izip(x, y)])
    shape = dist.shape
    imax = scipy.zeros(ndist, dtype=int)
    jmax = scipy.zeros(ndist, dtype=int)
    # dummy run
    for i in xrange((mask - 1) * ndist):
        ii, jj = scipy.unravel_index(scipy.argmax(dist), shape)
        dist[ii] = scipy.zeros(shape[0])
        dist[jj] = scipy.zeros(shape[0])
        dist[:, ii] = scipy.zeros(shape[1])
        dist[:, jj] = scipy.zeros(shape[1])
    for i in xrange(ndist):
        imax[i], jmax[i] = scipy.unravel_index(scipy.argmax(dist), shape)
        dist[imax[i]] = scipy.zeros(shape[0])
        dist[jmax[i]] = scipy.zeros(shape[0])
        dist[:, imax[i]] = scipy.zeros(shape[1])
        dist[:, jmax[i]] = scipy.zeros(shape[1])
    #print imax, jmax
    slopes = [(y[i]-y[j])/(x[i]-x[j]) \
              for i, j in itertools.izip(imax, jmax)]
    zeros = [y[i] - m * x[i] for i, m in itertools.izip(imax, slopes)]
    m = scipy.median(slopes)
    n = scipy.median(zeros)
    return imax, jmax, m, n
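Every example on this page turns on the same idiom: argmax/argmin operate on the flattened array, and unravel_index maps the flat position back to multi-dimensional indices. A minimal, self-contained sketch of the step used above (written against the modern numpy namespace; the scipy.* array aliases used in these snippets are long deprecated):

import numpy as np

x = np.array([0.0, 3.0, 1.0])
y = np.array([0.0, 4.0, 1.0])
# pairwise distances between points (x[i], y[i]) and (x[j], y[j])
dist = np.hypot(x[:, None] - x[None, :], y[:, None] - y[None, :])
# argmax works on the flattened matrix; unravel_index recovers (row, col)
i, j = np.unravel_index(np.argmax(dist), dist.shape)
print(i, j, dist[i, j])  # 0 1 5.0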
Example #2
def patch_holes(data_map):
    r"""
    Fills in any areas with a non-finite value by taking a linear average of
    the nearest finite values along each axis
    """
    #
    # getting coordinates of all valid data points
    data_vector = sp.ravel(data_map)
    inds = sp.where(sp.isfinite(data_vector))[0]
    points = sp.unravel_index(inds, data_map.shape)
    values = data_vector[inds]
    #
    # linearly interpolating data to fill gaps
    xi = sp.where(~sp.isfinite(data_vector))[0]
    msg = '\tattempting to fill %d values with a linear interpolation'
    logger.debug(msg, xi.size)
    xi = sp.unravel_index(xi, data_map.shape)
    intrp = griddata(points, values, xi, fill_value=sp.nan, method='linear')
    data_map[xi[0], xi[1]] = intrp
    #
    # performing a nearest interpolation on any remaining regions
    data_vector = sp.ravel(data_map)
    xi = sp.where(~sp.isfinite(data_vector))[0]
    msg = '\tfilling %d remaining values with a nearest interpolation'
    logger.debug(msg, xi.size)
    xi = sp.unravel_index(xi, data_map.shape)
    intrp = griddata(points, values, xi, fill_value=0, method='nearest')
    data_map[xi[0], xi[1]] = intrp
    #
    return data_map
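A toy sketch of the same two-stage fill on a 2-D array, assuming scipy.interpolate.griddata (which the snippet above evidently imports elsewhere):

import numpy as np
from scipy.interpolate import griddata

data = np.array([[1.0, np.nan, 3.0],
                 [4.0, 5.0, 6.0]])
good = np.isfinite(data)
points = np.argwhere(good)   # (row, col) pairs of valid cells
values = data[good]
holes = np.argwhere(~good)
data[~good] = griddata(points, values, holes, method='linear')
print(data[0, 1])            # about 2.0, the linear estimate between its row neighbours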
Example #3
def generate_index_map(nonzero_locs, shape):
    r"""
    Determines the i,j,k indices of the flattened array
    """
    #
    logger.info('creating index map of non-zero values...')
    x_c = sp.unravel_index(nonzero_locs, shape)[0].astype(sp.int16)
    y_c = sp.unravel_index(nonzero_locs, shape)[1].astype(sp.int16)
    z_c = sp.unravel_index(nonzero_locs, shape)[2].astype(sp.int16)
    index_map = sp.stack((x_c, y_c, z_c), axis=1)
    #
    return index_map
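Note that unravel_index accepts a whole vector of flat indices at once, so the three calls above could be collapsed into one. A hypothetical sketch (not the author's code):

import numpy as np

shape = (2, 3, 4)
nonzero_locs = np.array([0, 5, 23])
index_map = np.stack(np.unravel_index(nonzero_locs, shape), axis=1).astype(np.int16)
print(index_map)
# [[ 0  0  0]
#  [ 0  1  1]
#  [ 1  2  3]]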
Example #5
    def _getIntelligentInitialValues(self):
        xs, ys, zs = self._get_subSpaceArrays(
        )  #returns the full arrays if subspace not used
        logger.debug("attempting to set initial values intellgently")
        if xs is None or ys is None or zs is None:
            logger.debug("couldn't find all necessary data")
            return False
        AParab = scipy.amax(zs)
        B = scipy.average(zs[0:len(ys) // 10, 0:len(xs) // 10])
        y0Index, x0Index = scipy.unravel_index(zs.argmax(), zs.shape)
        logger.debug("index of max z is %s, %s " % (y0Index, x0Index))
        x0 = xs[x0Index]
        y0 = ys[y0Index]
        #WHEN WE IMPLEMENT ONLY FITTING A SUBSET THIS WILL HAVE TO CHANGE A BIT
        x0HalfIndex = (scipy.absolute(zs[y0Index] - AParab / 2.0)).argmin()
        y0HalfIndex = (scipy.absolute(zs[:, x0Index] - AParab / 2.0)).argmin()
        logger.debug("index of half max z is %s, %s " %
                     (y0HalfIndex, x0HalfIndex))
        x0Half = xs[x0HalfIndex]
        y0Half = ys[y0HalfIndex]
        deltaXHalf = abs(x0 - x0Half)
        deltaYHalf = abs(y0 - y0Half)
        wParabX = 1.644 * deltaXHalf
        wParabY = 1.644 * deltaYHalf

        p0 = [x0, y0, AParab, wParabX, wParabY, B]
        logger.debug("initial values guess = %s" % p0)
        return p0
Example #6
def save_image_stack(nonzero_locs, img_dims, path, overwrite=False):
    r"""
    Saves a text image stack in a directory to be read by ImageJ
    """
    #
    logger.info('saving image data as .bmp stack...')
    #
    img_data = 255*sp.ones(img_dims, dtype=sp.uint8)
    x_coords, y_coords, z_coords = sp.unravel_index(nonzero_locs, img_dims)
    img_data[x_coords, y_coords, z_coords] = 0
    #
    # creating any needed directories
    try:
        os.makedirs(path)
    except FileExistsError:
        if not overwrite:
            msg = 'Image Stack destination already exists, '
            msg += 'use "-f" option to overwrite'
            raise FileExistsError(msg)
        else:
            files = glob(os.path.join(path, '*'))
            for f in files:
                os.remove(f)
    #
    # saving the image frames
    for frame_num in range(img_data.shape[2]):
        name = os.path.join(path, 'image-frame-{}.bmp'.format(frame_num))
        frame = Image.fromarray(img_data[:, :, frame_num].transpose())
        frame.save(name)
Example #7
 def testKernelCoeffs(self):
     for scale in [0.35, 0.5, 0.75, 1]:
         for dim in [0,1,2,3,4]:
             dgK = mango.image.discrete_gaussian_kernel(sigma=scale, dim=dim, errtol=0.001)
             self.assertAlmostEqual(1.0, sp.sum(dgK), 8)
             mxElem = sp.argmax(dgK)
             self.assertTrue(sp.all(sp.array(dgK.shape)//2 == sp.unravel_index(mxElem, dgK.shape)))
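The assertion rests on a simple fact: a symmetric, odd-sized kernel peaks at its centre voxel, which is shape // 2 along each axis. A sketch of the same check with a hypothetical hand-built kernel in place of mango's:

import numpy as np

kernel = np.outer([0.25, 0.5, 0.25], [0.25, 0.5, 0.25])  # separable, sums to 1
assert np.isclose(kernel.sum(), 1.0)
centre = np.unravel_index(np.argmax(kernel), kernel.shape)
assert centre == tuple(np.array(kernel.shape) // 2)  # (1, 1)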
Example #8
def get_alignment(str_1, str_2, matrix, remove_path=False):

    max_row, max_column = scipy.unravel_index(matrix.argmax(), matrix.shape)
    path = _get_back_path(matrix, max_row, max_column)

    aligment_1 = ''
    aligment_2 = ''

    for point_index in range(1, len(path)):

        row_0, column_0 = path[point_index]
        row_1, column_1 = path[point_index - 1]

        if row_0 != row_1 and column_0 == column_1:
            aligment_1 += '-'
            aligment_2 += str_2[row_0]
        elif row_0 == row_1 and column_0 != column_1:
            aligment_1 += str_1[column_0]
            aligment_2 += '-'
        else:
            aligment_1 += str_1[column_0]
            aligment_2 += str_2[row_0]

    if remove_path:

        x0, y0 = path[0]
        x1, y1 = path[-2]

        for i in range(x1, x0 + 1):
            matrix[i, y1:y0 + 1] = 0

        matrix.eliminate_zeros()

    return aligment_1[::-1], aligment_2[::-1], path[0], len(path)
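The opening line is the standard local-alignment move: the best local alignment ends at the highest-scoring cell of the dynamic-programming matrix (here evidently a scipy.sparse matrix, given the eliminate_zeros call). The same lookup on a hypothetical dense score matrix:

import numpy as np

matrix = np.array([[0, 0, 0],
                   [0, 2, 1],
                   [0, 1, 4]])
end_row, end_col = np.unravel_index(matrix.argmax(), matrix.shape)
print(end_row, end_col)  # 2 2 -- the traceback starts here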
Example #9
 def _getIntelligentInitialValues(self):
     
     xs,ys,zs = self._get_subSpaceArrays()#returns the full arrays if subspace not used
     logger.debug("attempting to set initial values intellgently")
     if xs is None or ys is None or zs is None:
         logger.debug("couldn't find all necessary data")
         return False
     A0 = scipy.amax(zs)
     B0 = scipy.average(zs[0:len(ys)//10,0:len(xs)//10])
     y0Index, x0Index = scipy.unravel_index(zs.argmax(), zs.shape)
     logger.debug("index of max z is %s, %s " % (y0Index, x0Index))
     x0 = xs[x0Index]
     y0 = ys[y0Index]
     #WHEN WE IMPLEMENT ONLY FITTING A SUBSET THIS WILL HAVE TO CHANGE A BIT  
     x0HalfIndex = (scipy.absolute(zs[y0Index]-A0/2.0)).argmin()
     y0HalfIndex = (scipy.absolute(zs[:,x0Index]-A0/2.0)).argmin()
     logger.debug("index of half max z is %s, %s " % (y0HalfIndex, x0HalfIndex))
     x0Half = xs[x0HalfIndex]
     y0Half = ys[y0HalfIndex]
     FWHMX0 = 2.0*abs(x0-x0Half)
     FWHMY0 = 2.0*abs(y0-y0Half)
     sigmaX0 = FWHMX0/2.355
     sigmaY0 = FWHMY0/2.355
     logger.debug("x0,y0 %s, %s " % (x0, y0))
     return[A0,x0,sigmaX0, y0,sigmaY0,B0]
Example #10
def process_image(img_data, num_clusters, **kwargs):
    r"""
    Processes a TIFF stack, retaining voxels based on node connectivity.
    The clusters are sorted by size and the largest N are retained.
    """
    #
    img_dims = img_data.shape
    nonzero_locs = img_data.get_fracture_voxels()
    index_map = generate_index_map(nonzero_locs, img_dims)
    #
    # determining connectivity and removing clusters
    conns = generate_node_connectivity_array(index_map, img_data)
    del img_data, index_map
    nonzero_locs = remove_isolated_clusters(conns, nonzero_locs, num_clusters,
                                            **kwargs)
    # reconstructing 3-D array
    logger.info('reconstructing processed data back into 3-D array')
    #
    img_data = sp.zeros(img_dims, dtype=bool)
    x_coords, y_coords, z_coords = sp.unravel_index(nonzero_locs, img_dims)
    #
    del nonzero_locs
    img_data[x_coords, y_coords, z_coords] = True
    del x_coords, y_coords, z_coords
    #
    return img_data.view(FractureImageStack)
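The reconstruction step round-trips cleanly: flat indices taken from a raveled image come back as coordinate arrays that index the 3-D array. A toy sketch:

import numpy as np

img_dims = (2, 2, 2)
nonzero_locs = np.array([1, 6])
img = np.zeros(img_dims, dtype=bool)
img[np.unravel_index(nonzero_locs, img_dims)] = True
print(np.flatnonzero(img))  # [1 6] -- back to the original flat indices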
Example #11
def fitCorrGaussian(corr, center_region):
    frame_dim_x, frame_dim_y = corr.shape

    center_guess = frame_dim_x // 2
    height_guess = corr.max()
    noise_guess = corr[1:frame_dim_x // 10, 1:frame_dim_y // 10].mean()

    guess_params = [
        noise_guess, height_guess - noise_guess, center_region, center_region,
        5., 5.
    ]  #, 0.]

    max_height = height_guess * 1.1
    max_width = center_region * 1.1
    max_params = [0, max_height, 0, 0, max_width, max_width]  #,360]
    use_max = [False, True, False, False, True, True]  #, True]
    corr_region = corr[center_guess - center_region:center_guess +
                       center_region, center_guess -
                       center_region:center_guess + center_region]
    guess_params[2:4] = scipy.unravel_index(corr_region.argmax(),
                                            corr_region.shape)
    fits = gaussfitter.gaussfit(corr_region,
                                params=guess_params,
                                maxpars=max_params,
                                limitedmax=use_max,
                                rotate=0)
    # the fitted centre (fits[2], fits[3]) is a float; round to integer
    # indices before slicing the 3x3 neighborhood
    row = int(round(center_guess - center_region + fits[2]))
    col = int(round(center_guess - center_region + fits[3]))
    central_val = corr[row - 1:row + 2, col - 1:col + 2].mean()

    return fits, central_val
Example #12
    def _getIntelligentInitialValues(self):
        xs, ys, zs = self._get_subSpaceArrays(
        )  #returns the full arrays if subspace not used
        logger.debug("attempting to set initial values intellgently")
        if xs is None or ys is None or zs is None:
            logger.debug("couldn't find all necessary data")
            return False
        A = scipy.amax(zs)
        B = scipy.average(zs[0:len(ys) // 10, 0:len(xs) // 10])
        y0Index, x0Index = scipy.unravel_index(zs.argmax(), zs.shape)
        logger.debug("index of max z is %s, %s " % (y0Index, x0Index))
        x0 = xs[x0Index]
        y0 = ys[y0Index]
        #WHEN WE IMPLEMENT ONLY FITTING A SUBSET THIS WILL HAVE TO CHANGE A BIT
        x0HalfIndex = (scipy.absolute(zs[y0Index] - A / 2.0)).argmin()
        y0HalfIndex = (scipy.absolute(zs[:, x0Index] - A / 2.0)).argmin()
        logger.debug("index of half max z is %s, %s " %
                     (y0HalfIndex, x0HalfIndex))
        x0Half = xs[x0HalfIndex]
        y0Half = ys[y0HalfIndex]
        FWHMX0 = 2.0 * abs(x0 - x0Half)
        FWHMY0 = 2.0 * abs(y0 - y0Half)

        #make gaussian wings larger for thermal part (*4)
        sigmax = 4 * FWHMX0 / 2.355
        sigmay = 4 * FWHMY0 / 2.355
        wParabX = FWHMX0 / 2.0
        wParabY = FWHMY0 / 2.0
        AGauss = 0.1 * A
        AParab = 0.9 * A

        p0 = [x0, y0, AGauss, sigmax, sigmay, AParab, wParabX, wParabY, B]
        logger.debug("initial values guess = %s" % p0)
        return p0
Example #13
def move_discharge(ji, nwet):
    sj, ej = max(ji[0] - 2, 0), min(ji[0] + 3, sh[0])
    si, ei = max(ji[1] - 2, 0), min(ji[1] + 3, sh[1])
    tmp = nwet[sj:ej, si:ei]
    if tmp.max() >= 6.0:
        j, i = sp.unravel_index(tmp.argmax(), tmp.shape)
        ji[0], ji[1] = sj + j, si + i
    return ji
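Note the coordinate bookkeeping here: argmax inside the clipped window returns window-local indices, which must be shifted by the window origin (sj, si) to become global. A toy check:

import numpy as np

nwet = np.zeros((5, 5))
nwet[3, 4] = 7.0
sj, si = 1, 2                      # window origin
tmp = nwet[sj:sj + 3, si:si + 3]
j, i = np.unravel_index(tmp.argmax(), tmp.shape)
print(sj + j, si + i)              # 3 4 -- global indices of the maximum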
Example #14
    def __init__(self, template, spacing=[1, 1, 1], **kwargs):

        template = sp.atleast_3d(template)
        super().__init__(shape=template.shape, **kwargs)

        coords = sp.unravel_index(range(template.size), template.shape)
        self['pore.template_coords'] = sp.vstack(coords).T
        self['pore.template_indices'] = self.Ps
        self['pore.drop'] = template.flatten() == 0
        topotools.trim(network=self, pores=self.pores('drop'))
        del self['pore.drop']
Example #15
def contour_levels(cluster, x, y, kde):
    '''This is the central function. It generates the contour plots around the
    cluster members. The points of these contour levels are used to trace the
    ZAMS for each cluster.
    '''

    fine_tune = False
    if cluster in fine_tune_list[0]:
        fine_tune = True
        indx = fine_tune_list[0].index(cluster)
        manual_levels = np.arange(fine_tune_list[1][indx][0],
                                  fine_tune_list[1][indx][1],\
                                  fine_tune_list[1][indx][2])
        x_min, x_max = fine_tune_list[2][indx][0], fine_tune_list[2][indx][1]
        y_min, y_max = fine_tune_list[2][indx][2], fine_tune_list[2][indx][3]
        lev_min, lev_num = fine_tune_list[3][indx]
    else:
        x_min, x_max = -10., 10.
        y_min, y_max = -10., 10.
        lev_min, lev_num = 0.1, 1

    # This list will hold the points obtained through the contour curves,
    # the first sublist are the x coordinates of the points and the second
    # the y coordinates.
    sequence = [[], []]

    # Store contour levels.
    if fine_tune == True:
        CS = plt.contour(x, y, kde, manual_levels)
    else:
        CS = plt.contour(x, y, kde)
    plt.clabel(CS, fontsize=9, inline=1)
    # Store level values for contour levels.
    levels = CS.levels
    #    print levels
    for i, clc in enumerate(CS.collections):
        for j, pth in enumerate(clc.get_paths()):
            cts = pth.vertices
            d = sp.spatial.distance.cdist(cts, cts)
            x_c, y_c = cts[list(sp.unravel_index(sp.argmax(d), d.shape))].T
            # Only store points that belong to contour PDF values larger
            # than lev_min and that belong to the upper curves, i.e. do not
            # use those with index <= lev_num.
            if levels[i] > lev_min and i > lev_num:
                # Only store points within these limits.
                if x_min <= x_c[0] <= x_max and y_min <= y_c[0] <= y_max:
                    sequence[0].append(round(x_c[0], 4))
                    sequence[1].append(round(y_c[0], 4))
                if x_min <= x_c[1] <= x_max and y_min <= y_c[1] <= y_max:
                    sequence[0].append(round(x_c[1], 4))
                    sequence[1].append(round(y_c[1], 4))

    return sequence
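The cdist line is doing real work: for each contour path it finds the two vertices farthest apart, i.e. the endpoints of the path's diameter. That step in isolation, with hypothetical vertices:

import numpy as np
from scipy.spatial.distance import cdist

cts = np.array([[0.0, 0.0], [1.0, 0.5], [3.0, 4.0]])
d = cdist(cts, cts)
x_c, y_c = cts[list(np.unravel_index(np.argmax(d), d.shape))].T
print(x_c, y_c)  # [0. 3.] [0. 4.] -- the two extreme vertices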
Example #17
 def _generate_pores(self):
     r"""
     Generate the pores (coordinates, numbering and types)
     """
     Nx = self._Nx
     Ny = self._Ny
     Nz = self._Nz
     Lc = self._Lc
     Np = Nx*Ny*Nz
     ind = sp.arange(0,Np)
     self['pore.all'] = sp.ones_like(ind,dtype=bool)
     pore_coords = Lc/2+Lc*sp.array(sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F'),dtype=sp.float64).T
     self['pore.coords'] = pore_coords
Example #18
 def _generate_pores(self):
     r"""
     Generate the pores (coordinates, numbering and types)
     """
     Nx = self._Nx
     Ny = self._Ny
     Nz = self._Nz
     Lc = self._Lc
     Np = Nx*Ny*Nz
     ind = sp.arange(0,Np)
     self.set_pore_data(prop='numbering',data=ind)
     self.set_pore_info(label='all',locations=sp.ones_like(ind))
     pore_coords = Lc/2+Lc*sp.array(sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F'),dtype=sp.float64).T
     self.set_pore_data(prop='coords',data=pore_coords)
Example #19
 def _generate_pores(self):
     r"""
     Generate the pores (coordinates, numbering and types)
     """
     Nx = self._Nx
     Ny = self._Ny
     Nz = self._Nz
     Lc = self._Lc
     Np = Nx*Ny*Nz
     ind = sp.arange(0, Np)
     self['pore.all'] = sp.ones_like(ind, dtype=bool)
     unraveled_index = sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F')
     pore_coords = Lc/2+Lc*sp.array(unraveled_index, dtype=sp.float64).T
     self['pore.coords'] = pore_coords
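The order='F' argument matters in these pore generators: Fortran order makes the first axis vary fastest, so consecutive pore numbers march along x first. A short contrast of the two orders (note that newer NumPy spells this keyword shape= rather than dims=):

import numpy as np

print(np.unravel_index([0, 1, 2], (2, 3), order='C'))
# (array([0, 0, 0]), array([0, 1, 2]))  -- last axis varies fastest
print(np.unravel_index([0, 1, 2], (2, 3), order='F'))
# (array([0, 1, 0]), array([0, 0, 1]))  -- first axis varies fastest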
Example #20
def patch_holes(data_map):
    r"""
    Fills in any areas with a value of -1 by taking a linear average of
    the nearest non-zero values along each axis
    """
    #
    logger.info('interpolating missing data due to zero aperture zones')
    #
    # getting coordinates of all valid data points
    data_vector = sp.ravel(data_map)
    inds = sp.where(data_vector >= 0)[0]
    points = sp.unravel_index(inds, data_map.shape)
    values = data_vector[inds]
    #
    # setting up to interpolate over all data coordinates
    intrp = sp.arange(data_map.size, dtype=int)
    intrp = sp.unravel_index(intrp, data_map.shape)
    #
    # interpolating data to fill gaps and creating the new data map
    data_vector = griddata(points, values, intrp, method='nearest')
    data_map = sp.reshape(data_vector, data_map.shape)
    #
    return data_map
Example #21
def get_points_of_interest(arr):
    """
    Expects a binary object in the array.
    Finds the two contour points that are farthest apart, then determines which of
    them is the base point of the RV and returns this first and the other as second
    return value.
    """
    #########
    # 1: Find points in the object's contour with the largest distance between them.
    #########
    # extract only outer contour
    arr = arr - binary_erosion(arr)

    # extract all positions of the object's contour
    points = scipy.asarray(arr.nonzero()).T

    # compute pairwise distances
    distances = squareform(pdist(points, 'euclidean'))

    # get position of largest distance
    position = scipy.unravel_index(scipy.argmax(distances),
                                   (len(points), len(points)))

    # recompute between which points the largest distance was found
    first = points[position[0]]
    second = points[position[1]]

    #logger.debug('Longest distance found between {} and {}.'.format(first, second))

    #########
    # 2: Determine which of these is the base point
    #########
    # go along perpendicular lines, find intersections with contours and keep longest only
    intersection = False
    longest_length = 0
    longest_line = line_from_points(first, second)
    segment_points = split_line_to_sections(5, first, second)
    for sp in segment_points:
        sline = perpendicular_line(longest_line, sp)
        nearest = find_nearest(sline, points, 10)
        if distance(nearest[0], nearest[1]) > longest_length:
            longest_length = distance(nearest[0], nearest[1])
            intersection = sp

    # determine which of the first two points are nearest to the longest line and return them
    if distance(intersection, first) < distance(intersection, second):
        return (first, second)
    else:
        return (second, first)
Example #22
def canonical_permutation( x ):
    """Rearrange the rows and columns of a matrix so that the maximum
    element is at (0,0), the elements of the first row are in sorted
    order and the first columns is in sorted order of the rows.
    Returns the permutation required to make it"""

    # Find the smallest element.
    idx = x.argmin()
    r, c = sc.unravel_index( idx, x.shape )
    # Apply the sorts that bring x to the top
    colp = x[r, :].argsort()
    rowp = x[:, c].argsort()
    perm = (rowp, colp)

    return perm
Example #23
 def compare(self, chunk, tiles):
     assert (chunk.shape[0] == self.compareChunkSize)
     
     chunk = scipy.int_(chunk)
     S = chunk.shape[0]
     # distance will contain the distance for each tile, for each position
     distances = scipy.zeros((self.shiftDim[0], self.shiftDim[1], tiles.shape[0]))
     for i in range(self.shiftDim[0]):
         for j in range(self.shiftDim[1]):
             distances[i,j,:] = self.distance(chunk, tiles[:,i:i+S,j:j+S,:])
     combinedIndex = scipy.unravel_index(scipy.argmin(distances), distances.shape)
     idx  = combinedIndex[-1]
     pos  = self.translatePos(combinedIndex[:-1])
     dist = distances[combinedIndex]
     return (idx, pos, dist)
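A toy sketch of the selection step: argmin over the (shift-i, shift-j, tile) array yields one flat index, and unravel_index splits it into a shift position plus a tile id:

import numpy as np

distances = np.full((2, 2, 3), 9.0)
distances[1, 0, 2] = 0.5               # best match: tile 2 at shift (1, 0)
combined = np.unravel_index(np.argmin(distances), distances.shape)
idx, pos = combined[-1], combined[:-1]
print(idx, pos, distances[combined])   # 2 (1, 0) 0.5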
Example #24
 def get_fracture_voxels(self, coordinates=False):
     r"""
     Returns a vector or vectors containing all fracture voxels in
     the image stack.
     Parameters:
         coordinates (boolean) - If False then a single vector is returned
         with flattened indices. If True then three vectors are returned
         which are the X, Y and Z coordinates of each voxel.
     """
     nonzero_locs = sp.where(sp.ravel(self))[0].astype(self.index_int_type)
     logger.debug('{} non-zero voxels in image'.format(nonzero_locs.size))
     if coordinates:
         return sp.unravel_index(nonzero_locs, self.shape)
     else:
         return nonzero_locs
Example #25
def Locus(image_data):
    """Finds the index of the brightest galaxy once filters are applied to 
    input data at its stage of filtration
    
    INPUTS
    ------
    image_data = 2D array containing the number of counts in each pixel
    
    OUTPUTS
    -------
    [0]: image_data[index] = number of counts at the brightest point in the image
    [1]: index = index of the brightest point in the image"""

    index = sp.unravel_index(sp.argmax(image_data), image_data.shape)
    return [image_data[index], index]
Example #27
def makefitsurfv2(xarrs,y_acf,sensdict,simparams,yerr=None):


    youtsize = [len(x) for x in xarrs]
    ytprod = 1
    for xl in youtsize:
        ytprod = ytprod*xl

    yout = sp.zeros(youtsize,dtype=sp.float128)

    for iparam in range(ytprod):
        curind = sp.unravel_index(iparam,youtsize)
        curx = xarrs[curind[0]][curind[1]]

        yout[curind[:]] = sp.power(sp.absolute(ISRSfitfunction(curx,y_acf,sensdict,simparams,yerr)),2).sum()
    return(yout)
Example #29
def cov_from_segments(gene, seg_counts, edge_counts, edge_idx, ax, sample_idx=None,
                      log=False, cmap_seg=None, cmap_edg=None, xlim=None, grid=False,
                      order='C'):
    """This function takes a gene and its corresponding segment and edge counts to
    produce a coverage overview plot."""

    if sample_idx is None:
        sample_idx = sp.arange(seg_counts.shape[1])

    norm = plt.Normalize(0, sample_idx.shape[0])

    if cmap_seg is None:
        cmap_seg = plt.get_cmap('jet')
    if cmap_edg is None:
        cmap_edg = plt.get_cmap('jet')

    ### iterate over samples
    for ii,i in enumerate(sample_idx):
        ### collect count information and add segment patches
        for j in range(gene.segmentgraph.segments.shape[1]):
            s = gene.segmentgraph.segments[:, j]
            if log:
                counts = sp.log10(seg_counts[j, i] + 1)
            else:
                counts = seg_counts[j, i]
            #ax.add_patch(patches.Rectangle((s[0], 0), s[1] - s[0], counts, fill=cmap_seg(norm(ii)),
            #             edgecolor='none', alpha=0.5))
            ax.plot(s, [counts, counts], '-', color=cmap_seg(norm(ii)), linewidth=2)

        for j in range(edge_idx.shape[0]):
            [s, t] = sp.unravel_index(edge_idx[j], gene.segmentgraph.seg_edges.shape, order=order) 
            if log:
                counts = sp.log10(edge_counts[j, i] + 1)
            else:
                counts = edge_counts[j, i]
            add_intron_patch2(ax, gene.segmentgraph.segments[1, s], gene.segmentgraph.segments[0, t], counts, color=cmap_edg(norm(ii)))

    if xlim is not None:
        ax.set_xlim(xlim)

    ### draw grid
    if grid:
        ax.grid(b=True, which='major', linestyle='--', linewidth=0.2, color='#222222')
        ax.xaxis.grid(False)

    ax.set_ylim([0, ax.get_ylim()[1]])
Example #30
def generate_offset_map(nonzero_locs, shape):
    r"""
    Creates a map storing the index of the lowest y-axis pixel in an
    X-Z column.
    """
    #
    logger.info('creating initial offset map')
    #
    x_coords, y_coords, z_coords = sp.unravel_index(nonzero_locs, shape)
    data = sp.ones(shape, dtype=sp.uint16)*sp.iinfo(sp.int16).max
    data[x_coords, y_coords, z_coords] = y_coords
    #
    offset_map = sp.zeros((shape[0], shape[2]), dtype=sp.int16)
    for z_index in range(shape[2]):
        offset_map[:, z_index] = sp.amin(data[:, :, z_index], axis=1)
        offset_map[:, z_index][offset_map[:, z_index] > shape[1]] = -1
    #
    return offset_map
Example #31
 def compare(self, chunk, tiles):
     chunk = self.normalize(chunk)
     for i in range(chunk.shape[2]):
         chunk[:,:,i] = chunk[:,:,i] - scipy.mean(chunk[:,:,i])
         chunk[:,:,i] = chunk[:,:,i] / scipy.amax(abs(chunk[:,:,i]))
     maxCorr = (-1, 0, 0)
     for ID, tile in enumerate(tiles):
         tile = self.normalize(tile)
         corr = scipy.zeros(self.shiftDim)
         colorComps = tile.shape[2] # usually 3 RGB color components
         for i in range(colorComps):
             corr = corr + signal.correlate(tile[:,:,i],chunk[:,:,i],
                                            mode='valid')
         corr = corr / colorComps
         max_idx = scipy.unravel_index(scipy.argmax(corr), self.shiftDim)
         if (corr[max_idx] > maxCorr[2]):
             #print corr[max_idx]
             maxCorr = (ID, self.translatePos(max_idx), corr[max_idx])
     return maxCorr
Example #32
    def __init__(self, template, spacing=[1, 1, 1], **kwargs):

        template = sp.atleast_3d(template)
        if 'shape' in kwargs:
            del kwargs['shape']
            logger.warning('shape argument ignored, inferred from template')
        super().__init__(shape=template.shape, spacing=spacing, **kwargs)

        coords = sp.unravel_index(range(template.size), template.shape)
        self['pore.template_coords'] = sp.vstack(coords).T
        self['pore.template_indices'] = self.Ps
        self['pore.drop'] = template.flatten() == 0
        topotools.trim(network=self, pores=self.pores('drop'))
        del self['pore.drop']
        # remove labels pertaining to surface pores, then redo post-trim
        self.clear(mode='labels')
        self['pore.internal'] = True
        self['throat.internal'] = True
        topotools.find_surface_pores(self)
Example #33
def _get_back_path(matrix, row, column):

    if matrix[row - 1, column - 1] <= 0:
        return [(row, column), (row - 1, column - 1)]

    matrix_part = matrix[row - 1:row + 1, column - 1:column + 1]
    matrix_part[1, 1] = 0
    same_row, same_column = scipy.unravel_index(matrix_part.argmax(),
                                                matrix_part.shape)

    if same_row and same_column:
        return [(row, column)]

    point_row = row if same_row else row - 1
    point_column = column if same_column else column - 1

    return [
        (row, column),
    ] + _get_back_path(matrix, point_row, point_column)
Example #34
def findTemplate(origin, template_pos, template_size, search_pos, search_size, target):
    """
    Find a template image into another image by normalized cross-correlation.

    Arguments:
      - origin, ndarray: image where the template is extracted (the image is accessed as a matrix, i.e. the points (x,y)
      is found at origin[y,x])
      - template_pos, (int,int): position (x,y) of the template
      - template_size, (int,int): size (width,height) of the template
      - search_pos, (int,int): central position (x,y) of the search zone
      - search_size, (int,int): size (width,height) of the search zone
      - target, ndarray: image where the template is searched (the image is accessed as a matrix, i.e. the points (x,y)
      is found at target[y,x])
    """
    t_left = max(0, template_pos[0] - template_size[0])
    t_right = min(origin.shape[1]-1,
                  template_pos[0]
                  + template_size[0])
    t_bottom = max(0, template_pos[1] - template_size[1])
    t_top = min(origin.shape[0]-1, template_pos[1] + template_size[1])
    template = origin[t_bottom:t_top, t_left:t_right]
    if t_left == 0:
        template_size = (template_pos[0]-1, template_size[1])
    if t_bottom == 0:
        template_size = (template_size[0], template_pos[1]-1)

#  template = template / sqrt((template*template).sum().sum())

    s_left = max(0, search_pos[0] - search_size[0])
    s_right = min(target.shape[1]-1, search_pos[0] + search_size[0])
    s_bottom = max(0, search_pos[1] - search_size[1])
    s_top = min(target.shape[0]-1, search_pos[1] + search_size[1])
    target = target[s_bottom:s_top, s_left:s_right]

#  target = target / sqrt((target*target).sum().sum())

    cross = abs(normcross2d(template, target))
    pos = scipy.unravel_index(cross.argmax(), cross.shape)
    value = cross[pos]
    center = (pos[1]+s_left-template_size[1]+1,
              pos[0]+s_bottom-template_size[0]+1)
    return center, value
Example #36
    def asarray(self,values):
        r'''
        Retrieve values as a rectangular array, rather than the OpenPNM list format

        Parameters
        ----------
        values : array_like
            The values from the network (in a list) to insert into the array

        Notes
        -----
        This method can break on networks that have had boundaries added.  It
        will usually work IF the list of values came only from 'internal' pores.
        '''
        if sp.shape(values)[0] > self.num_pores('internal'):
            raise Exception('The received values are bigger than the original network')
        Ps = sp.array(self['pore.index'][self.pores('internal')],dtype=int)
        arr = sp.ones(self._shape)*sp.nan
        ind = sp.unravel_index(Ps,self._shape)
        arr[ind[0],ind[1],ind[2]] = values
        return arr
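The reshaping relies on unravel_index being the exact inverse of flat indexing; a sketch with a hypothetical shape and pore list:

import numpy as np

shape = (2, 2, 2)
Ps = np.array([0, 3, 5])               # flat pore indices
values = np.array([10.0, 20.0, 30.0])
arr = np.full(shape, np.nan)
ind = np.unravel_index(Ps, shape)
arr[ind[0], ind[1], ind[2]] = values   # arr.flat[Ps] = values would also work
print(arr.flat[3])                     # 20.0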
Example #37
def find_profiles(fracture_slice):
    r"""
    Takes in a 2-D data slice and generates line traces for JRC and Df
    analysis.

    Returns a dictionary of the top, bottom and midsurface traces as well
    as the fraction of bifurcations and zero aperture zones.
    """
    #
    data = fracture_slice.slice_data
    #
    aperture = sp.sum(data, axis=1, dtype=int)
    non_zero = sp.where(data.ravel() > 0)[0]
    a1_coords, a2_coords = sp.unravel_index(non_zero, data.shape)
    #
    # getting the three profiles
    profile = sp.ones(data.shape, dtype=float) * sp.inf
    profile[a1_coords, a2_coords] = a2_coords
    bottom = sp.amin(profile, axis=1)
    bottom[~sp.isfinite(bottom)] = sp.nan
    #
    profile = sp.ones(data.shape, dtype=float) * -sp.inf
    profile[a1_coords, a2_coords] = a2_coords
    top = sp.amax(profile, axis=1)
    top[~sp.isfinite(top)] = sp.nan
    #
    mid = (bottom + top) / 2.0
    #
    # calculating bifurcation locations
    bif_frac = top - bottom + 1  # because I store both fracture voxel indices
    bif_frac[aperture == 0] = 0  # zero aperture zones are excluded
    bif_frac = bif_frac.astype(int)
    #
    # updating attributes
    fracture_slice.top = top
    fracture_slice.bot = bottom
    fracture_slice.mid = mid
    fracture_slice.aperture = aperture
    fracture_slice.zero_ap_count = sp.where(aperture == 0)[0].size
    fracture_slice.bifurcation_count = sp.where(bif_frac != aperture)[0].size
Example #39
def save_cluster_image(cs_ids, groups, counts, locs, img_shape, img_name):
    r"""
    Saves an 8 bit image colored by cluster number
    """
    logger.info('creating tiff image file colored by cluster number')
    #
    msg = '\t{} % of nodes covered in {} colored groups'
    num_cs = min(16, groups.size)
    num = sp.sum(counts[0:num_cs]) / cs_ids.size * 100
    logger.debug(msg.format(num, num_cs))
    #
    # setting the top 16 groups separated by increments of 8 and the rest are 255
    data = sp.ones(cs_ids.size, dtype=sp.uint8) * 255
    for n, cs_id in enumerate(groups[0:num_cs - 1]):
        inds = sp.where(cs_ids == cs_id)[0]
        data[inds] = 67 + n * 8
    #
    x_coords, y_coords, z_coords = sp.unravel_index(locs, img_shape)
    img_data = sp.zeros(img_shape, dtype=sp.uint8)
    img_data[x_coords, y_coords, z_coords] = data
    # save image data
    img_data = img_data.view(FractureImageStack)
    logger.info('saving image cluster data to file ' + img_name)
    img_data.save(img_name, overwrite=True)
Example #40
    def to_array(self, values):
        r"""
        Converts the values to a rectangular array with the same shape as the
        network

        Parameters
        ----------
        values : array_like
            An Np-long array of values to convert to

        Notes
        -----
        This method can break on networks that have had boundaries added.  It
        will usually work IF the given values came only from 'internal'
        pores.

        """
        if sp.shape(values)[0] > self.num_pores('internal'):
            raise Exception('The array shape does not match the network')
        Ps = sp.array(self['pore.index'][self.pores('internal')], dtype=int)
        arr = sp.ones(self._shape) * sp.nan
        ind = sp.unravel_index(Ps, self._shape)
        arr[ind[0], ind[1], ind[2]] = values
        return arr
Example #41
def generate_coordinate_arrays(aper_map, para_data_dict):
    r"""
    Generates the coordinate arrays to use in data interpolation for converting
    paraview point data into a 2-D data map.
    """
    #
    # generating XYZ coordinates from map to interpolate to
    logger.info('calculating aperture map cell center coordinates...')
    temp = sp.arange(aper_map.data_map.size, dtype=int)
    temp = sp.unravel_index(temp, aper_map.data_map.shape[::-1])
    map_coords = sp.zeros((aper_map.data_map.size, 3), dtype=float)
    #
    # half voxel added to make map points be cell centers
    map_coords[:, 0] = temp[0] * avg_fact * voxel_size + voxel_size / 2.0
    map_coords[:, 2] = temp[1] * avg_fact * voxel_size + voxel_size / 2.0
    #
    # pulling XYZ coordinates from the data file
    logger.info('processing data file data for coordinates...')
    data_coords = sp.zeros((para_data_dict['points:0'].shape[0], 3))
    data_coords[:, 0] = para_data_dict['points:0']
    data_coords[:, 1] = para_data_dict['points:1']
    data_coords[:, 2] = para_data_dict['points:2']
    #
    return map_coords, data_coords
Example #43
def clust_seqences(cluster, x, y, x_lim, y_lim, lev_num, kde, cluster_region,
                   kernel):
    '''This is the central function. It generates the contour plots around the
    cluster members. The extreme points of these contour levels are used to trace
    the ZAMS fiducial line for each cluster, according to a minimum level value
    allowed. The first method interpolates those points and then discards
    points according to the xy limits. The second method first discards points
    based on the xy limits and then interpolates the remaining ones.
    '''
    
    # This list will hold the points obtained through the contour curves,
    # the first sublist are the x coordinates of the points and the second
    # the y coordinates.
    contour_seq = [[], []]

    levels_range = np.arange(0.05,1.01,0.05)
    # Store contour levels.
    CS = plt.contour(x, y, kde, levels_range)
    
    for i,clc in enumerate(CS.collections):
        for j,pth in enumerate(clc.get_paths()):
            cts = pth.vertices
            d = sp.spatial.distance.cdist(cts,cts)
            x_c,y_c = cts[list(sp.unravel_index(sp.argmax(d),d.shape))].T
            # Only store points that belong to contour PDF values that belong
            # to the upper curves, i.e. do not use those with index < lev_num.
            if i >= lev_num:
                contour_seq[0].append(round(x_c[0],4))
                contour_seq[1].append(round(y_c[0],4))
                contour_seq[0].append(round(x_c[1],4))
                contour_seq[1].append(round(y_c[1],4))

    # If the sequence is an empty list don't attempt to plot the
    # polynomial fit.
    if contour_seq:
        
        poli_order = 2  # Order of the polynomial.
        
        # Method 1.
        # 1- Obtain the sequence's fitting polynomial.
        poli = np.polyfit(contour_seq[1], contour_seq[0], poli_order)
        y_pol = np.linspace(min(contour_seq[1]),
                            max(contour_seq[1]), 50)
        p = np.poly1d(poli)
        x_pol = [p(i) for i in y_pol]
        # 2- Trim the interpolated sequence to the range in xy axis.
        y_pol_trim, x_pol_trim = zip(*[(ia,ib) for (ia, ib) in \
        zip(y_pol, x_pol) if x_lim[0] <= ib <= x_lim[1] and \
        y_lim[0] <= ia <= y_lim[1]])

        # Method 2.
        # 1- Trim the sequence to the xy range.
        y_trim, x_trim = zip(*[(ia,ib) for (ia, ib) in \
        zip(contour_seq[1], contour_seq[0]) if x_lim[0] <= ib <= x_lim[1] and \
        y_lim[0] <= ia <= y_lim[1]])
        # 2- Obtain the sequence's fitting polynomial.
        poli = np.polyfit(y_trim, x_trim, poli_order)
        y_trim_pol = np.linspace(min(y_trim), max(y_trim), 50)
        p = np.poly1d(poli)
        x_trim_pol = [p(i) for i in y_trim_pol]
        
    else:
        x_pol_trim, y_pol_trim, x_trim_pol, y_trim_pol = [], [], [], []

    return x_pol_trim, y_pol_trim, x_trim_pol, y_trim_pol
Example #44
def forward_lmm_kronecker(snps,phenos,Asnps=None,Acond=None,K1r=None,K1c=None,K2r=None,K2c=None,covs=None,Acovs=None,threshold = 5e-8, maxiter = 2,qvalues=False, update_covariances = False,**kw_args):
    """
    Kronecker fixed effects test with forward selection

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        phenos: [N x P] SP.array of P phenotypes for N individuals
        K:      [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                        If not provided, then linear regression analysis is performed
        covs:   [N x D] SP.array of D covariates for N individuals
        threshold:      (float) P-value threshold for inclusion in forward selection (default 5e-8)
        maxiter:        (int) maximum number of interaction scans. First scan is
                        without inclusion, so maxiter-1 inclusions can be performed. (default 2)
        qvalues:        Use q-value threshold and return q-values in addition (default False)
        update_covariances: Boolean indicator of whether covariances should be re-estimated after each forward step (default False)

    Returns:
        lm:             lmix LMMi object
        resultStruct with elements:
            iadded:         array of indices of SNPs included in order of inclusion
            pvadded:        array of Pvalues obtained by the included SNPs in iteration
                            before inclusion
            pvall:   [maxiter x S] SP.array of Pvalues for all iterations
        Optional:      corresponding q-values
            qvadded
            qvall
    """

    #0. checks
    N  = phenos.shape[0]
    P  = phenos.shape[1]

    if K1r is None:
        K1r = SP.dot(snps,snps.T)
    else:
        assert K1r.shape[0]==N, 'K1r: dimensions mismatch'
        assert K1r.shape[1]==N, 'K1r: dimensions mismatch'

    if K2r is None:
        K2r = SP.eye(N)
    else:
        assert K2r.shape[0]==N, 'K2r: dimensions mismatch'
        assert K2r.shape[1]==N, 'K2r: dimensions mismatch'

    covs,Acovs = updateKronCovs(covs,Acovs,N,P)

    if Asnps is None:
        Asnps = [SP.ones([1,P])]
    if (type(Asnps)!=list):
        Asnps = [Asnps]
    assert len(Asnps)>0, "need at least one Snp design matrix"

    if Acond is None:
        Acond = Asnps
    if (type(Acond)!=list):
        Acond = [Acond]
    assert len(Acond)>0, "need at least one Snp design matrix"

    #1. run GP model to infer suitable covariance structure
    if K1c is None or K2c is None:
        vc = estimateKronCovariances(phenos=phenos, K1r=K1r, K2r=K2r, K1c=K1c, K2c=K2c, covs=covs, Acovs=Acovs, **kw_args)
        K1c = vc.getEstTraitCovar(0)
        K2c = vc.getEstTraitCovar(1)
    else:
        vc = None
        assert K1c.shape[0]==P, 'K1c: dimensions mismatch'
        assert K1c.shape[1]==P, 'K1c: dimensions mismatch'
        assert K2c.shape[0]==P, 'K2c: dimensions mismatch'
        assert K2c.shape[1]==P, 'K2c: dimensions mismatch'
    t0 = time.time()
    lm,pv = kronecker_lmm(snps=snps,phenos=phenos,Asnps=Asnps,K1r=K1r,K2r=K2r,K1c=K1c,K2c=K2c,covs=covs,Acovs=Acovs)

    #get pv
    #start stuff
    iadded = []
    pvadded = []
    qvadded = []
    time_el = []
    pvall = SP.zeros((pv.shape[0]*maxiter,pv.shape[1]))
    qvall = None
    t1=time.time()
    print ("finished GWAS testing in %.2f seconds" %(t1-t0))
    time_el.append(t1-t0)
    pvall[0:pv.shape[0],:]=pv
    imin= SP.unravel_index(pv.argmin(),pv.shape)
    score=pv[imin].min()
    niter = 1
    if qvalues:
        assert pv.shape[0]==1, "This is untested with the fdr package. pv.shape[0]==1 failed"
        qvall = SP.zeros((maxiter,snps.shape[1]))
        qv  = FDR.qvalues(pv)
        qvall[0:1,:] = qv
        score=qv[imin]
    #loop:
    while (score<threshold) and niter<maxiter:
        t0=time.time()
        pvadded.append(pv[imin])
        iadded.append(imin)
        if qvalues:
            qvadded.append(qv[imin])
        if update_covariances and vc is not None:
            vc.addFixedTerm(snps[:,imin[1]:(imin[1]+1)],Acond[imin[0]])
            vc.setScales()#CL: don't know what this does, but findLocalOptima crashes because vc.noisPos=None
            vc.findLocalOptima(fast=True)
            K1c = vc.getEstTraitCovar(0)
            K2c = vc.getEstTraitCovar(1)
            lm.setK1c(K1c)
            lm.setK2c(K2c)
        lm.addCovariates(snps[:,imin[1]:(imin[1]+1)],Acond[imin[0]])
        for i in xrange(len(Asnps)):
            #add SNP design
            lm.setSNPcoldesign(Asnps[i])
            lm.process()
            pv[i,:] = lm.getPv()[0]
        pvall[niter*pv.shape[0]:(niter+1)*pv.shape[0]]=pv
        imin= SP.unravel_index(pv.argmin(),pv.shape)
        if qvalues:
            qv = FDR.qvalues(pv)
            qvall[niter:niter+1,:] = qv
            score = qv[imin].min()
        else:
            score = pv[imin].min()
        t1=time.time()
        print ("finished GWAS testing in %.2f seconds" %(t1-t0))
        time_el.append(t1-t0)
        niter=niter+1
    RV = {}
    RV['iadded']  = iadded
    RV['pvadded'] = pvadded
    RV['pvall']   = pvall
    RV['time_el'] = time_el
    if qvalues:
        RV['qvall'] = qvall
        RV['qvadded'] = qvadded
    return lm,RV
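The forward-selection loop keys on a single idiom: pv is a [designs x SNPs] matrix, so unravel_index(pv.argmin(), pv.shape) names both the SNP design matrix and the SNP that achieved the best p-value. A toy sketch:

import numpy as np

pv = np.array([[0.2, 1e-9, 0.3],
               [0.5, 0.4, 0.6]])
imin = np.unravel_index(pv.argmin(), pv.shape)
print(imin)       # (0, 1): design 0, SNP 1
print(pv[imin])   # 1e-09 -- the score compared against the threshold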
Example #45
def _victor_purpura_multiunit_dist_for_trial_pair(
        a, b, reassignment_cost, kernel):
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    # coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal cost
    # when only considering the first i spikes of the merged spikes of a and
    # j_w spikes of the spike trains of b (the reference given above denotes
    # this matrix with G). In this implementation only the one submatrix
    # for one specific i is stored, as in each step only i-1 and i will be
    # accessed. That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1,))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    flat_neighbor_indices = sp.maximum(
        0, sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    reassignment_costs = sp.empty((a_merged[0].size,) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(
        a_merged[0]).T - b_train_mat.flatten()).simplified.reshape(
            (a_merged[0].size,) + b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # <https://github.com/python-quantities/python-quantities/issues/52>
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b
        # The calculation order is somewhat different from the order one would
        # expect from the naive algorithm. This implementation, however,
        # optimizes the use of the CPU cache giving a considerable speed
        # improvement.
        # Basically this code calculates the values of a row of elements for
        # each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(
                        cost.flat[s] + seq) - seq

    return cost.flat[-1]
Ejemplo n.º 46
0
def _victor_purpura_multiunit_dist_for_trial_pair(a, b, reassignment_cost,
                                                  kernel):
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    # coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal cost
    # when only considering the first i spikes of the merged spikes of a and
    # j_w spikes of the spike trains of b (the reference given above denotes
    # this matrix with G). In this implementation the only the one submatrix
    # for one specific i is stored as in each step only i-1 and i will be
    # accessed. That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1, ))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    flat_neighbor_indices = sp.maximum(
        0,
        sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    reassignment_costs = sp.empty((a_merged[0].size, ) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(a_merged[0]).T -
                       b_train_mat.flatten()).simplified.reshape(
                           (a_merged[0].size, ) +
                           b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # <https://github.com/python-quantities/python-quantities/issues/52>
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b
        # The calculation order is somewhat different from the order one would
        # expect from the naive algorithm. This implementation, however,
        # optimizes the use of the CPU cache, giving a considerable speed
        # improvement.
        # Basically, this code calculates the values of a row of elements for
        # each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(cost.flat[s] +
                                                         seq) - seq

    return cost.flat[-1]
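
The stride arithmetic above (b_strides feeding flat_neighbor_indices) works because subtracting an axis's stride from a C-order flat index decrements that axis's coordinate by one. A small standalone check, with a toy shape of my own choosing:

import numpy as np

dims = (3, 4, 5)
strides = np.cumprod((dims + (1,))[::-1])[:-1]   # (1, 5, 20): last axis first
flat = np.ravel_multi_index((2, 3, 4), dims)     # flat index 59
for axis, stride in zip((2, 1, 0), strides):
    print(axis, np.unravel_index(flat - stride, dims))
# 2 (2, 3, 3)
# 1 (2, 2, 4)
# 0 (1, 3, 4)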
Ejemplo n.º 47
0
def bar_detectability(parser,
                      dx=_XWIDTH/20.,dy=_YWIDTH/20.,
                      nx=100,ny=20,
                      ngrid=201,rrange=[0.7,1.3],
                      phirange=[-m.pi/2.,m.pi/2.],
                      saveDir='../bar/1dLarge/'):
    """
    NAME:
       bar_detectability
    PURPOSE:
       analyze the detectability of the Hercules moving group in the 
       los-distribution around the Galaxy
    INPUT:
       nx - number of plots in the x-direction
       ny - number of plots in the y direction
       dx - x-spacing
       dy - y-spacing
       ngrid - number of gridpoints to evaluate the density on
       rrange - range of Galactocentric radii to consider
       phirange - range of Galactic azimuths to consider
       saveDir - directory to save the pickles in
    OUTPUT:
       plot in plotfilename
    HISTORY:
       2010-05-09 - Written - Bovy (NYU)
    """
    (options,args)= parser.parse_args()
    if len(args) == 0:
        parser.print_help()
        return 
    
    if options.convolve is not None:
        bar_detectability_convolve(parser,dx=dx,dy=dy,nx=nx,ny=ny,ngrid=ngrid,
                                   rrange=rrange,phirange=phirange,
                                   saveDir=saveDir)
        return

    vloslinspace= (-.9,.9,ngrid)
    vloss= sc.linspace(*vloslinspace)

    picklebasename= '1d_%i_%i_%i_%.1f_%.1f_%.1f_%.1f' % (nx,ny,ngrid,rrange[0],rrange[1],phirange[0],phirange[1])

    detect= sc.zeros((nx,ny))
    losd= sc.zeros((nx,ny))
    gall= sc.zeros((nx,ny))
    for ii in range(nx):
        for jj in range(ny):
            thisR= (rrange[0]+(rrange[1]-rrange[0])/
                    (ny*_YWIDTH+(ny-1)*dy)*(jj*(_YWIDTH+dy)+_YWIDTH/2.))
            thisphi= (phirange[0]+(phirange[1]-phirange[0])/
                      (nx*_XWIDTH+(nx-1)*dx)*(ii*(_XWIDTH+dx)+_XWIDTH/2.))
            thissavefilename= os.path.join(saveDir,picklebasename+'_%i_%i.sav' %(ii,jj))
            if os.path.exists(thissavefilename):
                print "Restoring los-velocity distribution at %.2f, %.2f ..." %(thisR,thisphi)
                savefile= open(thissavefilename,'r')
                vlosd= pickle.load(savefile)
                axivlosd= pickle.load(savefile)
                savefile.close()
            else:
                print "Did not find the los-velocity distribution at at %.2f, %.2f ..." %(thisR,thisphi)
                print "returning ..."
                return
            ddx= 1./sc.sum(axivlosd)
            #skipCenter
            if options.skipCenter != 0.:
                skipIndx= (sc.fabs(vloss) < options.skipCenter)
                indx= (sc.fabs(vloss) >= options.skipCenter)
                vlosd= vlosd/sc.sum(vlosd[indx])/ddx
                axivlosd= axivlosd/sc.sum(axivlosd[indx])/ddx
                vlosd[skipIndx]= 1.
                axivlosd[skipIndx]= 1.
            #avoid log(0): where either distribution vanishes, set both to one
            #so that those bins contribute nothing to the KL divergence
            vlosd_zeroindx= (vlosd == 0.)
            axivlosd_zeroindx= (axivlosd == 0.)
            vlosd[vlosd_zeroindx]= 1.
            axivlosd[vlosd_zeroindx]= 1.
            vlosd[axivlosd_zeroindx]= 1.
            axivlosd[axivlosd_zeroindx]= 1.
            detect[ii,jj]= probDistance.kullbackLeibler(vlosd,axivlosd,ddx,nan=True)
            #los distance and Galactic longitude
            d= m.sqrt(thisR**2.+1.-2.*thisR*m.cos(thisphi))
            losd[ii,jj]= d
            if 1./m.cos(thisphi) < thisR and m.cos(thisphi) > 0.:
                l= m.pi-m.asin(thisR/d*m.sin(thisphi))
            else:
                l= m.asin(thisR/d*m.sin(thisphi))
            gall[ii,jj]= l

    #Find maximum, further than 3 kpc away
    detectformax= detect.flatten()
    detectformax[losd.flatten() < 3./8.2]= 0.
    x= sc.argmax(detectformax)
    indx = sc.unravel_index(x,detect.shape)
    maxR= (rrange[0]+(rrange[1]-rrange[0])/
           (ny*_YWIDTH+(ny-1)*dy)*(indx[1]*(_YWIDTH+dy)+_YWIDTH/2.))
    maxphi= (phirange[0]+(phirange[1]-phirange[0])/
                      (nx*_XWIDTH+(nx-1)*dx)*(indx[0]*(_XWIDTH+dx)+_XWIDTH/2.))
    print maxR, maxphi, losd[indx[0],indx[1]], detect[indx[0],indx[1]], gall[indx[0],indx[1]]*180./sc.pi

    #Now plot
    plot.bovy_print()
    plot.bovy_dens2d(detect.T,origin='lower',#interpolation='nearest',
                     xlabel=r'$\mathrm{Galactocentric\ azimuth}\ [\mathrm{deg}]$',
                     ylabel=r'$\mathrm{Galactocentric\ radius}\ /R_0$',
                     cmap='gist_yarg',xrange=sc.array(phirange)*_RADTODEG,
                     yrange=rrange,
                     aspect=(phirange[1]-phirange[0])*_RADTODEG/(rrange[1]-rrange[0]))
    #contour the los distance and gall
    #plot.bovy_text(-22.,1.1,r'$\mathrm{apogee}$',color='w',
    #                rotation=105.)
    plot.bovy_text(-18.,1.1,r'$\mathrm{APOGEE}$',color='w',
                    rotation=285.)
    levels= [2/8.2*(ii+1/2.) for ii in range(10)]
    contour(losd.T,levels,colors='0.25',origin='lower',linestyles='--',
            aspect=(phirange[1]-phirange[0])*_RADTODEG/(rrange[1]-rrange[0]),
            extent=(phirange[0]*_RADTODEG,phirange[1]*_RADTODEG,
                    rrange[0],rrange[1]))
    gall[gall < 0.]+= sc.pi*2.
    levels= [0.,sc.pi/2.,sc.pi,3.*sc.pi/2.]
    contour(gall.T,levels,colors='w',origin='lower',linestyles='--',
            aspect=(phirange[1]-phirange[0])*_RADTODEG/(rrange[1]-rrange[0]),
            extent=(phirange[0]*_RADTODEG,phirange[1]*_RADTODEG,
                    rrange[0],rrange[1]))
    levels= [-5/180.*sc.pi,250/180.*sc.pi]
    contour(gall.T,levels,colors='w',origin='lower',linestyles='-.',
            aspect=(phirange[1]-phirange[0])*_RADTODEG/(rrange[1]-rrange[0]),
            extent=(phirange[0]*_RADTODEG,phirange[1]*_RADTODEG,
                    rrange[0],rrange[1]))
    if options.skipCenter == 0.:
        plot.bovy_text(r'$\mathrm{KL\ divergence\ / \ all}\ v_{\mathrm{los}}$',
                       title=True)
    else:
        plot.bovy_text(r'$\mathrm{KL\ divergence\ / }\ |v_{\mathrm{los}}| \geq %.2f \ v_0$' % options.skipCenter,
                       title=True)
    plot.bovy_end_print(args[0])
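
probDistance is not shown in this example; a discrete Kullback-Leibler divergence with the same call shape (two binned distributions plus a bin width) might look like the sketch below. This is an assumption about the interface, not the actual probDistance implementation:

import numpy as np

def kullback_leibler(p, q, dx, nan=True):
    # D_KL(p || q) for two binned distributions with bin width dx;
    # with nan=True, bins where either distribution vanishes are skipped
    p, q = np.asarray(p, float), np.asarray(q, float)
    mask = (p > 0.) & (q > 0.) if nan else np.ones(p.shape, bool)
    return np.sum(p[mask] * np.log(p[mask] / q[mask])) * dx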
Ejemplo n.º 48
0
def cov_from_segments(gene, seg_counts, edge_counts, edge_idx, ax, sample_idx=None,
                      log=False, cmap_seg=None, cmap_edg=None, xlim=None, grid=False,
                      order='C'):
    """This function takes a gene and its corresponding segment and edge counts to
    produce a coverage overview plot."""

    if sample_idx is None:
        sample_idx = [sp.arange(seg_counts.shape[1])]

    norm = plt.Normalize(0, len(sample_idx))

    if cmap_seg is None:
        cmap_seg = plt.get_cmap('jet') 
    if cmap_edg is None:
        cmap_edg = plt.get_cmap('jet')

    line_patches = []
    fill_patches = []

    ### iterate over segments
    for j in range(gene.segmentgraph.segments.shape[1]):
        s = gene.segmentgraph.segments[:, j]
        ### iterate over samples
        for c, curr_idx in enumerate(sample_idx):
            #for i in curr_idx:
            if log:
                counts = sp.log10(seg_counts[j, curr_idx] + 1)
            else:
                counts = seg_counts[j, curr_idx]

            ### plot segment over all samples (including uncertainty region)
            if counts.shape[0] == 1:
                ax.plot(s, [counts[0], counts[0]], '-', color=cmap_seg(norm(c)), linewidth=0.5)
                #line_patches.append(mlines.Line2D(s, [counts[0], counts[0]], color=cmap_seg(norm(c)), linewidth=2, transform=None))
            elif counts.shape[0] > 1:
                stderr = spst.sem(counts)
                mean = sp.mean(counts)
                #ax.fill_between(s, mean, mean+stderr, color=cmap_seg(norm(c)), alpha=0.3)
                ax.fill_between(s, mean-stderr, mean+stderr, color=cmap_seg(norm(c)), alpha=0.2, edgecolor='none', linewidth=0)
                #fill_patches.append(mpatches.Rectangle(s, mean-stderr, mean+stderr, color=cmap_seg(norm(c)), alpha=0.3, transform=None))
                ax.plot(s, [mean, mean], '-', color=cmap_seg(norm(c)), linewidth=0.5)
                #line_patches.append(mlines.Line2D(s, [mean, mean], color=cmap_seg(norm(c)), linewidth=2, transform=None))

                #ax.plot(s, [mean+stderr, mean+stderr], ':', color=cmap_seg(norm(c)), linewidth=1)
                #ax.plot(s, [mean-stderr, mean-stderr], ':', color=cmap_seg(norm(c)), linewidth=1)

    #for line in line_patches:
    #    ax.add_line(line)
    #for patch in fill_patches:
    #    ax.add_patch(patch)

    ### iterate over intron edges
    for j in range(edge_idx.shape[0]):
        ### iterate over samples
        for c, curr_idx in enumerate(sample_idx):
            [s, t] = sp.unravel_index(edge_idx[j], gene.segmentgraph.seg_edges.shape, order=order) 
            if log:
                counts = sp.log10(edge_counts[j, curr_idx] + 1)
            else:
                counts = edge_counts[j, curr_idx]
            mean = sp.mean(counts)
            add_intron_patch2(ax, gene.segmentgraph.segments[1, s], gene.segmentgraph.segments[0, t], mean, color=cmap_edg(norm(c)))

    if xlim is not None:
        ax.set_xlim(xlim)

    ### draw grid
    if grid:
        ax.grid(b=True, which='major', linestyle='--', linewidth=0.2, color='#222222')
        ax.xaxis.grid(False)

    ax.set_ylim([0, ax.get_ylim()[1]])
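
The order keyword passed to sp.unravel_index above matters: the same flat edge index maps to a different (source, target) pair depending on whether seg_edges was flattened row-major ('C') or column-major ('F'). A quick standalone check with a toy shape:

import numpy as np

shape = (3, 4)
print(np.unravel_index(7, shape, order='C'))   # (1, 3)
print(np.unravel_index(7, shape, order='F'))   # (1, 2)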
Ejemplo n.º 49
0
			beta_fit = sparray(ans[0], dtype="single")
			#y_resid = ans[1]

			## Evaluating the fit
			Y_pred = dot(X_test,beta_fit)

			MSE[f2_vec.index(f2),p_vec.index(p)] += spsum((Y_pred-Y_test)**2)
			print("CVfold,f2,p:", [CVfold,f2,p])
			sys.stdout.flush()


print("MSE:", MSE)
sys.stdout.flush()

## The index of the minimum:
i,j = sp.unravel_index(MSE.argmin(), MSE.shape)

#for i in range(3):
#	for j in range(6):
#		if MSE[i,j]==MSE.min():
#			min_index_0 = i
#			min_index_1 = j

f2 = f2_vec[i]
p = p_vec[j]
print("f2:",f2)
print("p:",p)
print("min(MSE):", min(MSE))

print("Step 2a took", round((time.time()-step)/60,2),"minutes")
Ejemplo n.º 50
0
    figure = triangle.corner(samples, quantiles=[0.16, 0.5, 0.84])

    figure.savefig(str(percent) + '%' + "triangle.pdf")
    plt.close()

    # plot the walker positions over iterations

    plot_walkers_iter(ndim, paramnames, chain)

    autocorr_time = sampler.get_autocorr_time()

    # get the maximum likelihood value and parameter values
    f.write('\n\tMean acceptance: %d \n' %
            (sp.mean(sampler.acceptance_fraction) * 100.))
    ML = sp.unravel_index(lnlike_flat.argmax(), lnlike_flat.shape)
    par_ML = samples[ML]
    # calculate the parameter medians and +/- 1 sigma uncertainties
    # for a normal distribution 68% of scores between +/- 1 sigma
    # this corresponds to percentiles of 16 (-1 sig) and 84 (+1 sig)

    f.write('\n\tAutocorrelation time: \n%s' % autocorr_time)
    f.write(
        '\n\tMaximum likelihood parameters = \n%s\nML index = %s \nML value = %s \n'
        % (par_ML, ML, lnlike_flat[ML]))
    # f.write('Median, Upper, Lower = \n%s\n' % vmr)
    f.write('PERCENTAGE COMPLETE: %s%% \n' % percent)

    # plot spectra and temperature profile if in retrieval

    plot_spectra(percent, samples, runname, y, yerr, vflag, gflag, ntemp, nvmr,
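
# A standalone sketch (toy data of my own, not the sampler output above) of
# the argmax/unravel_index pattern used for the maximum-likelihood lookup:
import numpy as np

lnlike = np.random.randn(10, 50)            # e.g. (walkers, steps)
ml = np.unravel_index(lnlike.argmax(), lnlike.shape)
print(ml, lnlike[ml])                       # index and value of the peak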
Ejemplo n.º 51
0
def best_match(self, inputs):
    return scipy.unravel_index(
        scipy.argmax(scipy.multiply(self.nodes, inputs).sum(axis=2)),
        (self.height, self.width))
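
best_match returns the (row, col) of the node whose weight vector has the largest dot product with the input, which is the usual best-matching-unit step of a self-organizing map. A standalone sketch with toy sizes of my own:

import numpy as np

height, width, dim = 4, 5, 3
nodes = np.random.rand(height, width, dim)     # one weight vector per node
inputs = np.random.rand(dim)
scores = (nodes * inputs).sum(axis=2)          # dot product at every node
print(np.unravel_index(scores.argmax(), (height, width)))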
Ejemplo n.º 52
0
def max(arr):
    # note: shadows the built-in max(); returns ((i, j, ...), max value)
    maxi = np.argmax(arr)
    return sp.unravel_index(maxi, arr.shape), arr.max()
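
Usage sketch with toy values; the call goes to the max defined above, which shadows the built-in (older scipy re-exports numpy's unravel_index, which is what sp.unravel_index resolves to):

import numpy as np
import scipy as sp

arr = np.array([[0., 2.],
                [5., 1.]])
print(max(arr))                 # ((1, 0), 5.0)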
Ejemplo n.º 53
0
hval=hist(distance, td, weights=redcount)
xlabel(r'D, $\AA$', position=(0.9,0), size='xx-large', va='center')
gca().yaxis.set_ticklabels([])
grid()

subplot(322)
title('Distance vs angle', position=(0.05,0.95), color='white', ha='left', va='top', size='xx-large')
zda=array([histogram(angle[abs(distance-x)<=0.5*args.dstep], bins=ta, weights=redcount[abs(distance-x)<=0.5*args.dstep], density=args.normden)[0] for x in td])
contourf(0.5*(ta[1:]+ta[:-1]),td,array(zda),50)
xlabel(r'$\alpha$, $\degree$', position=(0.9,0), size='xx-large', va='center')
ylabel(r'D, $\AA$', size='xx-large')
grid()

if args.dakernel:
    xv,yv=meshgrid(0.5*(ta[1:]+ta[:-1]), td)
    ti, tj = unravel_index(argmax(zda),zda.shape)
    xo = xv[ti,tj]
    yo = yv[ti,tj]
    sx = 2*sum(sum(zda>zda.max()*exp(-1)))/pi*args.astep**2
    sy = 2*sum(sum(zda>zda.max()*exp(-1)))/pi*args.dstep**2
    Anorm=zda.sum()*sqrt(pi*(sx+sy))*args.dstep*args.astep
    p = [Anorm, xo, yo, sx, sy]
    print("Round 1:")
    print("Initial parameters: D/sigmaD = %.2f / %.2f, A/sigmaA = %.1f / %.1f" % (p[2], sqrt(p[4]/2), p[1], sqrt(p[3]/2)))
    pp = fmin(lsqwgauss, p, (xv, yv, zda))
    print("Refined parameters: D/sigmaD = %.2f / %.2f, A/sigmaA = %.1f / %.1f" % (pp[2], sqrt(pp[4]/2), pp[1], sqrt(pp[3]/2)))
    print("Round 2:")
    zv = f2gauss((xv,yv),pp)
    vv = zv.max()*array([exp(-8), exp(-4.5), exp(-2), exp(-0.5)])
    contour(xv,yv,zv,vv,colors='white')
    ppp = fmin(lsqwgauss, pp, (xv, yv, zda))