def remove_background(self, binary_image, fg_mask, prc_inner):
        # assuming white CCs for binary image, white fg region in fg mask ...

        # labeling
        cc_labels, count_labels = sci_mes.label(binary_image)

        # finding the size of each CC and how many of these pixels also form part of the mask ...
        cc_index = list(range(0, count_labels + 1))
        cc_sizes = sci_mes.sum(binary_image, cc_labels, cc_index) / 255
        cc_in_mask = sci_mes.sum(fg_mask, cc_labels, cc_index) / 255

        # get proportion inside of the mask
        cc_prc_in_mask = cc_in_mask.copy()
        cc_prc_in_mask[0] = 0
        cc_prc_in_mask[1:] /= cc_sizes[1:]

        ccs_to_keep = cc_prc_in_mask >= prc_inner
        valid_mask = ccs_to_keep[cc_labels]

        result = valid_mask.astype(np.uint8) * 255
        # cv2.imshow("image", binary_image)
        # cv2.imshow("mask", fg_mask)
        # cv2.imshow("result", result)
        # cv2.waitKey()

        # print(sci_mes.sum(binary_image, binary_image, list(range(0, count_labels))))
        return result
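The label/sum/fancy-indexing pattern above can be exercised on a small synthetic pair of arrays. The sketch below is illustrative only and assumes `sci_mes` is an alias for `scipy.ndimage`; it is not part of the original project.

# Illustrative sketch only (assumes sci_mes above aliases scipy.ndimage).
import numpy as np
from scipy import ndimage as sci_mes

binary_image = np.zeros((8, 8), dtype=np.uint8)
binary_image[1:3, 1:3] = 255        # small CC that falls outside the mask
binary_image[4:8, 4:8] = 255        # large CC that falls inside the mask
fg_mask = np.zeros_like(binary_image)
fg_mask[3:8, 3:8] = 255

cc_labels, count_labels = sci_mes.label(binary_image)
idx = list(range(count_labels + 1))
cc_sizes = sci_mes.sum(binary_image, cc_labels, idx) / 255
cc_in_mask = sci_mes.sum(fg_mask, cc_labels, idx) / 255
prc = np.where(cc_sizes > 0, cc_in_mask / np.maximum(cc_sizes, 1), 0)
keep = prc >= 0.9                   # keep CCs that are at least 90% inside the mask
result = keep[cc_labels].astype(np.uint8) * 255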
def get_cluster(Mc, Sc, Cc, Tc, maxT, maxC, maxM, jobnamelist, nameexpc, rawdatafolder, thresholdv=15):
    filenamcec = get_filenamecurimg_movie(rawdatafolder, Tc, maxT, Cc, maxC, Sc, jobnamelist, Mc, maxM, nameexpc)
    filenamcec = SquenceName + '_' + filenamcec  # SquenceName is expected to be defined at module level
    filenamec = os.path.join(rawdatafolder, filenamcec)

    try:
        imgc = Image.open(filenamec)
        imarr = np.array(imgc, dtype=float)
        imarr = imarr[:, :, 1]  # keep only the second (green) channel
    except Exception:
        print("unable to open...")
        print(filenamec)

    # thresholded copy of the image: 1 above thresholdv, 0 below
    imarrts = np.copy(imarr)
    imarrts[imarrts < thresholdv] = 0
    imarrts[imarrts >= thresholdv] = 1
    # denoise the intensity image with a 3x3 median filter
    imarr[:, :] = scipy.ndimage.filters.median_filter(imarr, 3)[:, :]

    # get clusters on the image; generate_binary_structure(2, 2) is a 3x3 structuring
    # element, i.e. 8-connectivity (diagonals included)
    sbindiagnoal = scipy.ndimage.morphology.generate_binary_structure(2, 2)
    labeled_array, numberclusters_curimg = measurements.label(imarr, structure=sbindiagnoal)  # imarr or imarrts could be used here

    # per cluster: x, y, mass, total size, extension, name
    centermass = np.zeros([numberclusters_curimg + 1, 6])
    Imgtreshhold = ""
    for ilc in range(1, numberclusters_curimg + 1):  # labels run from 1 to numberclusters_curimg
        # center of mass, total mass and thresholded size of each cluster
        curpos = measurements.center_of_mass(imarr, labels=labeled_array, index=ilc)
        centermass[ilc, 0] = curpos[0]
        centermass[ilc, 1] = curpos[1]
        centermass[ilc, 2] = measurements.sum(imarr, labels=labeled_array, index=ilc)
        centermass[ilc, 3] = measurements.sum(imarrts, labels=labeled_array, index=ilc)
        centermass[ilc, 4] = np.sqrt(centermass[ilc, 3])

        if centermass[ilc, 3] > 40:
            Imgtreshhold = "*"

    disttreshold = 10
    # go through clusters and merge pairs that are too close
    for ilc in range(0, numberclusters_curimg):
        for ilc2 in range(centermass.shape[0] - 1, ilc, -1):
            distc = np.sqrt(np.power(centermass[ilc2, 0] - centermass[ilc, 0], 2.) + np.power(centermass[ilc2, 1] - centermass[ilc, 1], 2.))
            if distc < disttreshold:
                # mass-weighted x position, averaged y position, summed masses and sizes
                centermass[ilc, 0] = (centermass[ilc, 0] * centermass[ilc, 2] + centermass[ilc2, 0] * centermass[ilc2, 2]) / (centermass[ilc, 2] + centermass[ilc2, 2])
                centermass[ilc, 1] = (centermass[ilc, 1] + centermass[ilc2, 1]) / 2.
                centermass[ilc, 2] = centermass[ilc, 2] + centermass[ilc2, 2]
                centermass[ilc, 3] = centermass[ilc, 3] + centermass[ilc2, 3]
                centermass[ilc, 4] = np.sqrt(centermass[ilc, 3])

                centermass = np.delete(centermass, ilc2, axis=0)
    return centermass
Example #3
File: Tomo2Mesh.py Project: vitst/tomoPost
 def erode_converge(self, bin_image):
     print("Erode array", flush=True)
     size = bin_image.shape[0] * bin_image.shape[1] * bin_image.shape[2]
     next_size = measurements.sum(bin_image)
     while (next_size < size):
         print("size: {}  next: {}".format(size, next_size), flush=True)
         bin_image = self.erode_NN(bin_image)
         size = next_size
         next_size = measurements.sum(bin_image)
 
     return bin_image
Example #5
    def remove_background(binary_images,
                          ROI_polygon,
                          min_overlap,
                          verbose=False,
                          save_prefix=None):
        compressed_output = []

        #background_mask = np.ones(ROI_polygon.shape, ROI_polygon.dtype) * 255
        #background_mask = (ROI_polygon == 0)

        for idx, image in enumerate(binary_images):
            if verbose:
                print("Processed: " + str(idx) + " of " +
                      str(len(binary_images)),
                      end="\r")

            # get the connected components from the original image
            cc_labels, count_labels = ms.label(image)

            # Add the values of pixels on the binary ROI mask per CC
            label_list = range(count_labels + 1)
            size_sums = ms.sum(image, cc_labels, label_list)
            overlap_sums = ms.sum(ROI_polygon, cc_labels, label_list)

            size_sums[0] = 1  # avoid division by zero warning

            # compute the overlap percentage between each CC and the ROI mask
            prop_sums = overlap_sums / size_sums

            # will delete any CC that does not overlap enough with the ROI mask
            to_delete = prop_sums < min_overlap

            delete_mask = to_delete[cc_labels]
            image[delete_mask] = 0

            # erase background
            #image[background_mask] = 0

            # add to buffer
            flag, output_image = cv2.imencode(".png", image)
            compressed_output.append(output_image)

            # debug, save to disk
            if save_prefix is not None:
                cv2.imwrite(save_prefix + "_content_" + str(idx) + ".png",
                            image)

        if verbose:
            print("")

        return compressed_output
Example #6
File: droplet.py Project: tjlane/speckle
def dropletize(img, threshold=10.0, dilate=1, return_all=False):
    """
    A simple and very effective threshold-based droplet algorithm.

    Works only in the case where droplets form disjoint regions (sparse case).
    The algorithm thresholds the image and looks for connected regions above
    the threshold. Each connected region becomes a droplet.

    Parameters
    ----------
    img : np.ndarray
        The two-D image to search for droplets in.

    threshold : float
        The threshold for the image. Should be between 0 and 1/2 a photon
        in detector gain units.

    dilate : int
        Optionally extend the droplet regions this amount around the thresholded
        region. This lets you be sure you capture all intensity if using
        an aggressive threshold.

    return_all : bool
        Whether or not to also return the droplet positions (centers of mass)
        and sizes.

    Returns
    -------
    adus : list of floats
        The summed droplet intensities in detector gain units (ADUs).

    coms : np.ndarray
        The x,y positions of each droplet found (only if `return_all` is True).

    size : np.ndarray
        The number of pixels in each droplet (only if `return_all` is True).
    """

    bimg = (img > threshold)
    if dilate > 0:
        bimg = smf.binary_dilation(bimg, iterations=dilate)
    limg, numlabels = smt.label(bimg)

    adus = smt.sum(img, labels=limg, index=np.arange(2,numlabels))

    if return_all:
        coms = np.array(smt.center_of_mass(img, labels=limg,
                                        index=np.arange(2,numlabels))) 
        size = smt.sum(np.ones_like(img), labels=limg, index=np.arange(2,numlabels))
        return adus, coms, size

    else:
        return adus
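A minimal, self-contained sketch of the threshold-dilate-label-measure pipeline described in the docstring, written directly against scipy.ndimage. The values and names below are illustrative, not part of the original module.

import numpy as np
from scipy import ndimage as ndi

img = np.zeros((32, 32))
img[5:8, 5:8] = 40.0                 # one droplet
img[20:22, 10:13] = 90.0             # another droplet

bimg = img > 10.0                    # threshold
bimg = ndi.binary_dilation(bimg, iterations=1)
labels, n = ndi.label(bimg)
ids = np.arange(1, n + 1)            # connected-component labels start at 1
adus = ndi.sum(img, labels=labels, index=ids)             # summed intensity per droplet
coms = ndi.center_of_mass(img, labels=labels, index=ids)  # droplet centers of mass
sizes = ndi.sum(np.ones_like(img), labels=labels, index=ids)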
Example #7
def Case1(AvgFlux):
    ExpectedFluxUnder = np.median(AvgFlux)

    #find a standard Aperture
    StdAper = (AvgFlux > ExpectedFluxUnder)
    lw, num = measurements.label(
        StdAper)  # this numbers the different apertures distinctly
    area = measurements.sum(
        StdAper, lw,
        index=np.arange(lw.max() +
                        1))  # this measures the size of the apertures
    StdAper = area[lw].astype(
        int)  # this replaces the 1s by the size of the aperture
    StdAper = (StdAper >=
               np.max(StdAper)) * 1  #make the standard aperture as 1.0

    #Finding the background aperture
    BkgAper = 1.0 - StdAper

    BkgFrame = (BkgAper * AvgFlux)
    BkgFrame = BkgFrame[np.nonzero(BkgFrame)]
    BkgStd = np.std(BkgFrame)
    BkgMedian = np.median(
        BkgFrame
    )  # negative background values are sometimes seen; they get added to the flux rather than subtracted
    Sigma = 5.0  # usual value is 5
    CutoffLower = BkgMedian - Sigma * BkgStd  # 5-sigma cutoff for excluding really unusual pixels

    #New method
    BkgFrame = BkgFrame[np.nonzero((BkgFrame > CutoffLower) * 1.0)]
    #BkgNewMean = np.median(BkgFrame)
    BkgNewMean = np.abs(np.median(BkgFrame))
    BkgNewStd = np.std(BkgFrame)

    Sigma = 2.0  ###Important for determining the aperture
    ExpectedFluxUnder = BkgNewMean + Sigma * BkgNewStd + 15.0  #15.0 to consider the case where the background is really small

    #find a standard Aperture
    StdAper = 1.0 * (AvgFlux > ExpectedFluxUnder)
    lw, num = measurements.label(
        StdAper)  # this numbers the different apertures distinctly
    area = measurements.sum(
        StdAper, lw,
        index=np.arange(lw.max() +
                        1))  # this measures the size of the apertures
    StdAper = area[lw].astype(
        int)  # this replaces the 1s by the size of the aperture
    StdAper = (StdAper >= np.max(StdAper)) * 1  #
    return StdAper
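The `area[lw]` indexing trick used above (each pixel gets its component's area, and only the largest component is kept) can be seen in isolation on a toy array. This is an illustrative sketch that assumes `measurements` above is `scipy.ndimage`.

import numpy as np
from scipy import ndimage as ndi

mask = np.array([[1, 1, 1, 0],
                 [0, 0, 0, 1],
                 [0, 1, 0, 0]])
lw, num = ndi.label(mask)
sizes = ndi.sum(mask, lw, index=np.arange(lw.max() + 1))
size_img = sizes[lw]                          # each pixel now holds its component's area
largest = (size_img >= size_img.max()) * 1    # 1 on the largest component, 0 elsewhere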
Example #8
def getTargetOutput(image, targetItem, out_map):
    # Find item labeled output from CRF
    item_label = np.zeros(out_map.shape,dtype="uint8")
    item_index = (out_map == targetItem)
    item_label[item_index]=1
    # Label segments and calculate max area
    lw, num = measurements.label(np.asarray(item_label))
    area = measurements.sum(item_label, lw, index=arange(lw.max() + 1))
    best_val = np.argmax(area)
    # Mask image, create single segment
    image_mask = np.zeros(out_map.shape,dtype="uint8")
    best_index = (lw == best_val)
    image_mask[best_index] = 1
    image_mask = np.swapaxes(image_mask,0,1)
    #image_mask = np.swapaxes(image_mask,0,1)
    final_mask = np.zeros(image.shape, dtype="uint8")
    # Hack: don't know why repmat won't work...
    final_mask[:,:,0] = image_mask
    final_mask[:,:,1] = image_mask
    final_mask[:,:,2] = image_mask
    final_mask_index = (final_mask == 0)
    image_out = np.copy(image)
    image_out[final_mask_index] = 0

    return image_out
def update(p):
    p = pSlider.val
    z = r<p
    im1.set_data(z)
    im1.set_clim(z.min(), z.max())
    lw, num = measurements.label(z)

    # labeled clusters
    b = arange(lw.max() + 1) # create an array of values from 0 to lw.max() + 1
    shuffle(b) # shuffle this array
    shuffledLw = b[lw] # replace all values with values from b
    im2.set_data(shuffledLw) # show image clusters as labeled by a shuffled lw    
    im2.set_clim(shuffledLw.min(), shuffledLw.max())
    
    # calculate area
    area = (measurements.sum(z, lw, index=range(lw.max() + 1))).astype(int)
    areaImg = area[lw]
    im3.set_data(areaImg)
    im3.set_clim(areaImg.min(), areaImg.max())
    sliced = measurements.find_objects(areaImg == areaImg.max())
    if(len(sliced) > 0):
        sliceX = sliced[0][1]
        sliceY = sliced[0][0]
        ontopplot.set_xdata([sliceX.start, sliceX.start, sliceX.stop, sliceX.stop, sliceX.start])
        ontopplot.set_ydata([sliceY.start, sliceY.stop, sliceY.stop, sliceY.start, sliceY.start])
    else:        
        ontopplot.set_xdata([0])
        ontopplot.set_ydata([0])
    
    
    draw()
Example #10
def find_blob_centers(predictions, resolution, blob_prediction_threshold,
                      blob_size_threshold):

    # smooth out "U-Net noise"
    # print("Median-filtering prediction...")
    # predictions = median_filter(predictions, size=3)

    print("Finding blobs...")
    start = time.time()
    blobs = predictions > blob_prediction_threshold
    labels, num_blobs = label(blobs)
    print("%.3fs" % (time.time() - start))
    print("Found %d blobs" % num_blobs)

    print("Finding centers, sizes, and maximal values...")
    start = time.time()
    label_ids = np.arange(1, num_blobs + 1)
    centers = measurements.center_of_mass(blobs, labels, index=label_ids)
    sizes = measurements.sum(blobs, labels, index=label_ids)
    maxima = measurements.maximum(predictions, labels, index=label_ids)
    print("%.3fs" % (time.time() - start))

    centers = {
        label: {
            'center': center,
            'score': max_value
        }
        for label, center, size, max_value in zip(
            label_ids, centers, sizes, maxima) if size >= blob_size_threshold
    }

    return (centers, labels)
Example #11
    def _measure(self, measurement_type):
        areas = spm.sum(np.ones(self.labels.shape), self.labels,
                        range(1, self.num_objs))

        intensity = measure_stack(self.stack, self.labels, self.num_objs,
                                  measurement_type)
        return areas, intensity
Example #12
def OldCase4(AvgFlux, X, Y):
    #Convolve with a laplacian
    LaplacianStencil = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
    Laplacian = convolve(AvgFlux, LaplacianStencil)
    StdAper = (Laplacian < -10)
    lw, num = measurements.label(
        StdAper)  # this numbers the different apertures distinctly
    area = measurements.sum(
        StdAper, lw,
        index=np.arange(lw.max() +
                        1))  # this measures the size of the apertures
    StdAper = area[lw].astype(
        int)  # this replaces the 1s by the size of the aperture
    StdAper = (StdAper >= np.max(StdAper)) * 1
    pl.figure(figsize=(16, 7))
    pl.subplot(121)
    pl.imshow(AvgFlux,
              cmap='gray',
              norm=colors.PowerNorm(gamma=1. / 2.),
              interpolation='none')
    pl.plot(X, Y, "ko")
    pl.colorbar()
    pl.subplot(122)
    pl.imshow(StdAper)
    pl.colorbar()
    pl.plot(X, Y, "ko")
    pl.show()

    return StdAper
Example #13
def mask_lake_img(img, offset=None, water_mask=None):
    if water_mask is None:
        water_mask = np.where(img < offset, 1, 0)
    visited, label = measurements.label(water_mask)
    area = measurements.sum(water_mask, visited, index=np.arange(label + 1))
    largest_element = np.argmax(area)
    return np.where(visited == largest_element, 1, 0)
Example #14
def Pi():
    # L = 100
    for L in (50, 100, 200):
        p = linspace(0.5, 0.7, 50)
        nx = len(p)
        Ni = zeros(nx)
        N = 1000
        for i in range(N):
            z = rand(L, L)
            for ip in range(nx):
                m = z < p[ip]
                lw, num = measurements.label(m)
                labelList = arange(lw.max() + 1)
                area = measurements.sum(m, lw, labelList)
                maxLabel = labelList[where(area == area.max())]
                sliced = measurements.find_objects(lw == maxLabel)
                if (len(sliced) > 0):
                    sliceX = sliced[0][1]
                    sliceY = sliced[0][0]
                    dx = sliceX.stop - sliceX.start
                    dy = sliceY.stop - sliceY.start
                    maxsize = max(dx, dy)
                    if (maxsize >= L):  # Percolation
                        Ni[ip] = Ni[ip] + 1
        Pi = Ni / N
        plot(p, Pi)
    show()
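Several of the percolation examples in this listing need to decide whether a cluster spans the lattice. A compact way to test top-to-bottom spanning is to intersect the labels of the first and last rows; the sketch below is illustrative and not taken from the script above.

import numpy as np
from scipy import ndimage as ndi

L, p = 50, 0.6
z = np.random.rand(L, L) < p
lw, num = ndi.label(z)
spanning = np.intersect1d(lw[0, :], lw[-1, :])   # labels present in both the first and last row
spanning = spanning[spanning > 0]                # drop the background label 0
percolates = len(spanning) > 0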
Example #15
def calculate_sizes_zero_for_empty(lattice_snapshot, fname, loglog_scale=None):
    #
    #
    # Calculate the cluster-size distribution for the given lattice snapshot
    #

    #print(final_lattice)

    ls = lattice_snapshot.copy()
    if -1 in ls:
        ls[ls!=-1]=1
        ls[ls==-1]=0
    lw, num = measurements.label(ls)


    plt.figure()
    lns = plt.plot()
    plt.title("size distribution")
    area = measurements.sum(ls, lw, index=arange(lw.max() + 1))
    area = area[area>1]
    plt.hist(area, bins=100)
    average_object_size = np.mean(area)
    median_object_size = np.median(area)
    print("average object size = ", average_object_size)
    print("median object size = ", median_object_size)
    if loglog_scale:
            plt.xscale('log')
            plt.yscale('log')


    plt.savefig(fname)
Example #16
    def find_roll(self):
        """
        Determine the roll of the phantom based on the phantom air bubbles on the HU slice.
        :return:
        """
        SOI = self.SOI_cleaned['HU']
        invSOI = ptg.invert(SOI)
        labels, no_roi = meas.label(invSOI)
        roi_sizes = [meas.sum(invSOI, labels, index=item) for item in range(1,no_roi+1)]
        air_bubbles = [idx+1 for idx, item in enumerate(roi_sizes) if item < np.median(roi_sizes)*1.5 and item > np.median(roi_sizes)*(1/1.5)]
        if len(air_bubbles) != 2:
            self.phan_roll = 0

            #raise RuntimeWarning, "Roll unable to be determined; assuming 0"
        else:
            air_bubble_CofM = meas.center_of_mass(invSOI, labels, air_bubbles)
            y_dist = air_bubble_CofM[0][0] - air_bubble_CofM[1][0]
            x_dist = air_bubble_CofM[0][1] - air_bubble_CofM[1][1]
            angle = np.arctan2(y_dist, x_dist) * 180/np.pi
            if angle < 0:
                roll = abs(angle) - 90
            else:
                roll = angle - 90
            self.phan_roll = roll

            self._roll_found = True
Example #17
    def get_clusters(self):
        """
        Get clusters and respective sizes
        """
        # first regroup similar ages to same value
        for node in self.age_dict.keys():
            self.cluster_colour(node)

        # collect in a dict all the normalised ages
        self.get_nodes_w_colour()

        # get all the different groups
        groups = np.unique(list(self.colour_dict.values()))

        # combine all the cluster sizes per iteration
        area_dist_per_itr = []

        # draw the array
        for group in groups:
            self.draw_array(group_nr=group)
            lw, num = measurements.label(self.array)
            area = measurements.sum(self.array, lw, index=arange(lw.max() + 1))
            # make sure to not include a zero in the array
            area = area[area != 0]
            area_dist_per_itr = area_dist_per_itr + (list(area))
            # make sure to reset the array
            self.reset_array()

        # append it to the collector dict where each key corresponds to the time_step
        self.cluster_size[self.time_step] = area_dist_per_itr
Example #19
def find_maxima(predictions,
                voxel_size,
                radius,
                sigma=None,
                min_score_threshold=0):
    '''Find all points that are maximal within a sphere of ``radius`` and are
    strictly higher than min_score_threshold. Optionally smooth the prediction
    with sigma.'''

    # smooth predictions
    if sigma is not None:
        print("Smoothing predictions...")
        sigma = tuple(float(s) / r for s, r in zip(sigma, voxel_size))
        print("voxel-sigma: %s" % (sigma, ))
        start = time.time()
        predictions = gaussian_filter(predictions, sigma, mode='constant')
        print("%.3fs" % (time.time() - start))

    print("Finding maxima...")
    start = time.time()
    radius = tuple(
        int(math.ceil(float(ra) / re)) for ra, re in zip(radius, voxel_size))
    print("voxel-radius: %s" % (radius, ))
    max_filtered = maximum_filter(predictions, footprint=sphere(radius))

    maxima = max_filtered == predictions
    print("%.3fs" % (time.time() - start))

    print("Applying NMS...")
    start = time.time()
    predictions_filtered = np.zeros_like(predictions)
    predictions_filtered[maxima] = predictions[maxima]
    print("%.3fs" % (time.time() - start))

    print("Finding blobs...")
    start = time.time()
    blobs = predictions_filtered > min_score_threshold
    labels, num_blobs = label(blobs, output=np.uint64)
    print("%.3fs" % (time.time() - start))

    print("Found %d points after NMS" % num_blobs)

    print("Finding centers, sizes, and maximal values...")
    start = time.time()
    label_ids = np.arange(1, num_blobs + 1)
    centers = measurements.center_of_mass(blobs, labels, index=label_ids)
    sizes = measurements.sum(blobs, labels, index=label_ids)
    maxima = measurements.maximum(predictions, labels, index=label_ids)
    print("%.3fs" % (time.time() - start))

    centers = {
        label: {
            'center': center,
            'score': max_value
        }
        for label, center, size, max_value in zip(label_ids, centers, sizes,
                                                  maxima)
    }

    return (centers, labels, predictions)
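The non-maximum suppression step above relies on the fact that a voxel equals the output of a maximum filter exactly when it is the largest value in its neighbourhood. A tiny illustrative sketch of that trick (not from the original code):

import numpy as np
from scipy.ndimage import maximum_filter

pred = np.random.rand(20, 20)
local_max = maximum_filter(pred, size=5) == pred   # True where a pixel is the max of its 5x5 window
nms = np.where(local_max, pred, 0.0)               # keep local maxima, zero out everything else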
Example #20
def hist_clustersize(dic, keys, tr, clim, norm=True, bars=False, 
                     fig_name=None, higher=True):
    plt.figure(figsize=(7,5), dpi=200)
    for k in keys:
        h = np.zeros((len(dic),clim[1]-clim[0]+1))
        for j in range(len(dic)):
            x = np.array(dic[j][k])
            if higher:
                x[x<tr[k][0]] = 0.0
                x[x>tr[k][0]] = 1.0
            else:
                x[x<tr[k][0]] = 1.0
                x[x>tr[k][0]] = 0.0
            xs, n_clusters = measurements.label(x)
            print(n_clusters, 'clusters found')
            a  = measurements.sum(x, xs, index=arange(xs.max() + 1))
            ma = np.max(a) 
            h[j,:]=np.asarray([np.sum(a==i) for i in np.arange(clim[0],clim[1]+1,1)])
        hm = np.mean(h,axis=0)
        if norm:
            hm = hm/np.sum(hm)
            plt.ylim([0.0,1.0])
        if bars:
            plt.bar(np.arange(clim[0]-0.4, clim[1], 1.0 ),hm)
        else:
            plt.plot(np.arange(clim[0], clim[1]+1, 1.0 ),hm, 'ob-', ms=10)
        plt.xlim([clim[0]-0.5,clim[1]+0.5])
        plt.xticks(range(clim[0], clim[1]+1,1))
        plt.xlabel('Cluster size')
        plt.ylabel('Number of clusters')
        #plt.title(k)
        if fig_name!=None:
            plt.savefig(fig_name, format='pdf', dpi=200)
Example #21
def visualize_clusters2():
    pVals = [0.57, 0.58, 0.59, 0.6]
    L = 100
    z = np.random.random((L, L))

    fig, axs = plt.subplots(2, 2)
    cmap = plt.cm.viridis
    cmap.set_under('black')
    idx = 0
    for i in range(2):
        for j in range(2):
            p = pVals[idx]
            print(p)
            system = z < p
            labels, n_features = measurements.label(system)
            area = measurements.sum(system,
                                    labels,
                                    index=arange(labels.max() + 1))
            areaImg = area[labels]
            axs[i, j].imshow(areaImg,
                             origin='lower',
                             cmap=cmap,
                             vmin=0.1,
                             interpolation='none')
            axs[i, j].set_xticks([])
            axs[i, j].set_yticks([])
            axs[i, j].set_title(f"p={p:0.2f}")
            idx += 1

    # colorbar()
    show()
Example #22
    def getFrame(self, shouldLabel=True):
        success, readFrame = self._segmentedCap.read()

        segReadFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2GRAY)

        if (shouldLabel):
            #labeledFrame = connected_components(np.uint16(segReadFrame))
            #labeledFrame = labeledFrame.eval(session = self._session)
            labeledFrame, n = label(np.uint16(segReadFrame))

            n = len(np.unique(labeledFrame))
            initialLabelsInds = list(range(n))

            area = measurements.sum(labeledFrame != 0,
                                    labeledFrame,
                                    index=list(range(n)))
            badAreas = np.where((area < 5) | (area > 400))[0]
            labeledFrame[np.isin(labeledFrame, badAreas)] = 0

            labelsInds = set(list(initialLabelsInds)).difference(
                set(list(badAreas)))
        else:
            labeledFrame = segReadFrame
            labelsInds = []

        success, rawReadFrame = self._rawCap.read()

        return segReadFrame, rawReadFrame, labeledFrame, labelsInds
Example #23
def locate_sources(img):
    """
    Extract sources from an image.
    """

    # -- get average intensity of pixels across rgb channels
    img_a = img.mean(-1)

    # -- get medians and standard deviations of luminosity images
    med = np.median(img_a)
    sig = img_a.std()

    # -- get the thresholded images
    thr = img_a > (med + 5.0 * sig)

    # -- label the sources
    labs = spm.label(thr)

    # -- get the source sizes
    lsz = spm.sum(thr, labs[0], range(1, labs[1] + 1))

    # -- get the positions of the sources
    ind = (lsz > 25.) & (lsz < 500.)

    # -- get center of masses for all the labelled sources in the image
    return np.array(
        spm.center_of_mass(thr, labs[0],
                           np.arange(1, labs[1] + 1)[ind])).T
Example #24
def _get_map_cluster_sizes(map_):
    labels, num = measurements.label(map_)
    area = measurements.sum(map_, labels, index=np.arange(1, num + 1))
    if not len(area):
        return [0]
    else:
        return area.astype(int)
Example #25
def fix_elevations(z, riv_i, riv_j, ch_depth, sea_level, slope, dx, max_rand, SLRR):

    test_elev = z - sea_level
    max_cell_h = slope * dx
    riv_prof = test_elev[riv_i, riv_j]
    test_elev[riv_i, riv_j] += 2*ch_depth

    # set new subaerial cells to marsh elevation
    test_elev[test_elev == 0] = max_cell_h

    # make mask for depressions
    ocean_mask = test_elev < max_cell_h
    labeled_ponds, ocean = measurements.label(ocean_mask)

    # # fill in underwater spots that are below SL (?)
    # below_SL = [z <= sea_level]
    # underwater_cells, big_ocean = measurements.label(below_SL)
    # underwater_cells[underwater_cells == big_ocean] = 0
    # test_elev[underwater_cells > 0] = max_cell_h + SLRR + (np.random.rand() * max_rand)

    # create an ocean and shoreline mask
    ocean_and_shore = np.copy(labeled_ponds)

    # create an ocean mask
    # ocean_cells = np.copy(ocean_and_shore)
    # ocean_and_shore[test_elev > 0] = 0

    # create mask for pond cells and fix them
    area = measurements.sum(ocean_mask, labeled_ponds, index=np.arange(labeled_ponds.max() + 1))
    areaPonds = area[labeled_ponds]
    labeled_ponds[areaPonds == areaPonds.max()] = 0

    #finish creating ocean and shoreline mask
    ocean_and_shore[areaPonds != areaPonds.max()] = 0

    # something here to get rid of ocean cells
    test_elev[labeled_ponds > 0] = max_cell_h + SLRR + (np.random.rand() * max_rand)

    # raise cells close to sea level above it
    test_elev[(test_elev >= max_cell_h) & (test_elev <= (max_cell_h + SLRR))] = \
        (max_cell_h + SLRR + (np.random.rand() * max_rand))

    riv_buffer = np.zeros_like(test_elev)
    riv_buffer[riv_i, riv_j] = 1
    riv_buffer[riv_i[1:]-1, riv_j[1:]] = 1

    for i in range(1, test_elev.shape[0]-1):
        for j in range(test_elev.shape[1]):
            if (not ocean_and_shore[i, j]
                and not ocean_and_shore[i-1, j]
                and not ocean_and_shore[i+1, j]
                and not riv_buffer[i, j]):
                if test_elev[i+1, j] >= test_elev[i, j]:
                    test_elev[i, j] = test_elev[i+1, j] + (np.random.rand() * slope)
    
    test_elev[riv_i, riv_j] = riv_prof

    z = test_elev + sea_level

    return z
    def compute_stable_background(complete_sum,
                                  high_threshold,
                                  low_threshold,
                                  min_bg_cc_size,
                                  close_radius,
                                  debug_save_prefix=None):

        height, width = complete_sum.shape

        # threshold and obtain low confidence and high confidence background images
        high_image = (complete_sum >= high_threshold).astype(np.uint8) * 255
        low_image = (complete_sum >= low_threshold).astype(np.uint8) * 255

        # Filtering high threshold image
        # first, do a morphological closing on the image
        struct_elem = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                ksize=(close_radius,
                                                       close_radius))
        high_closed = cv2.morphologyEx(high_image, cv2.MORPH_CLOSE,
                                       struct_elem)

        # filtering very small CC in the background image (very likely to be false positives)
        components, count_labels = sci_mes.label(high_closed)
        sizes = sci_mes.sum(high_closed, components,
                            range(count_labels + 1)) / 255.0

        for idx in range(1, count_labels + 1):
            if sizes[idx] < min_bg_cc_size:
                high_closed[components == idx] = 0

        # compute the connected components on the low confidence image ...
        components, count_labels = sci_mes.label(low_image)

        # now delete components from low threshold image that do not overlap the closed version of the high threshold image
        background_model = low_image.copy()
        for idx in range(1, count_labels + 1):
            cc_mask = components == idx
            if high_closed[cc_mask].sum() == 0:
                # no overlap between current CC and the high confident background image ...
                background_model[cc_mask] = 0

        background_expanded = cv2.morphologyEx(background_model,
                                               cv2.MORPH_DILATE, struct_elem)
        background_mask = (background_expanded > 0)

        # Debug
        if debug_save_prefix is not None:
            #tempo_result = np.zeros((height, width, 3), dtype=np.uint8)
            #tempo_result[:,:, 1] = complete_sum * 220 + 32
            tempo_result = complete_sum * 255

            cv2.imwrite(debug_save_prefix + "_sum.png", tempo_result)
            cv2.imwrite(debug_save_prefix + "_low_t.png", low_image)
            cv2.imwrite(debug_save_prefix + "_hi_t.png", high_image)
            cv2.imwrite(debug_save_prefix + "_hi_t_closed.png", high_closed)
            cv2.imwrite(debug_save_prefix + "_model.png", background_model)
            cv2.imwrite(debug_save_prefix + "_model_exp.png",
                        background_expanded)

        return background_mask
Example #28
def masswalk(L, p):
    ncount = 0
    perc = []
    while (len(perc) == 0):
        ncount = ncount + 1
        if (ncount > 1000):
            print("Couldn't make percolation cluster...")
            mass = 0
            break

        z = rand(L, L) < p
        lw, num = measurements.label(z)
        perc_x = intersect1d(lw[0, :], lw[-1, :])
        perc = perc_x[where(perc_x > 0)]

    if len(perc) > 0:
        labelList = arange(num + 1)
        area = measurements.sum(z, lw, index=labelList)
        areaImg = area[lw]
        maxArea = area.max()
        zz = (lw == perc[0])

        l, r = walk(zz)
        zzz = l * r  # Find points where both l and r are non-zero
        zadd = zz + zzz
        mass = count_nonzero(zzz)

    return mass
Example #30
def compute_checkbox_position(blank_im):
    binary = convert_to_binary(255 - blank_im)
    labels, n = morph.label(binary)
    h, w = binary.shape
    minsize = 40

    # find small dash in img
    sums = measurements.sum(binary, labels, range(n + 1))
    sums = sums[labels]
    good = minimum(binary, 1 - (sums > 0) * (sums < minsize))

    junk_cc = np.bitwise_xor(good, binary)
    # temporary fix: add bottom line
    junk_cc[h-1:, :] = np.ones((1, w))
    junk_cc = morph.r_dilation(junk_cc, (7,7))
    junk_cc = morph.r_closing(junk_cc, (9,9))

    # find hole using morphology
    hole = morph.fill_hole(junk_cc)
    hole = hole - junk_cc

    # locate holes position
    labels, n = morph.label(hole)
    objects = morph.find_objects(labels)
    objects = sorted(objects, key=lambda b: sl.center(b))
    area_thres = 0.4 * (amax([sl.area(b) for b in objects]) if len(objects) > 0 else 0)
    boxes = [[b[0].start, b[1].start, b[0].stop, b[1].stop] for b in objects if sl.area(b) > area_thres]

    return boxes, convert_binary_to_normal_im(hole)
Example #31
def nsp2():
    nsamp = 100
    L = 200
    p = 0.58
    allarea = array([])
    for i in range(nsamp):
        z = rand(L, L)
        m = z < p
        lw, num = measurements.label(m)
        labelList = arange(lw.max() + 1)
        area = measurements.sum(m, lw, labelList)
        allarea = append(allarea, area)
    n, sbins = histogram(allarea, bins=int(max(allarea)))
    s = 0.5 * (sbins[1:] + sbins[:-1])
    # nsp = n/(L*nsamp)
    # i = nonzero(n)
    # subplot(2,1,1)
    # plot(s[i],nsp[i],'o')
    # xlabel('$s$')
    # ylabel('$n(s,p)$')
    # subplot(2,1,2)
    # loglog(s[i],nsp[i],'o')
    # xlabel('$s$')
    # ylabel('$n(s,p)$')
    M = nsamp
    a = 1.2
    logamax = ceil(log(max(s)) / log(a))
    logbins = a**arange(0, logamax)
    nl, nlbins = histogram(allarea, bins=logbins)
    ds = diff(logbins)
    sl = 0.5 * (logbins[1:] + logbins[:-1])
    nsl = nl / (M * L**2 * ds)
    loglog(sl, nsl, '.b')
    show()
Example #32
def spanning_cluster_density(perc_matrix):
    # Calculate and return the spanning cluster density
    total_area = 0
    Lx, Ly = perc_matrix.shape
    Lmin = min(Lx, Ly)

    lw, num = measurements.label(perc_matrix)
    labels = arange(lw.max() + 1)

    area = measurements.sum(perc_matrix, lw, index=labels)

    for l in labels:
        if area[l] > Lmin:
            sliced = measurements.find_objects(lw == l)
            sliceX = sliced[0][1]
            sliceY = sliced[0][0]

            width = sliceX.stop - sliceX.start
            height = sliceY.stop - sliceY.start

            if width == Lx or height == Ly:
                total_area += area[l]

    return total_area / Lx / Ly
Example #33
def OldCase3(AvgFlux):
    ExpectedFluxUnder = 175
    StdAper = (AvgFlux>ExpectedFluxUnder)
    lw, num = measurements.label(StdAper) # this numbers the different apertures distinctly
    area = measurements.sum(StdAper, lw, index=np.arange(lw.max() + 1)) # this measures the size of the apertures
    StdAper = area[lw].astype(int) # this replaces the 1s by the size of the aperture
    StdAper = (StdAper >= np.max(StdAper))*1 #make the standard aperture as 1.0
    return StdAper
    def maskOutLake(watermask):
        import numpy as np
        from scipy.ndimage import measurements

        visited, label = measurements.label(watermask)
        area = measurements.sum(watermask, visited, index=np.arange(label + 1))
        largestElement = np.argmax(area)
        return np.where(visited == largestElement, 1, 0)
Example #35
    def __init__(self, name, crop, min_area_cutoff, max_area_cutoff, dates):
        self.name = name
        self.dates = dates
        self.crop = crop

        somma_cristalli = np.zeros(13)
        media = np.zeros(13)

        cristalli = np.array([])
        numeroC_array = np.array([])
        numero_cristalli = np.array([])
        media_aree_array = np.array([])

        k = 0
        for date in dates:
            img_path = "../campioni/" + name + "/" + date + "/img/"  # one folder down
            temperatures = os.listdir(img_path)
            media_array = np.array([])
            media_aree_array = np.array([])
            numeroC_array = np.array([])
            for temp in temperatures:
                images = os.listdir(img_path + temp)
                lunghezza = 0
                cristalli = np.array([])
                numero_cristalli = np.array([])
                for i in images:
                    file_path = img_path + temp + "/" + i

                    im = Image.open(file_path).convert("L")
                    I = np.asarray(im)

                    lx, ly = I.shape
                    crop_I = I[lx - self.crop[k][0]:lx - self.crop[k][1],
                               ly - self.crop[k][2]:ly - self.crop[k][3]]

                    lw, num = measurements.label(crop_I)
                    area = (measurements.sum(crop_I, lw, range(num + 1)) /
                            255) * 5.3 * 5.3

                    area = area[(area <= max_area_cutoff)]
                    area = area[(area > min_area_cutoff)]

                    cristalli = np.append(cristalli, area[1:])
                    lunghezza = len(area) + lunghezza
                    numero_cristalli = np.append(numero_cristalli, lunghezza)

                media_singola_aree = np.average(cristalli)
                media_aree_array = np.append(media_aree_array,
                                             media_singola_aree)
                numeroC_array = np.append(numeroC_array,
                                          lunghezza)  # (maybe ok)

            media = media_aree_array * numeroC_array + media
            somma_cristalli = numeroC_array + somma_cristalli
            k = k + 1

        self.temp = temperatures
        self.media_aree = media / somma_cristalli
Example #36
File: TrjCor.py Project: vitst/tomoPost
    def clean_image(self, bin_image, mark="  "):
        print("{}  A0 ** Number: {}".format(mark, measurements.sum(bin_image)),
              flush=True)

        bin_image = binary_fill_holes(bin_image).astype(np.uint8)

        print("{}  A1 ** Number: {}".format(mark, measurements.sum(bin_image)),
              flush=True)

        bin_image = binary_erosion(bin_image, iterations=2).astype(np.uint8)

        print("{}  A ** Number: {}".format(mark, measurements.sum(bin_image)),
              flush=True)

        bin_image = binary_fill_holes(bin_image).astype(np.uint8)

        print("{}  B ** Number: {}".format(mark, measurements.sum(bin_image)),
              flush=True)

        bin_image = binary_dilation(bin_image, iterations=2).astype(np.uint8)

        print("{}  C ** Number: {}".format(mark, measurements.sum(bin_image)),
              flush=True)

        bin_image = self.erode_converge(bin_image)

        print("{}  D ** Number: {}".format(mark, measurements.sum(bin_image)),
              flush=True)

        bin_image = self.clean_not_attached(bin_image)

        print("{}  E ** Number: {}".format(mark, measurements.sum(bin_image)),
              flush=True)

        return bin_image.astype(np.uint8)
Example #37
def remove_noise(line,minsize=8):
    """Remove small pixels from an image."""
    if minsize==0: return line
    bin = (line>0.5*amax(line))
    labels,n = morph.label(bin)
    sums = measurements.sum(bin,labels,range(n+1))
    sums = sums[labels]
    good = minimum(bin,1-(sums>0)*(sums<minsize))
    return good
Example #38
File: linerec.py Project: AI42/ocropy
def non_noise_components(seg,threshold=0.1):
    """Estimate the number of non-noise connected components in a character
    image. This computes the size of all connected components, and it considers
    all components of size less than `threshold` times the size of the largest
    component to be noise."""
    seg = 1*(seg>0)
    labels,n = morph.label(seg)
    totals = measurements.sum(seg,labels,range(1,n+1))
    return sum(totals>amax(totals)*threshold)
Example #39
def findCenter(frame, threshold):    
    '''Take a frame, and find the pupil center'''
    img = frame[...,0]
    h,w = img.shape
    
    # Threshold the image and adapt - if necessary - the threshold
    (bw, threshold) = adaptThreshold(img, threshold)
        
    # Flip b/w, and convert to uint8
    im_bw = np.array(~bw, dtype=np.uint8)*255
    
    algorithmNr = 0
    
    if algorithmNr == 0:
        labelled_array, num_features = spm.label(im_bw)
        sizes = spm.sum(bw, labelled_array, range(num_features))
        center = spm.center_of_mass(bw, labelled_array, np.argmax(sizes))    
        center = [center[1], center[0]]
    
    elif algorithmNr == 1:
        # Fill the corners
        # Note that the mask has to be 2 pixels larger than the image!
        mask = np.zeros( (h+2,w+2), dtype=np.uint8)
        cv2.floodFill(im_bw, mask, (1,h-1), 255)
        
        # After the floodfill operation, the "mask" indicates which areas have
        # been filled. It therefore has to be re-set for the next filling task.
        mask = np.zeros( (h+2,w+2), dtype=np.uint8)
        cv2.floodFill(im_bw, mask, (w-1,h-1), 255)
        
        # Fill the holes in the pupil
        wHoles = im_bw.copy()
        mask = np.zeros( (h+2,w+2), dtype=np.uint8)
        cv2.floodFill(wHoles, mask, (1,1), 0)
        im_bw[wHoles==255] = 0
        
        # Close the image, to remove the eye-brows    
        radius = 25
        strEl = np.zeros((2*radius+1, 2*radius+1), dtype=np.uint8)
        cv2.circle(strEl, (radius,radius), radius, 255, -1)
        
        closed = cv2.morphologyEx(im_bw, cv2.MORPH_CLOSE, strEl)
        
        # find the edge
        edgeThresholds = (1000, 1000)
        edge = cv2.Canny(closed, edgeThresholds[0], edgeThresholds[1], apertureSize=5)
        
        # find the center of the edge
        edge_y, edge_x = np.where(edge==255)
        center = np.array([np.mean(edge_x), np.mean(edge_y)])
        
    if np.any(np.isnan(center)):
        center = np.zeros(2)
        
    return (center, threshold)
Example #40
 def getLMRectStats(self, rect):
     xx1, xx2, yy1, yy2 = self._lmRectToPix(rect)
     if xx1 is not None:
         subset = self.image.image()[xx1:xx2, yy1:yy2]
         subset, mask = self.image.optimalRavel(subset)
         mmin, mmax = measurements.extrema(subset, labels=mask, index=None if mask is None else False)[:2]
         mean = measurements.mean(subset, labels=mask, index=None if mask is None else False)
         std = measurements.standard_deviation(subset, labels=mask, index=None if mask is None else False)
         ssum = measurements.sum(subset, labels=mask, index=None if mask is None else False)
         return xx1, xx2, yy1, yy2, mmin, mmax, mean, std, ssum, subset.size
     return None
Example #41
def clusterArea(Lx,Ly,p):
    lattice,labelMatrix= showMatrix(Lx,Ly,p)
    figure()
    areaList = measurements.sum(lattice, labelMatrix, index= arange(labelMatrix.max() + 1) )
    areaLabelMatrix = areaList[labelMatrix]
    imshow(areaLabelMatrix, origin='lower', interpolation='nearest')
    colorbar()
    title("Clusters by area")
    show()

    
Example #42
def cluster(field):
    
    field_abs = np.absolute(field) # reduce field to active/inactive traders
    field_abs = field_abs == 1 # get field of True/False values
    lw, num = measurements.label(field_abs) # lw: matrix with cluster numbers, num: total number of clusters 
    area = measurements.sum(field_abs, lw, index=np.arange(1,num+1)) # area: matrix of cluster size
    
    cluster_ones = np.zeros(num) # define empty array
    for i in range(1,num+1): # loop clusters
        cluster_ones[i-1] = (np.where(np.logical_and(lw==i,field==1))[0]).size # get numberof +1 in cluster
       
    return lw, area, num, cluster_ones
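A possible vectorized variant of the per-cluster `cluster_ones` loop above, under the same assumptions about `field` (active traders are +1/-1, inactive are 0). This is an illustrative sketch, not the original implementation.

import numpy as np
from scipy import ndimage as ndi

def cluster_ones_vectorized(field):
    # count the +1 traders in each cluster with a single labeled sum instead of a Python loop
    field_abs = np.absolute(field) == 1
    lw, num = ndi.label(field_abs)
    return ndi.sum(field == 1, lw, index=np.arange(1, num + 1))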
Example #43
def _get_map_cluster_sizes(map_):
    labels, num = measurements.label(map_,structure=np.ones([3,3,3]))
    area = measurements.sum(map_, labels, index=np.arange(1, num + 1))
    # TODO: So here if a given map didn't have any super-thresholded features,
    # we get 0 into our histogram.  BUT for the other maps, where at least 1 voxel
    # passed the threshold we might get multiple clusters recorded within our
    # distribution.  Which doesn't quite cut it for being called a FW cluster level.
    # MAY BE it should count only the maximal cluster size (a single number)
    # per given permutation (not all of them)
    if not len(area):
        return [0]
    else:
        return area.astype(int)
Example #44
    def calcCluster(self):
        grid_abs = np.absolute(self.grid) # reduce field to active/inactive traders
        grid_abs = grid_abs == 1 # get field of True/False values
        
        # lw: matrix with cluster numbers, num: total number of clusters, area: matrix of cluster size 
        lw, num = measurements.label(grid_abs) 
        area = measurements.sum(grid_abs, lw, index=np.arange(1,num+1))  
    
        cluster_ones = np.zeros(num) # define empty array
        for i in range(1,num+1): # loop clusters
            cluster_ones[i-1] = (np.where(np.logical_and(lw==i,self.grid==1))[0]).size # get number of +1 states in cluster

        return lw, area, num, cluster_ones
Example #45
def numberDensity(nSamples, Lx, Ly, p):
    latticeArea = Lx*Ly
    Bins = logspace(0, log10(latticeArea))
    nBins = len(Bins)
    n = zeros(nBins - 1)    
    
    for sample in range(nSamples):
                r = rand(Lx,Ly)
                lattice = r < p
                labelMatrix, nClusters = measurements.label(lattice)
                labelList = arange(labelMatrix.max() + 1) 
                
                if(Lx==1):
                    verticalPercolation   = set()
                else:
                    verticalPercolation   = set(labelMatrix[0,:]) & set(labelMatrix[-1,:])
                    
                if(Ly==1):
                    horisontalPercolation = set()
                else:
                    horisontalPercolation = set(labelMatrix[:,0]) & set(labelMatrix[:,-1])
                 
                if(Lx==1 and Ly==1):
                    horisontalPercolation = set(labelMatrix[:,0]) & set(labelMatrix[:,-1])
                    verticalPercolation   = set(labelMatrix[0,:]) & set(labelMatrix[-1,:])
                    
                    
                spanningClusterLabelList = horisontalPercolation|verticalPercolation
                FiniteClusterLabelList = array(list(set(labelList) - spanningClusterLabelList))
                
                
                if(len(FiniteClusterLabelList) > 0.0):
                    areaList  = measurements.sum(lattice, labelMatrix, index = FiniteClusterLabelList)  
                else: 
                    areaList = zeros(len(FiniteClusterLabelList))
                
                
                
                data = histogram(areaList, bins = Bins)
                nValues = data[0]
                sValues = data[1]    
                
                ds = sValues[1:]-sValues[:-1]  
                nValues = nValues.astype(float)/(ds*len(areaList))           
                
                nValues = nValues.astype(float) /latticeArea
                n += nValues
                
    n = n/nSamples    
    
    return(Bins,n) 
Example #46
def find_aperture(dates,fluxes,plot=True,starname='',outputfolder='',kepmag='na',cutoff_limit=2.):
  #
  # This definition reads a 2D array of fluxes (over time) and creates an aperture mask which can later be used to select those pixels for inclusion in light curve
  #

  # first sum all the flux over the different times, this assumes limited movement throughout the time series
  flux = np.nansum(fluxes,axis=0)

  # define which cutoff flux to use for including pixel in mask
  cutoff = cutoff_limit*np.median(flux) # perhaps a more elaborate way to define this could be found in the future but this seems to work pretty well.

  # define the aperture based on cutoff and make it into array of 1 and 0
  aperture =  np.array([flux > cutoff]) #scipy.zeros((np.shape(flux)[0],np.shape(flux)[1]), int)
  aperture = np.array(1*aperture)
  #print aperture
  outline_all = make_aperture_outline(aperture[0]) # an outline (ONLY for figure) of what we are including if we would make no breakups

  # this cool little trick allows us to measure distinct blocks of apertures, and only select the biggest one
  lw, num = measurements.label(aperture) # this numbers the different apertures distinctly
  area = measurements.sum(aperture, lw, index=np.arange(lw.max() + 1)) # this measures the size of the apertures
  aperture = area[lw].astype(int) # this replaces the 1s by the size of the aperture
  aperture = (aperture >= np.max(aperture))*1 # remake into 0s and 1s but only keep the largest aperture

  outline = make_aperture_outline(aperture[0]) # a new outline (ONLY for figure)

  if plot: # make aperture figure
    if not os.path.exists(outputfolder):
      os.makedirs(outputfolder)
    cmap = mpl.cm.get_cmap('Greys', 20)
    pl.figure('Aperture_' + str(starname))
    pl.imshow(flux,norm=LogNorm(),interpolation="none")#,cmap=cmap)
    pl.plot(outline_all[:, 0], outline_all[:, 1],color='green', zorder=10, lw=2.5)
    pl.plot(outline[:, 0], outline[:, 1],color='red', zorder=10, lw=2.5)#,label=str(kepmag))

    #pl.colorbar(orientation='vertical')
    pl.xlabel('X',fontsize=15)
    pl.ylabel('Y',fontsize=15)
    pl.legend()
    #pl.xlim([-1,18])
    #pl.ylim([-1,16])
    #pl.xticks([0,5,10,15])
    #pl.yticks([0,5,10,15])
    pl.tight_layout()
    pl.savefig(os.path.join(outputfolder,'aperture_' + str(starname)+'.pdf'))
    #pl.close()
    #pl.show()
  return aperture
Example #47
 def __init__(self,L,p):
     self.r = rand(L,L)
     self.z = self.r<p
     self.lw, self.num = measurements.label(self.z)
     self.labelList = arange(self.num + 1)
     self.area = measurements.sum(self.z, self.lw, index=self.labelList)
     self.maxArea = self.area.max()
     self.maxLabels = self.labelList[where(self.area == self.maxArea)]
     lw = self.lw
     firstRow = lw[0,:]
     lastRow = lw[-1,:]
     firstColumn = lw[:,0]
     lastColumn = lw[:,-1]
     topBottomSpanningClusters = intersect1d(firstRow, lastRow)
     leftRightSpanningClusters = intersect1d(firstColumn, lastColumn)
     self.spanningClusters = union1d(topBottomSpanningClusters, leftRightSpanningClusters)
     self.spanningClusters = delete(self.spanningClusters, where(self.spanningClusters == 0))
     self.isPercolating = (len(self.spanningClusters) > 0)
Example #48
def getcardlocations(im):
    '''Return the bbox coordinates for each card in the image.'''

    (width, height) = im.size

    # convert to a bilevel (1-bit black and white) image
    gim = im.convert('1')

    # pixels that aren't white are black
    pixels = np.reshape(np.array(gim.getdata()), (height, width))
    pixels = pixels > 240

    # get (two) connected components
    labarr, labcount = ndimage.label(pixels)
    sizes = ndimage.sum(pixels, labarr, range(labcount + 1))

    # ignore small regions - get just the two cards
    totalpixels = height/2 * width
    
    #remove based on size of image
    mask_size = sizes < 0.01 * totalpixels
    remove_pixel = mask_size[labarr]
    labarr[remove_pixel] = 0

    # relabel
    labarr, labcount = ndimage.label(labarr)

    # get locations of all cards
    cardlocs = []
    for label in range(labcount):
        locs = np.where(labarr == (label+1))
        # get bounding box
        vmin = min(locs[0])
        vmax = max(locs[0])
        hmin = min(locs[1])
        hmax = max(locs[1])
        cardlocs.append((vmin, hmin, vmax, hmax))

    # display labeled image
    #plt.imshow(labarr)
    #plt.show()

    return cardlocs
Example #49
def clusterDensity(nSamples, Lx, Ly, p):
    PValues  = 0
    PiValues = 0
    latticeArea = Lx*Ly
    for sample in range(nSamples):
            r = rand(Lx,Ly)
            lattice = r < p
            labelMatrix, nClusters = measurements.label(lattice)
            
            if(Lx==1):
                verticalPercolation = set()
            else:
                verticalPercolation   = set(labelMatrix[0,:]).intersection(labelMatrix[-1,:])
                
            if(Ly==1):
                horisontalPercolation = set()
            else:
                horisontalPercolation = set(labelMatrix[:,0]).intersection(labelMatrix[:,-1])
             
            if(Lx==1 and Ly==1):
                horisontalPercolation = set(labelMatrix[:,0]).intersection(labelMatrix[:,-1])
                verticalPercolation   = set(labelMatrix[0,:]).intersection(labelMatrix[-1,:])
                
                
            spanningClusterLabelList = array(list(horisontalPercolation|verticalPercolation))
            spanningClusterLabelList = delete(spanningClusterLabelList, where(spanningClusterLabelList == 0.0))
            
            if(len(spanningClusterLabelList) > 0.0):
                PiValues += 1.0
                areaList  = measurements.sum(lattice, labelMatrix, index = spanningClusterLabelList)               
                
                for area in areaList:                
                    PValues += area / latticeArea         
        
        
    PiValues /= nSamples
    PValues  /= nSamples
    
    return(PiValues, PValues) 
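
clusterDensity returns the spanning (percolation) probability Pi and the spanning-cluster density P averaged over nSamples lattices. A brief usage sketch follows; the probability grid, lattice size, and sample count are arbitrary choices, and the pylab/scipy star-imports that the function itself relies on are assumed to be in place.

import numpy as np

for p in np.linspace(0.55, 0.65, 5):
    Pi, P = clusterDensity(nSamples=100, Lx=64, Ly=64, p=p)
    print("p=%.2f  Pi=%.2f  P=%.3f" % (p, Pi, P))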
Example #50
File: calc.py Project: islenv/openradar
def declutter_by_area(array, area):
    """
    Remove clusters with area less or equal to area.
    """

    # Create array
    if isinstance(array, np.ma.MaskedArray):
        nonzero = np.greater(array.filled(fill_value=0), 0.1)
    else:
        nonzero = np.greater(array, 0.1)

    logging.debug("Starting size declutter.")
    labels, count1 = measurements.label(nonzero)
    logging.debug("Found {} clusters.".format(count1))
    # per-pixel map: each pixel gets the total area of the cluster it belongs to
    areas = measurements.sum(nonzero, labels, labels)
    index = np.less_equal(areas, area)
    nonzero[index] = False
    labels, count2 = measurements.label(nonzero)
    logging.debug("Removed {} clusters with area <= {}.".format(count1 - count2, area))

    if isinstance(array, np.ma.MaskedArray):
        array.data[index] = 0
    else:
        array[index] = 0
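
declutter_by_area works in place on the array it is given. A short usage sketch follows; the synthetic field and the area threshold are assumptions for illustration, and the module-level imports the function relies on (numpy as np, measurements, logging) are taken as given.

import numpy as np

field = np.zeros((10, 10))
field[1, 1] = 5.0                 # isolated single-pixel cluster
field[4:8, 4:8] = 3.0             # 4x4 cluster of 16 pixels
declutter_by_area(field, area=4)
print(np.count_nonzero(field))    # -> 16, only the single pixel was removed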
Example #51
#        title("Matrix")
        
        # Show image of labeled clusters (shuffled)
        lw, num = measurements.label(z)
#        subplot(1,3,2)
#        b = arange(lw.max() + 1) # create an array of values from 0 to lw.max() + 1
#        shuffle(b) # shuffle this array
#        shuffledLw = b[lw] # replace all values with values from b
#        imshow(shuffledLw, origin='lower', interpolation='nearest') # show image clusters as labeled by a shuffled lw
#        colorbar()
#        title("Labeled clusters")
        
        # Calculate areas
#        subplot(1,3,3)
        labelList = arange(lw.max() + 1)
        area = measurements.sum(z, lw, index=labelList)
#        areaImg = area[lw]
#        im3 = imshow(areaImg, origin='lower', interpolation='nearest')
#        colorbar()
#        title("Clusters by area")
        
        # Bounding boxes
        maxArea = area.max()
        maxLabels = labelList[where(area == maxArea)]
#        print "Found " + str(len(maxLabels)) + " clusters of size " + str(area.max())
        if area.max() <= 0:
            continue
        
        for label in maxLabels:
            sliced = measurements.find_objects(lw == label)
            if(len(sliced) > 0):
Example #52
    def _call(self, ds):
        if len(ds) > 1:
            # average all samples into one, assuming we got something like one
            # sample per subject as input
            avgr = mean_sample()
            ds = avgr(ds)
        # threshold input; at this point we only have one sample left
        thrd = ds.samples[0] > self._thrmap
        # mapper default
        mapper = IdentityMapper()
        # overwrite if possible
        if hasattr(ds, 'a') and 'mapper' in ds.a:
            mapper = ds.a.mapper
        # reverse-map input
        othrd = _verified_reverse1(mapper, thrd)
        # TODO: what is your purpose in life osamp? ;-)
        osamp = _verified_reverse1(mapper, ds.samples[0])
        # prep output dataset
        outds = ds.copy(deep=False)
        outds.fa['featurewise_thresh'] = self._thrmap
        # determine clusters
        labels, num = measurements.label(othrd,structure=np.ones([3,3,3]))
        area = measurements.sum(othrd,
                                labels,
                                index=np.arange(1, num + 1)).astype(int)
        com = measurements.center_of_mass(
            osamp, labels=labels, index=np.arange(1, num + 1))
        maxpos = measurements.maximum_position(
            osamp, labels=labels, index=np.arange(1, num + 1))
        # for the rest we need the labels flattened
        labels = mapper.forward1(labels)
        # relabel clusters starting with the biggest and increase index with
        # decreasing size
        ordered_labels = np.zeros(labels.shape, dtype=int)
        ordered_area = np.zeros(area.shape, dtype=int)
        ordered_com = np.zeros((num, len(osamp.shape)), dtype=float)
        ordered_maxpos = np.zeros((num, len(osamp.shape)), dtype=float)
        for i, idx in enumerate(np.argsort(area)):
            ordered_labels[labels == idx + 1] = num - i
            # kinda ugly, but we are looping anyway
            ordered_area[i] = area[idx]
            ordered_com[i] = com[idx]
            ordered_maxpos[i] = maxpos[idx]
        labels = ordered_labels
        area = ordered_area[::-1]
        com = ordered_com[::-1]
        maxpos = ordered_maxpos[::-1]
        del ordered_labels  # this one can be big
        # store cluster labels after forward-mapping
        outds.fa['clusters_featurewise_thresh'] = labels.copy()
        # location info
        outds.a['clusterlocations'] = \
            np.rec.fromarrays(
                [com, maxpos], names=('center_of_mass', 'max'))

        # update cluster size histogram with the actual result to get a
        # proper lower bound for p-values
        # this will make a copy, because the original matrix is int
        cluster_probs_raw = _transform_to_pvals(
            area, self._null_cluster_sizes.astype('float'))

        clusterstats = (
            [area, cluster_probs_raw],
            ['size', 'prob_raw']
        )
        # evaluate a bunch of stats for all clusters
        morestats = {}
        for cid in xrange(len(area)):
            # keep clusters on outer loop, because selection is more expensive
            clvals = ds.samples[0, labels == cid + 1]
            for id_, fx in (
                    ('mean', np.mean),
                    ('median', np.median),
                    ('min', np.min),
                    ('max', np.max),
                    ('std', np.std)):
                stats = morestats.get(id_, [])
                stats.append(fx(clvals))
                morestats[id_] = stats

        for k, v in morestats.items():
            clusterstats[0].append(v)
            clusterstats[1].append(k)

        if self.params.multicomp_correction is not None:
            # do a local import as only this tiny portion needs statsmodels
            import statsmodels.stats.multitest as smm
            rej, probs_corr = smm.multipletests(
                cluster_probs_raw,
                alpha=self.params.fwe_rate,
                method=self.params.multicomp_correction)[:2]
            # store corrected per-cluster probabilities
            clusterstats[0].append(probs_corr)
            clusterstats[1].append('prob_corrected')
            # remove cluster labels that did not pass the FWE threshold
            for i, r in enumerate(rej):
                if not r:
                    labels[labels == i + 1] = 0
            outds.fa['clusters_fwe_thresh'] = labels
        outds.a['clusterstats'] = \
            np.rec.fromarrays(clusterstats[0], names=clusterstats[1])
        return outds
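
The relabelling loop in the middle of _call (reorder clusters so that label 1 is the largest) is easy to miss. The sketch below isolates just that step using plain numpy/scipy arrays rather than the PyMVPA dataset and mapper machinery; the toy input is an assumption for illustration.

import numpy as np
from scipy import ndimage

data = np.array([[1, 1, 0, 0, 1],
                 [1, 0, 0, 0, 1],
                 [0, 0, 1, 1, 1]])
labels, num = ndimage.label(data)
area = ndimage.sum(data, labels, index=np.arange(1, num + 1)).astype(int)

ordered_labels = np.zeros_like(labels)
for i, idx in enumerate(np.argsort(area)):
    # smallest cluster gets the highest label, the largest ends up as label 1
    ordered_labels[labels == idx + 1] = num - i
area = np.sort(area)[::-1]        # sizes now follow the new labels: area[0] belongs to cluster 1
print(ordered_labels)
print(area)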
Example #53
File: SEEM.py Project: JulKoch/SEEM
def connectivity(obs,model):   
    # The s array defines the directions of possible connections.     
    s = [[1,1,1],[1,1,1],[1,1,1]] # 8 way connection
    #s = [[0,1,0],[1,1,1],[0,1,0]] # 4 way connection
    array_size=obs.shape    
    # The connectivity is computed at each percentile. 
    # First, compute percentiles of each map. Reshape to 1D     
    obs_re=np.reshape(obs,[array_size[0]*array_size[1]])
    model_re=np.reshape(model,[array_size[0]*array_size[1]]) 
    # Delete NANs    
    obs_re=obs_re[~np.isnan(obs_re)]  
    model_re=model_re[~np.isnan(model_re)]
    # Compute percentiles
    per_obs=np.percentile(obs_re,np.linspace(1,100,num=100))
    per_model=np.percentile(model_re,np.linspace(1,100,num=100))
    # The connectivity is calculated for the low and the high phase.
    # The high phase considers values exceeding the threshold percentile.
    # The low phase considers values below the threshold percentile.
    #  Initialize: low phase
    # connectivity for each threshold percentile       
    con_obs_low=np.empty([100]) 
    con_model_low=np.empty([100])
    # cluster maps for each threshold percentile
    cl_obs_low=np.empty([array_size[0],array_size[1],100])
    cl_model_low=np.empty([array_size[0],array_size[1],100])
    # Initialize: high phase
    # connectivity for each threshold percentile        
    con_obs_high=np.empty([100])
    con_model_high=np.empty([100])
    # cluster maps for each threshold percentile
    cl_obs_high=np.empty([array_size[0],array_size[1],100])
    cl_model_high=np.empty([array_size[0],array_size[1],100])
    
    # iterate through all percentiles    
    for j in range(0,100):    
        # truncate low phase at percentile j and transfer to binary        
        temp_obs_low=(obs<=per_obs[j]).astype(int) # smaller than threshold
        temp_model_low=(model<=per_model[j]).astype(int)
        # perform the cluster analysis on the binary map.
        # it returns a map of connected clusters where each cluster gets a unique ID
        cl_obs_low[:,:,j], num_obs = measurements.label(temp_obs_low,structure=s)
        cl_model_low[:,:,j], num_obs = measurements.label(temp_model_low,structure=s)
        # this returns the size (number of pixels) of each cluster
        area_obs_low = measurements.sum(temp_obs_low, cl_obs_low[:,:,j], index=np.arange(cl_obs_low[:,:,j].max() + 1))        
        area_model_low = measurements.sum(temp_model_low, cl_model_low[:,:,j], index=np.arange(cl_model_low[:,:,j].max() + 1))
        # the connectivity metric describes the proportion of pairs of cells that are connected among all possible pairs of connected cells        
        con_obs_low[j]=np.sum(np.square(area_obs_low))/np.square(np.sum(area_obs_low))      
        con_model_low[j]=np.sum(np.square(area_model_low))/np.square(np.sum(area_model_low))
        # as a performance metric the RMSE is computed for the connectivity of obs and model at each percentile
        out_low=np.sqrt(np.nanmean((con_model_low-con_obs_low)**2))        
        
        # truncate high phase at percentile j and transfer to binary         
        temp_obs_high=(obs>=per_obs[j]).astype(int) # greater than threshold
        temp_model_high=(model>=per_model[j]).astype(int)
        # perform the cluster analysis on the binary map.
        # it returns a map of connected clusters where each cluster gets a unique ID
        cl_obs_high[:,:,j], num_obs = measurements.label(temp_obs_high,structure=s)
        cl_model_high[:,:,j], num_obs = measurements.label(temp_model_high,structure=s)
        # this returns the size (number of pixels) of each cluster
        area_obs_high = measurements.sum(temp_obs_high, cl_obs_high[:,:,j], index=np.arange(cl_obs_high[:,:,j].max() + 1))        
        area_model_high = measurements.sum(temp_model_high, cl_model_high[:,:,j], index=np.arange(cl_model_high[:,:,j].max() + 1))
         # the connectivity metric describes the proportion of pairs of cells that are connected among all possible pairs of connected cells          
        con_obs_high[j]=np.sum(np.square(area_obs_high))/np.square(np.sum(area_obs_high))      
        con_model_high[j]=np.sum(np.square(area_model_high))/np.square(np.sum(area_model_high))
        # as a performance metric the RMSE is computed for the connectivity of obs and model at each percentile
        out_high=np.sqrt(np.nanmean((con_model_high-con_obs_high)**2))
    
    #output: 
    # connectivity at each percentile for obs-high phase
    # connectivity at each percentile for model-high phase
    # connectivity at each percentile for obs-low phase
    # connectivity at each percentile for model-low phase
    # cluster maps at each percentile for obs-high phase
    # cluster maps at each percentile for model-high phase
    # cluster maps at each percentile for obs-low phase
    # cluster maps at each percentile for model-low phase 
    # final connectivity metric for high phase
    # final connectivity metric for low phase
    return (con_obs_high,con_model_high,con_obs_low,con_model_low,cl_obs_high,cl_model_high,cl_obs_low,cl_model_low,out_high,out_low)    
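
A brief usage sketch for connectivity: the two random fields stand in for an observed and a modelled map of the same shape, and only the final RMSE-style scores (the last two return values) are inspected. The synthetic data are assumptions, and the module imports the function relies on (numpy as np, measurements) are taken as given.

import numpy as np

np.random.seed(0)
obs = np.random.rand(50, 50)
model = obs + 0.1 * np.random.rand(50, 50)   # a model that closely tracks the observation
results = connectivity(obs, model)
out_high, out_low = results[-2], results[-1]
print("high-phase connectivity RMSE: %.4f" % out_high)
print("low-phase connectivity RMSE:  %.4f" % out_low)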
Example #54
# the mask image and num of objects
labeled_array, num_features = measurements.label(img_bin, structure=s)
print num_features 

# list of slice index of object's box
obj_list = measurements.find_objects(labeled_array)

ob_area_list = []
#for ob in obj_list:
#h = ob[0].stop-ob[0].start
#w = ob[1].stop-ob[1].start
#print ob, h, w
img_bin_words = np.zeros_like(img_bin)
for i in range(num_features):
    area = measurements.sum(img_bin,labeled_array,index=i+1)
    if area<20:
        continue
    print area
    ob_area_list.append(area)
    img_bin_words[labeled_array==(i+1)]=img_bin[labeled_array==(i+1)]
hist(ob_area_list)
area_mode = stats.mode(ob_area_list,axis=None)
print area_mode

#print img_bin,stats.mode(img_bin,axis=None)
#print img_bin,np.max(img_bin)

# do gaussian blur to the bin img
#img_bin = filters.gaussian_filter(img_bin,0.26935)
#print img_bin,stats.mode(img_bin,axis=None)
Example #55
colorbar()
title("Matrix")

# Show image of labeled clusters (shuffled)
lw, num = measurements.label(z)
subplot(1,4,3)
b = arange(lw.max() + 1) # create an array of values from 0 to lw.max() + 1
shuffle(b) # shuffle this array
shuffledLw = b[lw] # replace all values with values from b
imshow(shuffledLw, origin='lower', interpolation='nearest') # show image clusters as labeled by a shuffled lw
colorbar()
title("Labeled clusters")

# Calculate areas
subplot(1,4,4)
area = measurements.sum(z, lw, index=arange(lw.max() + 1))
areaImg = area[lw]
im3 = imshow(areaImg, origin='lower', interpolation='nearest')
colorbar()
title("Clusters by area")

# Bounding box
sliced = measurements.find_objects(areaImg == areaImg.max())
if(len(sliced) > 0):
    sliceX = sliced[0][1]
    sliceY = sliced[0][0]
    plotxlim=im3.axes.get_xlim()
    plotylim=im3.axes.get_ylim()
    plot([sliceX.start, sliceX.start, sliceX.stop, sliceX.stop, sliceX.start], \
                      [sliceY.start, sliceY.stop, sliceY.stop, sliceY.start, sliceY.start], \
                      color="red")
Example #56
File: wds.py Project: flomertens/wise
 def get_total_intensity(self):
     labels = self.segmented_image.get_labels()
     img = self.segmented_image.get_img().data
     return measurements.sum(img, labels, self.segmentid)
Example #57
    def _call(self, ds):
        if len(ds) > 1:
            # average all samples into one, assuming we got something like one
            # sample per subject as input
            avgr = mean_sample()
            ds = avgr(ds)
        # threshold input; at this point we only have one sample left
        thrd = ds.samples[0] > self._thrmap
        # mapper default
        mapper = IdentityMapper()
        # overwrite if possible
        if hasattr(ds, 'a') and 'mapper' in ds.a:
            mapper = ds.a.mapper
        # reverse-map input
        osamp = mapper.reverse1(thrd)
        # prep output dataset
        outds = ds.copy(deep=False)
        outds.fa['featurewise_thresh'] = self._thrmap
        # determine clusters
        labels, num = measurements.label(osamp)
        area = measurements.sum(osamp,
                                labels,
                                index=np.arange(1, num + 1)).astype(int)
        # for the rest we need the labels flattened
        labels = mapper.forward1(labels)
        # relabel clusters starting with the biggest and increase index with
        # decreasing size
        ordered_labels = np.zeros(labels.shape, dtype=int)
        ordered_area = np.zeros(area.shape, dtype=int)
        for i, idx in enumerate(np.argsort(area)):
            ordered_labels[labels == idx + 1] = num - i
            ordered_area[i] = area[idx]
        area = ordered_area[::-1]
        labels = ordered_labels
        del ordered_labels  # this one can be big
        # store cluster labels after forward-mapping
        outds.fa['clusters_featurewise_thresh'] = labels.copy()
        # update cluster size histogram with the actual result to get a
        # proper lower bound for p-values
        # this will make a copy, because the original matrix is int
        cluster_probs_raw = _transform_to_pvals(
            area, self._null_cluster_sizes.astype('float'))

        if self.params.multicomp_correction is None:
            probs_corr = np.array(cluster_probs_raw)
            rej = probs_corr <= self.params.fwe_rate
            outds.a['clusterstats'] = \
                np.rec.fromarrays(
                    [area, cluster_probs_raw], names=('size', 'prob_raw'))
        else:
            # do a local import as only this tiny portion needs statsmodels
            import statsmodels.stats.multitest as smm
            rej, probs_corr = smm.multipletests(
                cluster_probs_raw,
                alpha=self.params.fwe_rate,
                method=self.params.multicomp_correction)[:2]
            # store corrected per-cluster probabilities
            outds.a['clusterstats'] = \
                np.rec.fromarrays(
                    [area, cluster_probs_raw, probs_corr],
                    names=('size', 'prob_raw', 'prob_corrected'))
            # remove cluster labels that did not pass the FWE threshold
            for i, r in enumerate(rej):
                if not r:
                    labels[labels == i + 1] = 0
            outds.fa['clusters_fwe_thresh'] = labels
        return outds
Example #58
     perc = []
     while (len(perc)==0):
         nCount = nCount + 1
         if (nCount >1000):
             print "Couldn't make percolation cluster..."
             break
         
         lattice = rand(Lx[LIndex],Ly[LIndex]) < pc
         labelMatrix, nClusters = measurements.label(lattice)
         perc_x = intersect1d(labelMatrix[0,:],labelMatrix[-1,:])
         perc = perc_x[where(perc_x > 0)]
         print nCount
     
     if len(perc) > 0:
         labelList = arange(nClusters + 1)
         areaList = measurements.sum(lattice, labelMatrix, index=labelList)
         areaLabelMatrix = areaList[labelMatrix]
         maxArea = areaList.max()
         spanningCluster = (labelMatrix == perc[0])
         
                
         # Run walk on this cluster
         l,r = PL.walk(spanningCluster)
         
         singlyConnectedSites = l*r 
         nSinglyConnctedSites[LIndex] += size(where(singlyConnectedSites > 0)[0])
         zadd = spanningCluster + singlyConnectedSites
         
 nSinglyConnctedSites[LIndex]=nSinglyConnctedSites[LIndex]/nSamples
     
 if makePlots:
Example #59
def test_cluster_count():
    if externals.versions['scipy'] < '0.10':
        raise SkipTest
    # we get a ZERO cluster count of one if there are no clusters at all
    # this is needed to keep track of the number of bootstrap samples that yield
    # no cluster at all (high threshold) in order to compute p-values when there is no
    # actual cluster size histogram
    assert_equal(gct._get_map_cluster_sizes([0, 0, 0, 0]), [0])
    # if there is at least one cluster: no ZERO count
    assert_equal(gct._get_map_cluster_sizes([0, 0, 1, 0]), [1])
    for i in range(2):  # rerun tests for bool type of test_M
        test_M = np.array([[1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0],
                           [0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1],
                           [0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
                           [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
                           [0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
                           [0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0],
                           [1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0]])
        expected_result = [5, 4, 3, 3, 2, 0, 2]  # 5 clusters of size 1,
                                                 # 4 clusters of size 2 ...

        test_ds = Dataset([test_M])
        if i == 1:
            test_M = test_M.astype(bool)

        test_M_3d = np.hstack((test_M.flatten(),
                               test_M.flatten())).reshape(2, 9, 16)
        test_ds_3d = Dataset([test_M_3d])
        # expected_result^2
        expected_result_3d = np.array([0, 5, 0, 4, 0, 3, 0,
                                       3, 0, 2, 0, 0, 0, 2])

        size = 10000  # how many times bigger than test_M_3d
        test_M_3d_big = np.hstack((test_M_3d.flatten(), np.zeros(144)))
        test_M_3d_big = np.hstack((test_M_3d_big for i in range(size))
                                  ).reshape(3 * size, 9, 16)
        test_ds_3d_big = Dataset([test_M_3d_big])
        expected_result_3d_big = expected_result_3d * size

        # check basic cluster size determination for plain arrays and datasets
        # with a single sample
        for t, e in ((test_M, expected_result),
                     (test_ds, expected_result),
                     (test_M_3d, expected_result_3d),
                     (test_ds_3d, expected_result_3d),
                     (test_M_3d_big, expected_result_3d_big),
                     (test_ds_3d_big, expected_result_3d_big)):
            assert_array_equal(np.bincount(gct._get_map_cluster_sizes(t))[1:],
                               e)
        # old
        M = np.vstack([test_M_3d.flatten()] * 10)
        # new
        ds = dataset_wizard([test_M_3d] * 10)
        assert_array_equal(M, ds)
        expected_result = Counter(np.hstack([gct._get_map_cluster_sizes(test_M_3d)] * 10))
        assert_array_equal(expected_result,
                           gct.get_cluster_sizes(ds))

        # test the same with some arbitrary per-feature threshold
        thr = 4
        labels, num = measurements.label(test_M_3d)
        area = measurements.sum(test_M_3d, labels,
                                index=np.arange(labels.max() + 1))
        cluster_sizes_map = area[labels]  # .astype(int)
        thresholded_cluster_sizes_map = cluster_sizes_map > thr
        # old
        M = np.vstack([cluster_sizes_map.flatten()] * 10)
        # new
        ds = dataset_wizard([cluster_sizes_map] * 10)
        assert_array_equal(M, ds)
        expected_result = Counter(np.hstack(
            [gct._get_map_cluster_sizes(thresholded_cluster_sizes_map)] * 10))
        th_map = np.ones(cluster_sizes_map.flatten().shape) * thr
        # threshold dataset by hand
        ds.samples = ds.samples > th_map
        assert_array_equal(expected_result,
                           gct.get_cluster_sizes(ds))