Example #1
def shapeindex_preprocess(self, im):
    '''Apply the shape index map at three scales.'''
    sh = np.zeros((im.shape[0], im.shape[1], 3))
    if np.max(im) == 0:
        return sh

    # reflect mode minimizes edge artifacts
    sh[:, :, 0] = shape_index(im, 1, mode='reflect')
    sh[:, :, 1] = shape_index(im, 1.5, mode='reflect')
    sh[:, :, 2] = shape_index(im, 2, mode='reflect')
    # sh = 0.5*(sh+1.0)
    return sh
Example #2
def run(params):
    image_location = params['inputImagePath']
    result_location = params['resultPath']
    sigma = float(params['sigma'])
    tCount = int(params['TCount'])
    zCount = int(params['ZCount'])
    if not os.path.exists(image_location):
        print(f"Error: {image_location} does not exist")
        return

    image_data = imread(image_location)
    dims = image_data.shape
    shape_image = np.empty(image_data.shape, dtype=np.float32)

    # 3D+T
    if tCount > 1 and zCount > 1:
        print(f"Applying to 3D+T case with dims: {image_data.shape}")
        for t in range(0, dims[0]):
            for z in range(0, dims[1]):
                shape_image[t, z, :, :] = shape_index(image_data[t, z, :, :],
                                                      sigma=sigma,
                                                      mode='reflect').astype(
                                                          np.float32)
        axes = 'YXZT'
    # 2D+T or 3D
    elif (tCount > 1 and zCount == 1) or (tCount == 1 and zCount > 1):
        print(f"Applying to 2D+T or 3D case with dims: {image_data.shape}")
        for d in range(0, dims[0]):
            shape_image[d, :, :] = shape_index(image_data[d, :, :],
                                               sigma=sigma,
                                               mode='reflect').astype(
                                                   np.float32)
        if tCount > 1:
            axes = 'YXT'
        else:
            axes = 'YXZ'
    # 2D
    else:
        print(f"Applying to 2D case with dims: {image_data.shape}")
        shape_image = shape_index(image_data, sigma=sigma, mode='reflect')
        axes = 'YX'

    # NaNs are usually returned - convert these to valid pixel values
    shape_image = np.nan_to_num(shape_image)

    if image_data.dtype == np.uint16:
        shape_image = img_as_uint(shape_image)
    else:
        shape_image = img_as_ubyte(shape_image)

    imsave(result_location, shape_image, metadata={'axes': axes})
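A minimal, hypothetical way to invoke the run() function above; the dictionary keys mirror the ones the function reads, the file paths and values are placeholders, and the imread/imsave/shape_index imports used by the snippet are assumed to be in scope:

if __name__ == '__main__':
    run({
        'inputImagePath': 'input.tif',        # placeholder path
        'resultPath': 'shape_index_out.tif',  # placeholder path
        'sigma': '1.0',                       # Gaussian sigma passed to shape_index
        'TCount': '1',                        # number of time points
        'ZCount': '1',                        # number of z-slices
    })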
Example #3
def get_shape_index_features(data, size=10):
    # http://scikit-image.org/docs/dev/auto_examples/features_detection/plot_shape_index.html#sphx-glr-auto-examples-features-detection-plot-shape-index-py
    # -- filterbank1 on original image
    result = []
    for i in range(len(data)):
        img = data[i]
        image_gray = color.rgb2gray(img)
        s = shape_index(image_gray)

        # In this example we want to detect 'spherical caps',
        # so we threshold the shape index map to
        # find points which are 'spherical caps' (~1)

        target = 1
        delta = 0.05

        point_y, point_x = np.where(np.abs(s - target) < delta)
        point_z = image_gray[point_y, point_x]

        # The shape index map relentlessly produces the shape, even that of noise.
        # In order to reduce the impact of noise, a Gaussian filter can be applied
        # to it first, as in the commented-out lines below.

        # s_smooth = ndi.gaussian_filter(s, sigma=0.5)

        # point_y_s, point_x_s = np.where(np.abs(s_smooth - target) < delta)
        # point_z_s = image_gray[point_y_s, point_x_s]

        # plt.imshow(hog_image)
        # plt.show()
        point_z.sort()
        result.append(point_z[-size:])
    result = normalize_features(result, v_max=1.0, v_min=0.0)
    return result
Example #4
def get_hand_crafted(one_image):
    """ Extracts various features out of the given image
    :param array one_image: the image from which features are to be extracted
    :return: the features associated with this image
    :rtype: Numpy array of size (38, 1)
    """
    #Select wavelet decomposition level so as to have the
    #same number of approximation coefficients
    if (one_image.shape[0] == 1000):
        wavedec_level = 9
    elif (one_image.shape[0] == 64):
        wavedec_level = 5

    hist = histogram(one_image, nbins=20, normalize=True)
    features = hist[0]
    blob_lo = blob_log(one_image,
                       max_sigma=2.5,
                       min_sigma=1.5,
                       num_sigma=5,
                       threshold=0.05)
    shape_ind = shape_index(one_image)
    shape_hist = np.histogram(shape_ind, range=(-1, 1), bins=9)
    shan_ent = shannon_entropy(one_image)
    max_val = one_image.max()
    min_val = one_image.min()
    variance_val = np.var(one_image)
    wavelet_approx = pywt.wavedec2(one_image, 'haar',
                                   level=wavedec_level)[0].flatten()
    features = np.concatenate([
        features, [blob_lo.shape[0]], shape_hist[0], [shan_ent], [max_val],
        [min_val], [variance_val], wavelet_approx
    ])
    return features
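A hedged usage sketch for get_hand_crafted() above; the random array is a placeholder input, and the skimage/pywt helpers used by the function are assumed to be imported. Note that wavedec_level is only assigned when the first image dimension is 1000 or 64, so other sizes would raise an UnboundLocalError.

import numpy as np

one_image = np.random.rand(64, 64)       # placeholder grayscale image
features = get_hand_crafted(one_image)   # 1-D feature vector
print(features.shape)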
Example #5
def calc_shape_index_scores(dose_dataset):
    shape_key = [
        -1, -7 / 8, -5 / 8, -3 / 8, -1 / 8, 1 / 8, 3 / 8, 5 / 8, 7 / 8, 1
    ]

    scores_list = []
    arr_size = len(dose_dataset.pixel_array)

    for j in range(0, len(shape_key) - 1):
        tick = 0
        delta = 0.05

        for i in range(0, arr_size):
            im = dose_dataset.pixel_array[i]
            sh_im = shape_index(im)
            s_smooth = ndi.gaussian_filter(sh_im, sigma=0.875)

            point_y_s, point_x_s = np.where((sh_im < shape_key[j + 1])
                                            & (sh_im > shape_key[j]))
            point_z_s = im[point_y_s, point_x_s]

            tick += len(point_z_s)

        scores_list.append(np.around(tick / arr_size, 3))
    return scores_list
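For reference (not part of the original snippet), the nine bins delimited by shape_key above correspond to the Koenderink and van Doorn shape classes tabulated in the scikit-image shape_index documentation:

# shape index interval -> local shape class
#   [-1,   -7/8)  spherical cup        [ 1/8,  3/8)  saddle ridge
#   [-7/8, -5/8)  trough               [ 3/8,  5/8)  ridge
#   [-5/8, -3/8)  rut                  [ 5/8,  7/8)  dome
#   [-3/8, -1/8)  saddle rut           [ 7/8,  1]    spherical cap
#   [-1/8,  1/8)  saddle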
Example #6
def process_single_image(filename, image_format, scale_metadata_path,
                         threshold_radius, smooth_radius, brightness_offset,
                         crop_radius, smooth_method):
    image = imageio.imread(filename, format=image_format)
    scale = _get_scale(image, scale_metadata_path)
    if crop_radius > 0:
        c = crop_radius
        image = image[c:-c, c:-c]
    pixel_threshold_radius = int(np.ceil(threshold_radius / scale))

    pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
    thresholded = pre.threshold(image,
                                sigma=pixel_smoothing_radius,
                                radius=pixel_threshold_radius,
                                offset=brightness_offset,
                                smooth_method=smooth_method)
    quality = shape_index(image, sigma=pixel_smoothing_radius, mode='reflect')
    skeleton = morphology.skeletonize(thresholded) * quality
    framedata = csr.summarise(skeleton, spacing=scale)
    framedata['squiggle'] = np.log2(framedata['branch-distance'] /
                                    framedata['euclidean-distance'])
    framedata['scale'] = scale
    framedata.rename(columns={'mean pixel value': 'mean shape index'},
                     inplace=True)
    framedata['filename'] = filename
    return image, thresholded, skeleton, framedata
Example #7
def experimental_thresholding(image, mask=None, window_size=15,
                              gaussian_sigma=3.0, shift=0.2, target=-0.5, quotient=1.2,
                              return_threshold=False, **kwargs):
    """
    A novel thresholding method based upon the shape index as defined by [Koenderink1992]_ and the
    [Bataineh2011]_ automatic adaptive thresholding. The method is due to be explained in detail in the future.
    
    .. [Koenderink1992] Koenderink and van Doorn (1992) Image Vision Comput.
       DOI: `10.1016/0262-8856(92)90076-F <https://dx.doi.org/10.1016/0262-8856(92)90076-F>`_
    .. [Bataineh2011] Bataineh et al. (2011) Pattern Recognit. Lett.
       DOI: `10.1016/j.patrec.2011.08.001 <https://dx.doi.org/10.1016/j.patrec.2011.08.001>`_
      
    
    :param image: Input image
    :param mask: Possible mask denoting a ROI
    :param window_size: Window size
    :param gaussian_sigma: Sigma of the Gaussian used for smoothing
    :param shift: Shift parameter
    :param target: Target shape index parameter
    :param quotient: Quotient parameter
    :param return_threshold: Whether to return a binarization, or the actual threshold values
    :param kwargs: For compatibility
    :return: Binarized image (``image < threshold``), or the threshold values if ``return_threshold`` is True
    """
    # novel method based upon shape index and Bataineh thresholding

    means, stddev = mean_and_std(image, window_size)

    with np.errstate(invalid='ignore'):
        sim = shape_index(image, gaussian_sigma)

    if mask is not None:
        adaptive_stddev = (stddev - stddev[mask].min()) / (stddev[mask].max() - stddev[mask].min())
        image_mean = image[mask].mean()
    else:
        adaptive_stddev = (stddev - stddev.min()) / (stddev.max() - stddev.min())
        image_mean = image.mean()

    if numexpr:
        threshold = numexpr.evaluate(
            "(exp((-(sim - target)**2)/quotient) + shift)*"
            "means*"
            "((image_mean + stddev) * (stddev + adaptive_stddev))/(means**2 - stddev)"
        )
    else:
        threshold = (
            (np.exp((-(sim - target)**2)/quotient) + shift) *
            means *
            ((image_mean + stddev) * (stddev + adaptive_stddev))/(means**2 - stddev)
        )

    if return_threshold:
        return threshold
    else:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return image < threshold
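A minimal usage sketch for experimental_thresholding() above, assuming the mean_and_std helper and the optional numexpr module are available as in the surrounding module, and using a stock scikit-image test image as a stand-in input:

from skimage import data, img_as_float

img = img_as_float(data.camera())                             # any 2-D grayscale image
binary = experimental_thresholding(img)                       # boolean mask (image < threshold)
thr = experimental_thresholding(img, return_threshold=True)   # per-pixel threshold values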
Example #8
def test_shape_index():
    square = np.zeros((5, 5))
    square[2, 2] = 4
    s = shape_index(square, sigma=0.1)
    assert_almost_equal(
        s, np.array([[ np.nan, np.nan,   -0.5, np.nan, np.nan],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [   -0.5, np.nan,     -1, np.nan,   -0.5],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [ np.nan, np.nan,   -0.5, np.nan, np.nan]])
    )
Example #9
def test_shape_index():
    square = np.zeros((5, 5))
    square[2, 2] = 4
    s = shape_index(square, sigma=0.1)
    assert_almost_equal(
        s, np.array([[ np.nan, np.nan,   -0.5, np.nan, np.nan],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [   -0.5, np.nan,     -1, np.nan,   -0.5],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [ np.nan, np.nan,   -0.5, np.nan, np.nan]])
    )
Example #10
def test_shape_index():
    square = np.zeros((5, 5))
    square[2, 2] = 4
    with expected_warnings(['divide by zero', 'invalid value']):
        s = shape_index(square, sigma=0.1)
    assert_almost_equal(
        s,
        np.array([[np.nan, np.nan, -0.5, np.nan, np.nan],
                  [np.nan, 0, np.nan, 0, np.nan],
                  [-0.5, np.nan, -1, np.nan, -0.5],
                  [np.nan, 0, np.nan, 0, np.nan],
                  [np.nan, np.nan, -0.5, np.nan, np.nan]]))
Example #11
def test_shape_index():
    square = np.zeros((5, 5))
    square[2, 2] = 4
    with expected_warnings(['divide by zero', 'invalid value']):
        s = shape_index(square, sigma=0.1)
    assert_almost_equal(
        s, np.array([[ np.nan, np.nan,   -0.5, np.nan, np.nan],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [   -0.5, np.nan,     -1, np.nan,   -0.5],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [ np.nan, np.nan,   -0.5, np.nan, np.nan]])
    )
Example #12
def postprocess_ws(im, yp):
    '''Watershed based postprocessing using image and its pixel probabilities'''
    # mask dilated
    mask = (yp[:, :, 0] > 0.4)
    # watershed potential
    d = shape_index(im, 1.5, mode='reflect')

    # markers
    # get poles from contour predictions as markers
    sh = shape_index(yp[:, :, 1], 1, mode='reflect')
    markers, c = label(yp[:, :, 0] > 0.95)
    # the markers should be unique to each cell
    markers = markers * (sh < -0.5)  # only poles

    ws = watershed(d,
                   markers=markers,
                   watershed_line=True,
                   mask=mask,
                   compactness=1,
                   connectivity=1)
    return ws
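A hypothetical call to postprocess_ws() above; im is the raw 2-D image and yp a (rows, cols, >= 2) probability array, with channel 0 the cell-interior probability and channel 1 the contour probability (for example, the output of a segmentation network):

labels = postprocess_ws(im, yp)   # labelled watershed segmentation
n_cells = labels.max()            # number of segmented regions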
Example #13
def add_noise(im, sensitivity=0.1, invert=False, seed=42):
    '''Add random noise to the image, but only where the shape index map is around -0.4 (phase-contrast images).'''
    t0, s = -0.4, 0.025
    sim = -shape_index(im, 1)
    if invert:
        sim = -sim
    ed = np.logical_and(sim > t0 - s, sim < t0 + s) * 1.0  # band of width 2*s around t0
    #ed = erosion(ed)
    np.random.seed(seed)
    noise = np.random.rand(ed.shape[0], ed.shape[1])
    noise = noise * (ed + 0.5 * np.random.rand(ed.shape[0], ed.shape[1]))
    img = normalize2max(im) + sensitivity * noise
    return img
Example #14
def preprocess(self, im):
    n = len(self.scalesvals)
    sh = np.zeros((im.shape[0], im.shape[1], n))

    if np.max(im) == 0:
        return sh
    pw = 15
    im = pad(im, pw, 'reflect')
    sh = np.zeros((im.shape[0], im.shape[1], n))
    for i in range(n):
        sh[:, :, i] = shape_index(im, self.scalesvals[i])

    return sh[pw:-pw, pw:-pw, :]
Example #15
def test_shape_index():
    # software floating point arm doesn't raise a warning on divide by zero
    # https://github.com/scikit-image/scikit-image/issues/3335
    square = np.zeros((5, 5))
    square[2, 2] = 4
    with expected_warnings([r'divide by zero|\A\Z', r'invalid value|\A\Z']):
        s = shape_index(square, sigma=0.1)
    assert_almost_equal(
        s, np.array([[ np.nan, np.nan,   -0.5, np.nan, np.nan],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [   -0.5, np.nan,     -1, np.nan,   -0.5],
                     [ np.nan,      0, np.nan,      0, np.nan],
                     [ np.nan, np.nan,   -0.5, np.nan, np.nan]])
    )
Example #16
def test_shape_index():
    # software floating point arm doesn't raise a warning on divide by zero
    # https://github.com/scikit-image/scikit-image/issues/3335
    square = np.zeros((5, 5))
    square[2, 2] = 4
    with expected_warnings([r'divide by zero|\A\Z', r'invalid value|\A\Z']):
        s = shape_index(square, sigma=0.1)
    assert_almost_equal(
        s,
        np.array([[np.nan, np.nan, -0.5, np.nan, np.nan],
                  [np.nan, 0, np.nan, 0, np.nan],
                  [-0.5, np.nan, -1, np.nan, -0.5],
                  [np.nan, 0, np.nan, 0, np.nan],
                  [np.nan, np.nan, -0.5, np.nan, np.nan]]))
Example #17
def calc_shape_diff_scores(dose_dataset):
    shape_key = [-1, -7/8, -5/8, -3/8, -1/8, 1/8, 3/8, 5/8, 7/8, 1]
    arr_list = []
    scores_list = []
    arr_size = len(dose_dataset.pixel_array)

    for i in range(0, arr_size):
        im = dose_dataset.pixel_array[i]
        sh_im = shape_index(im)
        arr_list.append(sh_im)

    score = np.abs(np.diff(np.nan_to_num(arr_list), axis=0).sum()) / arr_size

    # scores_list.append(np.around(tick/arr_size, 3))
    return score
Example #18
def main_loop(pps_list, mirror, params_dict):

    virago_dir = '{}/v3-analysis'.format(os.getcwd())
    vcount_dir = '{}/vcounts'.format(virago_dir)
    img_dir = '{}/processed_images'.format(virago_dir)
    histo_dir = '{}/histograms'.format(virago_dir)
    overlay_dir = '{}/overlays'.format(virago_dir)
    filo_dir = '{}/filo'.format(virago_dir)
    fluor_dir = '{}/fluor'.format(virago_dir)

    if not os.path.exists(virago_dir):
        os.makedirs(virago_dir)
        if not os.path.exists(img_dir): os.makedirs(img_dir)
        if not os.path.exists(histo_dir): os.makedirs(histo_dir)
        if not os.path.exists(fluor_dir): os.makedirs(fluor_dir)
        if not os.path.exists(filo_dir): os.makedirs(filo_dir)
        if not os.path.exists(overlay_dir): os.makedirs(overlay_dir)
        if not os.path.exists(vcount_dir): os.makedirs(vcount_dir)

    cam_micron_per_pix = params_dict['cam_micron_per_pix']
    mag = params_dict['mag']
    pix_per_um = mag / cam_micron_per_pix
    spacing = 1 / pix_per_um
    conv_factor = (cam_micron_per_pix / mag)**2

    exo_toggle = params_dict['exo_toggle']
    cv_cutoff = params_dict['cv_cutoff']

    perc_range = params_dict['perc_range']

    # IRISmarker = imread('/usr3/bustaff/ajdevaux/virago/images/IRISmarker_new.tif')
    IRISmarker = params_dict['IRISmarker']
    # IRISmarker_exo = skio.imread('images/IRISmarker_v4_topstack.tif')

    # pps_list, mirror = vpipes.mirror_finder(pps_list)

    passes_per_spot = len(pps_list)
    spot_ID = pps_list[0][:-8]
    scans_counted = [int(file.split(".")[2]) for file in pps_list]
    first_scan = min(scans_counted)

    circle_dict, marker_dict, overlay_dict, shift_dict = {}, {}, {}, {}

    vdata_dict = vpipes.get_vdata_dict(exo_toggle, version="3.x")

    # missing_data = set(range(1,pass_counter+1)).difference(scans_counted)
    #
    # if missing_data != set():
    #     print("Missing image files... fixing...\n")
    #     for scan in missing_data:
    #         bad_scan = '{0}.{1}'.format(*(spot_ID, str(scan).zfill(3)))
    #         vdata_dict.update({'img_name':bad_scan})
    #
    #         with open('{}/{}.vdata.txt'.format(vcount_dir,bad_scan),'w') as f:
    #             for k,v in vdata_dict.items():
    #                 f.write('{}: {}\n'.format(k,v))
    #         print("Writing blank data files for {}".format(bad_scan))

    total_shape_df = pd.DataFrame()

    for scan in range(
            0, passes_per_spot):  ##Main Loop for image processing begins here.
        img_stack = tuple(file for file in pps_list
                          if file.startswith(pps_list[scan]))
        fluor_files = [
            file for file in img_stack if file.split(".")[-2] in 'ABC'
        ]
        if fluor_files:
            img_stack = tuple(file for file in img_stack
                              if file not in fluor_files)
            print(
                "\nFluorescent channel(s) detected: {}\n".format(fluor_files))

        topstack_img = img_stack[0]
        name_split = topstack_img.split('.')
        img_name = '.'.join(name_split[:-1])
        spot_num, pass_num = map(int, name_split[1:3])

        if name_split[-1] == 'tif':
            tiff_toggle = True
        else:
            tiff_toggle = False

        pic3D = vpipes.load_image(img_stack, tiff_toggle)

        print("{} Loaded\n".format(img_name))

        validity = True

        #     if convert_tiff == True:
        #         vpipes.pgm_to_tiff(pic3D, img_name, img_stack,
        #                            tiff_compression=1, archive_pgm=True)
        #
        zslice_count, nrows, ncols = pic3D.shape
        total_pixels = nrows * ncols

        if mirror.size == total_pixels:
            pic3D = pic3D / mirror
            print("Applying mirror to image stack...\n")

        pic3D_norm = np.uint8(normalize(pic3D, None, 0, 255, NORM_MINMAX))

        pic3D_clahe = vimage.cv2_clahe_3D(pic3D_norm,
                                          kernel_size=(1, 1),
                                          cliplim=4)

        pic3D_rescale = vimage.rescale_3D(pic3D_clahe, perc_range=perc_range)

        print("Contrast adjusted\n")
        #Many operations are on the Z-stack compressed image.
        #Several methods to choose, but Standard Deviation works well.
        # maxmin_proj_rescale = np.max(pic3D_rescale, axis = 0) - np.min(pic3D_rescale, axis = 0)
        sd_proj_rescale = np.std(pic3D_rescale, axis=0)
        #Convert to 8-bit for OpenCV compatibility
        sd_proj_rescale = np.uint8(
            normalize(sd_proj_rescale, None, 0, 255, NORM_MINMAX))

        if pass_num == 1:
            marker = IRISmarker
        else:
            marker = found_markers

        if img_name not in marker_dict:
            marker_locs = vimage.marker_finder(pic3D_rescale[0],
                                               marker=marker,
                                               thresh=0.6)
            marker_dict[img_name] = marker_locs
        else:
            marker_locs = marker_dict[img_name]

        pos_plane_list = vquant.measure_focal_plane(
            pic3D_norm, marker_locs, exo_toggle, marker_shape=IRISmarker.shape)

        if pos_plane_list != []:
            pos_plane = max(pos_plane_list)
        else:
            pos_plane = zslice_count // 3

        pic_rescale_pos = pic3D_rescale[pos_plane]

        overlay_dict['.'.join(img_name.split('.')[1:])] = sd_proj_rescale

        print("Using image {} from stack\n".format(
            str(pos_plane + 1).zfill(3)))

        # if pass_counter <= 15:
        #     overlay_mode = 'series'
        # else:
        overlay_mode = 'baseline'

        if pass_num == first_scan:
            print("First Valid Scan\n")
            valid_shift = (0, 0)
            overlay_toggle = False

        else:
            prescan_img, postscan_img = vimage._dict_matcher(overlay_dict,
                                                             spot_num,
                                                             pass_num,
                                                             mode=overlay_mode)
            overlay_toggle = True
            # if img_name in shift_dict:
            #     valid_shift = shift_dict[img_name]
            # else:
            ORB_shift = vimage.measure_shift_ORB(prescan_img,
                                                 postscan_img,
                                                 ham_thresh=10,
                                                 show=False)
            # for coord in ORB_shift:
            #     if abs(coord) < 75:
            #
            valid_shift = ORB_shift
            #     else: ##In case ORB fails to give a good value
            #         # overlay_toggle = False
            #         print("Using alternative shift measurement...\n")
            #         mean_shift, overlay_toggle = vimage.measure_shift(marker_dict,pass_num,
            #                                                             spot_num,mode=overlay_mode
            #         )
            #         valid_shift = mean_shift

            print("Valid Shift: {}\n".format(valid_shift))

            img_overlay = vimage.overlayer(prescan_img, postscan_img,
                                           valid_shift)
            shape_mask = vimage.shape_mask_shift(shape_mask, valid_shift)
            img_overlay_difference = np.int16(img_overlay[:, :, 1]) - np.int16(
                img_overlay[:, :, 0])
            median_overlay = np.median(img_overlay_difference)
            sd_overlay = np.std(img_overlay_difference)
            print(median_overlay, sd_overlay)

            # overlay_name = "{}_overlay_{}".format(img_name, overlay_mode)
            # vimage.gen_img_deets(img_overlay_difference, name=overlay_name, savedir=overlay_dir)

        if (overlay_toggle == False) & (pass_num != first_scan):
            validity = False
            print("No compatible markers, cannot compute shift")
        #
        # else:
        #     print("Cannot overlay images\n")
        if spot_num in circle_dict:  #Get the location of the Antibody spot
            spot_coords = circle_dict[spot_num]
            shift_x = spot_coords[0] + valid_shift[1]
            shift_y = spot_coords[1] + valid_shift[0]
            spot_coords = (shift_x, shift_y, spot_coords[2])

        else:  #Find the Antibody spot if it has not already been determined
            circles = None
            cannyMax = 200
            cannyMin = 100
            while type(circles) == type(None):
                circles = HoughCircles(sd_proj_rescale,
                                       HOUGH_GRADIENT,
                                       1,
                                       minDist=500,
                                       param1=cannyMax,
                                       param2=cannyMin,
                                       minRadius=300,
                                       maxRadius=600)
                cannyMax -= 50
                cannyMin -= 25
            spot_coords = tuple(map(lambda x: round(x, 0), circles[0][0]))
            print("Spot center coordinates (row, column, radius): {}\n".format(
                spot_coords))

        circle_dict[spot_num] = spot_coords

        row, col = np.ogrid[:nrows, :ncols]
        width = col - spot_coords[0]
        height = row - spot_coords[1]
        rad = spot_coords[2] - 25
        disk_mask = (width**2 + height**2 > rad**2)

        marker_mask, found_markers = vimage.marker_masker(
            pic3D_rescale[0], marker_locs, marker)

        full_mask = disk_mask + marker_mask

        # vimage.image_details(pic3D_norm[pos_plane], pic3D_clahe[pos_plane], pic3D_rescale[pos_plane].copy(), pic_canny)

        # maxmin_proj_rescale_masked = np.ma.array(maxmin_proj_rescale, mask=full_mask).filled(fill_value=np.nan)

        # maxmin_median = np.nanmedian(maxmin_proj_rescale_masked)

        #*********************************************************************************************#
        # if pass_num > first_scan:

        # if overlay_toggle == True:
        # img_overlay = np.ma.array(img_overlay, mask=full_mask).filled(fill_value=np.nan)

        # if Ab_spot_mode == False:
        #     pic_to_show = sd_proj_rescale
        # elif exo_toggle == True:
        #     pic_to_show = sd_proj_rescale
        # else:

        if pass_num == 1:
            pic_to_show = sd_proj_rescale
        else:
            pic_to_show = img_overlay_difference

    #*********************************************************************************************#
        with warnings.catch_warnings():
            ##RuntimeWarning ignored: invalid values are expected
            warnings.simplefilter("ignore")
            warnings.warn(RuntimeWarning)

            shapedex = shape_index(pic_rescale_pos)
            shapedex = np.ma.array(shapedex,
                                   mask=full_mask).filled(fill_value=np.nan)
            if pass_num > first_scan:
                shapedex = np.ma.array(shapedex,
                                       mask=shape_mask).filled(fill_value=-1)

            shapedex_gauss = gaussian_filter(shapedex, sigma=1)

        pix_area = np.count_nonzero(np.invert(np.isnan(shapedex)))
        area_sqmm = round((pix_area * conv_factor) * 1e-6, 6)

        ##Pixel topology classifications
        background = 0
        ridge = 0.5
        sphere = 1

        bg_rows, bg_cols = zip(*vquant.classify_shape(
            shapedex, sd_proj_rescale, background, delta=0.25, intensity=0))
        sd_proj_bg = sd_proj_rescale[bg_rows, bg_cols]

        sd_proj_bg_median = np.median(sd_proj_bg)  ##Important
        sd_proj_bg_stdev = np.std(sd_proj_bg)
        print("Median intensity of spot background={}, SD={}".format(
            round(sd_proj_bg_median, 4), round(sd_proj_bg_stdev, 4)))

        # if Ab_spot_mode == True:
        # if exo_toggle == True:
        ridge_thresh = sd_proj_bg_median * 3.5
        sphere_thresh = sd_proj_bg_median * 2.5
        ridge_thresh_s = sd_proj_bg_median * 3.5
        # else:
        #     ridge_thresh   = sd_proj_bg_median+sd_proj_bg_stdev*2
        #     sphere_thresh  = sd_proj_bg_median+sd_proj_bg_stdev*2
        #     ridge_thresh_s = sd_proj_bg_median+sd_proj_bg_stdev*3
        # else:
        #     ridge_thresh   = sd_proj_bg_median+sd_proj_bg_stdev*2.75
        #     sphere_thresh  = sd_proj_bg_median+sd_proj_bg_stdev*2.75
        #     ridge_thresh_s = sd_proj_bg_median+sd_proj_bg_stdev*2.75

        ridge_list = vquant.classify_shape(shapedex,
                                           sd_proj_rescale,
                                           ridge,
                                           delta=0.25,
                                           intensity=ridge_thresh)

        sphere_list = vquant.classify_shape(shapedex,
                                            sd_proj_rescale,
                                            sphere,
                                            delta=0.2,
                                            intensity=sphere_thresh)

        ridge_list_s = vquant.classify_shape(shapedex_gauss,
                                             sd_proj_rescale,
                                             ridge,
                                             delta=0.3,
                                             intensity=ridge_thresh_s)

        pix_list = ridge_list + sphere_list
        ridge_list_s = list(set(pix_list) - set(ridge_list_s))
        pix_list = pix_list + ridge_list_s

        pic_binary = np.zeros_like(sd_proj_rescale, dtype=int)

        if not pix_list == []:
            rows, cols = zip(*pix_list)

            pic_binary[rows, cols] = 1

            pic_binary = binary_fill_holes(pic_binary)

    #*********************************************************************************************#
        vdata_dict.update({
            'img_name': img_name,
            # 'spot_type': spot_type,
            'area_sqmm': area_sqmm,
            'valid_shift': valid_shift,
            'overlay_mode': overlay_mode,
            'pos_plane': pos_plane,
            'spot_coords': spot_coords,
            'marker_locs': marker_locs,
            'classifier_median': round(sd_proj_bg_median, 4)
        })

        #*********************************************************************************************#
        ##EXTRACT DATA FROM THE BINARY IMAGE
        prop_list = [
            'label', 'coords', 'area', 'centroid', 'moments_central', 'bbox',
            'filled_image', 'major_axis_length', 'minor_axis_length'
        ]

        shape_df = vquant.binary_data_extraction(pic_binary,
                                                 pic3D[pos_plane],
                                                 prop_list,
                                                 pix_range=(3, 500))
        print('S')
        if not shape_df.empty:

            particle_mask = vquant.particle_masker(pic_binary, shape_df,
                                                   pass_num, first_scan)

            if pass_num == first_scan:
                shape_mask = binary_dilation(particle_mask, iterations=3)
            else:
                shape_mask = np.add(
                    shape_mask, binary_dilation(particle_mask, iterations=2))

            shape_df['pass_number'] = [pass_num] * len(shape_df.index)
            shape_df['coords'] = shape_df.coords.apply(
                lambda a: [tuple(x) for x in a])
            shape_df['bbox'] = shape_df.bbox.map(vquant.bbox_verts)

            print('R')
        else:
            print("----No valid particle shapes----\n")
            vdata_dict.update({'total_valid_particles': 0, 'validity': False})
            print(shape_df)

            with open('{}/{}.vdata.txt'.format(vcount_dir, img_name),
                      'w') as f:
                for k, v in vdata_dict.items():
                    f.write('{}: {}\n'.format(k, v))

            continue
    #*********************************************************************************************#
        filo_pts_tot, round_pts_tot = [], []
        z_intensity_list, max_z_slice_list, max_z_stacks, shape_validity = [],[],[],[]
        greatest_max_list, sd_above_med_difference = [], []
        intensity_increase_list = []
        print('Measuring particle intensities...\n')
        for coord_array in shape_df.coords:

            coord_set = set(coord_array)
            filo_pts = len(coord_set.intersection(ridge_list))

            filo_pts = filo_pts + (len(coord_set.intersection(ridge_list_s)) *
                                   0.15)

            round_pts = len(coord_set.intersection(sphere_list))

            filo_pts_tot.append(filo_pts)
            round_pts_tot.append(round_pts)

            # if pic3D.ndim > 2:

            all_z_stacks = np.array(
                [pic3D[:, coords[0], coords[1]] for coords in coord_array])
            greatest_max = np.max(all_z_stacks)
            max_z_stack = all_z_stacks[np.where(
                all_z_stacks == np.max(all_z_stacks))[0][0]].tolist()
            if max_z_stack[0] >= max_z_stack[-1]:
                shape_validity.append(True)
            else:
                shape_validity.append(False)
            maxmax_z = max(max_z_stack)
            max_z_slice = max_z_stack.index(maxmax_z)
            z_intensity = (maxmax_z - min(max_z_stack)) * 100

            std_z_stack = list(np.round(np.std(all_z_stacks, axis=0), 4))

            max_z_slice_list.append(max_z_slice)
            max_z_stacks.append(max_z_stack)
            z_intensity_list.append(z_intensity)
            greatest_max_list.append(greatest_max)

            if (pass_num > first_scan) & (overlay_toggle == True):
                intensity_increase = max([
                    img_overlay_difference[coords[0], coords[1]]
                    for coords in coord_array
                ])
                intensity_increase_list.append(intensity_increase)
            else:
                intensity_increase_list = [np.nan] * len(shape_df)

        print('N')

        shape_df['max_z_slice'] = max_z_slice_list
        shape_df['max_z_stack'] = max_z_stacks
        shape_df['z_intensity'] = z_intensity_list

        shape_df['greatest_max'] = greatest_max_list
        shape_df['validity'] = shape_validity

        shape_df['filo_points'] = filo_pts_tot
        shape_df['round_points'] = round_pts_tot

        shape_df['intensity_increase'] = intensity_increase_list

        bbox_pixels = [
            vquant.get_bbox_pixels(bbox, pic3D[z])
            for i, z, bbox in shape_df[['max_z_slice', 'bbox']].itertuples()
        ]

        median_bg_list, shape_df['cv_bg'] = zip(*map(
            lambda x: (np.median(x), np.std(x) / np.mean(x)), bbox_pixels))

        shape_df['perc_contrast'] = (
            (shape_df['greatest_max'] - median_bg_list) * 100 / median_bg_list)

        shape_df.loc[shape_df.perc_contrast <= 0, 'validity'] = False
        shape_df.loc[shape_df.cv_bg > cv_cutoff, 'validity'] = False
        shape_df.loc[shape_df.intensity_increase < 40, 'validity'] = False

        # if len(shape_df) > 1:
        #     regression = smapi.OLS(shape_df.z_intensity, shape_df.perc_contrast).fit()
        #     outlier_df = regression.outlier_test()
        #     shape_df.loc[outlier_df['bonf(p)'] < 0.5, 'validity'] = False

        shape_df = vquant.remove_overlapping_objs(shape_df, radius=10)
        #---------------------------------------------------------------------------------------------#
        ##Filament Measurements
        shape_df['circularity'] = list(
            map(lambda A, P: round((4 * np.pi * A) / (perimeter(P)**2), 4),
                shape_df.area, shape_df.filled_image))

        shape_df['ellipticity'] = round(shape_df.major_axis_length /
                                        shape_df.minor_axis_length,
                                        4)  #max val = 1

        shape_df['eccentricity'] = shape_df.moments_central.map(
            vquant.eccentricity)

        shape_df = shape_df[(shape_df['filo_points'] +
                             shape_df['round_points']) >= 1]

        shape_df['filo_score'] = (
            (shape_df['filo_points'] / shape_df['area']) -
            (shape_df['round_points'] / shape_df['area']))
        shape_df['roundness_score'] = ((shape_df['round_points'] /
                                        shape_df['area']))
        #---------------------------------------------------------------------------------------------#
        filolen_df = pd.DataFrame([
            vfilo.measure_fiber_length(coords, spacing=spacing)
            for coords in shape_df.coords
        ],
                                  columns=['fiber_length', 'vertices'],
                                  index=shape_df.index)
        shape_df = pd.concat([shape_df, filolen_df], axis=1)

        shape_df['curl'] = (shape_df['major_axis_length'] *
                            spacing) / shape_df['fiber_length']

        total_particles = len(shape_df)
        shape_df['channel'] = ['V'] * total_particles

        valid_shape_df = shape_df[(shape_df['cv_bg'] < cv_cutoff)
                                  & (shape_df['validity'] == True)]
        total_valid_particles = len(valid_shape_df)
        #---------------------------------------------------------------------------------------------#

        #---------------------------------------------------------------------------------------------#

        kparticle_density = round(total_valid_particles / area_sqmm * 0.001, 2)

        if pass_num != first_scan:
            print("Particle density in {}: {} kp/sq.mm\n".format(
                img_name, kparticle_density))
        else:
            print("Background density in {}: {} kp/sq.mm\n".format(
                img_name, kparticle_density))

        # shape_df.reset_index(drop=True, inplace=True)

        total_shape_df = pd.concat([total_shape_df, shape_df],
                                   axis=0,
                                   sort=False)
        # total_shape_df.reset_index(drop=True, inplace=True)

        keep_data = [
            'label', 'area', 'centroid', 'pass_number', 'max_z_slice',
            'eccentricity', 'ellipticity', 'curl', 'circularity', 'validity',
            'z_intensity', 'perc_contrast', 'cv_bg', 'sd_above_med_difference',
            'fiber_length', 'filo_score', 'roundness_score', 'channel',
            'fl_intensity'
        ]

        vdata_dict.update({
            'total_valid_particles': total_valid_particles,
            'validity': validity
        })

        with open('{}/{}.vdata.txt'.format(vcount_dir, img_name), 'w') as f:
            for k, v in vdata_dict.items():
                f.write('{}: {}\n'.format(k, v))

    #---------------------------------------------------------------------------------------------#
        vgraph.gen_particle_image(pic_to_show,
                                  shape_df,
                                  spot_coords,
                                  pix_per_um=pix_per_um,
                                  show_particles=True,
                                  cv_cutoff=cv_cutoff,
                                  r2_cutoff=0,
                                  scalebar=15,
                                  markers=marker_locs,
                                  exo_toggle=exo_toggle)
        savefig('{}/{}.png'.format(img_dir, img_name), dpi=96)
        clf()
        close('all')
        print(
            "#******************PNG generated for {}************************#\n\n"
            .format(img_name))
    #---------------------------------------------------------------------------------------------#
    total_shape_df.to_csv('{}/{}.particle_data.csv'.format(
        vcount_dir, spot_ID),
                          columns=keep_data)
Example #19
        greycghg=feature.greycoprops(glcm, prop='homogeneity')/(256*256*9)  # size 1x5
        greycgcl=feature.greycoprops(glcm, prop='correlation')/(256*256*9)
        greycgeg=feature.greycoprops(glcm, prop='energy')/(256*256*9)
        greycgasm=feature.greycoprops(glcm, prop='ASM')/(256*256*9)
        greycgctt=feature.greycoprops(glcm, prop='contrast')/(256*256*9)


        lbp=feature.local_binary_pattern(imgrey, 8, np.pi/4)  # same size as the image
        plm=feature.peak_local_max(imgrey, min_distance=1)  # several coordinate pairs
        st=feature.structure_tensor(imgrey, sigma=1, mode='constant', cval=0)  # three arrays the size of the image
        ste=feature.structure_tensor_eigvals(st[0],st[1],st[2])  # two arrays the size of the image
#        hmf=feature.hessian_matrix(image, sigma=1, mode='constant',
#                                   cval=0, order=None)  # six 3-channel arrays the size of the original image
        hmd=feature.hessian_matrix_det(imgrey, sigma=1)  # array the size of the original image
#        hme=feature.hessian_matrix_eigvals(hmf, Hxy=None, Hyy=None)
        si=feature.shape_index(imgrey, sigma=1, mode='constant', cval=0)  # array the size of the original image
#        ckr=feature.corner_kitchen_rosenfeld(image, mode='constant', cval=0)  # 3-channel array the size of the original image
#        ch=feature.corner_harris(imgrey, method='k', k=0.05, eps=1e-06, sigma=1)  # array the size of the original image
#        cht=feature.corner_shi_tomasi(imgrey, sigma=1)  # array the size of the original image
#        cfs=feature.corner_foerstner(imgrey, sigma=1)  # two arrays the size of the original image
#        csb=feature.corner_subpix(image, ch, window_size=11, alpha=0.99)
        cps=feature.corner_peaks(imgrey, min_distance=1, threshold_abs=None,
                                 threshold_rel=0.1, exclude_border=True, indices=True,
                                 footprint=None, labels=None)  # a set of coordinates
#        cmr=feature.corner_moravec(imgrey, window_size=1)  # array the size of the original image
#        cft=feature.corner_fast(imgrey, n=12, threshold=0.15)  # array the size of the original image
        corners = feature.corner_peaks(feature.corner_fast(imgrey, 9), min_distance=1)  # a set of coordinates
        corts=feature.corner_orientations(imgrey, corners, octagon(3, 2))  # 1-D array of variable length
#        mtem=feature.match_template(image, template, pad_input=False,
#                                    mode='constant', constant_values=0)
#        bldg=feature.blob_dog(imgrey, min_sigma=1, max_sigma=50,
Example #20
def get_shape_index(img):
    img = io.imread(img, as_gray=True)
    shape_index = feature.shape_index(img)
    shape_index_1D = np.ravel(shape_index)
    avg_squares_shape_index = np.average(np.square(shape_index_1D))
    return avg_squares_shape_index
Example #21
            loc=1.0,
            scale=0.5,
            size=(cloud_noise_size, cloud_noise_size)
        ),
        image_size / cloud_noise_size
    )

    np.random.set_state(state)

    return ndi.gaussian_filter(image, sigma=2.0)

# First create the test image and its shape index

image = create_test_image()

s = shape_index(image)

# In this example we want to detect 'spherical caps',
# so we threshold the shape index map to
# find points which are 'spherical caps' (~1)

target = 1
delta = 0.05

point_y, point_x = np.where(np.abs(s - target) < delta)
point_z = image[point_y, point_x]

# The shape index map relentlessly produces the shape, even that of noise.
# In order to reduce the impact of noise, we apply a Gaussian filter to it
# before thresholding.
Example #22
def get_shape_index(img):
    shape_index = feature.shape_index(img)
    shape_index_1D = np.ravel(shape_index)
    avg_squares_shape_index = np.average(np.square(shape_index_1D))
    return avg_squares_shape_index
Example #23
def Shape_indexing_normalization(img, shape_indexing_sigma=2):
    surface = feature.shape_index(img, sigma=shape_indexing_sigma)
    surface = np.nan_to_num(surface, copy=True)
    surface = (exposure.equalize_hist(surface) * 255).astype(np.uint8)
    return (surface)
Example #24
    row, col = np.ogrid[:nrows,:ncols]
    width = col - xyr[0]
    height = row - xyr[1]
    rad = xyr[2] - 50
    disk_mask = (width**2 + height**2 > rad**2)
    full_mask = disk_mask# + marker_mask

#*********************************************************************************************#
    pic_maxmin_masked = np.ma.array(pic_maxmin, mask = full_mask).filled(fill_value = np.nan)

    with warnings.catch_warnings():
        ##RuntimeWarning ignored: invalid values are expected
        warnings.simplefilter("ignore")
        warnings.warn(RuntimeWarning)
        shapedex = shape_index(pic3D_rescale[5])
        shapedex = np.ma.array(shapedex,mask = full_mask).filled(fill_value = np.nan)
        # if pass_num > 1:
        #     shapedex= np.ma.array(shapedex,mask = particle_mask).filled(fill_value = -1)

    # vimage.gen_img(shapedex)
    shapedex_gauss = ndi.gaussian_filter(shapedex, sigma=1)

    pix_area = np.count_nonzero(np.invert(np.isnan(shapedex)))
    pix_per_um = mag/cam_micron_per_pix
    conv_factor = (cam_micron_per_pix / mag)**2
    area_sqmm = round((pix_area * conv_factor) * 1e-6, 6)



    # def classify_shape(shapedex, pic2D, shape, delta, intensity = 0.55, operator = 'greater'):
Example #25
    image *= ndi.zoom(
        np.random.normal(loc=1.0,
                         scale=0.5,
                         size=(cloud_noise_size, cloud_noise_size)),
        image_size / cloud_noise_size)

    np.random.set_state(state)

    return ndi.gaussian_filter(image, sigma=2.0)


# First create the test image and its shape index

image = create_test_image()

s = shape_index(image)

# In this example we want to detect 'spherical caps',
# so we threshold the shape index map to
# find points which are 'spherical caps' (~1)

target = 1
delta = 0.05

point_y, point_x = np.where(np.abs(s - target) < delta)
point_z = image[point_y, point_x]

# The shape index map relentlessly produces the shape, even that of noise.
# In order to reduce the impact of noise, we apply a Gaussian filter to it
# before thresholding.
Example #26
    well = all_wells[well_id]
    well_imgs = [f for f in fnames if well in f and 'ch2' not in f]

    gamma = 0.5
    pad = 5
    
    imgdata = []
    for i in range(1,5):
        wellpos = [f for f in well_imgs if 'f0' + str(i)+ 'p' in f]
        imgseries = load_image_series(path=platedir, imgfiles=wellpos)
        imgseries = imgseries.reshape((8, 4, 2160,2160))
        mipseries = np.amax(imgseries, axis=0)
        hoechst = mipseries[0]
        
        img_th = threshold_img(hoechst**gamma, method='otsu', binary=False)
        img_s = shape_index(img_th)
        img_enh = nantonum(img_s, pad=-1)
        # run blob detection on the shape-index enhanced image
        blobs = blob_log(img_enh,
                         min_sigma=10,
                         max_sigma=14,
                         threshold=0.05)
        if len(blobs):
            bbox = np.stack([np.array([bl[1] - bl[2] - pad,
                               bl[1] + bl[2] + pad,
                               bl[0] - bl[2] - pad,
                               bl[0] + bl[2] + pad]) for bl in blobs])
            imax = hoechst.shape[0] - 1
            bbox[bbox < 0] = 0
            bbox[bbox > imax ] = imax
            bbox = bbox.astype(int)