Example #1
def opencv_th_k_means_th(img, K=2, max_iter=10, epsilon=1.0, n_ripet=10):
    img = normalize(img)
    feat_ldg = img.reshape((-1, 1))
    label, center = k_means_clustering(feat_ldg, K, max_iter, epsilon, n_ripet)

    # Convert the centers back to uint8 and rebuild the quantized image
    center = np.uint8(center)
    res = center[label.flatten()]
    return np.array([res.reshape(img.shape), np.array(center), label])
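
A minimal usage sketch of the OpenCV call that `k_means_clustering` presumably wraps; the criteria tuple, number of attempts and flags below are assumptions, not taken from the project's helper.

# Sketch only: quantize a grayscale image into K intensity clusters with cv2.kmeans.
import cv2
import numpy as np

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)          # hypothetical test image
samples = img.reshape((-1, 1)).astype(np.float32)                  # cv2.kmeans wants float32 samples
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, label, center = cv2.kmeans(samples, 2, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
quantized = np.uint8(center)[label.flatten()].reshape(img.shape)   # same reshape trick as above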
Example #2
def old_opencv_threshold(img, thresh=None, max_value=255):
    if img.dtype != np.uint8:
        img = normalize(img)
    if thresh is not None:
        return (cv2.threshold(img, thresh, max_value,
                              cv2.THRESH_BINARY)[1]).astype(np.uint8)
    else:
        return (cv2.threshold(img, 0, 255, cv2.THRESH_BINARY +
                              cv2.THRESH_OTSU)[1]).astype(np.uint8)
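
A quick usage sketch on a synthetic uint8 image, assuming the function above is in scope; note that the Otsu branch hard-codes 255 and ignores `max_value`, exactly as in the original.

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)   # hypothetical test image
bw_fixed = old_opencv_threshold(img, thresh=128)             # fixed binary threshold at 128
bw_otsu = old_opencv_threshold(img)                          # threshold chosen by Otsu's method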
Example #3
def opencv_th_otsu_after_gauss_blur(img,
                                    max_value=255,
                                    block_size=(5, 5),
                                    sigmaX=0):
    if img.dtype != np.uint8:
        img = normalize(img)
    # blur before Otsu to suppress noise, then binarize
    _blur = cv2.GaussianBlur(img, block_size, sigmaX)
    return (cv2.threshold(_blur, 0, max_value, cv2.THRESH_BINARY +
                          cv2.THRESH_OTSU)[1]).astype(np.uint8)
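
Usage sketch (assumed, not from the project): `block_size` is the GaussianBlur kernel, and both of its dimensions must be odd positive integers.

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)              # hypothetical test image
bw = opencv_th_otsu_after_gauss_blur(img, block_size=(5, 5), sigmaX=0)  # blur, then Otsu binarization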
Example #4
def opencv_th_adaptive_gaussian(img, max_value=255, block_size=11):
    if img.dtype != np.uint8:
        img = normalize(img)
    return (cv2.adaptiveThreshold(img,
                                  max_value,
                                  cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY,
                                  blockSize=block_size,
                                  C=2)).astype(np.uint8)
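
Usage sketch (assumed): for cv2.adaptiveThreshold, `block_size` must be an odd integer greater than 1, since it defines the neighbourhood used to compute each local threshold.

img = np.random.randint(0, 256, (128, 128), dtype=np.uint8)   # hypothetical test image
bw = opencv_th_adaptive_gaussian(img, block_size=11)           # local Gaussian-weighted thresholds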
Example #5
def clahe(img, parameters, img_name, to_folder, _save=False):
    ksize = parameters['clahe_ksize']
    clip = parameters['clip_clahe']
    img = normalize(
        exposure.equalize_adapthist(img, clip_limit=clip, kernel_size=ksize))
    if _save:
        save_tiff(img=img,
                  img_name=img_name,
                  comment='eq',
                  folder_path=to_folder)
    return img
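
A minimal sketch of the underlying skimage call with made-up parameter values; `equalize_adapthist` returns a float image in [0, 1], which is why the project's `normalize` is applied afterwards.

from skimage import exposure
import numpy as np

img = np.random.randint(0, 256, (128, 128), dtype=np.uint8)             # hypothetical slice
eq = exposure.equalize_adapthist(img, kernel_size=32, clip_limit=0.03)  # CLAHE, float in [0, 1]
eq_uint8 = (eq * 255).astype(np.uint8)   # simple rescale standing in for the project's normalize()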
Example #6
def segment_by_masks(parser):
    print('START')
    args = parser.parse_args()
    if args.source_file_list is None or args.mask_file_list is None:
        print('You should select the Path of Source Images and masks Images.')
        parser.print_help()
        return

    source_data = make_dataset(args.source_file_list)
    print('source_file_list ', args.source_file_list)
    print('source_data.len() = ', len(source_data))

    mask_data = make_dataset(args.mask_file_list)
    print('mask_file_list ', args.mask_file_list)
    print('mask_data.len() = ', len(mask_data))

    dest_folder = args.output
    print('dest_folder ', dest_folder)

    imgs = create_stack(source_data)
    print('imgs.shape[2] = ', imgs.shape[2])

    masks = create_stack(mask_data)
    print('masks.shape[2] = ', masks.shape[2])

    if imgs is not None and masks is not None:

        if imgs.shape[2] != masks.shape[2]:
            print('Warning! Dimension of Images and masks mismatch.')

        else:
            # todo script
            print('write in {} \n'.format(dest_folder))

            segmented = np.zeros(imgs.shape)
            print('\n segmented shape: ', segmented.shape)
            print('imgs shape: ', imgs.shape)
            print('masks shape: ', masks.shape)

            for z in range(imgs.shape[2]):
                img = normalize(imgs[:, :, z])
                img_eq = exposure.equalize_adapthist(img, clip_limit=0.03)

                segmented[:, :, z] = img_eq * masks[:, :, z]

                save_tiff(img=segmented[:, :, z],
                          img_name=str(z),
                          prefix='s',
                          comment='dec_seg',
                          folder_path=dest_folder)
                print('img n° {}'.format(z))

            print(' \n ** Process Finished \n')
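
A hypothetical wiring sketch for the parser this function expects; the real flag spellings are defined elsewhere in the project, only the attribute names (`source_file_list`, `mask_file_list`, `output`) are taken from the code above.

import argparse

parser = argparse.ArgumentParser(description='Segment a stack of images using binary masks.')
parser.add_argument('-sfl', '--source_file_list', nargs='+', help='source image paths')
parser.add_argument('-mfl', '--mask_file_list', nargs='+', help='mask image paths')
parser.add_argument('-o', '--output', help='destination folder')

segment_by_masks(parser)   # parse_args() is called inside the function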
Example #7
def skimage_threshold(img, method='isodata'):
    if img.dtype != np.uint8:
        print('skimage_threshold NORM because received: ', img.dtype)
        img = normalize(img)

    thresh = {
        'otsu': filters.threshold_otsu(img),  # delivers very high threshold
        'yen': filters.threshold_yen(img),  # delivers even higher threshold
        'isodata': filters.threshold_isodata(img),  # works extremely well
    }.get(method, 0)  # 0 is the fallback for an unknown method name
    if thresh == 0:
        print(' ***  ERROR - skimage_threshold select THRESHOLD = 0  ***')
    return thresh, cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
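
A usage note rather than new behaviour: because the dict literal above is built eagerly, all three skimage thresholds are computed on every call even though only one is returned. A lazier dispatch, sketched here, maps names to the functions instead of to their results (`filters` and `img` assumed in scope as above).

methods = {
    'otsu': filters.threshold_otsu,
    'yen': filters.threshold_yen,
    'isodata': filters.threshold_isodata,
}
fn = methods.get('isodata')                # pick only the requested method
thresh = fn(img) if fn is not None else 0  # compute only that threshold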
Example #8
def create_surrounded_images(BW_mask,
                             img_eq,
                             img_name,
                             to_countourned_folder,
                             _save=False):
    # draw the contour of the mask over the original image
    contours = (cv2.findContours(BW_mask.copy(), cv2.RETR_TREE,
                                 cv2.CHAIN_APPROX_NONE))[1]

    # prepare image
    img_eq_with_contour = normalize(img_eq.copy())

    # draw contour
    cv2.drawContours(img_eq_with_contour, contours, -1, 255, 1)

    if _save:
        save_tiff(img=img_eq_with_contour,
                  img_name=img_name,
                  comment='cont',
                  folder_path=to_countourned_folder)
    return contours
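
The `[1]` index above assumes OpenCV 3.x, where findContours returns (image, contours, hierarchy); on OpenCV 4.x it returns only (contours, hierarchy), so `[1]` would pick the hierarchy instead. A version-tolerant variant, as a sketch (`BW_mask` being a binary uint8 mask as in the function above):

found = cv2.findContours(BW_mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = found[0] if len(found) == 2 else found[1]   # OpenCV 4.x vs 3.x return shapes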
Example #9
def main(parser):
    ''' Docstring (TODO) parameters '''

    # ====== 0 ====== Initial operations
    # TODO: extract the 'verbose' bool from the command-line parameters
    verbose = False

    # read args from console
    args = parser.parse_args()
    source_path = manage_path_argument(args.source_folder)

    base_path = os.path.dirname(os.path.dirname(source_path))
    stack_name = os.path.basename(source_path)

    # Def .txt filepath
    txt_parameters_path = os.path.join(base_path, 'parameters.txt')
    txt_results_path = os.path.join(base_path, 'GAMMA_orientation_results.txt')

    Results_filename = 'orientation_Results.npy'
    Results_filepath = os.path.join(base_path, Results_filename)

    # print to video and write in results.txt init message
    init_message = [
        ' *****************   GAMMA - Orientation analysis of 3D stack   *****************\n'
        ' Source from path : {}'.format(source_path),
        ' Base path : {}'.format(base_path),
        ' Stack : {}'.format(stack_name),
    ]

    with open(txt_results_path, 'w') as f:
        for line in init_message:
            print(line)
            f.write(line + '\n')

    # reads parameters
    parameters = extract_parameters(txt_parameters_path)

    # analysis block dimension in z-axis
    num_of_slices_P = parameters['num_of_slices_P']

    # Parameters of Acquisition System:
    res_z = parameters['res_z']
    res_xy = parameters['res_xy']
    resolution_factor = res_z / res_xy
    block_side = row_P = col_P = int(num_of_slices_P * resolution_factor)
    shape_P = np.array((row_P, col_P, num_of_slices_P)).astype(np.int32)
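    # Worked example with hypothetical numbers: res_z = 2.0 um, res_xy = 0.5 um and
    # num_of_slices_P = 8 give resolution_factor = 4 and block_side = 32, so every
    # analysis block is 32 x 32 x 8 voxels, i.e. roughly an isotropic 16 um cube.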
    """ =====================================================================================
    __________________________  -1-  OPEN STACK _______________________________________________"""

    loading_mess = list()
    loading_mess.append(
        ' ***** Start loading the Stack, this may take a few minutes... ')

    # extract data (OLD METHOD)
    volume = load_tif_data(source_path)
    if len(volume.shape) == 2:
        volume = np.expand_dims(volume, axis=2)  # add the zeta axis

    # extract stack (NEW METHOD)-------------------
    # infile = InputFile(source_path) # read virtual file (shape, dtype, etc.)
    # volume = infile.whole() # load real images in RAM
    # # move axes from (z, y, x) -> to (r, c, z)=(y, x, z) [S.R. ZetaStitcher -> Strip Analysis]
    # volume = np.moveaxis(volume, 0, -1)
    # ---------------------------------------------

    loading_mess.append(' - Volume shape : {}'.format(volume.shape))
    with open(txt_results_path, 'a') as f:
        for m in loading_mess:
            print(m)
            f.write(m + '\n')

    # calculate dimension
    shape_V = np.array(volume.shape)
    """ =====================================================================================
    ________________________  -2-   CYCLE FOR BLOCKS EXTRACTION and ANALYSIS __________________"""
    t_start = time.time()

    # create empty Result matrix
    R, shape_R = create_R(shape_V, shape_P)

    # create 3D filter
    mask = create_3D_filter(block_side, res_xy, parameters['sarc_length'])

    count = 1  # count iteration
    total_iter = np.prod(shape_R)
    print(
        '\n \n ***** Start iteration of analysis, expected iterations : {} \n'.
        format(total_iter))

    with open(txt_results_path, 'a') as f:
        for z in range(shape_R[2]):
            for r in range(shape_R[0]):
                for c in range(shape_R[1]):
                    # initialize list of string lines
                    lines = []

                    start_coord = create_coord_by_iter(r, c, z, shape_P)
                    slice_coord = create_slice_coordinate(start_coord, shape_P)
                    if verbose: lines.append('\n \n')
                    lines.append(
                        '- iter: {} - init_coord : {} - on total: {}'.format(
                            count, start_coord, total_iter))

                    # save init info in R
                    R[r, c, z]['id_block'] = count
                    R[r, c, z]['init_coord'] = start_coord

                    # extract parallelepiped
                    parall = volume[slice_coord]

                    # check dimension (if iteration is on border of volume, add zero_pad)
                    parall = pad_dimension(parall, shape_P)

                    if np.max(parall) != 0:
                        parall = (normalize(parall)).astype(
                            np.float32)  # fft analysis works with float

                        # analysis of parallelepiped extracted
                        there_is_cell, there_is_freq, results = block_analysis(
                            parall, shape_P, parameters, block_side, mask,
                            verbose, lines)
                        # save info in R[r, c, z]
                        if there_is_cell: R[r, c, z]['cell_info'] = True
                        if there_is_freq: R[r, c, z]['freq_info'] = True

                        # save results in R
                        for key in results.keys():
                            R[r, c, z][key] = results[key]

                    else:
                        if verbose: lines.append('   block rejected')

                    for l in lines:
                        print(l)
                    count += 1

    # execution time
    (h, m, s) = seconds_to_min_sec(time.time() - t_start)
    print('\n Iterations ended successfully \n')
    """ =====================================================================================
        ________________________  -3-   RESULTS ANALYSIS   __________________________________"""

    post_proc_mess = list()

    # count results, rejected and accepted blocks
    block_with_cell = np.count_nonzero(R['cell_info'])
    block_with_peak = np.count_nonzero(R['freq_info'])
    p_rejec_cell = 100 * (1 - block_with_cell / count)
    p_rejec_freq_tot = 100 * (1 - block_with_peak / count)
    p_rejec_freq = 100 * (1 - block_with_peak / block_with_cell)

    post_proc_mess.append(
        '\n ***** End of iterations, time of execution: {0:2d}h {1:2d}m {2:2d}s \n'
        .format(int(h), int(m), int(s)))
    post_proc_mess.append('\n - Expected iterations : {}'.format(total_iter))
    post_proc_mess.append(' - Total iterations : {}'.format(count - 1))
    post_proc_mess.append(
        '\n - block with cell : {}, rejected from total: {} ({}%)'.format(
            block_with_cell, count - block_with_cell, p_rejec_cell))
    post_proc_mess.append(
        ' - block with freq. info : {}'
        '\n    rejected from total: {} ({}%)'
        '\n    rejected from block with cell: {} ({}%)'.format(
            block_with_peak, count - block_with_peak, p_rejec_freq_tot,
            block_with_cell - block_with_peak, p_rejec_freq))
    with open(txt_results_path, 'a') as f:
        for m in post_proc_mess:
            print(m)
            f.write(m + '\n')

    post_proc_mess = list()

    # threshold results on frequency validation parameter and save matrix
    mess = '\n \n *** Analysis of Results : remove blocks with low frequency reliability \n'
    post_proc_mess.append(mess)
    print(mess)

    # - 1 normalization of psd_ratio values
    R = parameter_normalizer(R, 'psd_ratio')
    mess = '- 1 - Normalization on \'psd_ratio\': complete'
    post_proc_mess.append(mess)
    print(mess)

    # - 2 thresholding on psd_ratio values
    R, before, after = threshold_par(R, parameters, 'psd_ratio')
    mess = '- 2 - First thresholding based on PSD Information: selected {} blocks from {}'.format(
        after, before)
    post_proc_mess.append(mess)
    print(mess)

    # - 3 outlier removal based on orientation and psd_ratio values - NOT EXECUTED
    mess = '-*** NO Outlier Removal based on PSD Information.'
    post_proc_mess.append(mess)
    print(mess)

    # save Result matrix
    np.save(Results_filepath, R)

    # - 4 Estimate and write local disorder inside Result Matrix
    R, shape_LD, isolated_value = estimate_local_disorder(
        R, parameters, resolution_factor)
    mess = '- 4 - Local Disorder estimated inside result Matrix, with grain (r, c, z): ({}, {}, {}) ' \
           'and isolated points set to local_disorder = {}'\
            .format(shape_LD[0], shape_LD[1], shape_LD[2], isolated_value)
    post_proc_mess.append(mess)
    print(mess)

    # if everything works, keep only one version of R
    Results_filename = 'orientation_Results_after_disorder.npy'
    Results_filepath = os.path.join(base_path, Results_filename)
    np.save(Results_filepath, R)

    with open(txt_results_path, 'a') as f:
        for m in post_proc_mess:
            f.write(m + '\n')
    del post_proc_mess
    """ =====================================================================================
            ________________________  -4-   STATISTICS   __________________________________"""

    stat = statistics(R, parameters)
    result_mess = list()
    result_mess.append(
        '\n \n *** Results of statistical analysis on accepted points: \n')
    result_mess.append(' - {0} : {1:.3f} um^(-1)'.format(
        'Mean module', stat['Mean Module']))
    result_mess.append(' - {0} : {1:.3f} um'.format('Mean Period',
                                                    stat['Mean Period']))
    result_mess.append(' - {0} : {1:.3f} % '.format('Alignment',
                                                    100 * stat['Alignment']))
    result_mess.append(' - {0} : {1:.3f} % '.format(
        'XZ Dispersion (area_ratio)', 100 * stat['area_ratio']))
    result_mess.append(' - {0} : {1:.3f} % '.format(
        'XZ Dispersion (sum dev.std)', 100 * stat['sum_std']))
    result_mess.append(
        ' \n \n ***************************** END GAMMA - orientation_analysis.py ********************'
        '\n \n \n \n ')

    with open(txt_results_path, 'a') as f:
        for l in result_mess:
            print(l)
            f.write(l + '\n')
Example #10
def opencv_th_otsu(img, max_value=255):
    if img.dtype != np.uint8:
        img = normalize(img)
    return (cv2.threshold(img, 0, max_value, cv2.THRESH_BINARY +
                          cv2.THRESH_OTSU)[1]).astype(np.uint8)
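
Usage sketch: with THRESH_OTSU the first value returned by cv2.threshold is the threshold Otsu actually selected, which the wrapper above discards; calling cv2.threshold directly recovers it.

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)          # hypothetical test image
otsu_t, bw = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print('Otsu chose threshold', otsu_t)
bw_wrapper = opencv_th_otsu(img)                                    # same mask via the wrapper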
Example #11
def main(parser):

    # --- PRELIMINARY OPERATIONS --------------------------------------------------------

    # read args from console
    args = parser.parse_args()
    source_path = manage_path_argument(args.source_folder)

    # take base path and stack name
    base_path = os.path.dirname(os.path.dirname(source_path))
    stack_name = os.path.basename(source_path)

    # Def .txt filepath
    txt_parameters_path = os.path.join(base_path, 'parameters.txt')
    txt_results_path = os.path.join(base_path, 'ALFA_volume_results.txt')
    txt_metadata_path = os.path.join(base_path, 'metadata.txt')

    process_names = [
        'clahe', 'mask_bin', 'segmented', 'contourned', 'contours'
    ]

    # create destination paths where save images
    destination_paths = []
    for process in process_names:
        destination_paths.append(os.path.join(base_path, process, stack_name))

    # if those folders do not exist, this creates them
    for path in destination_paths:
        if not os.path.exists(path):
            os.makedirs(path)

    # --- SCRIPT ---------------------------------------------------------------

    # reads parameters
    parameters = extract_parameters(txt_parameters_path)

    # read user preference (if save images or not)
    _save_clahe = bool(parameters['save_clahe'])
    _save_binary_mask = bool(parameters['save_binary_mask'])
    _save_segmented = bool(parameters['save_segmented'])
    _save_countourned = bool(parameters['save_countourned'])
    _save_contours = bool(parameters['save_contours'])

    # print to video and write in results.txt init message
    init_message = [
        ' Source from path : {} \n'.format(base_path),
        ' Stack : {} \n'.format(stack_name),
        '\n\n - Start loading the Stack, this may take a few minutes... '
    ]
    error_message = '\n *** ERROR *** : stack in this path is None'

    with open(txt_results_path, 'w') as f:
        for line in init_message:
            print(line, end='')
            f.write(line + '\n')

    # measure units
    x_step = parameters['res_xy']  # micron
    y_step = parameters['res_xy']  # micron
    z_step = parameters['res_z']  # micron
    voxel_in_micron3 = x_step * y_step * z_step  # micron^3

    # extract stack (OLD METHOD)
    # volume, mess = load_stack_into_numpy_ndarray([source_path])
    # img_shape = volume[:, :, 0].shape

    # extract stack (NEW METHOD)---------------
    volume = load_tif_data(source_path)
    if len(volume.shape) == 2:
        volume = np.expand_dims(volume, axis=2)  # add the zeta axis
    img_shape = (volume.shape[0], volume.shape[1])
    # ---------------------------------------------

    print(' Images shape : ', img_shape, '\n')

    # measure of imaging volume
    area_of_slice = img_shape[0] * img_shape[1]
    number_of_slices = volume.shape[2]

    # Estimated Volume
    total_imaging_volume = area_of_slice * number_of_slices  # total volume, from z=0 to z maximum

    # Initializing to zero the Volume counter
    effective_myocites_volume = 0  # estimated volume of myocytes (sum of the areas of real cells)

    # Boolean vector with length = number_of_slices.
    # The i-th element is:
    # True  - if the i-th slice has info
    # False - if the i-th slice is empty
    slices_info = np.zeros(number_of_slices).astype(
        bool)  # created with all False values.

    # save elaboration time for each iteration (to estimate the mean elaboration time)
    slice_elab_time_list = list()
    t_start = time.time()

    print(' EQUALIZATION and SEGMENTATION of every frame:')

    with open(txt_results_path, 'a') as f:
        if volume is not None:

            for z in range(number_of_slices):

                # extract current slice
                img = volume[:, :, z]
                img_name = create_img_name_from_index(z)  # img_name.tif

                #  check if img is empty (too black)
                if image_have_info(img, parameters['t_rate_info']):
                    elab_start = time.time()

                    slices_info[z] = True
                    img = normalize(img)

                    equalized_img = clahe(img,
                                          parameters,
                                          img_name,
                                          destination_paths[0],
                                          _save=_save_clahe)

                    bw_mask, pixels_of_real_cells = create_byn_mask(
                        equalized_img,
                        parameters,
                        img_name,
                        destination_paths[1],
                        _save=_save_binary_mask)

                    effective_myocites_volume += pixels_of_real_cells

                    create_and_save_segmented_images(bw_mask,
                                                     equalized_img,
                                                     img_name,
                                                     destination_paths[2],
                                                     _save=_save_segmented)

                    contours = create_surrounded_images(
                        bw_mask,
                        equalized_img,
                        img_name,
                        destination_paths[3],
                        _save=_save_countourned)

                    if _save_contours:
                        # save image with only contours for fiji visualization:
                        save_contours(img_name, contours, bw_mask.shape,
                                      destination_paths[4])

                    slice_elab_time_list.append(time.time() - elab_start)
                    elapsed_time = (number_of_slices - z -
                                    1) * np.mean(slice_elab_time_list)
                    (h, m, s) = seconds_to_min_sec(elapsed_time)

                    measure = ' - {0} --> {1:.1f} um^3   -   ET: {2:2d}h {3:2d}m {4:2d}s'.format(
                        img_name, pixels_of_real_cells * voxel_in_micron3,
                        int(h), int(m), int(s))

                else:
                    # black = np.zeros(img.shape)
                    # for path in destination_paths:
                    #     save_tiff(img=black, img_name=img_name, comment='empty', folder_path=path)
                    measure = ' - {} is black, rejected'.format(img_name)

                print(measure)
                f.write(measure + '\n')

            # execution time
            (h, m, s) = seconds_to_min_sec(time.time() - t_start)

            # Num of saved slices
            saved = np.count_nonzero(slices_info)

            # Num of empty slice on top = Index of first slice with info
            empty_on_top = np.where(slices_info == True)[0][0]

            # Num of empty slice on bottom = Index of first slice with info, searching from z=z_max to z=0
            empty_on_bottom = np.where(np.flipud(slices_info) == True)[0][0]

            # volumes in micron^3
            total_imaging_volume_in_micron = total_imaging_volume * voxel_in_micron3
            effective_volume_in_micron3 = effective_myocites_volume * voxel_in_micron3

            # in percentage
            myocites_perc = 100 * effective_myocites_volume / total_imaging_volume

            result_message = list()
            result_message.append(
                '\n ***  Process successfully completed, time of execution: {0:2d}h {1:2d}m {2:2d}s \n'
                .format(int(h), int(m), int(s)))
            result_message.append(' Number of saved slices: {}'.format(saved))
            result_message.append(
                ' Number of rejected slices (because empty) on the top of Volume: {}'
                .format(empty_on_top))
            result_message.append(
                ' Number of rejected slices (because empty) on the bottom of Volume: {}'
                .format(empty_on_bottom))
            result_message.append(
                ' Total Imaging volume : {0:.6f} mm^3'.format(
                    total_imaging_volume_in_micron / 10**9))
            result_message.append(
                ' Effective myocyte tissue volume : {0:.6f} mm^3, {1:.3f}% of Imaging volume'
                .format(effective_volume_in_micron3 / 10**9, myocites_perc))
            result_message.append('\n')
            result_message.append(' OUTPUT SAVED IN: \n')

            for path in destination_paths:
                result_message.append(path)

            # write and print results
            for l in result_message:
                print(l)
                f.write(l + '\n')

            # write metadata
            with open(txt_metadata_path, 'w') as m:
                m.write('empty_on_top = {}\n'.format(empty_on_top))
                m.write('empty_on_bottom = {}\n'.format(empty_on_bottom))
                m.write('saved = {}'.format(saved))

        else:
            print(error_message)
            f.write(error_message)

        print(' \n \n \n ')