Code example #1
def main():
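    """Copy a stack (optionally one channel, args.c) into a TIFF in chunks.

    A sketch of the pattern: the part count n is chosen so that each chunk
    fits in roughly 90% of the currently available RAM, and chunks are
    written with append=True (BigTIFF when the data exceed 2 GiB).
    InputFile, parse_args and logger are assumed project-local helpers;
    numpy (np), psutil, os and tifffile (tiff) imports are assumed.
    """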
    args = parse_args()

    infile = InputFile(args.input_file)
    infile.channel = args.c

    # np.asscalar was removed in NumPy 1.23; int() accepts NumPy scalars
    total_byte_size = int(np.prod(infile.shape)) * infile.dtype.itemsize
    bigtiff = total_byte_size > 2 ** 31 - 1

    n = int(total_byte_size // (psutil.virtual_memory().available * 0.9)) + 1

    try:
        os.remove(args.output_file)
    except FileNotFoundError:
        pass

    curr_z = 0
    part_height = infile.nfrms // n
    end_loop = False

    while True:
        end_z = curr_z + part_height
        if end_z >= infile.shape[0]:
            end_z = infile.shape[0]
            end_loop = True

        logger.info('loading \tz=[{}:{}]'.format(curr_z, end_z))
        img = infile[curr_z:end_z]

        logger.info('saving to {}'.format(args.output_file))
        tiff.imsave(args.output_file, img, append=True, bigtiff=bigtiff)

        curr_z = end_z
        if end_loop:
            break
Code example #2
def main():
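    """Load a whole stack, reverse it along z, and save it as a TIFF.

    coloredlogs, InputFile, parse_args, logger and tifffile (tiff) are
    assumed to be imported or defined elsewhere in the module.
    """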
    coloredlogs.install(level='INFO',
                        fmt='%(levelname)s [%(name)s]: %(message)s')

    args = parse_args()

    logger.info('loading {}'.format(args.input_file))
    infile = InputFile(args.input_file)

    a = infile.whole()
    a = a[::-1]  # reverse the stack along the z axis

    logger.info('writing to {}'.format(args.output_file))
    tiff.imsave(args.output_file, a)
Code example #3
def main():
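    """Compute a maximum-intensity projection (MIP) over z, chunk by chunk.

    The running MIP is updated with np.maximum() so that only one chunk
    (sized to about half of the available RAM) is resident at a time; for
    multichannel data the channel axis is moved last before the final save.
    InputFile, parse_args and logger are assumed project helpers; numpy,
    psutil, math.ceil, os and tifffile imports are assumed.
    """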
    args = parse_args()

    infile = InputFile(args.input_file)

    if args.channel is not None:
        infile.channel = args.channel

    total_byte_size = int(np.prod(infile.shape)) * infile.dtype.itemsize
    bigtiff = total_byte_size > 2 ** 31 - 1

    ram = psutil.virtual_memory().available * 0.5
    n_of_parts = ceil(total_byte_size / ram)

    try:
        os.remove(args.output_file)
    except FileNotFoundError:
        pass

    curr_z = 0
    part_height = infile.nfrms // n_of_parts
    end_loop = False

    mip = np.zeros(infile.shape[1:], dtype=infile.dtype)
    tiff.imsave(args.output_file, mip)  # write an initial, all-zero output file

    while True:
        end_z = curr_z + part_height
        if end_z >= infile.shape[0]:
            end_z = infile.shape[0]
            end_loop = True

        logger.info('loading \tz=[{}:{}]'.format(curr_z, end_z))
        img = infile[curr_z:end_z]

        logger.info('Computing MIP...')
        mip = np.maximum(np.max(img, axis=0), mip)

        del img

        curr_z = end_z
        if end_loop:
            break

    logger.info('saving to {}'.format(args.output_file))
    if args.channel is None and infile.nchannels > 1:
        mip = np.moveaxis(mip, -3, -1)
    tiff.imsave(args.output_file, mip)
Code example #4
def main():
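    """Split a stack into one TIFF per frame with zero-padded filenames.

    For multichannel input without a channel selection, the channel axis is
    moved last before saving. InputFile, parse_args and logger are assumed
    project helpers; math, os, numpy and tifffile imports are assumed (the
    compress= keyword targets an older tifffile API).
    """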
    args = parse_args()

    infile = InputFile(args.input_file)
    if args.channel != -1:
        infile.channel = args.channel

    os.makedirs(args.output_directory, exist_ok=True)

    n_of_digits = math.ceil(math.log10(infile.nfrms))
    output_filename_fmt = '{:0' + str(n_of_digits) + '}.tiff'

    for z in range(infile.nfrms):
        a = infile[z]
        if infile.nchannels != 1 and infile.channel == -1:
            a = np.moveaxis(a, -3, -1)
        output_filename = os.path.join(args.output_directory,
                                       output_filename_fmt.format(z))
        logger.info('saving to {}'.format(output_filename))
        tiff.imsave(output_filename, a, compress=args.compression)
Code example #5
def main():
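    """Export frames z in [zmin, zmax] as individual prefixed TIFF files.

    The zero-padding width is derived from the stack height. InputFile,
    parse_args and logger are assumed project helpers; math, os and tifffile
    imports are assumed.
    """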
    args = parse_args()

    logger.info(args.input_file)
    infile = InputFile(args.input_file)

    os.makedirs(args.output_dir, exist_ok=True)

    n_of_digits = math.ceil(math.log10(infile.shape[0]))
    fmt = '{}{:0' + str(n_of_digits) + '}.tiff'

    for z in range(args.zmin, args.zmax + 1):
        fname = os.path.join(args.output_dir, fmt.format(args.prefix, z))
        logger.info(fname)
        tiff.imsave(fname, infile[z])
Code example #6
def main():
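    """Apply a virtual H&E colouring to a multichannel stack, in chunks.

    Each chunk is normalised to [0, 1] and the first two channels are mapped
    to three RGB absorbances with fixed Beer-Lambert-style coefficients
    (the coefficient values are taken as given here); the result is rescaled
    to uint8 and appended to the output TIFF. Project helpers (InputFile,
    parse_args, logger) and numpy/psutil/os/tifffile imports are assumed.
    """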
    args = parse_args()

    infile = InputFile(args.input_file)

    total_byte_size = int(np.prod(infile.shape)) * infile.dtype.itemsize
    bigtiff = total_byte_size > 2 ** 31 - 1

    try:
        os.remove(args.output_file)
    except FileNotFoundError:
        pass

    curr_z = 0
    part_height = infile.nfrms // args.n
    end_loop = False

    while True:
        end_z = curr_z + part_height
        if end_z >= infile.shape[0]:
            end_z = infile.shape[0]
            end_loop = True

        logger.info('loading \tz=[{}:{}]'.format(curr_z, end_z))
        img = infile[curr_z:end_z]
        img = img.astype(np.float32) / 255
        img_he = np.zeros_like(img)

        logger.info('applying HE coloring...')
        idx_0 = np.index_exp[:, 0, ...]
        idx_1 = np.index_exp[:, 1, ...]
        idx_2 = np.index_exp[:, 2, ...]
        img_he[idx_0] = np.power(10, -(0.644 * img[idx_1] + 0.093 * img[idx_0]))
        img_he[idx_1] = np.power(10, -(0.717 * img[idx_1] + 0.954 * img[idx_0]))
        img_he[idx_2] = np.power(10, -(0.267 * img[idx_1] + 0.283 * img[idx_0]))

        logger.info('saving to {}'.format(args.output_file))
        if infile.nchannels > 1:
            img_he = np.moveaxis(img_he, -3, -1)
        tiff.imsave(args.output_file, (255 * img_he).astype(np.uint8),
                    append=True, bigtiff=bigtiff)

        curr_z = end_z
        if end_loop:
            break
Code example #7
def main():
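    """Sum all channels of a multichannel stack into one uint16 channel.

    Chunks are sized to about 25% of the available RAM; the BigTIFF decision
    is based on the 2-byte-per-voxel output size rather than the input size.
    Project helpers (InputFile, parse_args, logger) and numpy, psutil, os
    and tifffile imports are assumed.
    """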
    args = parse_args()

    infile = InputFile(args.input_file)

    if infile.nchannels <= 1:
        raise ValueError('Not enough channels: {}'.format(infile.nchannels))

    # input
    total_byte_size = int(np.prod(infile.shape)) * infile.dtype.itemsize
    n = int(total_byte_size // (psutil.virtual_memory().available * 0.25)) + 1

    # output
    total_byte_size = infile.nfrms * infile.ysize * infile.xsize * 2
    bigtiff = total_byte_size > 2**31 - 1

    try:
        os.remove(args.output_file)
    except FileNotFoundError:
        pass

    curr_z = 0
    part_height = infile.nfrms // n
    end_loop = False

    while True:
        end_z = curr_z + part_height
        if end_z >= infile.shape[0]:
            end_z = infile.shape[0]
            end_loop = True

        logger.info('loading \tz=[{}:{}]'.format(curr_z, end_z))
        img = infile[curr_z:end_z]

        img = np.sum(img, axis=1).astype(np.uint16)  # sum the channel axis into a single uint16 channel

        logger.info('saving to {}'.format(args.output_file))
        tiff.imsave(args.output_file, img, append=True, bigtiff=bigtiff)

        curr_z = end_z
        if end_loop:
            break
Code example #8
def main():
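    """Repack a tar archive of single 2D images into multi-page TIFFs.

    Output filenames are taken from a reference folder matching
    '<i>x_*.tiff'; the archive is unpacked to a temporary directory
    (/mnt/ramdisk when present) and one worker thread per CPU assembles and
    writes each output stack. Project helpers (InputFile, parse_args,
    logger) and tarfile, glob, re, os, shutil, tempfile, queue, threading,
    numpy, imageio and tifffile imports are assumed.
    """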
    def worker():
        while True:
            got = q.get()

            if got is None:
                return

            start, ofname = got

            a = np.zeros((stack_shape[0], stack_shape[-2], stack_shape[-1]),
                         dtype=np.uint8)
            for j in range(nfrms):
                a[j] = imageio.imread(names[start + j])

            logger.info('Writing {}'.format(ofname))
            tiff.imsave(ofname, a, compress=args.compression)

    args = parse_args()

    tar = tarfile.open(args.input_file)
    tar_outer_dir = tar.firstmember.name

    logger.info('Getting names of files in reference folder...')
    files = glob.glob(os.path.join(args.reference_directory, '*x_*.tiff'))

    d = {}

    for f in files:
        m = re.search(r'^(\d+)x_.*', os.path.basename(f))
        i = int(m.group(1))
        d[i] = f

    tmproot = '/mnt/ramdisk'
    tmproot = tmproot if os.path.exists(tmproot) else None
    logger.info('Unpacking archive to temporary directory...')
    with tempfile.TemporaryDirectory(dir=tmproot) as td:
        shutil.unpack_archive(args.input_file, td)

        names = sorted(glob.glob(os.path.join(td, tar_outer_dir, '*')))

        stack_shape = InputFile(files[0]).shape
        nfrms = stack_shape[0]
        if len(names) != len(d) * nfrms:
            raise RuntimeError('Different number of items in tar archive ({}) '
                               'and in reference folder {} (nfrms: {})'.format(
                                   len(names), len(d), nfrms))

        os.makedirs(args.output_directory, exist_ok=True)
        i = 0
        q = queue.Queue(maxsize=os.cpu_count())
        threads = []

        for _ in range(q.maxsize):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        for _, reference_file in sorted(d.items()):
            output_filename = os.path.join(args.output_directory,
                                           os.path.basename(reference_file))
            q.put((i * nfrms, output_filename))
            i += 1

        for _ in threads:
            q.put(None)

        for t in threads:
            t.join()
Code example #9
def convert_to_jp2ar(input_data, output_dir, compression, nthreads,
                     temp_dir=None, output_file=None):
    """

    Parameters
    ----------
    input_data : str (filename) or object implementing shape and __getitem__
    output_dir : str
        can be None if output_file is specified
    compression
    nthreads
    temp_dir
    output_file : str
        must be specified if input_file is not a string
    """
    def waiter():
        while True:
            thr = thread_q.get()
            if thr is None:
                break
            thr.join()

    def save_file(arr, zf, full_path, compression):
        glymur.Jp2k(full_path, data=arr, cratios=[compression])
        with tar_lock:
            zf.write(full_path, arcname=os.path.split(full_path)[1])
        os.remove(full_path)

    thread_q = Queue(nthreads)

    if isinstance(input_data, str):
        out_file_name = os.path.split(input_data)[1]
        out_file_name = '{}.zip'.format(os.path.splitext(out_file_name)[0])
        out_file_name = os.path.join(output_dir, out_file_name)
        os.makedirs(output_dir, exist_ok=True)
        input_data = InputFile(input_data)
    else:
        out_file_name = output_file

    w = threading.Thread(target=waiter)
    w.start()

    tmp_dir = temp_dir  # avoid shadowing the built-in dir()
    if tmp_dir is None and os.path.exists(RAMDISK_PATH):
        tmp_dir = RAMDISK_PATH

    zf = zipfile.ZipFile(out_file_name, mode='w',
                         compression=zipfile.ZIP_STORED)

    tar_lock = threading.Lock()
    n_of_digits = math.ceil(math.log10(input_data.shape[0]))
    fmt = '{:0' + str(n_of_digits) + '}.jp2'

    with tempfile.TemporaryDirectory(dir=tmp_dir) as td:
        for k in range(0, input_data.shape[0]):
            fname = fmt.format(k)
            full_path = os.path.join(td, fname)
            a = input_data[k]  # read frame
            if k % 100 == 0:
                logger.info('JPEG2000 Progress: {:.2f}%'.format(
                    k / input_data.shape[0] * 100))

            t = threading.Thread(target=save_file,
                                 args=(a, zf, full_path, compression))
            t.start()
            thread_q.put(t)

        thread_q.put(None)
        w.join()
    zf.close()
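
A minimal usage sketch for this helper (the filename and settings below are hypothetical; glymur, zipfile, tempfile, threading, math and os imports, plus the module-level RAMDISK_PATH, InputFile and logger names used above, are assumed):

# hypothetical call: pack 'stack.tiff' into 'jp2_out/stack.zip' as per-frame
# JPEG2000 images at a 10:1 compression ratio, using 4 writer threads
convert_to_jp2ar('stack.tiff', output_dir='jp2_out', compression=10,
                 nthreads=4)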
Code example #10
def main(parser):
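    """Run the full structure tensor orientation analysis pipeline.

    The steps: parse arguments and a parameters .txt file, load the whole
    volume, run block-wise orientation analysis into a result matrix R,
    estimate local disarray and fractional anisotropy, then save R, the
    derived matrices and the optional CSV/histogram/map outputs. Helpers
    such as manage_path_argument, search_value_in_txt, create_R,
    iterate_orientation_analysis, estimate_local_disarray, statistics_base,
    compile_results_strings, plot_histogram, plot_map_and_save,
    write_on_txt and the Bcolors/Mode/Param/CONST names are assumed
    project-local.
    """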

    ### Extract input information from the terminal ==================
    args = parser.parse_args()
    source_path = manage_path_argument(args.source_path)
    param_filename = args.parameters_filename[0]

    # preferences
    _verbose = args.verbose
    _deep_verbose = args.deep_verbose
    _save_csv = args.csv
    _save_hist = args.histogram
    _save_maps = args.maps

    if _verbose:
        print(Bcolors.FAIL + ' *** VERBOSE MODE *** ' + Bcolors.ENDC)
    if _deep_verbose:
        print(Bcolors.FAIL + ' *** DEBUGGING MODE *** ' + Bcolors.ENDC)
    ### ===============================================================

    # extract filenames and folder names
    stack_name = os.path.basename(source_path)
    process_folder = os.path.basename(os.path.dirname(source_path))
    base_path = os.path.dirname(os.path.dirname(source_path))
    param_filepath = os.path.join(base_path, process_folder, param_filename)
    stack_prefix = stack_name.split('.')[0]

    # create introductory information
    mess_strings = list()
    mess_strings.append(Bcolors.OKBLUE +
                        '\n\n*** Structure Tensor Orientation Analysis ***\n' +
                        Bcolors.ENDC)
    mess_strings.append(' > Source path:        {}'.format(source_path))
    mess_strings.append(' > Stack name:         {}'.format(stack_name))
    mess_strings.append(' > Process folder:     {}'.format(process_folder))
    mess_strings.append(' > Base path:          {}'.format(base_path))
    mess_strings.append(' > Parameter filename: {}'.format(param_filename))
    mess_strings.append(' > Parameter filepath: {}'.format(param_filepath))
    mess_strings.append('')
    mess_strings.append(' > PREFERENCES:')
    mess_strings.append('   - _verbose       {}'.format(_verbose))
    mess_strings.append('   - _deep_verbose  {}'.format(_deep_verbose))
    mess_strings.append('   - _save_csv      {}'.format(_save_csv))
    mess_strings.append('   - _save_hist     {}'.format(_save_hist))
    mess_strings.append('   - _save_maps     {}'.format(_save_maps))

    # extract parameters
    param_names = [
        'roi_xy_pix', 'px_size_xy', 'px_size_z', 'mode_ratio',
        'threshold_on_cell_ratio', 'local_disarray_xy_side',
        'local_disarray_z_side', 'neighbours_lim', 'fwhm_xy', 'fwhm_z'
    ]

    param_values = search_value_in_txt(param_filepath, param_names)

    # create parameter dictionary
    parameters = {}
    mess_strings.append('\n\n*** Parameters used:')
    mess_strings.append(
        ' > Parameters extracted from {}\n'.format(param_filename))
    for i, p_name in enumerate(param_names):
        parameters[p_name] = float(param_values[i])
        mess_strings.append('> {} - {}'.format(p_name, parameters[p_name]))

    # acquisition system characteristics: ratio of the pixel size along the z and x-y axes
    ps_ratio = parameters['px_size_z'] / parameters['px_size_xy']

    # size of the analysis block (the z side is scaled by the pixel-size ratio)
    shape_P = np.array(
        (int(parameters['roi_xy_pix']), int(parameters['roi_xy_pix']),
         int(parameters['roi_xy_pix'] / ps_ratio))).astype(np.int32)

    mess_strings.append('\n *** Analysis configuration:')
    mess_strings.append(
        ' > Pixel size ratio (z / xy) = {0:0.2f}'.format(ps_ratio))
    mess_strings.append(
        ' > Number of selected stack slices for each ROI ({} x {}): {}'.format(
            shape_P[0], shape_P[1], shape_P[2]))
    mess_strings.append(' > Parallelepiped size: ({0},{1},{2}) pixel ='
                        '  [{3:2.2f} {4:2.2f} {5:2.2f}] um'.format(
                            shape_P[0], shape_P[1], shape_P[2],
                            shape_P[0] * parameters['px_size_xy'],
                            shape_P[1] * parameters['px_size_xy'],
                            shape_P[2] * parameters['px_size_z']))

    # create .txt report file
    txt_info_filename = 'Orientations_INFO_' + stack_prefix + '_' \
                   + str(int(parameters['roi_xy_pix'] * parameters['px_size_xy'])) + 'um.txt'
    txt_info_path = os.path.join(os.path.dirname(source_path),
                                 txt_info_filename)

    # print analysis report to screen and write into .txt file all introductory information
    write_on_txt(mess_strings, txt_info_path, _print=True, mode='w')

    # clear list of strings
    mess_strings.clear()

    # 1 - OPEN STACK ------------------------------------------------------------------------

    # extract data (entire volume 'V')
    volume = InputFile(source_path).whole()

    # move the z axis last: (z, y, x) -> (r, c, z)
    volume = np.moveaxis(volume, 0, -1)

    # compute volume size
    shape_V = np.array(volume.shape)
    pixel_for_slice = shape_V[0] * shape_V[1]
    total_voxel_V = pixel_for_slice * shape_V[2]

    # print volume size information
    mess_strings.append('\n\n*** Loaded volume size')
    mess_strings.append(' > Size of entire volume:  ({}, {}, {})'.format(
        shape_V[0], shape_V[1], shape_V[2]))
    mess_strings.append(
        ' > Pixels for stack slice:  {}'.format(pixel_for_slice))
    mess_strings.append(' > Total number of voxels:  {}'.format(total_voxel_V))

    # extract a list of math information (strings) about the volume variable
    info = print_info(volume,
                      text='\nVolume informations:',
                      _std=False,
                      _return=True)
    mess_strings = mess_strings + info

    # print volume information to screen and add it to the report .txt file
    write_on_txt(mess_strings, txt_info_path, _print=True, mode='a')

    # clear list of strings
    mess_strings.clear()

    # 2 - LOOP FOR BLOCK EXTRACTION and ANALYSIS -------------------------------------------
    print('\n\n')
    print(Bcolors.OKBLUE + '*** Start Structure Tensor Analysis... ' +
          Bcolors.ENDC)

    t_start = time.time()

    # create empty result matrix
    R, shape_R = create_R(shape_V, shape_P)

    # conduct analysis on input volume
    R, count = iterate_orientation_analysis(volume, R, parameters, shape_R,
                                            shape_P, _verbose)
    mess_strings.append('\n > Orientation analysis complete.')

    # retrieve information about the analyzed data
    block_with_cell = np.count_nonzero(R[Param.CELL_INFO])
    block_with_info = np.count_nonzero(R[Param.ORIENT_INFO])
    p_rejec_cell = 100 * (1 - (block_with_cell / count))
    p_rejec_info_tot = 100 * (1 - (block_with_info / count))
    p_rejec_info = 100 * (1 - (block_with_info / block_with_cell))

    # get analysis time
    t_process = time.time() - t_start

    # create result strings
    mess_strings.append('\n\n*** Results of Orientation analysis:')
    mess_strings.append(' > Expected iterations: {}'.format(np.prod(shape_R)))
    mess_strings.append(' > Total    iterations: {}'.format(count))
    mess_strings.append(' > Time elapsed:        {0:.3f} s'.format(t_process))
    mess_strings.append('\n > Total blocks analyzed: {}'.format(count))
    mess_strings.append(
        ' > Block with cell: {0}, rejected from total: {1} ({2:0.1f}%)'.format(
            block_with_cell, count - block_with_cell, p_rejec_cell))
    mess_strings.append(
        ' > Block with gradient information: {}'.format(block_with_info))
    mess_strings.append(
        ' > rejected from total:             {0} ({1:0.1f}%)'.format(
            count - block_with_info, p_rejec_info_tot))
    mess_strings.append(
        ' > rejected from block with cell:   {0} ({1:0.1f}%)'.format(
            block_with_cell - block_with_info, p_rejec_info))

    mess_strings.append(
        '\n > R matrix created ( shape: ({}, {}, {}) cells (zyx) )'.format(
            R.shape[0], R.shape[1], R.shape[2]))

    # print information to screen and add it to the report .txt file
    write_on_txt(mess_strings, txt_info_path, _print=True, mode='a')

    # clear list of strings
    mess_strings.clear()

    # 3 - DISARRAY AND FRACTIONAL ANISOTROPY ESTIMATION -------------------------------------

    # estimate local disarrays and fractional anisotropy, write estimated values also inside R
    mtrx_of_disarrays, mtrx_of_local_fa, shape_G, R = estimate_local_disarray(
        R, parameters, ev_index=2, _verb=_verbose, _verb_deep=_deep_verbose)

    # 4a - SAVE R ( updated with estimate_local_disarray() ) TO NUMPY FILE ------------------

    # create result matrix (R) filename:
    R_filename = 'R_' + stack_prefix + '_' + str(
        int(parameters['roi_xy_pix'] * parameters['px_size_xy'])) + 'um.npy'
    R_prefix = R_filename.split('.')[0]
    R_filepath = os.path.join(base_path, process_folder, R_filename)

    # save results to R.npy
    np.save(R_filepath, R)
    mess_strings.append('\n> R matrix saved to: {}'.format(
        os.path.dirname(source_path)))
    mess_strings.append('> with name: {}'.format(R_filename))
    mess_strings.append('\n> Information .txt file saved to: {}'.format(
        os.path.dirname(txt_info_path)))
    mess_strings.append('> with name: {}'.format(txt_info_filename))

    # print information to screen and add it to the report .txt file
    write_on_txt(mess_strings, txt_info_path, _print=True, mode='a')

    # clear list of strings
    mess_strings.clear()

    # 4b - SAVE DISARRAY TO NUMPY FILE AND COMPILE RESULTS TXT FILE -------------------------

    # save disarray matrices (computed with arithmetic and weighted means) to numpy file
    disarray_np_filename = dict()
    for mode in [att for att in vars(Mode) if not att.startswith('_')]:
        disarray_np_filename[getattr(Mode, mode)] = save_in_numpy_file(
            mtrx_of_disarrays[getattr(Mode, mode)],
            R_prefix,
            shape_G,
            parameters,
            base_path,
            process_folder,
            data_prefix='MatrixDisarray_{}_'.format(mode))

    # save fractional anisotropy to numpy file
    fa_np_filename = save_in_numpy_file(mtrx_of_local_fa,
                                        R_prefix,
                                        shape_G,
                                        parameters,
                                        base_path,
                                        process_folder,
                                        data_prefix='FA_local_')

    mess_strings.append(
        '\n> Disarray and Fractional Anisotropy matrices saved to:')
    mess_strings.append('> {}'.format(os.path.join(base_path, process_folder)))
    mess_strings.append('with name: \n > {}\n > {}\n > {}\n'.format(
        disarray_np_filename[Mode.ARITH], disarray_np_filename[Mode.WEIGHT],
        fa_np_filename))
    mess_strings.append('\n')

    # 5 - STATISTICAL ANALYSIS, HISTOGRAMS AND SAVINGS --------------------------------------

    # estimate statistics (see class Stat) of disarray and fractional anisotropy matrices
    disarray_ARITM_stats = statistics_base(mtrx_of_disarrays[Mode.ARITH],
                                           invalid_value=CONST.INV)
    disarray_WEIGHT_stats = statistics_base(mtrx_of_disarrays[Mode.WEIGHT],
                                            w=mtrx_of_local_fa,
                                            invalid_value=CONST.INV)

    fa_stats = statistics_base(mtrx_of_local_fa, invalid_value=CONST.INV)

    # compile/append strings of statistical results
    s1 = compile_results_strings(mtrx_of_disarrays[Mode.ARITH], 'Disarray',
                                 disarray_ARITM_stats, 'ARITH', '%')
    s2 = compile_results_strings(mtrx_of_disarrays[Mode.WEIGHT], 'Disarray',
                                 disarray_WEIGHT_stats, 'WEIGHT', '%')
    s3 = compile_results_strings(mtrx_of_local_fa, 'Fractional Anisotropy',
                                 fa_stats)
    disarray_and_fa_results_strings = s1 + ['\n\n\n'] + s2 + ['\n\n\n'] + s3

    # update mess strings
    mess_strings = mess_strings + disarray_and_fa_results_strings

    # create results .txt filename and path
    txt_results_filename = 'results_disarray_by_{}_G({},{},{})_limNeig{}.txt'.format(
        R_prefix, int(shape_G[0]), int(shape_G[1]), int(shape_G[2]),
        int(parameters['neighbours_lim']))

    # save to .csv
    if _save_csv:
        mess_strings.append('\n> CSV files saved to:')

        # save disarray and fractional anisotropy matrices to .csv file
        for (mtrx, np_fname) in zip([
                mtrx_of_disarrays[Mode.ARITH], mtrx_of_disarrays[Mode.WEIGHT],
                mtrx_of_local_fa
        ], [
                disarray_np_filename[Mode.ARITH],
                disarray_np_filename[Mode.WEIGHT], fa_np_filename
        ]):

            # extract only valid values (different from INV = -1)
            values = mtrx[mtrx != CONST.INV]

            # create .csv file path and save data
            csv_filename = np_fname.split('.')[0] + '.csv'
            csv_filepath = os.path.join(base_path, process_folder,
                                        csv_filename)
            np.savetxt(csv_filepath, values, delimiter=",", fmt='%f')
            mess_strings.append('> {}'.format(csv_filepath))

    # save histograms
    if _save_hist:
        mess_strings.append('\n> Histogram plots saved to:')

        # zip matrices, description and filenames
        for (mtrx, lbl, np_fname) in zip([
                mtrx_of_disarrays[Mode.ARITH], mtrx_of_disarrays[Mode.WEIGHT],
                mtrx_of_local_fa
        ], [
                'Local Disarray % (arithmetic mean)',
                'Local Disarray % (weighted mean)',
                'Local Fractional Anisotropy'
        ], [
                disarray_np_filename[Mode.ARITH],
                disarray_np_filename[Mode.WEIGHT], fa_np_filename
        ]):

            # extract only valid values (different from INV = -1)
            values = mtrx[mtrx != CONST.INV]

            # create file path
            hist_fname = '.'.join(np_fname.split('.')[:-1]) + '.tiff'
            hist_filepath = os.path.join(base_path, process_folder, hist_fname)

            # create histograms and save them to image files
            plot_histogram(values,
                           xlabel=lbl,
                           ylabel='Sub-volume occurrence',
                           filepath=hist_filepath)
            mess_strings.append('> {}'.format(hist_filepath))

    # save disarray and fa maps
    if _save_maps:
        mess_strings.append(
            '\n> Disarray and Fractional Anisotropy plots saved to:')

        # disarray value normalization:
        #  - to preserve the small differences between the ARITH and WEIGHT disarray
        #    matrices, the two are normalized together
        #  - invalid values are NOT removed, to preserve the original matrix (image) shape
        #  - invalid values (if present) are set to the minimum value
        abs_max = np.max([
            mtrx_of_disarrays[Mode.ARITH].max(),
            mtrx_of_disarrays[Mode.WEIGHT].max()
        ])
        abs_min = np.min([
            mtrx_of_disarrays[Mode.ARITH].min(),
            mtrx_of_disarrays[Mode.WEIGHT].min()
        ])
        dis_norm_A = 255 * ((mtrx_of_disarrays[Mode.ARITH] - abs_min) /
                            (abs_max - abs_min))
        dis_norm_W = 255 * ((mtrx_of_disarrays[Mode.WEIGHT] - abs_min) /
                            (abs_max - abs_min))

        # define destination folder
        dest_folder = os.path.join(base_path, process_folder)

        # create and save data frames (disarray and fractional anisotropy)
        for (mtrx,
             np_fname) in zip([dis_norm_A, dis_norm_W, mtrx_of_local_fa], [
                 disarray_np_filename[Mode.ARITH],
                 disarray_np_filename[Mode.WEIGHT], fa_np_filename
             ]):

            # plot frames and save them inside a sub_folder (folder_path)
            folder_path = plot_map_and_save(mtrx, np_fname, dest_folder,
                                            shape_G, shape_P)
            mess_strings.append('> {}'.format(folder_path))

    # print information to screen and add it to the results .txt file
    txt_results_filepath = os.path.join(base_path, process_folder,
                                        txt_results_filename)
    write_on_txt(mess_strings, txt_results_filepath, _print=True, mode='w')
Code example #11
def main():
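    """Apply an inverse transform to a stack, optionally slice by slice.

    A JPEG2000 ZIP copy can be produced in a parallel thread while the
    transform runs; slicing the transform (args.slices) bounds memory use,
    with a dummy colour axis added so tifffile does not misread 3-frame
    chunks as RGB. inv_matrix, transform, sliced_transform, convert_to_jp2ar
    and InputFile are assumed project helpers.
    """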
    args = parse_args()

    infile = InputFile(args.input_file)
    ashape = np.flipud(np.array(infile.shape))  # X, Y, Z order

    M_inv, final_shape = inv_matrix(
        shape=ashape,
        theta=args.theta,
        direction=args.direction,
        view=args.view,
        z=args.z,
        xy=args.xy
    )

    logger.info('input_shape: {}, output_shape: {}'
                .format(infile.shape, tuple(final_shape)))

    if os.path.exists(args.output_file):
        logger.warning('Output file {} already exists'.format(args.output_file))
        if not args.force:
            logger.error('(use -f to force)')
            return

    output_dir = os.path.dirname(args.output_file)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    total_byte_size = int(np.prod(final_shape)) * infile.dtype.itemsize
    bigtiff = total_byte_size > 2 ** 31 - 1

    logger.info('loading {}'.format(args.input_file))

    a = infile.whole()

    threads = []

    if args.jp2ar_enabled:
        p = Path(args.output_file).with_suffix('.zip')
        logger.info('saving JP2000 ZIP archive to {}'.format(p))
        jp2ar_thread = threading.Thread(target=convert_to_jp2ar, kwargs=dict(
            input_data=a, output_dir=None, compression=args.jp2_compression,
            nthreads=args.nthreads, temp_dir=None, output_file=str(p)))
        jp2ar_thread.start()
        threads.append(jp2ar_thread)

    def worker():
        if args.slices is None:
            t = transform(a.T, M_inv, final_shape)  # X, Y, Z order
            logger.info('saving to {}'.format(args.output_file))
            tiff.imwrite(args.output_file, t.T, bigtiff=bigtiff)
            return

        if os.path.exists(args.output_file):
            os.remove(args.output_file)

        i = 0
        for t in sliced_transform(a, M_inv, final_shape, args.slices):
            i += 1
            logger.info('saving slice {}/{} to {}'.format(
                i, args.slices, args.output_file))

            t = t.T  # Z, Y, X order

            # add dummy color axis to trick imsave
            # (otherwise when size of Z is 3, it thinks it's an RGB image)
            t = t[:, np.newaxis, ...]
            tiff.imwrite(args.output_file, t, append=True, bigtiff=bigtiff)

    transform_thread = threading.Thread(target=worker)
    transform_thread.start()
    threads.append(transform_thread)

    for thread in threads:
        thread.join()
Code example #12
def CLAHE(parser):
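    """Apply CLAHE slice by slice to a stack.

    The kernel size defaults to nextpow2(width / 8) when args.kernel == 0;
    the result is saved either as a 2D image sequence or as one 3D TIFF.
    normalize, nextpow2, save_tiff, create_img_name_from_index and InputFile
    are assumed project helpers; skimage's exposure module is assumed
    imported.
    """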
    print('** =========== **')
    print('** START CLAHE **')
    print('** =========== **')
    args = parser.parse_args()

    # extract data
    infile = InputFile(args.source)
    data = infile.whole()

    # sizes
    (num_of_slices, height, width) = infile.shape
    ksize = nextpow2(width / 8) if args.kernel == 0 else args.kernel

    # extract path and filename
    base_path = os.path.dirname(os.path.dirname(args.source))
    filename = os.path.splitext(os.path.basename(args.source))[0]

    # create destination paths where save result
    destination_path = os.path.join(base_path,
                                    'clahed_c{}_k{}'.format(args.clip, ksize))
    if not os.path.exists(destination_path):
        os.makedirs(destination_path)

    # print information
    print('\n ** PATHS : ')
    print(' - source : {}'.format(args.source))
    print(' - output : {}'.format(destination_path))
    print('\n ** FILENAME: {}'.format(filename))
    print('\n ** CLAHE PARAMETERS : ')
    print(' - clip: {}'.format(args.clip))
    print(' - ksize: {}'.format(ksize))

    print('\n ** OUTPUT FORMAT:')
    if args.image_sequence:
        print(' - output is saved as a sequence of 2D TIFF images')
    else:
        print(' - output is saved as a single 3D TIFF file')

    # output array
    if not args.image_sequence:
        clahed = np.zeros_like(data)

    print()
    # Execution
    for z in range(num_of_slices):
        img = normalize(data[z, ...])
        img_eq = normalize(
            exposure.equalize_adapthist(image=img,
                                        kernel_size=ksize,
                                        clip_limit=args.clip))

        if args.image_sequence:
            img_name = create_img_name_from_index(z, post="_clahe")
            save_tiff(img=img_eq,
                      img_name=img_name,
                      prefix='',
                      comment='',
                      folder_path=destination_path)
            print(img_name)
        else:
            clahed[z, ...] = img_eq
            print('z = {}'.format(z))

    # save output
    if not args.image_sequence:
        save_tiff(clahed, os.path.join(destination_path, 'clahed'))
    print(' \n ** Process Finished \n')
Code example #13
def main(parser):
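    """Estimate myocardial volume fractions from a binary mask stack.

    For each XZ section in the selected y range, the script counts myocyte
    pixels, the pixels after hole filling, and the pixels after a convex
    hull, then converts the counts to um^2 section areas and mm^3 volumes
    using the pixel sizes read from parameters.txt. Project helpers
    (InputFile, manage_path_argument, extract_parameters,
    create_img_name_from_index, save_tiff, seconds_to_min_sec, bcolors) and
    numpy/scipy.ndimage/skimage imports are assumed.
    """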

    # read args from console
    args = parser.parse_args()

    # read source path (path of binary segmentation images)
    source_path = manage_path_argument(args.source_folder)

    # take base path and stack name
    base_path = os.path.dirname(os.path.dirname(source_path))
    stack_name = os.path.basename(source_path)

    # create the paths and folders where the section images are saved
    sections_path = os.path.join(base_path, 'xz_sections', stack_name)
    filled_sections_path = os.path.join(base_path, 'xz_filled_sections', stack_name)
    convex_sections_path = os.path.join(base_path, 'xz_convex_sections', stack_name)
    for path in [sections_path, filled_sections_path, convex_sections_path]:
        if not os.path.exists(path):
            os.makedirs(path)

    # portion of y axes for section estimation
    y_start = np.uint16(args.y_start[0])
    y_stop = np.uint16(args.y_stop[0])

    # Def .txt filepath
    txt_parameters_path = os.path.join(base_path, 'parameters.txt')
    txt_results_path = os.path.join(base_path, 'Measure_analysis.txt')

    # SCRIPT ----------------------------------------------------------------------
    
    # print the init message to screen and write it into results.txt
    init_message = [' ****  Script for Estimation of Real Myocardial fraction volume **** \n \n'
                    ' Source from path : {}'.format(base_path),
                    ' Stack : {}'.format(stack_name),
                    '\n\n *** Start processing... \n'
                    ]
    error_message = '\n *** ERROR *** : stack in this path is None'
    with open(txt_results_path, 'w') as f:
        for line in init_message:
            print(line)
            f.write(line+'\n')

    # reads parameters
    parameters = extract_parameters(txt_parameters_path)

    # measure units
    x_step = parameters['res_xy']  # micron
    y_step = parameters['res_xy']  # micron
    z_step = parameters['res_z']  # micron
    pixel_xz_in_micron2 = x_step * z_step  # micron^2
    voxel_in_micron3 = x_step * y_step * z_step  # micron^3

    # preferences
    _save_binary_sections = bool(parameters['save_binary_sections'])

    # load data
    print(' *** Start to load the Stack...')
    infile = InputFile(source_path)
    masks = infile.whole()

    # swap axis from ZYX to YXZ
    masks = np.moveaxis(masks, 0, -1)

    # check whether the image is 3D or 2D (with a single frame it is 2D, so an empty axis is added)
    if len(masks.shape) == 2:
        masks = np.expand_dims(masks, axis=2)  # add the z axis

    # count selected sections
    total_sections = masks.shape[0]  # row -> y -> number of sections
    print('\n Volume shape:', masks.shape)
    print('Number of total sections: ', total_sections)
    print('\n')

    # set y portion [optional]
    if y_start == y_stop == 0:
        y_start = 0
        y_stop = total_sections - 1
        print(' *** ATTENTION : selected all the sections: {} -> {}'.format(y_start, y_stop))
    if y_stop < y_start:
        y_start = np.uint16(total_sections / 4)
        y_stop = np.uint16(total_sections * 3 / 4)
        print(' *** ATTENTION : y portion selected by DEFAULT: {} -> {}'.format(y_start, y_stop))

    # every section (fixed y) is an XZ projection of the mask; the estimated area
    # is the number of non-zero pixels in the section image
    selected_sections = np.uint16(y_stop - y_start)
    sections_micron2 = np.zeros(selected_sections)  # area in micron^2 of every section
    print('Number of selected sections: ', y_stop-y_start)

    # initialize the volume counters to zero
    effective_myocites_volume = 0  # real estimated volume of myocytes (sum of the real cell areas over the sections)
    filled_myocite_volume = 0  # filled tissue volume (sum of the section areas after hole filling)
    global_tissue_volume = 0  # global tissue volume (sum of the convex-hull areas of the sections)

    t_start = time.time()
    analyzed_section = 0  # counter, used only to monitor the loop

    with open(txt_results_path, 'a') as f:
        
        pre_info = list()
        pre_info.append('\nPortion of y selected: [{} -> {}]'.format(y_start, y_stop))
        pre_info.append('Option for save the sections images: {}'.format(_save_binary_sections))
        pre_info.append('\n')
        for l in pre_info:
            print(l)
            f.write(l+'\n')

        if masks is not None:

            print('\n... Estimation of mean section and Volume fraction of Myocardial Tissue...')
            for y in range(y_start, y_stop):

                # extract section
                section = masks[y, :, :]
                sec_name = create_img_name_from_index(total_sections - y - 1)  # img_name.tif

                # count pixels of real cardiomyocyte cells of current section
                pixels_with_cardiomyocyte = np.count_nonzero(section)
                effective_myocites_volume += pixels_with_cardiomyocyte

                # save original sections
                if _save_binary_sections:
                    # transform point of view and save
                    save_tiff(img=np.rot90(m=np.flipud(section), k=1, axes=(0, 1)),
                              img_name=sec_name, comment='section', folder_path=sections_path)

                # fill the section holes; the 'comment' for the tiff filename is set when saving
                section = 255 * ndimage.binary_fill_holes(section).astype(np.uint8)
                # count cell pixels in the filled section
                pixels_with_filled_cell = np.count_nonzero(section.astype(bool))
                filled_myocite_volume += pixels_with_filled_cell

                if _save_binary_sections:
                    # transform point of view and save
                    save_tiff(img=np.rot90(m=np.flipud(section), k=1, axes=(0, 1)),
                              img_name=sec_name, comment='filled_section', folder_path=filled_sections_path)

                # create the envelope (convex hull) of the section
                section = 255 * convex_hull_image(np.ascontiguousarray(section)).astype(np.uint8)
                if _save_binary_sections:
                    # transform point of view and save
                    save_tiff(img=np.rot90(m=np.flipud(section), k=1, axes=(0, 1)),
                              img_name=sec_name, comment='convex_section', folder_path=convex_sections_path)

                # count cell pixels in the enveloped section
                pixels_with_generic_cell = np.count_nonzero(section.astype(bool))
                global_tissue_volume += pixels_with_generic_cell

                # estimate area of this section
                if pixels_with_cardiomyocyte > 0:
                    real_area_in_micron2 = pixels_with_cardiomyocyte * pixel_xz_in_micron2
                    filled_area_in_micron2 = pixels_with_filled_cell * pixel_xz_in_micron2
                    global_area_in_micron2 = pixels_with_generic_cell * pixel_xz_in_micron2

                    # save in the section area list
                    sections_micron2[y - y_start] = real_area_in_micron2

                    # create string messages
                    measure = bcolors.OKBLUE + '{}'.format(os.path.basename(base_path)) + bcolors.ENDC + \
                              ' - {} -> '.format(sec_name) + \
                              'real: {0:3.1f} um^2 - filled: {1:3.1f} um^2 - convex: {2:3.1f} um^2'.\
                                  format(real_area_in_micron2, filled_area_in_micron2, global_area_in_micron2)

                else:
                    measure = ' - {} is empty'.format(sec_name)

                analyzed_section += 1
                print(measure)
                # f.write(measure+'\n')

            # execution time
            (h, m, s) = seconds_to_min_sec(time.time() - t_start)

            # percentage of cardiomyocyte volumes
            perc_fill = 100 * effective_myocites_volume / filled_myocite_volume
            perc_env = 100 * effective_myocites_volume / global_tissue_volume

            # volumes in micron^3
            effective_volume_in_micron3 = effective_myocites_volume * voxel_in_micron3
            filled_volume_in_micron3 = filled_myocite_volume * voxel_in_micron3
            global_tissue_volume_in_micron3 = global_tissue_volume * voxel_in_micron3

            # count empty sections
            sections_with_cell = np.count_nonzero(sections_micron2)
            empties = selected_sections - sections_with_cell

            # mean section area (original images)
            mean_section = np.sum(sections_micron2) / sections_with_cell

            # create results string
            result_message = list()
            result_message.append('\n ***  Process successfully completed, time of execution: {0:2d}h {1:2d}m {2:2d}s \n'.format(int(h), int(m), int(s)))
            result_message.append(' Total number of frames: {}'.format(masks.shape[2]))
            result_message.append(' Total sections: {}'.format(total_sections))
            result_message.append(' Selected sections: {}'.format(selected_sections))
            result_message.append(' Effective analyzed sections: {}'.format(analyzed_section))
            result_message.append(' Number of empty sections: {}'.format(empties))
            result_message.append(' Number of sections with cells: {}'.format(sections_with_cell))
            result_message.append('\n')
            result_message.append(' Mean section area: {0:.3f} um^2'.format(mean_section))
            result_message.append('\n')
            result_message.append(' Myocardium volume : {0:.6f} mm^3'.format(effective_volume_in_micron3 / 10 ** 9))
            result_message.append(' Filled volume : {0:.6f} mm^3'.format(filled_volume_in_micron3 / 10 ** 9))
            result_message.append(' Global volume : {0:.6f} mm^3'.format(global_tissue_volume_in_micron3 / 10 ** 9))
            result_message.append(' Percentage of myocardium tissue filled: {0:.2f}%'.format(perc_fill))
            result_message.append(' Percentage of myocardium tissue enveloped: {0:.2f}%'.format(perc_env))

            result_message.append('\n')
            result_message.append(' \n OUTPUT SAVED IN: \n')
            result_message.append(txt_results_path)

            # write and print results
            for l in result_message:
                print(l)
                f.write(l+'\n')

        else:
            print(error_message)
            f.write(error_message)

        print(' \n \n \n ')
Code example #14
File: st_analysis.py Project: ghianda/whole_heart
def main(parser):
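    """A more monolithic variant of the analysis in code example #10.

    The block loop is written inline instead of delegated to
    iterate_orientation_analysis, and only the disarray matrix (no
    fractional anisotropy) is saved. Note that estimate_local_disarry is
    the project's own spelling of the helper name.
    """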

    args = parser.parse_args()

    # Extract input information
    source_path = manage_path_argument(args.source_path)
    parameter_filename = args.parameters_filename[0]

    # extract filenames and folders
    stack_name = os.path.basename(source_path)
    process_folder = os.path.basename(os.path.dirname(source_path))
    base_path = os.path.dirname(os.path.dirname(source_path))
    parameter_filepath = os.path.join(base_path, process_folder,
                                      parameter_filename)
    stack_prefix = stack_name.split('.')[0]

    # extract other preferences
    _verbose = args.verbose
    _save_csv = args.csv

    # create some introductory information
    mess_strings = list()
    mess_strings.append('\n\n*** ST orientation Analysis ***\n')
    mess_strings.append(' > source path: {}'.format(source_path))
    mess_strings.append(' > stack name: {}'.format(stack_name))
    mess_strings.append(' > process folder: {}'.format(process_folder))
    mess_strings.append(' > base path: {}'.format(base_path))
    mess_strings.append(' > Parameter filename: {}'.format(parameter_filename))
    mess_strings.append(' > Parameter filepath: {}'.format(parameter_filepath))
    mess_strings.append('')

    # TODO here added local_disarray_z_side and local_disarray_xy_side
    # extract parameters
    param_names = [
        'roi_xy_pix', 'px_size_xy', 'px_size_z', 'mode_ratio',
        'threshold_on_cell_ratio', 'local_disarray_xy_side',
        'local_disarray_z_side', 'neighbours_lim', 'fwhm_xy', 'fwhm_z'
    ]

    param_values = search_value_in_txt(parameter_filepath, param_names)

    # create dictionary of parameters
    parameters = {}
    mess_strings.append('\n\n*** Parameters used:')
    mess_strings.append(
        ' > Parameters extracted from {}\n'.format(parameter_filename))
    for i, p_name in enumerate(param_names):
        parameters[p_name] = float(param_values[i])
        mess_strings.append('> {} - {}'.format(p_name, parameters[p_name]))

    # Parameters of Acquisition System:
    # ratio between pixel size in z and xy
    ps_ratio = parameters['px_size_z'] / parameters['px_size_xy']

    # analysis block dimension in z-axis
    num_of_slices_P = int(parameters['roi_xy_pix'] / ps_ratio)

    row_P = col_P = int(parameters['roi_xy_pix'])
    shape_P = np.array((row_P, col_P, num_of_slices_P)).astype(np.int32)

    mess_strings.append('\n *** Analysis configuration')
    mess_strings.append(
        ' > Pixel size ratio (z / xy) = {0:0.2f}'.format(ps_ratio))
    mess_strings.append(
        ' > Number of slices selected for each ROI ({} x {}): {}'.format(
            row_P, col_P, num_of_slices_P))
    mess_strings.append(
        ' > Dimension of Parallelepiped: ({0},{1},{2}) pixel  ='
        '  [{3:2.2f} {4:2.2f} {5:2.2f}] um'.format(
            shape_P[0], shape_P[1], shape_P[2],
            row_P * parameters['px_size_xy'], col_P * parameters['px_size_xy'],
            num_of_slices_P * parameters['px_size_z']))

    # create result.txt filename:
    txt_filename = 'Orientations_' + stack_prefix + '_' \
                   + str(int(parameters['roi_xy_pix'] * parameters['px_size_xy'])) + 'um.txt'
    txt_path = os.path.join(os.path.dirname(source_path), txt_filename)

    # print the introductory information and write it into the .txt file
    write_on_txt(mess_strings, txt_path, _print=True)
    # clear list of strings
    mess_strings.clear()

    # 1 ----------------------------------------------------------------------------------------------------
    # OPEN STACK

    # extract data - entire Volume: 'V'
    volume = InputFile(source_path).whole()
    # NB - the axis handling will have to be changed in the future
    volume = np.moveaxis(volume, 0, -1)  # (z, y, x) -> (r, c, z)

    # calculate dimension
    shape_V = np.array(volume.shape)
    pixel_for_slice = shape_V[0] * shape_V[1]
    total_voxel_V = pixel_for_slice * shape_V[2]

    mess_strings.append('\n\n*** Entire loaded volume dimensions:')
    mess_strings.append(' > Dimension of entire volume: ({}, {}, {})'.format(
        shape_V[0], shape_V[1], shape_V[2]))
    mess_strings.append(
        ' > Pixels per slice           : {}'.format(pixel_for_slice))
    mess_strings.append(
        ' > Total voxels in volume     : {}'.format(total_voxel_V))

    # extract a list of math information (as strings) about the volume variable
    info = print_info(volume,
                      text='\nVolume informations:',
                      _std=False,
                      _return=True)
    mess_strings = mess_strings + info

    # print and write into .txt
    write_on_txt(mess_strings, txt_path, _print=True)
    # clear list of strings
    mess_strings.clear()

    # 2 ----------------------------------------------------------------------------------------------------
    # CYCLE FOR BLOCKS EXTRACTION and ANALYSIS
    print('\n\n')
    print('*** Start Structure Tensor analysis... ')

    t_start = time.time()

    # create empty Result matrix
    R, shape_R = create_R(shape_V, shape_P)

    # estimate sigma of blurring for isotropic resolution
    sigma_blur = sigma_for_uniform_resolution(
        FWHM_xy=parameters['fwhm_xy'],
        FWHM_z=parameters['fwhm_z'],
        px_size_xy=parameters['px_size_xy'])
    perc = 0
    count = 0  # count iteration
    tot = np.prod(shape_R)
    print(' > Expected iterations : ', tot)

    for z in range(shape_R[2]):
        if _verbose: print('\n\n')
        print('{0:0.1f} % - z: {1:3}'.format(perc, z))
        for r in range(shape_R[0]):
            for c in range(shape_R[1]):

                start_coord = create_coord_by_iter(r, c, z, shape_P)
                slice_coord = create_slice_coordinate(start_coord, shape_P)

                perc = 100 * (count / tot)
                if _verbose: print('\n')

                # save init info in R
                R[r, c, z]['id_block'] = count
                R[r, c, z]['init_coord'] = start_coord

                # extract parallelepiped
                parall = volume[slice_coord]

                # check dimension (if iteration is on border of volume, add zero_pad)
                parall = pad_dimension(parall, shape_P)

                # If it's not all black...
                if np.max(parall) != 0:

                    # analysis of parallelepiped extracted
                    there_is_cell, there_is_info, results = block_analysis(
                        parall, shape_P, parameters, sigma_blur, _verbose)

                    # save info in R[r, c, z]
                    if there_is_cell: R[r, c, z]['cell_info'] = True
                    if there_is_info: R[r, c, z]['orient_info'] = True

                    # save results in R
                    if _verbose: print(' saved in R:  ')
                    for key in results.keys():
                        R[r, c, z][key] = results[key]
                        if _verbose:
                            print(' > {} : {}'.format(key, R[r, c, z][key]))

                else:
                    if _verbose: print('   block rejected   ')
                    print()

                count += 1

    block_with_cell = np.count_nonzero(R['cell_info'])
    block_with_info = np.count_nonzero(R['orient_info'])
    p_rejec_cell = 100 * (1 - (block_with_cell / count))
    p_rejec_info_tot = 100 * (1 - (block_with_info / count))
    p_rejec_info = 100 * (1 - (block_with_info / block_with_cell))

    t_process = time.time() - t_start

    mess_strings.append('\n\n*** Results of Orientation analysis:')
    mess_strings.append(' > Expected iterations : {}'.format(np.prod(shape_R)))
    mess_strings.append(' > Total iterations : {}'.format(count))
    mess_strings.append(' > Time elapsed: {0:.3f} s'.format(t_process))
    mess_strings.append('\n > Total blocks: {}'.format(count))
    mess_strings.append(
        ' > block with cell : {0}, rejected from total: {1} ({2:0.1f}%)'.
        format(block_with_cell, count - block_with_cell, p_rejec_cell))
    mess_strings.append(
        ' > block with gradient information : {}'.format(block_with_info))
    mess_strings.append(' > rejected from total: {0} ({1:0.1f}%)'.format(
        count - block_with_info, p_rejec_info_tot))
    mess_strings.append(
        ' > rejected from block with cell: {0} ({1:0.1f}%)'.format(
            block_with_cell - block_with_info, p_rejec_info))

    # print and write into .txt
    write_on_txt(mess_strings, txt_path, _print=True)
    # clear list of strings
    mess_strings.clear()

    # 3 ----------------------------------------------------------------------------------------------------
    # Disarray estimation

    # the function estimate local disarrays and write these values also inside R
    matrix_of_disarrays, shape_G, R = estimate_local_disarry(R,
                                                             parameters,
                                                             ev_index=2,
                                                             _verb=True,
                                                             _verb_deep=False)

    # extract only valid disarray values
    disarray_values = matrix_of_disarrays[matrix_of_disarrays != -1]

    # 4 ----------------------------------------------------------------------------------------------------
    # WRITE RESULTS AND SAVE

    # create result matrix (R) filename:
    R_filename = 'R_' + stack_prefix + '_' + str(
        int(parameters['roi_xy_pix'] * parameters['px_size_xy'])) + 'um.npy'
    R_prefix = R_filename.split('.')[0]
    R_filepath = os.path.join(base_path, process_folder, R_filename)

    # Save Results in R.npy
    np.save(R_filepath, R)
    mess_strings.append('\n > R matrix saved in: {}'.format(
        os.path.dirname(source_path)))
    mess_strings.append(' > with name: {}'.format(R_filename))

    mess_strings.append('\n > Results .txt file saved in: {}'.format(
        os.path.dirname(txt_path)))
    mess_strings.append(' > with name: {}'.format(txt_filename))

    # create filename of numpy.file where save disarray matrix
    disarray_numpy_filename = 'MatrixDisarray_{}_G({},{},{})_limNeig{}.npy'.format(
        R_prefix, int(shape_G[0]), int(shape_G[1]), int(shape_G[2]),
        int(parameters['neighbours_lim']))

    mess_strings.append('\n> Matrix of Disarray saved in:')
    mess_strings.append(os.path.join(base_path, process_folder))
    mess_strings.append(' > with name: \n{}'.format(disarray_numpy_filename))

    # save numpy file
    np.save(os.path.join(base_path, process_folder, disarray_numpy_filename),
            matrix_of_disarrays)

    # create results strings
    mess_strings.append(
        '\n\n*** Results of statistical analysis of Disarray on accepted points. \n'
    )
    mess_strings.append('> Disarray (%):= 100 * (1 - alignment)\n')
    mess_strings.append('> Matrix of disarray shape: {}'.format(
        matrix_of_disarrays.shape))
    mess_strings.append('> Valid disarray values: {}'.format(
        disarray_values.shape))
    mess_strings.append('\n> Disarray mean: {0:0.2f}%'.format(
        np.mean(disarray_values)))
    mess_strings.append('> Disarray std: {0:0.2f}% '.format(
        np.std(disarray_values)))
    mess_strings.append('> Disarray (min, MAX)%: ({0:0.2f}, {1:0.2f})'.format(
        np.min(disarray_values), np.max(disarray_values)))

    # create results.txt filename and filepath
    disarray_results_filename = 'results_disarray_by_{}_G({},{},{})_limNeig{}.txt'.format(
        R_prefix, int(shape_G[0]), int(shape_G[1]), int(shape_G[2]),
        int(parameters['neighbours_lim']))

    disarray_txt_filepath = os.path.join(base_path, process_folder,
                                         disarray_results_filename)

    if _save_csv:
        disarray_csv_filename = disarray_results_filename.split(
            '.')[0] + '.csv'
        np.savetxt(os.path.join(base_path, process_folder,
                                disarray_csv_filename),
                   disarray_values,
                   delimiter=",",
                   fmt='%f')

    # print and write into .txt
    write_on_txt(mess_strings, disarray_txt_filepath, _print=True)