Example #1
def UNETPrediction(filesRaw,
                   model,
                   Savedir,
                   min_size,
                   n_tiles,
                   axis,
                   show_after=1):

    count = 0
    for fname in filesRaw:
        count = count + 1
        print('Applying UNET prediction')
        Name = os.path.basename(os.path.splitext(fname)[0])
        image = imread(fname)

        Segmented = model.predict(image, axis, n_tiles=n_tiles)
        thresh = threshold_otsu(Segmented)
        Binary = Segmented > thresh

        #Postprocessing steps
        Filled = binary_fill_holes(Binary)
        Finalimage = label(Filled)
        Finalimage = fill_label_holes(Finalimage)

        Finalimage = relabel_sequential(Finalimage)[0]

        if count % show_after == 0:
            doubleplot(image, Finalimage, "Original", "Segmentation")
        imwrite(Savedir + Name + '.tif', Finalimage.astype('uint16'))

    return Finalimage
Example #2
def tiff_encode(data, level=1, **kwargs):
    """Encode TIFF."""
    with io.BytesIO() as fh:
        tifffile.imwrite(fh, data, **kwargs)
        fh.seek(0)
        out = fh.read()
    return out
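A quick way to sanity-check tiff_encode is to decode the bytes straight back with tifffile.imread, which also accepts file-like objects. A minimal round-trip sketch; the array contents and the tiff_decode helper are illustrative assumptions, not part of the original example:

import io

import numpy as np
import tifffile


def tiff_decode(buf):
    """Decode TIFF bytes back into a numpy array (hypothetical helper)."""
    with io.BytesIO(buf) as fh:
        return tifffile.imread(fh)


data = np.arange(64 * 64, dtype=np.uint16).reshape(64, 64)
encoded = tiff_encode(data)  # function from the example above
assert np.array_equal(tiff_decode(encoded), data)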
Example #3
 def write_tile(
     self, pixels, resolution, x_start, y_start, tile_width, tile_height,
     filename
 ):
     x_end = x_start + tile_width
     y_end = y_start + tile_height
     try:
         if self.file_type in ("n5", "zarr"):
             # Special case for N5/Zarr which has a single n-dimensional
             # array representation on disk
             pixels = self.make_planar(pixels, tile_width, tile_height)
             z = zarr.open(filename)[str(resolution)]
             z[:, y_start:y_end, x_start:x_end] = pixels
         elif self.file_type == 'tiff':
             # Special case for TIFF to save in planar mode using
             # deinterleaving and the tifffile library; planar data
             # is much more performant with the Bio-Formats API
             pixels = self.make_planar(pixels, tile_width, tile_height)
             with open(filename, 'wb') as destination:
                 imwrite(destination, pixels, planarconfig='SEPARATE')
         else:
             with Image.frombuffer(
                 'RGB', (int(tile_width), int(tile_height)),
                 pixels, 'raw', 'RGB', 0, 1
             ) as source, open(filename, 'wb') as destination:
                 source.save(destination)
     except Exception:
         import traceback
         traceback.print_exc()
         print(
             "Failed to write tile [:, %d:%d, %d:%d] to %s" % (
                 y_start, y_end, x_start, x_end, filename
             )
         )
Example #4
def two_channel_pipeline(
    ch1: np.ndarray,
    ch1_fname: pathlib.Path,
    ch2: np.ndarray,
    ch2_fname: pathlib.Path,
    fig,
):
    new_fname = ch1_fname.parent / (
        "combined_" + ch1_fname.stem + "_" + ch2_fname.stem + ".tif"
    )
    roi_fname = str(
        new_fname.parent
        / ("only_roi_" + ch1_fname.stem + "_" + ch2_fname.stem + ".tif")
    )
    new_fname = str(new_fname)
    tifffile.imwrite(new_fname, np.stack([ch1, ch2]))
    # ch1 -= ch1.min()
    # ch2 -= ch2.min()
    vmin1, vmax1 = ch1.min() * 1.1, ch1.max() * 0.9
    vmin2, vmax2 = ch2.min() * 1.1, ch2.max() * 0.9
    fig.axes[0].images.pop()
    fig.axes[0].imshow(ch1, cmap=cc.cm.kgy, vmin=vmin1, vmax=vmax1)
    fig.axes[0].imshow(ch2, cmap=cc.cm.kr, alpha=0.55, vmin=vmin2, vmax=vmax2)
    fig.axes[0].set_title("Ch1 is green, Ch2 is red")
    fig.canvas.manager.set_window_title(f"{new_fname}")
    plt.show(block=False)
    return roi_fname
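Reading the combined file back returns the two channels stacked on the first axis, so they can be split again on load. A small sketch, using a hypothetical file name:

import tifffile

stack = tifffile.imread("combined_ch1_ch2.tif")  # hypothetical path; shape (2, H, W)
ch1_loaded, ch2_loaded = stack  # np.stack placed the channels on the first axis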
Example #5
    def convert_series(self,
                       series,
                       return_metadata=True,
                       channels="all",
                       outpath=None,
                       lif=None):
        if channels == "all":
            channels = range(len(series.getChannels()))
        if lif is None:
            basepath = os.curdir
            filename = "converted"
        else:
            basepath, filename = os.path.split(lif)
        if outpath is None:
            outpath = basepath
        metadata = series.getMetadata()
        name = series.getName()
        frame = series.getFrame(channel=channels)
        savename = os.path.join(outpath, filename) + name + ".tif"
        tifffile.imwrite(savename, frame)

        metadata["filename"] = savename

        return metadata
Example #6
def create_ground_truth(root_dir, target_dir, target_dir_w_m,
                        sigma=3.5, truncate=1.5,
                        flip=True,
                        visualize=False):
    #flip = False
    print(flip)
    for _csv in os.listdir(root_dir):
        gt_img, gt_seed = gt_builder_from_gaussian_filter(os.path.join(root_dir, _csv),
                                                 sigma, truncate,
                                                 flip)
        file_name = _csv.split("-")[0]

        print(file_name)

        torch_gt = torch.from_numpy(gt_img.astype('float16'))
        torch_seed = torch.from_numpy(gt_seed.astype('float16'))
        # with open(os.path.join(target_dir, file_name + ".pkl"), 'wb') as f:
        #     pickle.dump(torch_gt, f)

        torch.save(torch_gt,
                   os.path.join(target_dir, file_name + "-GT.pth"))
        torch.save(torch_seed,
                   os.path.join(target_dir_w_m, file_name + "_weighted_map.pth"))
        if visualize:
            tifffile.imwrite(os.path.join(target_dir, file_name + ".tif"),
                             gt_img, photometric='minisblack')

            tifffile.imwrite(os.path.join(target_dir, file_name + "_weighted_map.tiff"),
                             gt_seed, photometric='minisblack')
Example #7
def array2tiff(data, fname, pxsize=1, dim="yxz", transpose3=True):
    """
    Write 2D or 3D array to tiff image file
    ===========================================================================
    Input       Meaning
    ---------------------------------------------------------------------------
    data        2D or 3D array with data (integer numbers int16)
                    order: TZCYXS
                    with    t   time
                            c   channel
    fname       Name of the file to write to
    pxsize      Pixel size [µm]
    dim         String with dimensions in image
                    e.g. z stack of planar images: dim = "yxz"
                    The order must be "tzcyxs". The same order must be used
                    for data
                    E.g. for a xy time series: dim="tyx" and 'data' is a 3D
                    array with time, y, and x as 1st, 2nd, and 3rd dimension
                    The only exception is that for a 3D array also "yxz" is ok
                    in combination with transpose3=True
                    (which moves the 3rd dimension to the first to correct the
                     order)
    ===========================================================================
    Output      Meaning
    ---------------------------------------------------------------------------
    tiff image
    ===========================================================================
    """

    # check file extension
    fname = checkfname(fname, "tiff")

    # check number of images in data
    ndim = data.ndim
    if ndim >= 3 and transpose3:
        # transpose data to make 3rd dimension first
        data = np.transpose(data, (2, 0, 1))

    # order of dimensions is now TZCYXS
    dimAll = "tzcyxs"
    N = [1, 1, 1, 1, 1, 1]
    d = 0
    Ishape = np.shape(data)
    for i in range(6):
        if dimAll[i] in dim:
            N[i] = Ishape[d]
            d += 1

    data.shape = N  # dimensions in TZCYXS order
    data = data.astype('int16')
    imwrite(fname,
            data,
            imagej=True,
            resolution=(1. / pxsize, 1. / pxsize),
            metadata={'unit': 'um'})

    # add every image to same tiff file
    #imsave(fname, data)

    print("Done.")
Example #8
 def save_data(self,
               data,
               Dataset='Dataset',
               Position='Position',
               Hybe='Hybe',
               Channel='Channel',
               Zindex='Zindex',
               Type='Type'):
     if Type in ['image', 'stack']:
         File_Name = str(Dataset) + '_' + str(Position) + '_' + str(
             Hybe) + '_' + str(Channel) + '_' + str(Zindex) + '_' + str(
                 Type) + '.tif'
         tifffile.imwrite(os.path.join(self.utilities_path, File_Name),
                          data=data)
     elif Type in ['flag', 'log']:
         File_Name = str(Dataset) + '_' + str(Position) + '_' + str(
             Hybe) + '_' + str(Channel) + '_' + str(Zindex) + '_' + str(
                 Type) + '.csv'
         fname = os.path.join(self.utilities_path, File_Name)
         with open(fname, "w+") as f:
             f.write(str(data))
     else:
         File_Name = str(Dataset) + '_' + str(Position) + '_' + str(
             Hybe) + '_' + str(Channel) + '_' + str(Zindex) + '_' + str(
                 Type) + '.pkl'
         pickle.dump(
             data, open(os.path.join(self.utilities_path, File_Name), 'wb'))
Example #9
def main(folder):
    """
    converts arbitrarily formatted tif files to 8 bits
    """
    try:
        import tifffile as tif
    except ImportError:
        print('[ERROR] no module named tifffile found, please install it')
        return

    for file in listdir(folder):
        image = f'{folder}\\{file}'

        # check that the file is a tif
        if not image.lower().endswith(('.tif', '.tiff')):
            print(f"[Warning] {image} is not a tif.")
            continue

        # load and convert image
        imageArray = tif.imread(image)
        converted = np.around(imageArray).astype('int8')

        # getting the date
        date = datetime.now()
        year, month, day = date.year, date.month, date.day

        # getting the new name (strip the extension)
        name = image.rsplit('.', 1)[0]

        new_name = f'{name}_{month:02}{day:02}{year}.tif'
        tif.imwrite(new_name, converted)

        print(f"[INFO] {file}: conversion to 8bits was successful")
Example #10
    def run(self):
        fname_base = Path(self.args['output_dir'])
        fnames = []
        for i in range(10):
            data = np.ones((100, 32, 32), 'int16') * i
            fnames.append(fname_base / f"my_tiff_{i}.tif")
            tifffile.imwrite(fnames[-1], data)

        ops_path = Path(self.args['output_dir']) / "ops.npy"
        ops_keys = [
            "Lx", "Ly", "nframes", "xrange", "yrange", "xoff", "yoff",
            "corrXY", "meanImg"
        ]

        if self.mock_ops_data is None:
            ops_dict = {k: 0 for k in ops_keys}
        else:
            ops_dict = self.mock_ops_data
        self.logger.info(f"Saving ops_dict with: {ops_dict}")

        np.save(ops_path, ops_dict)
        outj = {
            'output_files': {
                'ops.npy': [str(ops_path)],
                '*.tif': [str(i) for i in fnames]
            }
        }
        self.output(outj)
Example #11
def save_data(path_to_final, final, header=None, final_image_type=None, compress=True):
    if final_image_type is None:
        final_image_type = os.path.splitext(path_to_final)[1]
        if final_image_type == '.gz':
            final_image_type = '.nii.gz'
    if final_image_type == '.am':
        np_to_amira(path_to_final, [final], header)
    elif final_image_type in ['.hdr', '.mhd', '.mha', '.nrrd', '.nii', '.nii.gz']:
        final = np.swapaxes(final, 0, 2)
        save(final, path_to_final, header)
    elif final_image_type == '.zip':
        header, file_names, final_dtype = header[0], header[1], header[2]
        final = final.astype(final_dtype)
        final = np.swapaxes(final, 2, 1)
        filename, _ = os.path.splitext(path_to_final)
        os.makedirs(filename)
        os.chmod(filename, 0o777)
        for k, file in enumerate(file_names):
            save(final[k], filename + '/' + os.path.basename(file), header[k])
        with zipfile.ZipFile(path_to_final, 'w') as zip:
            for file in file_names:
                zip.write(filename + '/' + os.path.basename(file), os.path.basename(file))
    else:
        # use BigTIFF for images larger than ~2 GB
        image_size_mb = final.nbytes * 1e-6
        bigtiff = image_size_mb > 2000
        try:
            compress = 'zlib' if compress else None
            imwrite(path_to_final, final, bigtiff=bigtiff, compression=compress)
        except TypeError:
            # older tifffile releases used an integer 'compress' level instead
            compress = 6 if compress else 0
            imwrite(path_to_final, final, bigtiff=bigtiff, compress=compress)
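save_data keys its behaviour off the file extension, and the '.gz' special case exists because os.path.splitext only strips the last suffix. For illustration (the file names are made up):

import os

print(os.path.splitext('brain.nii.gz'))  # ('brain.nii', '.gz') -> promoted to '.nii.gz'
print(os.path.splitext('cells.tif'))     # ('cells', '.tif')    -> falls through to imwrite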
Example #12
def make_stacks(input_image_directory, file_list, input_ranges):
    # Getting the short name of the directory (should usually be a date)
    short_dir_name = os.path.basename(os.path.normpath(input_image_directory))

    # Making a directory for the stack output inside of the input directory
    directory_path = input_image_directory + short_dir_name + '_stacks/'
    try:
        os.mkdir(directory_path)
    except OSError:
        print("...stacks directory exists")

    for x in input_ranges:
        trial_name = x[0]
        trial_range = x[1]

        print("\n...building stack: " + trial_name)
        files_in_trial = file_list[trial_range]
        print("...images in stack:\n" + "\n".join(files_in_trial))

        # Reading in the images
        trial_images = tiff.imread(files_in_trial)

        # Writing out the images in a stack
        tiff.imwrite(directory_path + short_dir_name + '_' + trial_name + ".tif",
                     trial_images)
        print("...stack write complete\n")
Example #13
 def transform_3D(self):
     root_path = '3D/test'
     root_save_path = 'valDataset/3D'
     file_path = os.listdir(root_path)[5:10]
     function = [
         lambda x: self.Rotation(x), lambda x, rows=None, cols=None: self.
         Shift(x, rows=rows, cols=cols), lambda x: self.Tilt(x),
         lambda x, rows=None, cols=None: self.Stain(
             x, rows=rows, cols=cols), lambda x: self.Contrast(x)
     ]
     class_name = ["Rotation", "Shift", "Tilt", "Stain", "Contrast"]
     for i in range(len(file_path)):
         f = function[i % len(function)]
         name = class_name[i % len(class_name)]
         file_name = file_path[i]
         path = os.path.join(root_path, file_name)
         scalar = tifffile.imread(path)
         # print(scalar.dtype)
         # scalar = (scalar * 255).astype(np.uint8)
         os.makedirs(os.path.join(root_save_path, name), exist_ok=True)
         scalars, params = f(scalar)
         for i in range(len(scalars)):
             image_aug = scalars[i]
             f_name = "{}_{}{}.tif".format(file_name[:-4], name, params[i])
             save_path = os.path.join(root_save_path, name, f_name)
             tifffile.imwrite(save_path, image_aug)
Example #14
def Label_counter(filesRaw, ProbabilityThreshold, Resultdir, min_size=10):

    AllCount = []
    AllName = []
    for fname in filesRaw:
        Name = os.path.basename(os.path.splitext(fname)[0])
        TwoChannel = imread(fname)
        SpotChannel = TwoChannel[:, 0, :, :]
        Binary = SpotChannel > ProbabilityThreshold
        Binary = remove_small_objects(Binary, min_size=min_size)
        Integer = label(Binary)
        waterproperties = measure.regionprops(Integer, Integer)
        labels = []
        for prop in waterproperties:
            if prop.label > 0:

                labels.append(prop.label)
        count = len(labels)
        imwrite(Resultdir + Name + '.tif', Integer.astype('uint16'))
        AllName.append(Name)
        AllCount.append(count)

    df = pd.DataFrame(list(zip(AllCount)), index=AllName, columns=['Count'])

    df.to_csv(Resultdir + '/' + 'CountMasks' + '.csv')
Example #15
def exportTiff(normalizedArray, file):
    try:
        import tifffile as tiff  # optional dependency
        tiff.imwrite(file, normalizedArray)
    except ImportError:
        print('tifffile required for tiff export')
        raise
Example #16
def inferdir(dirpath,
             modelfile,
             savedir,
             globpattern='*.tif',
             device='cuda',
             size=(400, 400),
             axis=(-2, -1),
             probability=0.9):

    if not dirpath.endswith('/'):
        dirpath += '/'
    if not savedir.endswith('/'):
        savedir += '/'
    tm = train.get_model(2)
    tm.load_state_dict(torch.load(modelfile))
    files = sorted(glob.glob(dirpath + globpattern))

    cnn = predict(tm, size=size, probability=probability)
    for f in files:
        x = tifffile.imread(f)
        r, bx = cnn(x)
        bn = os.path.basename(f)
        bn = bn[:-3]
        rfile = savedir + "inferred_" + bn + "tif"
        tifffile.imwrite(rfile, r.astype(np.float32))
        bxname = savedir + "inferred_" + bn + "pkl"
        with open(bxname, 'wb') as p:
            pickle.dump(bx, p)
Example #17
def save_registered_stack(imstack,fout,crop=True):
    """
    Saves imstack.registered_stack as bigtiff, appending to fout using tifffile
    Inputs:
        fout    str     path to output filename.
                        Appends to existing file if present
    """

    if splitext(fout)[1] == '.tif':
        filepath = fout
    else:
        filepath = fout + '.tif'
    '''if isfile(filepath):
        raise IOError('File already exists') 
    with tifffile.TiffWriter(filepath, bigtiff=False, append=True) as tif:
        for img_slice in range(0,np.size(imstack.stack_registered,2)):
            if crop:
                tif.save(np.float32(imstack.stack_registered[imstack.xmin:imstack.xmax,imstack.ymin:imstack.ymax,img_slice])) 
            else:
                tif.save(np.float32(imstack.stack_registered[:,:,img_slice]))
    '''
    if crop:
        cropped = imstack.stack_registered[imstack.xmin:imstack.xmax,
                                           imstack.ymin:imstack.ymax, :]
        tifffile.imwrite(filepath, np.float32(np.rollaxis(cropped, 2, 0)))
    else:
        tifffile.imwrite(filepath,
                         np.float32(np.rollaxis(imstack.stack_registered, 2, 0)))
    return
Example #18
def write_ometiff(output_path, array, omexml_string=None, compression=None):
    """
    Write the given 5D array as an ome.tiff

    Parameters
    ----------
    output_path : str
        Path where to save the ome.tiff
    array : np.ndarray
        5D array containing ome.tiff data, order should be TZCYX
    omexml_string : Optional[encoded xml]
        encoded XML Metadata, will be generated if not provided.
    compression : str
        possible values listed here: 
        https://github.com/cgohlke/tifffile/blob/f55fc8a49c2ad30697a6b1760d5a325533574ad8/tifffile/tifffile.py#L12131 
    """
    if omexml_string is None:
        omexml_string = gen_xml(array)

    if sys.version_info < (3, 7):
        tifffile.imwrite(output_path,
                         array,
                         photometric="minisblack",
                         description=omexml_string,
                         metadata=None,
                         compress=compression)
    else:
        tifffile.imwrite(output_path,
                         array,
                         photometric="minisblack",
                         description=omexml_string,
                         metadata=None,
                         compression=compression)
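If only basic OME metadata is needed, newer tifffile releases can generate the OME-XML themselves when the file name ends in .ome.tif and the axes are declared, so the explicit description route above is mainly for custom XML. A minimal sketch; shape and file name are assumptions:

import numpy as np
import tifffile

array = np.zeros((2, 3, 4, 64, 64), dtype=np.uint16)  # TZCYX, as in write_ometiff
tifffile.imwrite('minimal.ome.tif',
                 array,
                 photometric='minisblack',
                 metadata={'axes': 'TZCYX'})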
Example #19
 def save_slice(viewer):
     t, z = int(viewer.dims.point[0]), int(viewer.dims.point[1])
     # Save the human-annotated labels from the current slice
     filename = human_labels_dir / ('t%06i_z%06i.tif' % (t, z))
     x = data_with_labels[t, z, :-2, :, :]
     print("Saving", x.shape, x.dtype, "as", filename)
     ranges = []
     for ch in x:  # Autoscale each channel
         ranges.extend((ch.min(), ch.max()))
     imwrite(filename, x, imagej=True, ijmetadata={'Ranges': tuple(ranges)})
     # Also force the random-forest annotated labels to agree with
     # the human labels, and resave them to disk.
     filename = rf_labels_dir / ('t%06i_z%06i.tif' % (t, z))
     # Agreement on screen
     human_labels = data_with_labels[t, z, -3, :, :]
     rf_labels = data_with_labels[t, z, -2, :, :]
     overruled = (human_labels != 0)
     rf_labels[overruled] = human_labels[overruled]
     viewer.layers['Rand. forest labels'].refresh()
     # Agreement on disk
     data_and_rf_slices = [*range(data_with_labels.shape[2] - 3), -2]
     x = data_with_labels[t, z, data_and_rf_slices, :, :]
     print("Saving", x.shape, x.dtype, "as", filename)
     ranges = []
     for ch in x:  # Autoscale each channel
         ranges.extend((ch.min(), ch.max()))
     imwrite(filename, x, imagej=True, ijmetadata={'Ranges': tuple(ranges)})
Example #20
def raw_to_array(filename, downscale=False):
    """Convert file from 16bit little endian .raw to .tiff
    filename - name of file t be converted
    downscale - if color depth shoud be downscaled to 8 bit"""

    with open(filename, 'rb') as file_:
        file_.seek(15, os.SEEK_SET)
        arr = np.fromfile(file_, dtype=np.dtype('<u2'))
        arr = arr.reshape([960, 1278])
        arr = np.fliplr(arr)
        arr = np.flipud(arr)
        if downscale:
            arr = arr / (arr.max() / (2**8 - 1))
            # arr[arr>=2**8] = 2**8-1
            arr = arr.astype(dtype=np.dtype('u1'))
            imwrite(filename[0:-4] + '_8b_.tif', arr)
            return
        else:
            imwrite(filename[0:-4] + '_16b_.tif', arr)
Example #21
 def process_queue(self):
     while True:
         # stop the thread if stop signal is received
         if self.stop_signal_received:
             return
         # process the queue
         if not self.queue.empty():
             try:
                 [image, frame_ID] = self.queue.get(timeout=0.1)
                 self.image_lock.acquire(True)
                 folder_ID = int(self.counter /
                                 self.max_num_image_per_folder)
                 file_ID = int(self.counter % self.max_num_image_per_folder)
                 # create a new folder
                 if file_ID == 0:
                     os.mkdir(
                         os.path.join(self.base_path, self.experiment_ID,
                                      str(folder_ID)))
                 saving_path = os.path.join(
                     self.base_path, self.experiment_ID, str(folder_ID),
                     str(file_ID) + '_' + str(frame_ID) + '.tif')
                 tif.imwrite(saving_path, image, imagej=True)
                 # cv2.imwrite(saving_path,image)
                 self.counter = self.counter + 1
                 self.queue.task_done()
                 self.image_lock.release()
             except Exception:
                 print('error occurred during processing image saving')
         else:
             time.sleep(0.001)
Example #22
def main():
    flat_filenames = [
        'F:/pixinsight_learning/orion_135mm/flats/flats1_gray.tif',
        'F:/pixinsight_learning/orion_135mm/flats/flats2_gray.tif',
        'F:/pixinsight_learning/orion_135mm/flats/flats3_gray.tif',
        'F:/pixinsight_learning/orion_135mm/flats/flats4_gray.tif',
    ]

    output_flat_filename = 'F:/pixinsight_learning/orion_135mm/flats/img_calibrated_flat.tif'

    test_img_filename = [
        'F:/pixinsight_learning/orion_135mm/test_images/DSC03694.tif'
    ]

    flat_images = [load_gray_tiff(fn) for fn in flat_filenames]
    test_img = load_gray_tiff(test_img_filename[0])

    flat_images_rgb = np.array(
        [extract_channel_image(img) for img in flat_images])

    test_img_rgb = extract_channel_image(test_img)

    calibrated_flat_img_rgb, exposure_index = get_exposure_matched_flat(
        flat_images_rgb, test_img_rgb)

    calibrated_flat_img = flatten_channel_image(calibrated_flat_img_rgb)

    # plt.imshow(calibrated_flat_img)
    # plt.show()

    tiff.imwrite(output_flat_filename, calibrated_flat_img)
Example #23
def experimental_images(file_path, train_size=[256, 256], intensity_threshold=1000,
                        timesteps=11, timeinterval=[1, 3], overlapratio=0.5,
                        normalization=True, offset=100, pad=None):
    """
    file_path:      the directory where experimental images stored
    train_size:     experimental images are all acquired by sCMOS camera (2x2 bin); cut into small ROIs
    intensity_threshold: a minimal average intensity requirement for accepting the ROI in the training dataset, make sure ROI is not mostly background
    timesteps:      length of timelapse movie
    timeinterval:   time interval in frame number, augmentation to include the effect of movement with different scale
    overlapratio:   the overlap ratio between ROIs, default 50%
    normalization:  normalize each ROI image to the range of [0,1]
    offset:         constant camera offset subtracted from the raw movie before ROI extraction
    pad:            pad mode for the starting and ending frames ['same','zero', None]
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError

    allROIs = []
    allsignals = []
    index = 0
    for filename in os.listdir(file_path):
        if os.path.splitext(filename)[1] == '.tif': #read tif files
            movie = tiff.imread(file_path + filename)
            ROIs, signals = extract_ROIs(movie - offset, train_size, intensity_threshold, timesteps, timeinterval, overlapratio, normalization)
            allROIs = allROIs + ROIs
            allsignals = allsignals + signals
            if False:
                for i in range(len(ROIs)):
                    tiff.imwrite(file_path + filename + 'ROI_{}.tif'.format(index), ROIs[i], dtype='float32')
                    index = index + 1
    np.savez(file_path + 'highSNR', highSNR = allROIs, signal = allsignals)
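A hypothetical call to the routine above; the directory and parameter values are placeholders, and note that file_path is concatenated directly with the file names, so it needs a trailing separator:

experimental_images('movies/',
                    train_size=[256, 256],
                    intensity_threshold=1000,
                    timesteps=11,
                    timeinterval=[1, 3],
                    overlapratio=0.5,
                    normalization=True,
                    offset=100)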
Example #24
def save_split_files(movie_path, loc_path, split_movies, split_locs, frame_idx):
    # Split movie & localization file
    save_dirs_locs = [os.path.splitext(loc_path)[0] + "_A.csv",
                      os.path.splitext(loc_path)[0] + "_B.csv"]
    save_dirs_movies = [os.path.splitext(movie_path)[0] + "_A.tif",
                        os.path.splitext(movie_path)[0] + "_B.tif"]

    deep_storm_header = ['frame', 'x [nm]', 'y [nm]', 'Photon #', 'Sigma [nm]']
    picasso_header = ['frame', 'x [nm]', 'y [nm]', 'sigma [nm]', 'intensity [photon]', 'offset [photon]',
                      'bkgstd [photon]', 'uncertainty_xy [nm]']
    picasso_header_str = ['"frame"', '"x [nm]"', '"y [nm]"', '"sigma [nm]"', '"intensity [photon]"',
                          '"offset [photon]"', '"bkgstd [photon]"', '"uncertainty_xy [nm]"']

    if list(split_locs[0].columns) == deep_storm_header:
        header = deep_storm_header
        for file in split_locs:
            file.index = [*range(1, len(file)+1)]
    elif list(split_locs[0].columns) == picasso_header:
        header = picasso_header_str
        for file in split_locs:
            file.index = [*range(len(file))]
            file.index.name = '"id"'

    for file, save_path in zip(split_locs, save_dirs_locs):
        file.to_csv(save_path, header=header, quoting=csv.QUOTE_NONE)
        print("File saved at " + save_path)
    for file, save_path in zip(split_movies, save_dirs_movies):
        file = file.astype("int16")
        tifffile.imwrite(save_path, file)
        print("File saved at " + save_path)
Example #25
def tiff_files(request):
    N_rows = 13
    N_cols = 27
    N_points = 7
    fpp = request.param
    f_dir = TemporaryDirectory()
    template = "%s/%s_%05d.tiff"
    fname = f"adh_{fpp}_test"
    file_index = count()

    print(f_dir.name)
    for pt in range(N_points):
        for _ in range(fpp):
            write_fname = template % (f_dir.name, fname, next(file_index))
            tifffile.imwrite(write_fname, np.ones((N_rows, N_cols)) * pt)
            print(write_fname)

    def finalize():
        f_dir.cleanup()

    print("*")
    for f in Path(f_dir.name).rglob("*.tiff"):
        print(f)
    print("*")
    request.addfinalizer(finalize)

    return (
        (f_dir.name,
         dict(template=template, frame_per_point=fpp, filename=fname)),
        (N_rows, N_cols, N_points, fpp),
    )
Example #26
def save_output(output, img_path):
    guess = F.softmax(output['out'].cpu().data, dim=1).numpy().astype('float32')
    imwrite(img_path,
            guess,
            photometric='MINISBLACK',
            imagej=True,
            ijmetadata={'Ranges': (0, 1) * guess.shape[1]})
Example #27
def save_image(dest, img):
    _logger.info("[SAVE] '{}'".format(dest))

    if str(dest).endswith(".tif"):
        tifffile.imwrite(dest, img)
    else:
        PIL.Image.fromarray(img).save(dest)
Example #28
File: io.py Project: EoinUL/TEMUL
def save_individual_images_from_image_stack(image_stack, output_folder='individual_images'):
    '''
    Save each image in an image stack. The images are saved in a new folder.

    Parameters
    ----------

    image_stack : rigid registration image stack object

    output_folder : string
        Name of the folder in which all individual images from 
        the stack will be saved.

    Returns
    -------

    n/a

    Examples
    --------

    '''

    # Save each image as a 32 bit tiff (can be displayed in DM)
    image_stack_32bit = np.float32(image_stack)
    folder = './' + output_folder + '/'
    create_new_folder(folder)
    i = 0
    delta = 1
    # Find the number of images, change to an integer for the loop.
    while i < int(image_stack_32bit[0, 0, :].shape[0]):
        im = image_stack_32bit[:, :, i]
        i_filled = str(i).zfill(4)
        imwrite(folder + 'images_aligned_%s.tif' % i_filled, im)
        i = i+delta
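Hypothetical usage of the function above, with a small random stack ordered (y, x, image index) as the slicing in the loop expects; create_new_folder must be available in the calling module:

import numpy as np

stack = np.random.rand(128, 128, 10)  # y, x, image index
save_individual_images_from_image_stack(stack, output_folder='individual_images')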
Example #29
def run(original_image_path):

    outputPathList = []

    # Read original image
    ihc_rgb = io.imread(original_image_path)

    # Process
    ihc_hed = rgb2hed(ihc_rgb)
    ihc_h = exposure.rescale_intensity(ihc_hed[:, :, 0], out_range=(0, 255))
    ihc_e = exposure.rescale_intensity(ihc_hed[:, :, 1], out_range=(0, 255))
    ihc_d = exposure.rescale_intensity(ihc_hed[:, :, 2], out_range=(0, 255))

    images = [ihc_h, ihc_e, ihc_d]
    outnames = ['Hematoxylin.tiff', 'Eosin.tiff', 'DAB.tiff']

    for img, out in zip(images, outnames):

        outputPathList.append(out)
        tf.imwrite(out, np.int16(img))

    tf.imwrite('HED.tiff', np.float16(ihc_hed))
    outputPathList.append('HED.tiff')

    print(outputPathList)

    # Return apeer output values as dictionary
    return {'processed_images': outputPathList}
Example #30
def write_frame(t, img_ubyte):
    """Write frame to set of TIFF files"""

    for z in range(img_ubyte.shape[2]):

        fpath = f'{OUT_DIR}/image_t{t+1:04d}_z{z+1:04d}.tif'
        tifffile.imwrite(fpath, img_ubyte[:, :, z])
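A hypothetical driver for write_frame; OUT_DIR and the volume shape are assumptions, with z on the last axis as the loop above expects:

import os

import numpy as np

OUT_DIR = 'frames'  # assumed module-level output directory used by write_frame
os.makedirs(OUT_DIR, exist_ok=True)
volume = (np.random.rand(128, 128, 8) * 255).astype(np.uint8)  # y, x, z
for t in range(3):
    write_frame(t, volume)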