Example #1
    def __get_stacks(self, index):
        centre_z = self.batches[index][0].z

        half_cube_depth = self.num_planes_needed_for_cube // 2
        min_plane = centre_z - half_cube_depth

        if is_even(self.num_planes_needed_for_cube):
            # WARNING: not centered because even
            max_plane = centre_z + half_cube_depth
        else:
            # centered
            max_plane = centre_z + half_cube_depth + 1

        signal_stack = np.empty((
            self.num_planes_needed_for_cube,
            self.image_height,
            self.image_width,
        ))
        background_stack = np.empty_like(signal_stack)
        for plane, plane_path in enumerate(
                self.signal_planes[min_plane:max_plane]):
            signal_stack[plane] = tifffile.imread(plane_path)

        for plane, plane_path in enumerate(
                self.background_planes[min_plane:max_plane]):
            background_stack[plane] = tifffile.imread(plane_path)

        return signal_stack, background_stack
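A quick numeric check of the slice bounds above (a minimal sketch, assuming `is_even` is the usual parity helper):

def is_even(n):
    return n % 2 == 0

for num_planes, centre_z in [(4, 10), (5, 10)]:
    half = num_planes // 2
    min_plane = centre_z - half
    max_plane = centre_z + half if is_even(num_planes) else centre_z + half + 1
    # 4 planes -> [8, 9, 10, 11]  (centre 10 sits just right of the middle)
    # 5 planes -> [8, 9, 10, 11, 12]  (centre 10 exactly in the middle)
    print(num_planes, list(range(min_plane, max_plane)))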
Example #2
def test_cube_extraction(tmpdir, depth=20):
    tmpdir = str(tmpdir)
    args = CubeExtractArgs(tmpdir)
    extract_cubes.main(args)

    validation_cubes = load_cubes_in_dir(validate_cubes_dir)
    test_cubes = load_cubes_in_dir(tmpdir)

    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes[idx] == test_cube).all()

    system.delete_directory_contents(tmpdir)

    # test cube scaling
    args.x_pixel_um = 2
    args.y_pixel_um = 2
    args.z_pixel_um = 7.25

    extract_cubes.main(args)

    validation_cubes_scale = load_cubes_in_dir(validate_cubes_scale_dir)
    test_cubes = load_cubes_in_dir(tmpdir)
    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes_scale[idx] == test_cube).all()

    #  test edge of data errors
    cell = Cell("x0y0z10", 2)
    plane_paths = os.listdir(signal_data_dir[0])
    first_plane = tifffile.imread(
        os.path.join(signal_data_dir[0], plane_paths[0]))
    stack_shape = first_plane.shape + (depth, )
    stacks = {}
    stacks[0] = np.zeros(stack_shape, dtype=np.uint16)
    stacks[0][:, :, 0] = first_plane

    for plane in range(1, depth):
        im_path = os.path.join(signal_data_dir[0], plane_paths[plane])
        stacks[0][:, :, plane] = tifffile.imread(im_path)

    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    cell = Cell("x2500y2500z10", 2)
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for a specific cube
    stacks[0] = stacks[0][:, :, 1:]
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for any cube to be extracted at all.
    system.delete_directory_contents(tmpdir)
    args.z_pixel_um = 0.1

    with pytest.raises(extract_cubes.StackSizeError):
        extract_cubes.main(args)
Example #3
    def __read_local(self, tile):
        """Read a locally cached GeoTIFF altitude map"""

        local_file = self.root_path + tile + '.tif'
        if self.verbose > 0:
            print("Reading local tile", tile)
        if self.verbose > 1:
            print(" from", local_file)

        try:
            tif = tiff.imread(local_file)
            # print(tif.shape)

        except ValueError as e:
            print('Local file error {0}'.format(e))
            return False

        except Exception:
            # File was not found or could not be read
            print('Unexpected error while reading local file!')
            return False

        # File loaded successfully, add to cache
        # Y-axis needs to be inverted so that values increase to north
        self.cache[tile] = numpy.flip(tif, axis=0)

        return True
Example #4
def load(path: PathLike, force_rgb=False):
    """
    Loads an image from the supplied path in grayscale or RGB depending on the
    source. If the source is RGB and has redundant channels, the image will be
    converted to grayscale. If force_rgb is True, the image will be returned
    with 3 channels. If the image has only one channel, then all channels will
    be identical.
    """
    if PurePath(path).suffix.casefold() in (".tif", ".tiff"):
        image = tf.imread(path)
    else:
        image = Image.open(str(path))
        image = np.array(image)

    image = _add_channel_dim(image)

    # convert redundant rgb to grayscale
    if _is_color(image):
        g_redundant = (image[..., 0] == image[..., 1]).all()
        b_redundant = (image[..., 0] == image[..., 2]).all()
        if (g_redundant and b_redundant) and not force_rgb:
            image = image[..., 0]
            image = image[..., np.newaxis]

    if _is_gray(image) and force_rgb:
        image = np.repeat(image, 3, axis=2)

    assert image.ndim == 3
    assert _is_gray(image) or _is_color(image)
    return image
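To illustrate the redundant-channel collapse in `load`, here is a hedged sketch on a synthetic array (no file I/O involved):

import numpy as np

gray = np.arange(12, dtype=np.uint8).reshape(3, 4)
rgb = np.stack([gray, gray, gray], axis=-1)  # (3, 4, 3) with identical channels

g_redundant = (rgb[..., 0] == rgb[..., 1]).all()
b_redundant = (rgb[..., 0] == rgb[..., 2]).all()
assert g_redundant and b_redundant
collapsed = rgb[..., 0][..., np.newaxis]  # back to one channel: (3, 4, 1)
assert (np.repeat(collapsed, 3, axis=2) == rgb).all()  # force_rgb round-trips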
Example #5
def process_series(_experiment, _series_id, _overwrite=False):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _cells_coordinates = load.cell_coordinates_tracked_series_file_data(
        _experiment, _series_id)
    _series_image_by_time_frames = [
        np.array([
            _z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]
        ]) for _time_frame in range(_series_image.shape[0])
    ]

    _tuples = load.experiment_groups_as_tuples(_experiment)
    _tuples = organize.by_experiment(_tuples)[_experiment]
    _tuples = filtering.by_real_pairs(_tuples)
    _tuples = filtering.by_real_fake_pairs(_tuples, _real_fake_pairs=False)
    _tuples = filtering.by_series_id(_tuples, _series_id)

    for _tuple in _tuples:
        _experiment, _series_id, _group = _tuple
        _cell_1_id, _cell_2_id = [
            int(_value) for _value in _group.split('_')[1:]
        ]
        process_group(
            _experiment=_experiment,
            _series_id=_series_id,
            _cells_coordinates=_cells_coordinates,
            _cell_1_id=_cell_1_id,
            _cell_2_id=_cell_2_id,
            _series_image_by_time_frames=_series_image_by_time_frames,
            _resolutions=_image_properties['resolutions'],
            _image_properties=_image_properties,
            _overwrite=_overwrite)
Example #6
def setup(
    first_img_path,
    soma_diameter,
    ball_xy_size,
    ball_z_size,
    ball_overlap_fraction=0.6,
    z_offset=0,
):
    plane = tifffile.imread(first_img_path)
    plane = plane.T

    max_value = get_max_value(plane)
    clipping_value = max_value - 2
    thrsh_val = max_value - 1
    soma_centre_val = max_value

    tile_width = soma_diameter * 2
    layer_width, layer_height = plane.shape

    ball_filter = BallFilter(
        layer_width,
        layer_height,
        ball_xy_size,
        ball_z_size,
        overlap_fraction=ball_overlap_fraction,
        tile_step_width=tile_width,
        tile_step_height=tile_width,
        threshold_value=thrsh_val,
        soma_centre_value=soma_centre_val,
    )
    start_z = z_offset + int(math.floor(ball_z_size / 2))
    cell_detector = CellDetector(layer_width, layer_height, start_z=start_z)

    return clipping_value, thrsh_val, ball_filter, cell_detector
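Example #7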
def process_fake_following(_experiment,
                           _series_id,
                           _cell_1_id,
                           _cell_2_id,
                           _x_change,
                           _y_change,
                           _z_change=0,
                           _overwrite=False):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _cells_coordinates = load.cell_coordinates_tracked_series_file_data(
        _experiment, _series_id)
    _series_image_by_time_frames = [
        np.array([
            _z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]
        ]) for _time_frame in range(_series_image.shape[0])
    ]
    process_group(_experiment=_experiment,
                  _series_id=_series_id,
                  _cells_coordinates=_cells_coordinates,
                  _cell_1_id=_cell_1_id,
                  _cell_2_id=_cell_2_id,
                  _series_image_by_time_frames=_series_image_by_time_frames,
                  _resolutions=_image_properties['resolutions'],
                  _real_cells=False,
                  _x_change=_x_change,
                  _y_change=_y_change,
                  _z_change=_z_change,
                  _overwrite=_overwrite)
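Example #8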
def process_fake_static(_experiment,
                        _series_id,
                        _cell_1_id,
                        _cell_2_id,
                        _x1,
                        _y1,
                        _z1,
                        _x2,
                        _y2,
                        _z2,
                        _overwrite=False):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _cells_coordinates = [[
        (_x1, _y1, _z1) for _time_frame in range(_series_image.shape[0])
    ], [(_x2, _y2, _z2) for _time_frame in range(_series_image.shape[0])]]
    _series_image_by_time_frames = [
        np.array([
            _z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]
        ]) for _time_frame in range(_series_image.shape[0])
    ]
    process_group(_experiment=_experiment,
                  _series_id=_series_id,
                  _cells_coordinates=_cells_coordinates,
                  _cell_1_id=0,
                  _cell_2_id=1,
                  _series_image_by_time_frames=_series_image_by_time_frames,
                  _resolutions=_image_properties['resolutions'],
                  _real_cells=False,
                  _fake_cell_1_id=_cell_1_id,
                  _fake_cell_2_id=_cell_2_id,
                  _overwrite=_overwrite)
Example #9
def read_image(filepath_or_buffer: typing.Union[str, io.BytesIO]):
    """Read a file into an image object
    Args:
        filepath_or_buffer: The path to the file, a URL, or any object
            with a `read` method (such as `io.BytesIO`)
    """
    import cv2
    import tifffile.tifffile
    import validators

    if isinstance(filepath_or_buffer, np.ndarray):
        return filepath_or_buffer
    if hasattr(filepath_or_buffer, 'read'):
        image = np.asarray(bytearray(filepath_or_buffer.read()),
                           dtype=np.uint8)
        image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)
    elif isinstance(filepath_or_buffer, str):
        if validators.url(filepath_or_buffer):
            return read_image(urllib.request.urlopen(filepath_or_buffer))
        assert os.path.isfile(filepath_or_buffer), \
            'Could not find image at path: ' + filepath_or_buffer
        if filepath_or_buffer.endswith(('.tif', '.tiff')):
            image = tifffile.imread(filepath_or_buffer)
        else:
            image = cv2.imread(filepath_or_buffer)

    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
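A usage sketch for the buffer branch of `read_image` (assumes OpenCV is installed; the PNG bytes are generated in memory rather than read from disk):

import io
import cv2
import numpy as np

ok, encoded = cv2.imencode('.png', np.zeros((4, 4, 3), dtype=np.uint8))
assert ok
image = read_image(io.BytesIO(encoded.tobytes()))
print(image.shape)  # (4, 4, 3), RGB order after the final cvtColor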
Example #10
def main():
    parser = argparse.ArgumentParser(description=_description)
    parser.add_argument('--output', '-o', metavar='normalized.tif',
                        default='normalized.tif', help='Output filename')
    parser.add_argument(
        '--subtract', action='store_true',
        help='Subtract background instead of dividing and truncating')
    parser.add_argument('infile')
    args = parser.parse_args()

    infile = args.infile
    print('Reading image stack')
    t = tf.TiffFile(infile)
    ar = tf.stack_pages(t.pages)
    n = ar.shape[0]

    percentile = 0.01 if args.subtract else 0.05

    if os.path.exists('background.tif'):
        print('Reading background image')
        bg = tf.imread('background.tif')
    else:
        print('Computing background image')
        sorted_ar = ar.copy()
        sorted_ar.sort(0)
        bg = sorted_ar[int(round(percentile*n, 0))]
        print('Saving background image')
        tf.imsave('background.tif', bg)
        del sorted_ar

    print('Performing background normalization')
    if not args.subtract:
        ar = ar.astype(np.double)
        for i in range(n):
            ar[i] /= bg

        print('Converting to 16-bit TIFF')
        max_normed = (4095.0 / bg.min()) - 1
        ar -= 1
        ar *= 65535
        ar /= max_normed
        ar = ar.round()
    else:
        ar = ar.astype(np.int16)
        for i in range(n):
            ar[i] -= bg
    ar[ar < 0] = 0
    ar = ar.astype(np.uint16)

    print('Writing normalized image')
    with tf.TiffWriter(args.output) as out:
        for i in range(n):
            if (i % 100) == 0:
                print(i, end=' ')
                sys.stdout.flush()
            out.save(ar[i])
    print()
Example #11
def setup_tile_filtering(first_img_path):
    plane = tifffile.imread(first_img_path)
    plane = plane.T

    max_value = get_max_value(plane)
    clipping_value = max_value - 2
    thrsh_val = max_value - 1

    return clipping_value, thrsh_val
Example #12
def convert_tiff_tiling(input_filename, description):
    tile_size = (256, 256)
    image = tifffile.imread(input_filename)[:, :, 0:3]
    path_ext = os.path.splitext(input_filename)
    output_filename = path_ext[0] + '.jpeg' + path_ext[1]
    with tifffile.TiffWriter(output_filename) as tiff:
        tiff.save(image,
                  tile=tile_size,
                  compression='JPEG',
                  description=description)
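Example #13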
    def for_each_file(f):
    # for f in tqdm(glob.glob('../Data/LongTermStudy/Orthomosaics/Georeferenced/' + '*.tif')):

        ##Getting filenames
        fullname = os.path.split(f)[1]
        filename = os.path.splitext(fullname)[0]

        ##Getting Day of Year
        find_date = re.compile(r"_([0-9]+-[0-9]+-[0-9]+)_")
        date = find_date.search(filename).group(1)
        doy = datetime.strptime(date, '%d-%m-%y').timetuple().tm_yday

        ##Reading 16 bit ITC tiff file and bgr image
        treecrowns = tiff.imread("../Data/LongTermStudy/Templates/NewGeoref/" + filename + "_ITC.tif")
        img_bgr = cv2.imread(f)
        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

        ##Extract tree crowns
        print("Extracting Tree Crowns")
        def extract_trees(tree):
        # for tree in tqdm(set(rawdata["Tree_Crown_ID"])):

            ##Find tree in image
            locations = np.where(treecrowns == tree)
            xmin = locations[0].min()
            xmax = locations[0].max()
            ymin = locations[1].min()
            ymax = locations[1].max()

            ##Find boundary
            #Take tree from boundary
            boundary = treecrowns[xmin:xmax, ymin:ymax] != tree
            values = img[xmin:xmax, ymin:ymax]
            values[boundary] = 0

            ##Finding largest shape
            # shapes.append(boundary.shape)

            ##Expand Mask dynamically
            Max = (340, 320)
            bshape = boundary.shape
            xchange = Max[0] - bshape[0]
            ychange = Max[1] - bshape[1]
            expval = np.pad(values, ((0, xchange),(0,ychange), (0,0)), 'constant', constant_values=0)
            # expval = np.concatenate((values, np.zeros(xchange, ychange)),-1)

            ##Swap channels back to BGR so cv2.imwrite saves correct colors
            Values = cv2.cvtColor(expval, cv2.COLOR_RGB2BGR)

            ##Saving file if does not exist
            newfile = "../Data/LongTermStudy/CNN/ExtractedTrees/"+format(tree, '03d')+"_"+date+"_"+str(format(doy, '03d'))+".png"
            if not os.path.isfile(newfile):
                cv2.imwrite(newfile, Values)
        [extract_trees(tree) for tree in tqdm(set(rawdata["Tree_Crown_ID"]))]
Example #14
def series_image(_experiment, _series_id, _fiber_channel=True):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)

    if _fiber_channel:
        return np.array([
            np.array([
                _z[IMAGE_FIBER_CHANNEL_INDEX]
                for _z in _series_image[_time_frame]
            ]) for _time_frame in range(_series_image.shape[0])
        ])
    else:
        return _series_image
Example #15
    def load_mask_one_layer(self, image_id, relabel=False):
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])
        if mask.ndim > 2:
            mask = mask[:, :, 0]
        if relabel:
            mask_tmp = np.zeros((mask.shape[0], mask.shape[1]))
            running = 1
            for i in np.unique(mask):
                if i > 0:
                    mask_tmp = mask_tmp + running * (mask == i)
                    running = running + 1
            mask = mask_tmp.astype(float)
        return mask
Example #16
    def __getitem__(self, index):
        patch_dir, label, name = self.images[index]
        image = tifffile.imread(os.path.join(patch_dir, 'image.tif'))

        out = {'name': name, 'label': torch.tensor(label).long()}
        if self.is_segmentation:
            mask = self.load_mask(patch_dir)
            transformed = self.transform(image=image, mask=mask)
            out['img'] = transformed['image']
            out['mask'] = transformed['mask']
        else:
            out['img'] = self.transform(image=image)['image']
        return out
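Example #17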
def try_image_to_kitti():
    # loading data
    directory = r'D:\output-datasets\offroad-14\1'
    base_name = '000084'
    rgb_file = os.path.join(directory, '{}.jpg'.format(base_name))
    depth_file = os.path.join(directory, '{}-depth.tiff'.format(base_name))
    stencil_file = os.path.join(directory, '{}-stencil.png'.format(base_name))
    json_file = os.path.join(directory, '{}.json'.format(base_name))
    rgb = np.array(Image.open(rgb_file))
    depth = tifffile.imread(depth_file)
    stencil = np.array(Image.open(stencil_file))
    with open(json_file) as f:
        data = json.load(f)

    # creating pointcloud for original data
    csv_name = base_name + '-orig'
    vecs, _ = points_to_homo(data, depth, tresholding=False)
    vecs_p = ndc_to_view(vecs, data['proj_matrix'])
    vecs_p_world = view_to_world(vecs_p, np.array(data['view_matrix']))
    a = np.asarray(vecs_p_world[0:3, :].T)
    np.savetxt(os.path.join('kitti-format', "points-{}.csv".format(csv_name)), a, delimiter=",")

    # whole gta to kitti transformation
    rgb, depth, stencil = image_gta_to_kitti(rgb, depth, stencil, data['width'], data['height'], data['camera_fov'])
    data['proj_matrix'] = get_kitti_proj_matrix(np.array(data['proj_matrix'])).tolist()
    data['width'], data['height'] = get_kitti_img_size()

    # saving new images
    Image.fromarray(rgb).convert(mode="RGB").save(os.path.join('kitti-format', '{}.jpg'.format(base_name)))
    tifffile.imsave(os.path.join('kitti-format', '{}-depth-orig.tiff'.format(base_name)), depth)
    tifffile.imsave(os.path.join('kitti-format', '{}-depth-lzma.tiff'.format(base_name)), depth, compress='lzma')
    tifffile.imsave(os.path.join('kitti-format', '{}-depth-zip-5.tiff'.format(base_name)), depth, compress=5)
    tifffile.imsave(os.path.join('kitti-format', '{}-depth-zip-9.tiff'.format(base_name)), depth, compress=9)
    tifffile.imsave(os.path.join('kitti-format', '{}-depth-zip-zstd.tiff'.format(base_name)), depth, compress='zstd')
    Image.fromarray(stencil).save(os.path.join('kitti-format', '{}-stencil.jpg'.format(base_name)))
    with open(os.path.join('kitti-format', '{}.json'.format(base_name)), 'w+') as f:
        json.dump(data, f)

    data['view_matrix'] = np.array(data['view_matrix'])

    check_proj_matrices(depth, data)

    # creating pointcloud for kitti format data
    csv_name = base_name + '-kitti'
    vecs, _ = points_to_homo(data, depth, tresholding=False)
    vecs_p = ndc_to_view(vecs, data['proj_matrix'])
    vecs_p_world = view_to_world(vecs_p, np.array(data['view_matrix']))
    a = np.asarray(vecs_p_world[0:3, :].T)
    np.savetxt(os.path.join('kitti-format', "points-{}.csv".format(csv_name)), a, delimiter=",")
Example #18
def loadimage(filename: str) -> np.array:
    # Read image.
    img = tiff.imread(filename)

    # Crop images.
    # img = img[0:30, 80:-100, 80:-100]
    img = img[0:3, 100:200, 100:200]

    # Filter each frame.
    for k in range(img.shape[0]):
        img[k] = ndimage.gaussian_filter(img[k], sigma=1)

    # Normalise to [0, 1].
    img = np.array(img, dtype=float)
    img = (img - img.min()) / (img.max() - img.min())
    return img
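Example #19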
def main():
    _experiment = 'SN41'
    _series_id = 3
    _group = 'static_0_1'
    _x1, _y1, _z1 = 23, 226, 10
    _x2, _y2, _z2 = 228, 226, 10
    _time_frame = 34
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _series_image_by_time_frames = [
        np.array([
            _z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]
        ]) for _time_frame in range(_series_image.shape[0])
    ]
    plt.imshow(_series_image_by_time_frames[_time_frame][30])
    plt.show()
Example #20
    def load_image(self, image_id):
        info = self.image_info[image_id]
        if self.img_postfix == '.jpg':
            img_final = cv2.imread(self.image_path[self.ids[image_id]])
        else:
            img_final = tifffile.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:, :, 0]
        except (IndexError, TypeError):
            pass
        # return img_final / 255.0
        if self.settings.network_info[
                "netinfo"] == 'maskrcnn':  # Mask R-CNN needs an RGB image
            img_new = np.zeros((img_final.shape[0], img_final.shape[1], 3))
            img_new[:, :, 0] = img_new[:, :, 1] = img_new[:, :, 2] = img_final
            img_final = img_new
        return img_final
Example #21
def return_opm_psf(wavelength_um):
    """
    Load pre-generated OPM psf

    TODO: write checks and generate the PSF if it does not exist on disk

    :param wavelength_um: float
        wavelength in um

    :return psf: ndarray
        pre-generated skewed PSF
    """

    wavelength_nm = int(np.round(wavelength_um * 1000, 0))

    psf_path = Path('opm_psf_' + str(wavelength_nm) + '_nm.tif')
    opm_psf = tifffile.imread(psf_path)

    return opm_psf
Example #22
    def process(
        self,
        plane_id,
        path,
        previous_lock,
        self_lock,
        clipping_value,
        threshold_value,
        soma_diameter,
        log_sigma_size,
        n_sds_above_mean_thresh,
    ):
        laplace_gaussian_sigma = log_sigma_size * soma_diameter
        plane = tifffile.imread(path)
        plane = plane.T
        np.clip(plane, 0, clipping_value, out=plane)

        walker = TileWalker(plane, soma_diameter, threshold_value)

        walker.walk_out_of_brain_only()

        thresholded_img = enhance_peaks(
            walker.thresholded_img,
            clipping_value,
            gaussian_sigma=laplace_gaussian_sigma,
        )

        # threshold
        avg = thresholded_img.ravel().mean()
        sd = thresholded_img.ravel().std()

        plane[
            thresholded_img > avg + n_sds_above_mean_thresh * sd
        ] = threshold_value
        tile_mask = walker.good_tiles_mask.astype(np.uint8)

        with previous_lock:
            pass  # wait until the previous plane's lock is released, preserving plane order
        self.ball_filter_q.put((plane_id, plane, tile_mask))
        self.thread_q.put(plane_id)
        self_lock.release()
Example #23
    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.
        """
        info = self.image_info[image_id]
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])
        count = 0
        for i in range(1, int(mask.max()) + 1):
            if ((mask == i).sum() > 0):
                count = count + 1
        # prepare image for net
        #count = int(mask.max())
        mask_new = np.zeros([info['height'], info['width'], count + 1],
                            dtype=np.uint8)  # one more for background
        running = 0
        for i in np.unique(mask):  #range(1, count):
            if ((i > 0) & ((mask == i).sum() > 0)):
                mask_new[:, :, running] = (mask == i)
                running = running + 1
        # Map class names to class IDs.
        class_ids = np.ones(count)

        return mask_new, class_ids.astype(np.int32)
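The per-instance channel stacking in `load_mask` can be seen on a toy label image (a sketch; labels need not be consecutive, which is why the code iterates over np.unique rather than a plain range):

import numpy as np

mask = np.array([[0, 1, 1],
                 [0, 3, 3],
                 [0, 0, 3]])
labels = [i for i in np.unique(mask) if i > 0]  # [1, 3]; label 2 is absent
mask_new = np.zeros(mask.shape + (len(labels),), dtype=np.uint8)
for running, i in enumerate(labels):
    mask_new[:, :, running] = (mask == i)
print(mask_new.shape)  # (3, 3, 2): one binary channel per instance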
Example #24
    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.
        """
        info = self.image_info[image_id]
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])

        if len(np.unique(mask)) > 1:
            count = len(np.unique(mask)) - 1  # one less because of label 0

            mask_new = np.zeros([info['height'], info['width'], count],
                                dtype=np.uint8)  # one channel per instance
            running = 0
            for i in np.unique(mask):  #range(1, count):
                if ((i > 0) & ((mask == i).sum() > 0)):
                    mask_new[:, :, running] = (mask == i)
                    running = running + 1
            # Map class names to class IDs.
            class_ids = np.ones(count)
        else:
            mask_new = np.zeros([info['height'], info['width'], 1],
                                dtype=np.uint8)
            class_ids = np.zeros([1])
        return mask_new, class_ids.astype(np.int32)
Example #25
    colored_membrane = cv2.cvtColor(layers['membrane'], cv2.COLOR_GRAY2BGR)
    cv2.drawContours(colored_membrane,
                     [np.array(line, np.int32) for line in all_revised_lines],
                     -1, (255, 100, 0), 1)

    # draw each length
    for i in range(len(all_revised_lines)):
        cv2.putText(colored_membrane, str(all_widths[i]),
                    (all_revised_lines[i][0][0], all_revised_lines[i][0][1]),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 100, 255), 1, cv2.LINE_AA,
                    False)

    show(colored_membrane)
    # show(testing)


def proc_layers(image):
    return {'membrane': image[0], 'edge': image[2]}


if __name__ == '__main__':
    files = list(glob.glob(FOLDER + os.path.sep + '*.tiff'))
    for file in files:
        name = os.path.basename(file)
        print('Testing on file %s' % name)

        # open image
        image = tifffile.imread(file)
        layers = proc_layers(image)
        simplify_membrane_edge(layers)
Example #26
def test_tiff_io(tmpdir, layer):
    folder = str(tmpdir)
    dest_path = os.path.join(folder, "layer.tiff")
    tifffile.imsave(dest_path, layer)
    reloaded = tifffile.imread(dest_path)
    assert (reloaded == layer).all()
Example #27
def test_cube_extraction(tmpdir, depth=20):
    tmpdir = str(tmpdir)
    args = CubeExtractArgs(tmpdir)

    planes_paths = {}
    planes_paths[0] = get_sorted_file_paths(signal_data_dir,
                                            file_extension="tif")
    planes_paths[1] = get_sorted_file_paths(background_data_dir,
                                            file_extension="tif")

    extract_cubes.main(
        get_cells(args.paths.cells_file_path),
        args.paths.tmp__cubes_output_dir,
        planes_paths,
        args.cube_depth,
        args.cube_width,
        args.cube_height,
        args.voxel_sizes,
        args.network_voxel_sizes,
        args.max_ram,
        args.n_free_cpus,
        args.save_empty_cubes,
    )

    validation_cubes = load_cubes_in_dir(validate_cubes_dir)
    test_cubes = load_cubes_in_dir(tmpdir)

    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes[idx] == test_cube).all()

    delete_directory_contents(tmpdir)

    # test cube scaling
    args.voxel_sizes = [7.25, 2, 2]
    args.x_pixel_um = 2
    args.y_pixel_um = 2
    args.z_pixel_um = 7.25

    extract_cubes.main(
        get_cells(args.paths.cells_file_path),
        args.paths.tmp__cubes_output_dir,
        planes_paths,
        args.cube_depth,
        args.cube_width,
        args.cube_height,
        args.voxel_sizes,
        args.network_voxel_sizes,
        args.max_ram,
        args.n_free_cpus,
        args.save_empty_cubes,
    )

    validation_cubes_scale = load_cubes_in_dir(validate_cubes_scale_dir)
    test_cubes = load_cubes_in_dir(tmpdir)
    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes_scale[idx] == test_cube).all()

    #  test edge of data errors
    cell = Cell("x0y0z10", 2)
    plane_paths = os.listdir(signal_data_dir)
    first_plane = tifffile.imread(os.path.join(signal_data_dir,
                                               plane_paths[0]))
    stack_shape = first_plane.shape + (depth, )
    stacks = {}
    stacks[0] = np.zeros(stack_shape, dtype=np.uint16)
    stacks[0][:, :, 0] = first_plane

    for plane in range(1, depth):
        im_path = os.path.join(signal_data_dir, plane_paths[plane])
        stacks[0][:, :, plane] = tifffile.imread(im_path)

    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    cell = Cell("x2500y2500z10", 2)
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for a specific cube
    stacks[0] = stacks[0][:, :, 1:]
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for any cube to be extracted at all.
    delete_directory_contents(tmpdir)
    # args.z_pixel_um = 0.1
    args.voxel_sizes[0] = 0.1

    with pytest.raises(extract_cubes.StackSizeError):
        extract_cubes.main(
            get_cells(args.paths.cells_file_path),
            args.paths.tmp__cubes_output_dir,
            planes_paths,
            args.cube_depth,
            args.cube_width,
            args.cube_height,
            args.voxel_sizes,
            args.network_voxel_sizes,
            args.max_ram,
            args.n_free_cpus,
            args.save_empty_cubes,
        )
Example #28
    def __init__(self, paths, fn, fn_mapping, has_alpha):
        super().__init__(paths, fn, fn_mapping, has_alpha)
        # print(Path(self.paths['images']).joinpath(self.fn).resolve())
        # self.im = imread(os.path.join(self.paths['images'], self.fn), mode='RGB')
        self.im = tifffile.imread(os.path.join(self.paths['images'], self.fn))
        self.im = np.dstack((self.im, self.im, self.im))
Example #29
    def __get_image_size(self):
        self.image_z_size = len(self.signal_planes)
        first_plane = tifffile.imread(self.signal_planes[0])
        self.image_height, self.image_width = first_plane.shape
Example #30
    img = np.array(img, dtype=float)
    img = (img - img.min()) / (img.max() - img.min())
    return img


# Figure 1: output frames for one dataset.
gen = 'SqAX3_SqhGFP42_GAP43_TM6B'
dat = 'E2PSB1'
frames = [4, 6, 10, 20, 40, 60, 80, 90]

# Output frames.
datfolder = os.path.join(datapath, os.path.join(gen, dat))
seq = glob.glob('{0}/{1}*.tif'.format(datfolder, dat))
if len(seq) != 1:
    warnings.warn("No sequence found!")
img = tiff.imread(seq)

# Output each frame.
for k in frames:
    if len(img.shape) == 4:
        frame = img[0, k]
    else:
        frame = img[k]
    filepath = os.path.join(os.path.join(resultpath, gen), dat)
    ph.saveimage_nolegend(filepath, '{0}-{1}'.format(dat, k), frame)

# Figure 2: load and output kymograph.
img, name = load_kymo(datfolder, dat)

# Plot and save figures.
ph.saveimage(os.path.join(*[resultpath, gen, dat]), name, img)
Example #31
def main(
    cells,
    cubes_output_dir,
    planes_paths,
    cube_depth,
    cube_width,
    cube_height,
    voxel_sizes,
    network_voxel_sizes,
    max_ram,
    n_free_cpus=4,
    save_empty_cubes=False,
):

    start_time = datetime.now()

    if voxel_sizes[0] != network_voxel_sizes[0]:
        plane_scaling_factor = float(network_voxel_sizes[0]) / float(
            voxel_sizes[0]
        )
        num_planes_needed_for_cube = round(cube_depth * plane_scaling_factor)
    else:
        num_planes_needed_for_cube = cube_depth

    if num_planes_needed_for_cube > len(planes_paths[0]):
        raise StackSizeError(
            "The number of planes provided is not sufficient "
            "for any cubes to be extracted. Please check the "
            "input data"
        )

    first_plane = tifffile.imread(list(planes_paths.values())[0][0])

    planes_shape = first_plane.shape
    brain_depth = len(list(planes_paths.values())[0])

    # TODO: use to assert all centre planes processed
    center_planes = sorted(set(cell.z for cell in cells))

    # REFACTOR: rename (clashes with different meaning of planes_to_read below)
    planes_to_read = np.zeros(brain_depth, dtype=bool)

    if is_even(num_planes_needed_for_cube):
        half_nz = num_planes_needed_for_cube // 2
        # WARNING: not centered because even
        for p in center_planes:
            planes_to_read[p - half_nz : p + half_nz] = 1
    else:
        half_nz = num_planes_needed_for_cube // 2
        # centered
        for p in center_planes:
            planes_to_read[p - half_nz : p + half_nz + 1] = 1

    planes_to_read = np.where(planes_to_read)[0]

    if not planes_to_read.size:
        logging.error(
            f"No planes found, you need at the very least "
            f"{num_planes_needed_for_cube} "
            f"planes to proceed (i.e. cube z size)"
            f"Brain z dimension is {brain_depth}.",
            stack_info=True,
        )
        raise ValueError(
            f"No planes found, you need at the very least "
            f"{num_planes_needed_for_cube} "
            f"planes to proceed (i.e. cube z size)"
            f"Brain z dimension is {brain_depth}."
        )
    # TODO: check whether cube_width and cube_height need to be flipped
    cells_groups = group_cells_by_z(cells)

    # copies=2 is set because at all times there is a plane queue (deque)
    # and an array passed to `Cube`
    ram_per_process = get_ram_requirement_per_process(
        planes_paths[0][0],
        num_planes_needed_for_cube,
        copies=2,
    )
    n_processes = get_num_processes(
        min_free_cpu_cores=n_free_cpus,
        ram_needed_per_process=ram_per_process,
        n_max_processes=len(planes_to_read),
        fraction_free_ram=0.2,
        max_ram_usage=system.memory_in_bytes(max_ram, "GB"),
    )
    # TODO: don't need to extract cubes from all channels if
    #  n_signal_channels>1
    with ProcessPoolExecutor(max_workers=n_processes) as executor:
        n_planes_per_chunk = len(planes_to_read) // n_processes
        for i in range(n_processes):
            start_idx = i * n_planes_per_chunk
            end_idx = (
                start_idx + n_planes_per_chunk + num_planes_needed_for_cube - 1
            )
            if end_idx > planes_to_read[-1]:
                end_idx = None
            sub_planes_to_read = planes_to_read[start_idx:end_idx]

            executor.submit(
                save_cubes,
                cells_groups,
                planes_paths,
                sub_planes_to_read,
                planes_shape,
                voxel_sizes,
                network_voxel_sizes,
                num_planes_for_cube=num_planes_needed_for_cube,
                cube_width=cube_width,
                cube_height=cube_height,
                cube_depth=cube_depth,
                thread_id=i,
                output_dir=cubes_output_dir,
                save_empty_cubes=save_empty_cubes,
            )

    total_cubes = system.get_number_of_files_in_dir(cubes_output_dir)
    time_taken = datetime.now() - start_time
    logging.info(
        "All cubes ({}) extracted in: {}".format(total_cubes, time_taken)
    )
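The chunking at the end of `main` overlaps consecutive chunks by num_planes_needed_for_cube - 1 planes, so a cube centred near a chunk boundary still has a full stack available; a minimal sketch with made-up sizes:

planes_to_read = list(range(100))
n_processes = 4
num_planes_needed_for_cube = 20
n_planes_per_chunk = len(planes_to_read) // n_processes  # 25

for i in range(n_processes):
    start_idx = i * n_planes_per_chunk
    end_idx = start_idx + n_planes_per_chunk + num_planes_needed_for_cube - 1
    if end_idx > planes_to_read[-1]:
        end_idx = None  # last chunk simply runs to the end
    chunk = planes_to_read[start_idx:end_idx]
    print(i, chunk[0], chunk[-1])  # 0..43, 25..68, 50..93, 75..99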
Example #32
rings_per_frame = {}
for t in tracks:
    for n,x,y in t:
        # if n != 5:
        #     continue
        if n not in rings_per_frame:
            rings_per_frame[n] = [(x,y)]
        else:
            rings_per_frame[n].append((x,y))


for f in sorted(rings_per_frame.keys()):

    img_path = '../data/raw/sample_b'
    img_file_name = '{:05d}.tif'.format(int(f))
    img_data = tif.imread(os.path.join(img_path, img_file_name))
    centers = rings_per_frame[f]

    for c in centers:

        cx, cy = map(int, c)

        for xx, yy, est_r in rings[int(f)]:
            if abs(xx-cx) < 5 and abs(yy-cy) < 5:
                break

        plist = []
        for _x in range(cx-1, cx+2):
            for _y in range(cy-1, cy+2):
                _, r, peak = radial_profile_simple(img_data, (_x,_y), est_r)
                plist.append([r, peak, _x, _y])
Example #33
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 10 03:29:03 2014

@author: Indranil
"""
from __future__ import print_function, division
import numpy as np
import tifffile.tifffile as tf
import matplotlib.pyplot as plt

image = tf.imread('test.tif')

print(type(image))
print(image.shape)
print(image.dtype)
for i in range(3):
    print(np.max(image[:,:,i]))
    print(np.min(image[:,:,i]))


plt.imshow(image[:,:,1])
plt.show()