def create_texture_slicer(texture, mask, slice_index, value_range=None,
                          orientation='axial', opacity=1.0, offset=0.5,
                          interpolation='nearest'):
    """
    Create a texture displayed behind the fODF. The texture is applied on a
    plane with a given offset for the fODF grid.
    """
    affine = _get_affine_for_texture(orientation, offset)

    # Keep only the voxels inside the mask; everything else renders as 0.
    if mask is None:
        data = texture
    else:
        data = np.zeros_like(texture)
        data[mask] = texture[mask]

    tex_actor = actor.slicer(data, affine=affine, value_range=value_range,
                             opacity=opacity, interpolation=interpolation)
    set_display_extent(tex_actor, orientation, texture.shape, slice_index)
    return tex_actor
def create_texture_slicer(texture, orientation, slice_index, mask=None,
                          value_range=None, opacity=1.0, offset=0.5,
                          interpolation='nearest'):
    """
    Create a texture displayed behind the fODF. The texture is applied on a
    plane with a given offset for the fODF grid.

    Parameters
    ----------
    texture : np.ndarray (3d or 4d)
        Texture image. Can be 3d for scalar data of 4d for RGB data, in which
        case the values must be between 0 and 255.
    orientation : str
        Name of the axis to visualize. Choices are axial, coronal and
        sagittal.
    slice_index : int
        Index of the slice to visualize along the chosen orientation.
    mask : np.ndarray, optional
        Only the data inside the mask will be displayed. Defaults to None.
    value_range : tuple (2,), optional
        The range of values mapped to range [0, 1] for the texture image.
        If None, it equals to (bg.min(), bg.max()). Defaults to None.
    opacity : float, optional
        The opacity of the texture image. Opacity of 0.0 means transparent
        and 1.0 is completely visible. Defaults to 1.0.
    offset : float, optional
        The offset of the texture image. Defaults to 0.5.
    interpolation : str, optional
        Interpolation mode for the texture image. Choices are nearest or
        linear. Defaults to nearest.

    Returns
    -------
    slicer_actor : actor.slicer
        Fury object containing the texture information.
    """
    affine = _get_affine_for_texture(orientation, offset)

    if mask is not None:
        # Work on a copy: the original version zeroed the caller's array
        # in place, silently corrupting the input for later reuse.
        texture = texture.copy()
        texture[mask == 0] = 0

    if value_range:
        lo, hi = value_range
        # Map [lo, hi] onto [0, 255]. The original divided by hi alone,
        # which is only correct when lo == 0.
        texture = np.clip((texture - lo) / (hi - lo) * 255, 0, 255)

    slicer_actor = actor.slicer(texture, affine=affine, opacity=opacity,
                                interpolation=interpolation)
    set_display_extent(slicer_actor, orientation, texture.shape, slice_index)
    return slicer_actor
def create_texture_slicer(texture, value_range=None, orientation='axial',
                          opacity=1.0, offset=0.5, interpolation='nearest'):
    """
    Create a texture displayed behind the fODF. The texture is applied on a
    plane with a given offset for the fODF grid.
    """
    # Plane placement for the chosen viewing axis, shifted by `offset`.
    tex_affine = _get_affine_for_texture(orientation, offset)

    tex_actor = actor.slicer(texture,
                             affine=tex_affine,
                             value_range=value_range,
                             opacity=opacity,
                             interpolation=interpolation)
    set_display_extent(tex_actor, orientation, texture.shape)
    return tex_actor
def test_slicer(verbose=False):
    """Exercise actor.slicer: display, extents, RGB input, LUT, copy().

    Renders offscreen snapshots and checks that exactly one object is
    detected in each, plus error handling for invalid input shapes.
    """
    scene = window.Scene()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine, value_range=[data.min(), data.max()])
    slicer.display(None, None, 25)
    scene.add(slicer)
    scene.reset_camera()
    scene.reset_clipping_range()
    # window.show(scene)

    # copy pixels in numpy array directly
    arr = window.snapshot(scene, 'test_slicer.png', offscreen=True)

    if verbose:
        print(arr.sum())
        print(np.sum(arr == 0))
        print(np.sum(arr > 0))
        print(arr.shape)
        print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can cut directly a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    scene.ResetCamera()

    scene.add(slicer)

    # save pixels in png file not a numpy array
    with InTemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        window.snapshot(scene, fname, offscreen=True)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    # Test Errors: 4d non-RGB and 1d inputs must be rejected.
    data_4d = (255 * np.random.rand(50, 50, 50, 50))
    npt.assert_raises(ValueError, actor.slicer, data_4d)
    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    scene.clear()

    # 4d input with a last axis of size 3 is treated as RGB.
    rgb = np.zeros((30, 30, 30, 3), dtype='f8')
    rgb[..., 0] = 255
    rgb_actor = actor.slicer(rgb)

    scene.add(rgb_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    # Custom lookup table; also check that copy() carries over opacity
    # and picker tolerance.
    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    scene.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)

    slicer_lut.opacity(0.5)
    slicer_lut.tolerance(0.03)
    slicer_lut2 = slicer_lut.copy()
    npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
    npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
    slicer_lut2.opacity(1)
    slicer_lut2.tolerance(0.025)
    slicer_lut2.display(None, None, 10)
    scene.add(slicer_lut2)

    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    scene.clear()

    # Anisotropic affine: shape reported by the actor must match the data.
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)

    scene.add(slicer)
    scene.reset_camera()
    scene.reset_clipping_range()

    arr = window.snapshot(scene, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)

    slicer2 = slicer.copy()
    npt.assert_equal(slicer2.shape, slicer.shape)
def test_odf_slicer(interactive=False):
    """Exercise actor.odf_slicer with a memory-mapped constant ODF field.

    Checks object counts in snapshots, masking behavior, display() sweeps
    along two axes, and that the memmap/tempfile is cleaned up at the end.
    """
    sphere = get_sphere('symmetric362')

    # One ODF coefficient per sphere vertex, on an 11^3 grid.
    shape = (11, 11, 11, sphere.vertices.shape[0])

    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    print(fid)
    print(fname)

    odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)

    odfs[:] = 1
    affine = np.eye(4)
    renderer = window.Renderer()

    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0

    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs, affine,
                                 mask=mask, sphere=sphere, scale=.25,
                                 colormap='plasma')
    # FA-like scalar volume with bright borders, used as a reference slice.
    fa = 0. * np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, K = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    # One glyph per voxel of the 11x11 slice.
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    # Single-voxel mask with norm disabled and a global colormap.
    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)

    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))

    # Sweep the display slice along the first two axes.
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)

    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)

    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    # Release the memmap before removing its backing temp file.
    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)
    os.remove(fname)
def test_slicer():
    """Exercise actor.slicer: display, extents, RGB input, LUT, and reslice.

    Older variant using window.renderer(); renders offscreen snapshots and
    checks each contains exactly one detected object.
    """
    renderer = window.renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    slicer.display(None, None, 25)
    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # copy pixels in numpy array directly
    arr = window.snapshot(renderer, 'test_slicer.png', offscreen=True)

    # Debug output (scipy version plus snapshot statistics).
    import scipy
    print(scipy.__version__)
    print(scipy.__file__)

    print(arr.sum())
    print(np.sum(arr == 0))
    print(np.sum(arr > 0))
    print(arr.shape)
    print(arr.dtype)

    report = window.analyze_snapshot(arr, find_objects=True)

    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can cut directly a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    renderer.ResetCamera()

    renderer.add(slicer)

    # save pixels in png file not a numpy array
    with InTemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        window.snapshot(renderer, fname, offscreen=True)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    # 1d input must be rejected.
    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    # 4d input with a last axis of size 3 is treated as RGB.
    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)

    renderer.add(rgb_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    # Custom lookup table; copy() must carry opacity and picker tolerance.
    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)

    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)
    slicer_lut.opacity(0.5)
    slicer_lut.tolerance(0.03)
    slicer_lut2 = slicer_lut.copy()
    npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
    npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
    slicer_lut2.opacity(1)
    slicer_lut2.tolerance(0.025)
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)

    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    renderer.clear()

    # Anisotropic affine: reported shape must match the data shape.
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)

    renderer.clear()

    # After reslicing to isotropic voxels, the slicer shape should scale
    # by the original zooms.
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    from dipy.align.reslice import reslice

    data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
                             new_zooms=(1, 1, 1))

    slicer = actor.slicer(data2, affine2, interpolation='linear')
    slicer.display(None, None, 25)

    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    # window.show(renderer, reset_camera=False)
    arr = window.snapshot(renderer, offscreen=True)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
                           np.array(slicer.shape))
def plot_peak_slice(odf_4d, sphere, background_data, out_file, axis, slicenum,
                    mask_data, tile_size=1200, normalize_peaks=True):
    """Render one slice of peak directions over a background image to a file.

    Extracts slice `slicenum` along `axis` from the 4d ODF volume, computes
    up to 3 peaks per voxel within the mask, and records a `tile_size` x
    `tile_size` snapshot of the peak glyphs over the background slice.

    Parameters are assumed to satisfy: odf_4d is (X, Y, Z, n_coeffs),
    background_data and mask_data are (X, Y, Z) — TODO confirm with callers.
    """
    from fury import actor, window

    # Camera up-vector for sagittal, coronal and axial views respectively.
    view_up = [(0., 0., 1.), (0., 0., 1.), (0., -1., 0.)]

    # Make a slice mask to reduce memory
    new_shape = list(odf_4d.shape)
    new_shape[axis] = 1
    image_shape = new_shape[:3]
    midpoint = (new_shape[0] / 2., new_shape[1] / 2.,
                new_shape[2] / 2.)

    # Extract the requested slice, keeping a singleton dimension on `axis`
    # so downstream actors still see 3d/4d arrays.
    if axis == 0:
        odf_slice = odf_4d[slicenum, :, :, :].reshape(new_shape)
        image_slice = background_data[slicenum, :, :].reshape(image_shape)
        mask_slice = mask_data[slicenum, :, :].reshape(image_shape)
        camera_dist = max(midpoint[1], midpoint[2]) * np.pi
    elif axis == 1:
        odf_slice = odf_4d[:, slicenum, :, :].reshape(new_shape)
        image_slice = background_data[:, slicenum, :].reshape(image_shape)
        mask_slice = mask_data[:, slicenum, :].reshape(image_shape)
        camera_dist = max(midpoint[0], midpoint[2]) * np.pi
    elif axis == 2:
        odf_slice = odf_4d[:, :, slicenum, :].reshape(new_shape)
        image_slice = background_data[:, :, slicenum].reshape(image_shape)
        mask_slice = mask_data[:, :, slicenum].reshape(image_shape)
        camera_dist = max(midpoint[0], midpoint[1]) * np.pi

    # Camera sits at slice midpoint, pulled back along the viewing axis.
    position = list(midpoint)
    position[axis] += camera_dist

    # Find the actual peaks
    peak_dirs, peak_values = peaks_from_odfs(odf_slice, sphere,
                                             relative_peak_threshold=.1,
                                             min_separation_angle=15,
                                             mask=mask_slice,
                                             normalize_peaks=normalize_peaks,
                                             npeaks=3)
    if normalize_peaks:
        # Rescale so the longest peak glyph has length pi.
        peak_values = peak_values / peak_values.max() * np.pi
    peak_actor = actor.peak_slicer(peak_dirs, peak_values, colors=None)
    image_actor = actor.slicer(image_slice, opacity=0.6,
                               interpolation='nearest')
    image_size = (tile_size, tile_size)
    scene = window.Scene()
    scene.add(image_actor)
    scene.add(peak_actor)

    # Show the full extent of the (single-slice) volume.
    xfov_min, xfov_max = 0, new_shape[0] - 1
    yfov_min, yfov_max = 0, new_shape[1] - 1
    zfov_min, zfov_max = 0, new_shape[2] - 1
    peak_actor.display_extent(xfov_min, xfov_max, yfov_min,
                              yfov_max, zfov_min, zfov_max)
    image_actor.display_extent(xfov_min, xfov_max, yfov_min,
                               yfov_max, zfov_min, zfov_max)
    scene.set_camera(focal_point=tuple(midpoint),
                     position=tuple(position),
                     view_up=view_up[axis])
    window.record(scene, out_path=out_file, reset_camera=False,
                  size=image_size)
    scene.clear()
def plot_an_odf_slice(odf_4d, full_sphere, background_data, tile_size,
                      filename, centroid, axis, camera_distance,
                      subtract_iso, mask_image):
    """Render one slice of ODF glyphs over a background image to a file.

    Extracts the slice nearest `centroid[axis]`, mirrors half-sphere ODF
    values onto the full sphere via tiling, and records a snapshot.

    NOTE(review): `mask_image` is accepted but never used in this body;
    the ODF actor is masked by the background slice instead — confirm
    whether that is intentional.
    """
    from fury import actor, window

    # Camera up-vector for sagittal, coronal and axial views respectively.
    view_up = [(0., 0., 1.), (0., 0., 1.), (0., -1., 0.)]

    # Adjust the centroid so it's only a single slice
    slicenum = int(np.round(centroid)[axis])
    centroid[axis] = 0
    position = centroid.copy()
    position[axis] = position[axis] + camera_distance

    # Roll if viewing an axial slice
    roll = 3 if axis == 2 else 0
    position[1] = position[1] - roll

    # Ensure the dimensions reflect that there is only one slice
    new_shape = list(odf_4d.shape)
    new_shape[axis] = 1
    image_shape = new_shape[:3]
    if axis == 0:
        odf_slice = odf_4d[slicenum, :, :, :].reshape(new_shape)
        image_slice = background_data[slicenum, :, :].reshape(image_shape)
    elif axis == 1:
        odf_slice = odf_4d[:, slicenum, :, :].reshape(new_shape)
        image_slice = background_data[:, slicenum, :].reshape(image_shape)
    elif axis == 2:
        odf_slice = odf_4d[:, :, slicenum, :].reshape(new_shape)
        image_slice = background_data[:, :, slicenum].reshape(image_shape)

    # Tile to get the whole ODF
    odf_slice = np.tile(odf_slice, (1, 1, 1, 2))
    if subtract_iso:
        # Remove the isotropic component (per-voxel minimum).
        odf_slice = odf_slice - odf_slice.min(3, keepdims=True)

    # Make graphics objects
    odf_actor = actor.odf_slicer(odf_slice,
                                 sphere=full_sphere,
                                 colormap=None,
                                 scale=0.6,
                                 mask=image_slice)
    image_actor = actor.slicer(image_slice, opacity=0.6,
                               interpolation='nearest')

    image_size = (tile_size, tile_size)
    scene = window.Scene()
    scene.add(image_actor)
    scene.add(odf_actor)

    # Show the full extent of the (single-slice) volume.
    xfov_min, xfov_max = 0, new_shape[0] - 1
    yfov_min, yfov_max = 0, new_shape[1] - 1
    zfov_min, zfov_max = 0, new_shape[2] - 1
    odf_actor.display_extent(xfov_min, xfov_max, yfov_min,
                             yfov_max, zfov_min, zfov_max)
    image_actor.display_extent(xfov_min, xfov_max, yfov_min,
                               yfov_max, zfov_min, zfov_max)
    scene.set_camera(focal_point=tuple(centroid),
                     position=tuple(position),
                     view_up=view_up[axis])
    window.record(scene, out_path=filename, reset_camera=False,
                  size=image_size)
    scene.clear()
def screenshot_tracking(tracking, t1, directory="."):
    """
    Compute 3 view screenshot with streamlines on T1.

    For each of the three axes, keeps up to 10000 streamlines that cross
    the central slice (trimmed to a few points around the crossing) and
    renders them over the normalized T1 slice; a fourth, full-tractogram
    streamtube view is stacked below the three slice views.

    Parameters
    ----------
    tracking : string
        tractogram filename.
    t1 : string
        t1 filename.
    directory : string
        Directory to save the mosaic.

    Returns
    -------
    name : string
        Path of the mosaic
    """
    # Lazy-load the tractogram (second arg True — presumably lazy mode,
    # TODO confirm against nibabel.streamlines.load signature).
    tractogram = nib.streamlines.load(tracking, True).tractogram
    t1 = nib.load(t1)
    t1_data = t1.get_data()

    slice_name = ['sagittal', 'coronal', 'axial']
    # One slice index per view; the sagittal slice is shifted by +5 voxels.
    img_center = [(int(t1_data.shape[0] / 2) + 5, None, None),
                  (None, int(t1_data.shape[1] / 2), None),
                  (None, None, int(t1_data.shape[2] / 2))]
    center = [(330, 90, 60), (70, 330, 60), (70, 90, 400)]
    viewup = [(0, 0, -1), (0, 0, -1), (0, -1, 0)]
    size = (1920, 1080)

    image = np.array([])
    for i, _axis in enumerate(slice_name):
        streamlines = []
        it = 0
        slice_idx = img_center[i][i]
        for streamline in tractogram:
            # Cap the number of kept streamlines for speed/memory.
            if it > 10000:
                break
            stream = streamline.streamline
            if slice_idx in np.array(stream, dtype=int)[:, i]:
                it += 1
                # Keep only a short window of points around the first
                # crossing of the slice.
                idx = np.where(np.array(stream, dtype=int)[:, i] ==
                               slice_idx)[0][0]
                lower = idx - 2
                if lower < 0:
                    lower = 0
                upper = idx + 2
                if upper > len(stream) - 1:
                    upper = len(stream) - 1
                streamlines.append(stream[lower:upper])

        ren = window.Renderer()
        streamline_actor = actor.line(streamlines, linewidth=0.2)
        ren.add(streamline_actor)

        # Normalize the T1 to [0, 255] using the 99th percentile of the
        # nonzero voxels as the upper bound.
        min_val = np.min(t1_data[t1_data > 0])
        max_val = np.percentile(t1_data[t1_data > 0], 99)
        t1_color = np.float32(t1_data - min_val) \
            / np.float32(max_val - min_val) * 255.0
        slice_actor = actor.slicer(t1_color, opacity=0.8,
                                   value_range=(0, 255),
                                   interpolation='nearest')
        ren.add(slice_actor)
        slice_actor.display(img_center[i][0], img_center[i][1],
                            img_center[i][2])

        camera = ren.GetActiveCamera()
        camera.SetViewUp(viewup[i])
        center_cam = streamline_actor.GetCenter()
        camera.SetPosition(center[i])
        camera.SetFocalPoint((center_cam))

        # Stack the three views horizontally.
        img2 = renderer_to_arr(ren, size)
        if image.size == 0:
            image = img2
        else:
            image = np.hstack((image, img2))

    # Fourth view: whole tractogram rendered as streamtubes.
    streamlines = []
    it = 0
    for streamline in tractogram:
        if it > 10000:
            break
        it += 1
        streamlines.append(streamline.streamline)

    ren = window.Renderer()
    streamline_actor = actor.streamtube(streamlines, linewidth=0.2)
    ren.add(streamline_actor)
    camera = ren.GetActiveCamera()
    camera.SetViewUp(0, 0, -1)
    center = streamline_actor.GetCenter()
    camera.SetPosition(center[0], 350, center[2])
    camera.SetFocalPoint(center)
    img2 = renderer_to_arr(ren, (3 * 1920, 1920))
    # Stack the tractogram view below the three slice views.
    image = np.vstack((image, img2))

    imgs_comb = Image.fromarray(image)
    imgs_comb = imgs_comb.resize((3 * 1920, 1920 + 1080))
    image_name = os.path.basename(str(tracking)).split(".")[0]
    name = os.path.join(directory, image_name + '.png')
    imgs_comb.save(name)

    return name
def main():
    """Estimate per-label bundle diameters and optionally render them.

    For each bundle/label-map pair, fits a circle in space per label from
    streamline positions and directions, smooths the estimates, and emits
    a JSON of mean diameter and std per label; optionally shows or saves
    six-view renderings of the fitted tubes.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    # The number of labels maps must be equal to the number of bundles
    tmp = args.in_bundles + args.in_labels
    args.in_labels = args.in_bundles[(len(tmp) // 2):] + args.in_labels
    args.in_bundles = args.in_bundles[0:len(tmp) // 2]
    assert_inputs_exist(parser, args.in_bundles + args.in_labels)
    assert_output_dirs_exist_and_empty(parser, args, [],
                                       optional=args.save_rendering)

    stats = {}
    num_digits_labels = 3
    scene = window.Scene()
    scene.background(tuple(map(int, args.background)))
    for i, filename in enumerate(args.in_bundles):
        sft = load_tractogram_with_reference(parser, args, filename)
        sft.to_vox()
        sft.to_corner()
        img_labels = nib.load(args.in_labels[i])

        # same subject: same header or coregistered subjects: same header
        if not is_header_compatible(sft, args.in_bundles[0]) \
                or not is_header_compatible(img_labels, args.in_bundles[0]):
            parser.error('All headers must be identical.')

        data_labels = img_labels.get_fdata()
        bundle_name, _ = os.path.splitext(os.path.basename(filename))
        # Label 0 is background and is skipped.
        unique_labels = np.unique(data_labels)[1:].astype(int)

        # Empty bundle should at least return a json
        if not len(sft):
            tmp_dict = {}
            for label in unique_labels:
                tmp_dict['{}'.format(label).zfill(num_digits_labels)] \
                    = {'mean': 0.0, 'std': 0.0}
            stats[bundle_name] = {'diameter': tmp_dict}
            continue

        counter = 0
        labels_dict = {label: ([], []) for label in unique_labels}
        # Sample the label value at every streamline point (voxel-corner
        # convention, hence the -0.5 shift).
        pts_labels = map_coordinates(data_labels,
                                     sft.streamlines._data.T - 0.5,
                                     order=0)
        # For each label, all positions and directions are needed to get
        # a tube estimation per label.
        for streamline in sft.streamlines:
            direction = np.gradient(streamline, axis=0).tolist()
            curr_labels = pts_labels[counter:counter +
                                     len(streamline)].tolist()

            # NOTE(review): this `i` shadows the outer bundle index; safe
            # only because the outer loop rebinds `i` each iteration.
            for i, label in enumerate(curr_labels):
                if label > 0:
                    labels_dict[label][0].append(streamline[i])
                    labels_dict[label][1].append(direction[i])

            counter += len(streamline)

        centroid = np.zeros((len(unique_labels), 3))
        radius = np.zeros((len(unique_labels), 1))
        error = np.zeros((len(unique_labels), 1))
        for key in unique_labels:
            key = int(key)
            c, d, e = fit_circle_in_space(labels_dict[key][0],
                                          labels_dict[key][1],
                                          args.fitting_func)
            centroid[key - 1], radius[key - 1], error[key - 1] = c, d, e

        # Spatial smoothing to avoid degenerate estimation
        centroid_smooth = gaussian_filter(centroid, sigma=[1, 0],
                                          mode='nearest')
        # Keep the endpoints unsmoothed.
        centroid_smooth[::len(centroid) - 1] = centroid[::len(centroid) - 1]
        radius = gaussian_filter(radius, sigma=1, mode='nearest')
        error = gaussian_filter(error, sigma=1, mode='nearest')

        tmp_dict = {}
        for label in unique_labels:
            tmp_dict['{}'.format(label).zfill(num_digits_labels)] \
                = {'mean': float(radius[label-1])*2,
                   'std': float(error[label-1])}
        stats[bundle_name] = {'diameter': tmp_dict}

        if args.show_rendering or args.save_rendering:
            tube_actor = create_tube_with_radii(
                centroid_smooth, radius, error,
                wireframe=args.wireframe,
                error_coloring=args.error_coloring)
            scene.add(tube_actor)
            cmap = plt.get_cmap('jet')
            coloring = cmap(pts_labels / np.max(pts_labels))[:, 0:3]
            streamlines_actor = actor.streamtube(sft.streamlines,
                                                 linewidth=args.width,
                                                 opacity=args.opacity,
                                                 colors=coloring)
            scene.add(streamlines_actor)

            # Invisible slicer: presumably kept for picking/extent only —
            # TODO confirm.
            slice_actor = actor.slicer(data_labels, np.eye(4))
            slice_actor.opacity(0.0)
            scene.add(slice_actor)

    # If there's actually streamlines to display
    if args.show_rendering:
        showm = window.ShowManager(scene, reset_camera=True)
        showm.initialize()
        showm.start()
    elif args.save_rendering:
        # Six canonical views saved as separate snapshots.
        scene.reset_camera()
        snapshot(scene, os.path.join(args.save_rendering, 'superior.png'),
                 size=(1920, 1080), offscreen=True)

        scene.pitch(180)
        scene.reset_camera()
        snapshot(scene, os.path.join(args.save_rendering, 'inferior.png'),
                 size=(1920, 1080), offscreen=True)

        scene.pitch(90)
        scene.set_camera(view_up=(0, 0, 1))
        scene.reset_camera()
        snapshot(scene, os.path.join(args.save_rendering, 'posterior.png'),
                 size=(1920, 1080), offscreen=True)

        scene.pitch(180)
        scene.set_camera(view_up=(0, 0, 1))
        scene.reset_camera()
        snapshot(scene, os.path.join(args.save_rendering, 'anterior.png'),
                 size=(1920, 1080), offscreen=True)

        scene.yaw(90)
        scene.reset_camera()
        snapshot(scene, os.path.join(args.save_rendering, 'right.png'),
                 size=(1920, 1080), offscreen=True)

        scene.yaw(180)
        scene.reset_camera()
        snapshot(scene, os.path.join(args.save_rendering, 'left.png'),
                 size=(1920, 1080), offscreen=True)
    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
def main():
    """Render a bundle over an anatomical volume in three views plus glass
    brain projections.

    Produces sagittal/coronal/axial 3d snapshots and glass-brain images,
    with streamline coloring selected by the mutually exclusive
    local/uniform/reference coloring options.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    required = [args.in_bundle, args.in_anat]
    assert_inputs_exist(parser, required, args.target_template)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # Build the six output file paths (3d + glass per axis).
    output_filenames_3d = []
    output_filenames_glass = []
    for axis_name in ['sagittal', 'coronal', 'axial']:
        if args.output_suffix:
            output_filenames_3d.append(
                os.path.join(
                    args.out_dir,
                    '{0}_{1}_3d.png'.format(axis_name, args.output_suffix)))
            output_filenames_glass.append(
                os.path.join(
                    args.out_dir,
                    '{0}_{1}_glass.png'.format(axis_name,
                                               args.output_suffix)))
        else:
            output_filenames_3d.append(
                os.path.join(args.out_dir,
                             '{0}_3d.png'.format(axis_name)))
            output_filenames_glass.append(
                os.path.join(args.out_dir,
                             '{0}_glass.png'.format(axis_name)))

    assert_outputs_exist(parser, args,
                         output_filenames_3d + output_filenames_glass)
    if args.out_dir and not os.path.isdir(args.out_dir):
        os.mkdir(args.out_dir)

    if args.anat_opacity < 0.0 or args.anat_opacity > 1.0:
        parser.error('Opacity must be between 0 and 1')

    if args.uniform_coloring:
        for val in args.uniform_coloring:
            if val < 0 or val > 255:
                parser.error('{0} is not a valid RGB value'.format(val))

    # Get the relevant slices from the template
    if args.target_template:
        mni_space_img = nib.load(args.target_template)
        affine = nib.load(args.target_template).affine
    else:
        # No template: fall back to the subject's own anatomy.
        mni_space_img = nib.load(args.in_anat)
        affine = nib.load(args.in_anat).affine

    x_slice = int(mni_space_img.shape[0] / 2)
    y_slice = int(mni_space_img.shape[1] / 2)
    z_slice = int(mni_space_img.shape[2] / 2)
    slices_choice = (x_slice, y_slice, z_slice)

    subject_data = prepare_data_for_actors(args.in_bundle, args.in_anat,
                                           args.target_template)

    # Create actors from each dataset for Dipy
    sft, reference_data = subject_data
    streamlines = sft.streamlines
    volume_actor = actor.slicer(reference_data,
                                affine=affine,
                                opacity=args.anat_opacity,
                                interpolation='nearest')
    if args.local_coloring:
        # Color by local orientation: |gradient| normalized per point.
        colors = []
        for i in streamlines:
            local_color = np.gradient(i, axis=0)
            local_color = np.abs(local_color)
            local_color = (local_color.T / np.max(local_color, axis=1)).T
            colors.append(local_color)
    elif args.uniform_coloring:
        # Single RGB color, normalized from 0-255 to 0-1.
        colors = (args.uniform_coloring[0] / 255.0,
                  args.uniform_coloring[1] / 255.0,
                  args.uniform_coloring[2] / 255.0)
    elif args.reference_coloring:
        # Color each point by the anatomy value under it, via a colormap.
        sft.to_vox()
        streamlines_vox = sft.get_streamlines_copy()
        sft.to_rasmm()
        colors = []
        normalized_data = reference_data / np.max(reference_data)
        cmap = plt.get_cmap(args.reference_coloring)
        for points in streamlines_vox:
            values = map_coordinates(normalized_data, points.T,
                                     order=1, mode='nearest')
            colors.append(cmap(values)[:, 0:3])
    else:
        colors = None
    streamlines_actor = actor.line(streamlines, colors=colors, linewidth=0.2)

    # Take a snapshot of each dataset, camera settings are fixed for the
    # known template, won't work with another.
    if args.right:
        side_pos = (300, -10, 10)
    else:
        side_pos = (-300, 10, 10)
    display_slices(volume_actor, slices_choice,
                   output_filenames_3d[0], 'sagittal',
                   view_position=tuple([x for x in side_pos]),
                   focal_point=tuple([x for x in (0, -10, 10)]),
                   streamlines_actor=streamlines_actor)
    display_slices(volume_actor, slices_choice,
                   output_filenames_3d[1], 'coronal',
                   view_position=tuple([x for x in (0, -300, 15)]),
                   focal_point=tuple([x for x in (0, 0, 15)]),
                   streamlines_actor=streamlines_actor)
    display_slices(volume_actor, slices_choice,
                   output_filenames_3d[2], 'axial',
                   view_position=tuple([x for x in (0, -15, 350)]),
                   focal_point=tuple([x for x in (0, -15, 0)]),
                   streamlines_actor=streamlines_actor)

    plot_glass_brain(args, sft, mni_space_img, output_filenames_glass)
def main():
    """Build a mosaic image of bundles: one column per bundle, six view
    rows (axial sup/inf, coronal post/ant, sagittal left/right) plus a
    description cell, rendered over the anatomical reference.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.anat_reference])
    assert_outputs_exist(parser, args, [args.output_name])

    output_names = [
        'axial_superior', 'axial_inferior',
        'coronal_posterior', 'coronal_anterior',
        'sagittal_left', 'sagittal_right'
    ]
    list_of_bundles = [f for f in args.inputs]
    # output_dir: where temporary files will be created
    output_dir = os.path.dirname(args.output_name)

    # ------------------------------------------------------------------ #
    # Mosaic, column 0: orientation names and data description
    # ------------------------------------------------------------------ #
    width = args.resolution_of_thumbnails
    height = args.resolution_of_thumbnails
    rows = 6
    cols = len(list_of_bundles)
    text_pos_x = 50
    text_pos_y = 50

    # Creates a new empty image, RGB mode
    mosaic = Image.new('RGB', ((cols + 1) * width, (rows + 1) * height))

    # Prepare draw and font objects to render text
    draw = ImageDraw.Draw(mosaic)
    font = get_font(args)

    # Data of the image used as background
    ref_img = nib.load(args.anat_reference)
    data = ref_img.get_data()
    affine = ref_img.affine
    # Display window around the mean of the nonzero voxels.
    mean, std = data[data > 0].mean(), data[data > 0].std()
    value_range = (mean - 0.5 * std, mean + 1.5 * std)

    # First column with rows description
    draw_column_with_names(draw, output_names, text_pos_x, text_pos_y,
                           height, font)

    # ------------------------------------------------------------------ #
    # Columns with bundles
    # ------------------------------------------------------------------ #
    for idx_bundle, bundle_file in enumerate(list_of_bundles):
        bundle_file_name = os.path.basename(bundle_file)
        bundle_name, _ = os.path.splitext(bundle_file_name)

        # !! It creates a temporary folder to create
        # the images to concatenate in the mosaic !!
        output_bundle_dir = os.path.join(output_dir, bundle_name)
        if not os.path.isdir(output_bundle_dir):
            os.makedirs(output_bundle_dir)

        output_paths = [
            os.path.join(output_bundle_dir,
                         '{}_' +
                         os.path.basename(output_bundle_dir)).format(name)
            for name in output_names
        ]

        # Pixel x-offset of this bundle's column in the mosaic.
        i = (idx_bundle + 1) * width

        if not os.path.isfile(bundle_file):
            print('\nInput file {} doesn\'t exist.'.format(bundle_file))

            number_streamlines = 0

            view_number = 6
            j = height * view_number

            draw_bundle_information(draw, bundle_file_name,
                                    number_streamlines,
                                    i + text_pos_x, j + text_pos_y, font)
        else:
            # Select the streamlines to plot
            bundle_tractogram_file = nib.streamlines.load(bundle_file)
            streamlines = bundle_tractogram_file.streamlines

            tubes = actor.line(streamlines)

            number_streamlines = len(streamlines)

            # Render
            ren = window.Renderer()
            zoom = args.zoom
            opacity = args.opacity_background

            # Structural data
            slice_actor = actor.slicer(data, affine, value_range)
            slice_actor.opacity(opacity)
            ren.add(slice_actor)

            # Streamlines
            ren.add(tubes)
            ren.reset_camera()
            ren.zoom(zoom)
            # Views 0/1: axial superior then inferior (pitch 180).
            view_number = 0
            set_img_in_cell(mosaic, ren, view_number,
                            output_paths[view_number], width, height, i)

            ren.pitch(180)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 1
            set_img_in_cell(mosaic, ren, view_number,
                            output_paths[view_number], width, height, i)

            # Swap to a coronal background slice for views 2/3.
            ren.rm(slice_actor)
            slice_actor2 = slice_actor.copy()
            slice_actor2.display(None, slice_actor2.shape[1] // 2, None)
            slice_actor2.opacity(opacity)
            ren.add(slice_actor2)

            ren.pitch(90)
            ren.set_camera(view_up=(0, 0, 1))
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 2
            set_img_in_cell(mosaic, ren, view_number,
                            output_paths[view_number], width, height, i)

            ren.pitch(180)
            ren.set_camera(view_up=(0, 0, 1))
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 3
            set_img_in_cell(mosaic, ren, view_number,
                            output_paths[view_number], width, height, i)

            # Swap to a sagittal background slice for views 4/5.
            ren.rm(slice_actor2)
            slice_actor3 = slice_actor.copy()
            slice_actor3.display(slice_actor3.shape[0] // 2, None, None)
            slice_actor3.opacity(opacity)
            ren.add(slice_actor3)

            ren.yaw(90)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 4
            set_img_in_cell(mosaic, ren, view_number,
                            output_paths[view_number], width, height, i)

            ren.yaw(180)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 5
            set_img_in_cell(mosaic, ren, view_number,
                            output_paths[view_number], width, height, i)

            # Bottom cell: textual description of the bundle.
            view_number = 6
            j = height * view_number
            draw_bundle_information(draw, bundle_file_name,
                                    number_streamlines,
                                    i + text_pos_x, j + text_pos_y, font)

        # Clean up the per-bundle temporary folder.
        shutil.rmtree(output_bundle_dir)

    # Save image to file
    mosaic.save(args.output_name)
def main():
    """Entry point: snapshot sagittal, coronal and axial views of the
    DWI-derived FA and principal peaks overlaid on a target template,
    for visual registration QA.

    Reads CLI arguments (dwi, bval, bvec, target_template, output_dir,
    output_suffix, shells) and writes one PNG per axis into output_dir.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    required = [args.dwi, args.bval, args.bvec, args.target_template]
    assert_inputs_exist(parser, required)

    output_filenames = []
    for axis_name in ['sagittal', 'coronal', 'axial']:
        if args.output_suffix:
            output_filenames.append(os.path.join(
                args.output_dir,
                '{0}_{1}.png'.format(axis_name, args.output_suffix)))
        else:
            output_filenames.append(os.path.join(
                args.output_dir, '{0}.png'.format(axis_name)))

    assert_outputs_exist(parser, args, output_filenames)
    if args.output_dir and not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)

    # Get the relevant slices from the template. The x slice is offset by
    # 30 voxels worth of physical distance (scaled by the inverse voxel
    # size) so it cuts through a hemisphere rather than the midline.
    target_template_img = nib.load(args.target_template)
    inv_zoom = 1 / float(target_template_img.header.get_zooms()[0])
    x_slice = int(target_template_img.shape[0] / 2 + inv_zoom * 30)
    y_slice = int(target_template_img.shape[1] / 2)
    z_slice = int(target_template_img.shape[2] / 2)
    slices_choice = (x_slice, y_slice, z_slice)

    FA, evals, evecs = prepare_data_for_actors(args.dwi, args.bval, args.bvec,
                                               args.target_template,
                                               slices_choice,
                                               shells=args.shells)

    # Create actors from each dataset for Dipy. Reuse the already-loaded
    # template image instead of re-reading it from disk for each actor.
    template_affine = target_template_img.affine
    volume_actor = actor.slicer(FA,
                                affine=template_affine,
                                opacity=0.3,
                                interpolation='nearest')
    peaks_actor = actor.peak_slicer(evecs,
                                    affine=template_affine,
                                    peaks_values=evals,
                                    colors=None,
                                    linewidth=1)

    # Take a snapshot of each dataset; camera settings are fixed for the
    # known template and won't work with another.
    display_slices(volume_actor, slices_choice, output_filenames[0],
                   'sagittal',
                   view_position=(-125, 10, 10),
                   focal_point=(0, -10, 10),
                   peaks_actor=peaks_actor)
    display_slices(volume_actor, slices_choice, output_filenames[1],
                   'coronal',
                   view_position=(0, 150, 30),
                   focal_point=(0, 0, 30),
                   peaks_actor=peaks_actor)
    display_slices(volume_actor, slices_choice, output_filenames[2],
                   'axial',
                   view_position=(0, 25, 150),
                   focal_point=(0, 25, 0),
                   peaks_actor=peaks_actor)
fetch_bundles_2_subjects() fname_t1 = os.path.join(os.path.expanduser('~'), '.dipy', 'exp_bundles_and_maps', 'bundles_2_subjects', 'subj_1', 't1_warped.nii.gz') img = nib.load(fname_t1) data = img.get_data() affine = img.affine scene = window.Scene() scene.background((0.5, 0.5, 0.5)) mean, std = data[data > 0].mean(), data[data > 0].std() value_range = (mean - 0.5 * std, mean + 1.5 * std) slice_actor = actor.slicer(data, affine, value_range) scene.add(slice_actor) slice_actor2 = slice_actor.copy() slice_actor2.display(slice_actor2.shape[0] // 2, None, None) scene.add(slice_actor2) #scene.reset_camera() #scene.zoom(1.4) showm = window.ShowManager(scene, size=(1000, 1000)) showm.initialize() showm.start() window.record(scene, out_path='slices.png',
############################################################################### # Render slices from T1 with a specific value range # ================================================= # # The T1 has usually a higher range of values than what can be visualized in an # image. We can set the range that we would like to see. mean, std = data[data > 0].mean(), data[data > 0].std() value_range = (mean - 0.5 * std, mean + 1.5 * std) ############################################################################### # The ``slice`` function will read data and resample the data using an affine # transformation matrix. The default behavior of this function is to show the # middle slice of the last dimension of the resampled data. slice_actor = actor.slicer(data, affine, value_range) ############################################################################### # The ``slice_actor`` contains an axial slice. scene.add(slice_actor) ############################################################################### # The same actor can show any different slice from the given data using its # ``display`` function. However, if we want to show multiple slices we need to # copy the actor first. slice_actor2 = slice_actor.copy() ############################################################################### # Now we have a new ``slice_actor`` which displays the middle slice of sagittal
def main():
    """Build a mosaic image of bundles (tractograms or binary masks) rendered
    over a reference volume from six canonical viewpoints.

    Each input bundle gets one column; rows 0-5 hold the six views
    (axial superior/inferior, coronal posterior/anterior, sagittal
    left/right) and row 6 holds the bundle name and element count.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_volume)
    assert_outputs_exist(parser, args, args.out_image)

    output_names = [
        'axial_superior', 'axial_inferior',
        'coronal_posterior', 'coronal_anterior',
        'sagittal_left', 'sagittal_right'
    ]

    # Validate that every bundle shares the reference volume's space before
    # rendering anything.
    for filename in args.in_bundles:
        _, ext = os.path.splitext(filename)
        if ext == '.tck':
            tractogram = load_tractogram_with_reference(parser, args,
                                                        filename)
        else:
            tractogram = filename
        if not is_header_compatible(args.in_volume, tractogram):
            parser.error('{} does not have a compatible header with {}'.format(
                filename, args.in_volume))
        # Delete temporary tractogram
        else:
            del tractogram

    output_dir = os.path.dirname(args.out_image)
    if output_dir:
        assert_output_dirs_exist_and_empty(parser, args, output_dir,
                                           create_dir=True)

    _, extension = os.path.splitext(args.out_image)

    # ----------------------------------------------------------------------- #
    # Mosaic, column 0: orientation names and data description
    # ----------------------------------------------------------------------- #
    width = args.resolution_of_thumbnails
    height = args.resolution_of_thumbnails
    rows = 6
    cols = len(args.in_bundles)
    text_pos_x = 50
    text_pos_y = 50

    # Creates a new empty image, RGB mode; one extra column/row for labels.
    mosaic = Image.new('RGB', ((cols + 1) * width, (rows + 1) * height))

    # Prepare draw and font objects to render text
    draw = ImageDraw.Draw(mosaic)
    font = get_font(args)

    # Data of the volume used as background; value_range windows the
    # intensities around the foreground mean for display.
    ref_img = nib.load(args.in_volume)
    data = ref_img.get_fdata(dtype=np.float32)
    affine = ref_img.affine
    mean, std = data[data > 0].mean(), data[data > 0].std()
    value_range = (mean - 0.5 * std, mean + 1.5 * std)

    # First column with rows description
    draw_column_with_names(draw, output_names, text_pos_x, text_pos_y,
                           height, font)

    # ----------------------------------------------------------------------- #
    # Columns with bundles
    # ----------------------------------------------------------------------- #
    random.seed(args.random_coloring)
    for idx_bundle, bundle_file in enumerate(args.in_bundles):
        bundle_file_name = os.path.basename(bundle_file)
        bundle_name, bundle_ext = split_name_with_nii(bundle_file_name)

        # Horizontal pixel offset of this bundle's column.
        i = (idx_bundle + 1) * width

        if not os.path.isfile(bundle_file):
            print('\nInput file {} doesn\'t exist.'.format(bundle_file))

            number_streamlines = 0

            view_number = 6
            j = height * view_number

            draw_bundle_information(draw, bundle_file_name, number_streamlines,
                                    i + text_pos_x, j + text_pos_y, font)
        else:
            if args.uniform_coloring:
                colors = args.uniform_coloring
            elif args.random_coloring is not None:
                colors = random_rgb()
            # Select the streamlines to plot
            if bundle_ext in ['.tck', '.trk']:
                if (args.random_coloring is None
                        and args.uniform_coloring is None):
                    colors = None
                bundle_tractogram_file = nib.streamlines.load(bundle_file)
                streamlines = bundle_tractogram_file.streamlines
                bundle_actor = actor.line(streamlines, colors)
                nbr_of_elem = len(streamlines)
            # Select the volume to plot
            elif bundle_ext in ['.nii.gz', '.nii']:
                if not args.random_coloring and not args.uniform_coloring:
                    colors = [1.0, 1.0, 1.0]
                bundle_img_file = nib.load(bundle_file)
                roi = get_data_as_mask(bundle_img_file)
                bundle_actor = actor.contour_from_roi(roi,
                                                      bundle_img_file.affine,
                                                      colors)
                nbr_of_elem = np.count_nonzero(roi)
            # NOTE(review): if bundle_ext matches neither branch,
            # bundle_actor/nbr_of_elem are unbound and the code below raises
            # NameError — presumably the CLI restricts extensions; confirm.

            # Render
            ren = window.Scene()
            zoom = args.zoom
            opacity = args.opacity_background

            # Structural data
            slice_actor = actor.slicer(data, affine, value_range)
            slice_actor.opacity(opacity)
            ren.add(slice_actor)

            # Streamlines
            ren.add(bundle_actor)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 0
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            # Flip 180 degrees for the opposite axial view.
            ren.pitch(180)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 1
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            # Swap to a coronal slice for the next two views.
            ren.rm(slice_actor)
            slice_actor2 = slice_actor.copy()
            slice_actor2.display(None, slice_actor2.shape[1] // 2, None)
            slice_actor2.opacity(opacity)
            ren.add(slice_actor2)

            ren.pitch(90)
            ren.set_camera(view_up=(0, 0, 1))
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 2
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            ren.pitch(180)
            ren.set_camera(view_up=(0, 0, 1))
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 3
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            # Swap to a sagittal slice for the last two views.
            ren.rm(slice_actor2)
            slice_actor3 = slice_actor.copy()
            slice_actor3.display(slice_actor3.shape[0] // 2, None, None)
            slice_actor3.opacity(opacity)
            ren.add(slice_actor3)

            ren.yaw(90)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 4
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            ren.yaw(180)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 5
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            view_number = 6
            j = height * view_number
            draw_bundle_information(draw, bundle_file_name, nbr_of_elem,
                                    i + text_pos_x, j + text_pos_y, font)

    # Save image to file
    mosaic.save(args.out_image)
# objects which are currently in world coordinates are transformed back to # native space using the inverse of the affine. if not world_coords: from dipy.tracking.streamline import transform_streamlines streamlines = transform_streamlines(streamlines, np.linalg.inv(affine)) ############################################################################### # Now we create, a ``Renderer`` object and add the streamlines using the # ``line`` function and an image plane using the ``slice`` function. ren = window.Renderer() stream_actor = actor.line(streamlines) if not world_coords: image_actor_z = actor.slicer(data, affine=np.eye(4)) else: image_actor_z = actor.slicer(data, affine) ############################################################################### # We can also change also the opacity of the slicer. slicer_opacity = 0.6 image_actor_z.opacity(slicer_opacity) ############################################################################### # We can add additonal slicers by copying the original and adjusting the # ``display_extent``. image_actor_x = image_actor_z.copy() x_midpoint = int(np.round(shape[0] / 2))
def screenshot_fa_peaks(fa, peaks, directory='.'): """ Compute 3 view screenshot with peaks on FA. Parameters ---------- fa : string FA filename. peaks : string Peak filename. directory : string Directory to save the mosaic. Returns ------- name : string Path of the mosaic """ slice_name = ['sagittal', 'coronal', 'axial'] data = nib.load(fa).get_data() evecs_data = nib.load(peaks).get_data() evecs = np.zeros(data.shape + (1, 3)) evecs[:, :, :, 0, :] = evecs_data[...] middle = [data.shape[0] // 2 + 4, data.shape[1] // 2, data.shape[2] // 2] slice_display = [(middle[0], None, None), (None, middle[1], None), (None, None, middle[2])] concat = [] for j, slice_name in enumerate(slice_name): image_name = os.path.basename(str(peaks)).split(".")[0] name = os.path.join(directory, image_name + '.png') slice_actor = actor.slicer(data, interpolation='nearest', opacity=0.3) peak_actor = actor.peak_slicer(evecs, colors=None) peak_actor.GetProperty().SetLineWidth(2.5) slice_actor.display(slice_display[j][0], slice_display[j][1], slice_display[j][2]) peak_actor.display(slice_display[j][0], slice_display[j][1], slice_display[j][2]) renderer = window.ren() renderer.add(slice_actor) renderer.add(peak_actor) center = slice_actor.GetCenter() pos = None viewup = None if slice_name == "sagittal": pos = (center[0] - 350, center[1], center[2]) viewup = (0, 0, -1) elif slice_name == "coronal": pos = (center[0], center[1] + 350, center[2]) viewup = (0, 0, -1) elif slice_name == "axial": pos = (center[0], center[1], center[2] + 350) viewup = (0, -1, 1) camera = renderer.GetActiveCamera() camera.SetViewUp(viewup) camera.SetPosition(pos) camera.SetFocalPoint(center) img = renderer_to_arr(renderer, (1080, 1080)) if len(concat) == 0: concat = img else: concat = np.hstack((concat, img)) imgs_comb = Image.fromarray(concat) imgs_comb.save(name) return name